blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a0d0361f2f60af0ac21ff945ffa08ca2d43c5ac9 | Python | pabou061/FrameDifference | /FrameDifference.py | UTF-8 | 1,013 | 2.859375 | 3 | [] | no_license | import numpy as np
import cv2
# Open the source clip and emit a new video whose frames are the
# thresholded pixel difference between each frame and the next one.
vid = cv2.VideoCapture("video/park.avi")
if (vid.isOpened()== False):
    print("Error opening video stream or file")
# get all properties (numeric ids are OpenCV CAP_PROP_* constants:
# 7 = frame count, 3 = width, 4 = height, 5 = fps)
count = int(vid.get(7))
width= int(vid.get(3))
height=int(vid.get(4))
framerate= int(vid.get(5))
#create an empty video to save all our new frames
# (fourcc -1 lets OpenCV choose/prompt for a codec; trailing 1 = color)
out = cv2.VideoWriter('result.avi',-1, framerate, (width,height),1)
#loop through all the frames
for frame_no in range(count):
    # seek to frame_no (property 1 = CAP_PROP_POS_FRAMES) so the two
    # reads below always return frames frame_no and frame_no+1
    vid.set(1,frame_no)
    #read current frame
    ret, frame_current = vid.read()
    #read next frame
    ret1,frame_next= vid.read()
    #in case of error (either read failed / end of stream):
    if ret == False or ret1==False:
        break
    else:
        # get the difference in pixels
        diff = cv2.subtract(frame_current,frame_next)
        #create the threshold (binarize: differences above 30 become 255)
        _ ,thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
        #write it in the video
        out.write(thresh)
#release the resources
vid.release()
out.release()
| true |
def is_prime(n):
    """Return True if *n* is a prime number.

    Trial division by odd factors up to sqrt(n).  Explicitly rejects
    n < 2: the original only special-cased n == 1, so 0 and negative
    numbers fell through the loop and were reported as prime.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
def same_digits(a, b):
    """Return True when *a* and *b* are made of exactly the same digits
    (i.e. one is a permutation of the other's decimal representation)."""
    digits_a = sorted(str(a))
    digits_b = sorted(str(b))
    return digits_a == digits_b
| true |
f36d55da1e602a58d3c610334e0c3b30bb013b44 | Python | Santhosh136/DCN-lab-programs | /DCN/3_FullDuplex/client.py | UTF-8 | 863 | 3.28125 | 3 | [] | no_license | import socket
import time
def client_program():
    """Simple interactive TCP client.

    Connects to the configured server, then repeatedly sends the current
    message and prints the server's reply, prompting for the next one.
    Cleaned up: removed the stray no-op literal ``00``, the commented-out
    timing experiment, and the unused accumulator ``s``.
    """
    host = "10.1.24.97"  # server address
    port = 5000          # socket server port number

    client_socket = socket.socket()      # instantiate
    client_socket.connect((host, port))  # connect to the server

    message = 'H'
    i = 0
    # NOTE(review): `i` is never incremented, so this loop only ends when
    # the process is interrupted -- behavior preserved from the original.
    while i < 1000:
        client_socket.send(message.encode())      # send message
        data = client_socket.recv(1024).decode()  # receive response
        print('Received from server: ' + data)    # show in terminal
        message = input(" -> ")                   # again take input
    client_socket.close()  # close the connection
if __name__ == '__main__':
client_program()
| true |
7075af1de64bfbb5164414819dc59346adb781c1 | Python | jchelsy/DiscordBot | /src/bot.py | UTF-8 | 1,084 | 2.84375 | 3 | [] | no_license | import sys
import discord
from src import settings
# Set to remember if the bot is already running (since on_ready can be called more than once)
this = sys.modules[__name__]
this.running = False
######################################################################
def main():
    """Create the Discord client, register its event handlers, and run it
    (blocking) using the token from `settings`."""
    # Initialize the client
    print("Starting up...")
    client = discord.Client()
    # Define event handlers for the client
    # on_ready() may be called multiple times
    # (in the event of a reconnect). Hence, the 'running' flag.
    @client.event
    async def on_ready():
        # Skip re-initialization on reconnects.
        if this.running:
            return
        this.running = True
        # Set the playing status
        if settings.NOW_PLAYING:
            print("Setting 'Now Playing' game...", flush=True)
            await client.change_presence(
                activity=discord.Game(name=settings.NOW_PLAYING))
        print("Logged in!", flush=True)
    # Run the bot (blocks until the client disconnects)
    client.run(settings.BOT_TOKEN)
######################################################################
if __name__ == "__main__":
main()
| true |
776b407969bf994ae6b0e7ca3dd26f410087ef9c | Python | hurenkam/AoC | /2022/Day03/part2.py | UTF-8 | 765 | 3.296875 | 3 | [] | no_license | #!/bin/env python
with open('input.txt','r') as file:
lines = [line.strip() for line in file]
def findBadge(packs):
    """Consume the next three rucksacks from *packs* (in place) and return
    the first item of the first rucksack that appears in the other two."""
    first = packs.pop(0)
    second = packs.pop(0)
    third = packs.pop(0)
    for item in first:
        if item in second and item in third:
            return item
def findAllBadges(packs):
    """Drain *packs* in groups of three, collecting each group's badge."""
    badges = []
    while packs:
        badges.append(findBadge(packs))
    return badges
def determinePriorities(letters):
    """Map each item letter to its puzzle priority (a-z: 1-26, A-Z: 27-52).

    The lookup string is padded with a leading space so that an item's
    priority equals its index in the string.
    """
    priority_table = " abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return [priority_table.index(letter) for letter in letters]
badges = findAllBadges(lines)
priorities = determinePriorities(badges)
print(sum(priorities))
| true |
dcdfe80bb461590282f3bf6b7242d1f0c35097c2 | Python | danieljanes/keras-tfds-example | /src/python/keras-tfds-example/main.py | UTF-8 | 3,750 | 2.78125 | 3 | [] | no_license | from typing import Tuple
import tensorflow as tf
from tensorflow.data import Dataset
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import optimizers
import tensorflow_datasets as tfds
LOG_DIR: str = "logs"
EPOCHS: int = 5
BATCH_SIZE: int = 32
def main() -> None:
    """Assemble a dataset, preprocess it, and train/evaluate the model."""
    # Dataset: Use either ds_real() or ds_random()
    # - ds_mnist: provides a tf.data.Dataset after downloading MNIST
    # - ds_rndm: provides a tf.data.Dataset of the same shape, w/o any download
    ds_train, ds_test, num_classes, m_train, m_test = ds_rndm()
    STEPS_PER_EPOCH: int = int(m_train / BATCH_SIZE)
    # Zero-pad images to make them compatible with the LeNet-5 architecture
    ds_train = ds_train.map(preprocessing)
    ds_test = ds_test.map(preprocessing)
    # Training
    # NOTE(review): tf.train.AdamOptimizer is the TF1-era API; under TF2
    # this would be tf.keras.optimizers.Adam -- confirm the TF version.
    optimizer = tf.train.AdamOptimizer()
    train(ds_train, ds_test, m_train, m_test, num_classes, BATCH_SIZE, STEPS_PER_EPOCH, optimizer)
def train(ds_train, ds_test, m_train, m_test, num_classes, batch_size, steps_per_epoch, optimizer):
    """Compile, fit (with TensorBoard logging), and evaluate the model.

    Note: m_train is accepted but unused; the test set is evaluated as a
    single batch of size m_test.
    """
    ds_train = ds_train.repeat().shuffle(buffer_size=10000).batch(batch_size)
    ds_test = ds_test.batch(m_test)
    # Build model
    model = build_model(num_classes)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())
    # Training (TensorBoard events are written under LOG_DIR)
    tb_callback = callbacks.TensorBoard(LOG_DIR)
    history = model.fit(ds_train, epochs=EPOCHS,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=[tb_callback])
    print(history)
    # Evaluation: one step over the whole (single-batch) test set
    score = model.evaluate(ds_test, steps=1)
    print("Test set loss: ", score[0])
    print("Test set accuracy:", score[1])
def ds_mnist() -> Tuple[Dataset, Dataset, int, int, int]:
    """Load MNIST via TFDS and return (train, test, classes, m_train, m_test)."""
    # Download and extract dataset using TFDS
    (ds_train, ds_test), info = tfds.load(name="mnist",
                                          split=["train", "test"],
                                          as_supervised=True,
                                          with_info=True)
    # Number of classes, number of training/test examples
    num_classes: int = info.features['label'].num_classes
    m_train: int = info.splits['train'].num_examples
    m_test: int = info.splits['test'].num_examples
    return ds_train, ds_test, num_classes, m_train, m_test
def ds_rndm() -> Tuple[Dataset, Dataset, int, int, int]:
    """Return an MNIST-shaped random-noise dataset (no download needed).

    Note: the "test" split is just the first m_test examples taken from
    the training set, so train and test overlap by construction.
    NOTE(review): tf.random_uniform is the TF1-era name (tf.random.uniform
    in TF2) -- confirm the TF version in use.
    """
    # Hardcoded values taken from MNIST
    num_classes = 10
    m_train = 60000
    m_test = 10000
    # Random noise
    ds_image = Dataset.from_tensor_slices((
        tf.random_uniform([m_train, 28, 28, 1], maxval=255, dtype=tf.int32)
    ))
    ds_label = Dataset.from_tensor_slices((
        tf.random_uniform([m_train], maxval=9, dtype=tf.int64)
    ))
    ds_train = Dataset.zip((ds_image, ds_label))
    ds_test = ds_train.take(m_test)
    return ds_train, ds_test, num_classes, m_train, m_test
def preprocessing(x, y):
    """Zero-pad 28x28 images to 32x32 (LeNet-5 input size), scale pixel
    values into [0, 1], and one-hot encode the 10-class labels."""
    padded = tf.image.pad_to_bounding_box(x, 2, 2, 32, 32)
    scaled = tf.cast(padded, tf.float32) / 255
    one_hot = tf.one_hot(y, 10)
    return scaled, one_hot
def build_model(num_classes: int) -> tf.keras.Model:
    """Build a LeNet-5-style CNN for 32x32x1 inputs using the functional API.

    Two tanh conv+average-pool stages, then 120- and 84-unit dense layers
    and a softmax head with *num_classes* outputs.
    """
    inputs = tf.keras.Input(shape=(32, 32, 1))
    x = layers.Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1))(inputs)
    x = layers.Activation('tanh')(x)
    x = layers.AveragePooling2D(strides=(2, 2))(x)
    x = layers.Conv2D(filters=16, kernel_size=(5, 5), strides=(1, 1))(x)
    x = layers.Activation('tanh')(x)
    x = layers.AveragePooling2D(strides=(2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(120, activation='tanh')(x)
    x = layers.Dense(84, activation='tanh')(x)
    outputs = layers.Dense(num_classes, activation='softmax')(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
if __name__ == "__main__":
main()
| true |
3f6571cfab4fbc816d21a21270f2bcdf4e5b60d6 | Python | chase-kusterer/machine_learning_trinkets | /visual_cm.py | UTF-8 | 987 | 3.453125 | 3 | [] | no_license | # required packages
from sklearn.metrics import confusion_matrix # confusion matrix
import seaborn as sns # enhanced data viz
# visual_cm
def visual_cm(true_y, pred_y, labels = None):
    """
    Creates a visualization of a confusion matrix.

    PARAMETERS
    ----------
    true_y : true values for the response variable
    pred_y : predicted values for the response variable
    labels : axis tick labels for the heatmap, default None
    """
    # matplotlib is not imported at module level in this file, so pull it
    # in here -- without this every plt.* call below raises NameError.
    import matplotlib.pyplot as plt

    # declaring a confusion matrix object
    cm = confusion_matrix(y_true = true_y,
                          y_pred = pred_y)

    # heatmap of the raw counts
    sns.heatmap(cm,
                annot = True,
                xticklabels = labels,
                yticklabels = labels,
                cmap = 'Blues',
                fmt = 'g')

    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.title('Confusion Matrix of the Classifier')
    plt.show()
| true |
063728ba9757f1c0c9f703a3c262e152ad214c6a | Python | parhambz/pythonLearn | /python project/project.py | UTF-8 | 13,599 | 2.75 | 3 | [] | no_license | import random
def readFile(name):
    """Return the list of lines in file *name*, or None if it is missing.

    Uses a context manager so the handle is always closed; the original
    opened the file and never closed it.
    """
    try:
        with open(name, "r") as file:
            return file.readlines()
    except FileNotFoundError:
        print("file not found")
def joinLines(xs):
    """Concatenate a list of strings into a single string."""
    return "".join(xs)
def sep(string):
    """Tokenise *string*: split on separator characters, emitting every
    separator except spaces and newlines as its own token.  A trailing
    space is appended so the final pending token is flushed."""
    separators = " ():;=,&|~\n"
    tokens = []
    current = ""
    for ch in string + " ":
        if ch in separators:
            tokens.append(current)
            if ch not in (" ", "\n"):
                tokens.append(ch)
            current = ""
        else:
            current += ch
    return [tok for tok in tokens if tok != ""]
def sepModules(xs):
    """Group a flat token stream into per-module token lists.

    A group starts at each "module" token and is emitted into the result
    when the matching "endmodule" token is seen; tokens outside any
    module/endmodule pair are discarded.
    """
    modules = []
    current = []
    for token in xs:
        if token == "module":
            current = ["module"]
        elif token == "endmodule":
            current = current + ["endmodule"]
            modules.append(current)
        else:
            current = current + [token]
    return modules
def lineOne(xs): # takes one module's token list, builds its object, registers inputs/outputs, returns the object
    """Parse a module header: register a new modu in the global `modules`
    registry, record every declared input/output (outputs also get a wire
    object), and return the new modu instance."""
    modules.addModu(xs[1])
    temp = modules.modus[-1]
    k = 0
    # find the ":" that ends the port-declaration header
    for x in xs:
        if x == ":":
            break
        k = k + 1
    inout = xs[2:k]
    k = 0
    for x in inout:
        if x == "input":
            temp.input(inout[k + 1])
        if x == "output":
            temp.output(inout[k + 1])
            # outputs are also tracked as wires of the module
            a = wire(inout[k + 1], len(temp.wires))
            temp.wires = temp.wires + [a]
        k = k + 1
    return temp
def removeP(ys):#remove () and hoist each bracketed expression onto its own statement
    """For every statement containing a parenthesised group, replace the
    group with a fresh random temporary-wire name and insert a new
    "wire <name> = <group contents>" statement just before it.
    The first statement (index 0) is intentionally skipped."""
    xs=[]
    xs=xs+ys
    k=1
    for x in xs[1:]:
        m=0
        a=-1
        b=1
        # a = index of the first "(" in this statement, or -1 if absent
        for y in x:
            if y=="(":
                a=m
                break
            m=m+1
        m=-1
        # b = negative index of the last ")", or the sentinel 1 if absent
        for y in x[::-1]:
            if y==")":
                b=m
                break
            m=m-1
        if b!=1 and a!=-1:
            name=str(random.randrange(10000))
            p=xs[k][a+1:len(xs[k])+b]
            xs[k]=xs[k][:a]+[name]+xs[k][1+b+len(xs[k]):]
            xs=xs[:k]+[["wire",name,"="]+p]+xs[k:]
            # an extra statement was inserted before position k
            k=k+1
        k=k+1
    return xs
def removeC(string):
    """Strip every "//" line comment (up to, but not including, the
    terminating newline) from *string* and return the result.

    Bug fix: the original never modified *string* inside its while loop,
    so it spun forever as soon as one comment existed, and raised
    NameError (unbound `a`) when none did.  This version removes the
    comments one at a time until no "//" remains, preserving the
    original slice semantics (the newline itself is kept).
    """
    while True:
        start = string.find("//")
        if start == -1:
            return string
        end = string.find("\n", start)
        if end == -1:
            # comment runs to the end of the input: drop the remainder
            return string[:start]
        string = string[:start] + string[end:]
def gnot(ys,mobject):
    """Rewrite negations: for every "~x" token pair, register a helper
    wire named "*x" (with content ["~", x]) on *mobject* and replace the
    pair with the single token "*x" in the statement list."""
    x=[]
    x=x+ys
    xs=x
    k=0
    for x in xs:
        m=0
        for y in x:
            if y=="~":
                w=mobject.addWire("*"+x[m+1])
                w.content(["~",x[m+1]])
                # collapse the "~", "<name>" pair into one "*<name>" token
                xs[k]=xs[k][:m]+["*"+xs[k][m+1]]+xs[k][m+2:]
            m=m+1
        k=k+1
    return xs
def body(xs, mobject):
    """Process a module's token list into statements and attach them,
    plus the derived wires and wire contents, to *mobject*.

    Steps: normalise the header ":" into ";", split on ";", flatten
    parentheses (removeP), rewrite negations (gnot), then record the
    statement list, the wire objects and each wire's expression.
    """
    k = 0
    for x in xs:
        if x == ":":
            xs[k] = ";"
        k = k + 1
    body = []
    temp = []
    # split the token stream into per-statement lists on ";"
    for x in xs:
        if x == ";":
            body = body + [temp]
            temp = []
        else:
            temp = temp + [x]
    body=removeP(body)
    body=gnot(body,mobject)
    mobject.body(body)
    setWires(body, mobject)
    wireContent(body, mobject)
def setWires(xs, mobject):
    """Scan the statement lists in *xs* and register a wire object on
    *mobject* for every "wire" declaration token found.

    Bug fix: the original initialised k = 0 but never incremented it, so
    the declared name was always read from position 1 of the statement;
    correct for "wire" at position 0, wrong anywhere else.  enumerate()
    makes the lookup position-correct in all cases.
    """
    for statement in xs:
        for k, token in enumerate(statement):
            if token == "wire":
                new_wire = wire(statement[k + 1], len(mobject.wires))
                mobject.wires = mobject.wires + [new_wire]
def wireContent(xs, mobject): # give body and module object and set wire contents
    """For every "lhs = rhs..." statement, look up the wire named by the
    token before "=" and store the tokens after "=" as its content."""
    for x in xs:
        k = 0
        for y in x:
            '''if y=="~":
                wireContent=["~"]+[x[k+1]]
                wireName="~"+x[k+1]
                wireKey = mobject.wireNameToKey(wireName)
                wire=mobject.wires[wireKey]
                wire.content(wireContent)'''
            if y == "=":
                # everything to the right of "=" is the wire's expression
                wireContent = x[k + 1:]
                wireName = x[k - 1]
                wireKey = mobject.wireNameToKey(wireName)
                wire = mobject.wires[wireKey]
                wire.content(wireContent)
            k = k + 1
        '''if len(x)!=3:
            for y in x:
                if y=="=":
                    wireContent=[x[k+1]]+[x[k+2]]+[x[k+3]]
                    wireName=x[k-1]
                    wireKey=mobject.wireNameToKey(wireName)
                    wire=mobject.wires[wireKey]
                    wire.content(wireContent)
                k=k+1
        if len(x)==3:
            for y in x:
                if y=="=":
                    wireContent=[x[k+1]]
                    wireName=x[k-1]
                    wireKey=mobject.wireNameToKey(wireName)
                    wire=mobject.wires[wireKey]
                    wire.content(wireContent)
                k = k + 1'''
def createE():
    """Write the syntax-result banner to the global report file and
    return that file object."""
    global errorFile
    error = errorFile
    line = "=" * 125 + "\n"
    error.write(line)
    # "Symtax" typo kept verbatim so the report output is unchanged.
    title = "*" + " " * 55 + "Symtax result" + " " * 55 + "*" + "\n"
    error.write(title)
    error.write(line)
    return error
def graphMaker(mobject):#give an object and return graph for it
    """Build a circuit graph from *mobject*'s statement lists: each
    assignment becomes a gate node with vectors (wires) attached, and
    every "*name" helper wire is expanded into an explicit "~" node."""
    g=graph(mobject.name)
    op=["&","|"]  # NOTE(review): unused
    k=0
    for x in mobject.body:
        m=0
        for y in x :
            if y =="=":
                # 5/6 tokens: binary expression "lhs = a OP b"
                if len(x)==5 or len(x)==6:
                    n=g.addNode(x[m+2])
                    g.addVector(x[m-1],n)
                    g.addVector(x[m +3],n)
                    g.addVector(x[m +1],n)
                # 3/4 tokens: plain assignment "lhs = a"
                # NOTE(review): x[m] here is the "=" token itself -- looks
                # intentional (a pass-through node) but worth confirming.
                if len(x)==3 or len(x)==4:
                    n = g.addNode(x[m])
                    g.addVector(x[m - 1],n)
                    g.addVector(x[m + 1],n)
            m=m+1
        k=k+1
    k=0
    # expand each "*name" negation wire into a "~" node fed by "name"
    for x in g.vectors:
        if x.name[0]=="*":
            temp=x.node[0]
            no=g.addNode("~")
            x.node=x.node[1:]+[no]
            no.vector=no.vector+[x]
            v=g.addVector(x.name[1:],temp)
            v.node=v.node+[no]
            no.vector=no.vector+[v]
        k=k+1
    return g
'''def lineNumber(string):
for x in string:
if x=="\n":
string=string[]+";+"+str(m)+";"+string[]'''
def setE(msg, line):
    """Append one formatted error entry to the global report file."""
    global errorFile
    errorFile.write(f"Error : {msg} line :{line}\n")
def setW(msg):
    """Append one formatted warning entry to the global report file."""
    global errorFile
    errorFile.write(f"Warning : {msg}\n")
def findLine(xs, andis):
    """Placeholder line lookup.

    Line tracking was never finished (the real lookup is still commented
    out in the original), so this returns the string "0" whenever any
    token exists at or after index *andis*, and None otherwise.
    """
    if xs[andis:]:
        return "0"
    return None
def moduleCheckEW1(xs):
    """Syntax-check one module's token list, logging errors/warnings via
    setE/setW into the global report file.

    Checks: starts with "module" / ends with "endmodule", module name
    starts with a-z, "(" follows the name, wire names are well formed,
    and every wire used in the body is declared (and vice versa).
    Returns False on a fatal error, otherwise None; writes "OK" when no
    warnings were raised.
    """
    sep=[" ", "(", ")", ":", ";", "=", ",", "&", "|", "~", "\n","input","output","wire","module","endmodule"]
    o=0  # warning flag: stays 0 only if no "unused wire" warning fired
    global m
    if xs[0]!="module":
        setE("should start with 'module'",findLine(xs,0))
        return False
    if xs[-1]!="endmodule":
        setE("module should end with 'endmodule'",findLine(xs,-1))
    name=xs[1]
    if ord(name[0]) not in [x for x in range(97,123)]:
        setE("module should start with a-z only",findLine(xs,1))
        return False
    def nameChek(xs,name,andis):
        # Validate one wire name: must start a-z, then letters/digits/underscore.
        global errorFile
        if ord(name[0]) not in range(97,123):
            setE("invalid wire name: " + name, findLine(xs, andis))
            return False
        for x in name:
            if ord(x)>122 or ord(x)<65:
                if x=="_":
                    pass
                else:
                    if x in [str(y) for y in range(10)]:
                        pass
                    else:
                        setE("invalid wire name: "+name,findLine(xs,andis))
                        return False
    if xs[2]!="(":
        setE("unexpected-> '"+xs[2]+"'/ expect-> '('",findLine(xs,2))
        return False
    k=0
    '''for x in xs :
        if x==";":
            if xs[k-1]!=")":
                setE("unexpected :"+xs[k-1],findLine(xs,k))
                return False
        k=k+1'''
    k=0
    # validate every declared wire name
    for x in xs:
        if x=="wire":
            nameChek(xs,xs[k+1],k+1)
        k=k+1
    k = 0
    # NOTE(review): `a` is an index into xs[2:] but is later used to
    # slice the full xs -- off by two; confirm intent before relying on it.
    for x in xs[2:]:
        if x == ":":
            a = k
        k = k + 1
    wirelist = []
    for x in xs[a + 1:]:
        if x not in sep:
            wirelist = wirelist + [x]
    for x in wirelist:
        if x not in [y.name for y in m.wires]:
            setE("wire " + x + " not defined", findLine(xs,0))
            return False
    for x in m.wires:
        if x.name not in wirelist:
            # NOTE(review): x is a wire object here, so "wire " + x raises
            # TypeError when an unused wire actually exists (x.name intended?).
            setW("wire " + x + " not used")
            o=2
    if o==0:
        errorFile.write("OK")
def writeGraph(g):
    """Dump the graph *g* into the global report file: one line per
    vector listing its attached nodes, then one line per node listing
    its attached vectors (trailing commas are trimmed)."""
    global errorFile
    res="\n Vectors : \n"
    for x in g.vectors:
        res =res+x.name+" nodes : "
        for y in x.node:
            res=res+str(y.key)+"("+y.type+")"+","
        # drop the trailing comma
        res=res[0:-1]+"\n"
    res=res+" Nodes : \n"
    for x in g.nodes:
        res=res+str(x.key)+"("+x.type+")"+ " Vectors : "
        for y in x.vector:
            res=res+y.name+","
        res=res[0:-1]+"\n"
    errorFile.write(res)
def createR():
    """Append the circuit-graph banner to the global report file and
    return that file object."""
    global errorFile
    error = errorFile
    line = "=" * 125 + "\n"
    error.write("\n")
    error.write(line)
    title = "*" + " " * 55 + "Circuit Graph" + " " * 55 + "*" + "\n"
    error.write(title)
    error.write(line)
    return error
class modu:
    """One parsed Verilog module: its name, key, wires, inputs/outputs,
    and (after parsing) its statement list."""
    def __init__(self, name, key):
        x = ""
        x = x + name
        self.name = x
        x = 0
        x = x + key
        self.key = x
        self.wires = []   # wire objects, indexed by their key
        self.inp = []     # input port names
        self.out = []     # output port names
    def body(self,body):
        # NOTE(review): this rebinds the instance attribute `body` over the
        # method of the same name, so body() can only be called once per
        # instance -- apparently relied upon by the driver script.
        x=[]
        x=x+body
        self.body=x
    def input(self, name):
        # Register one input port name.
        x = ""
        x = x + name
        self.inp += [x]
    def output(self, name):
        # Register one output port name.
        x = []
        x = x + [name]
        self.out = self.out + x
    def wireNameToKey(self, name):
        # Return the key of the wire called *name* (None if absent).
        for x in self.wires:
            if x.name == name:
                return x.key
    def res(self):
        """Expand each output wire's expression by repeatedly substituting
        wire names with their parenthesised contents until no wire name
        remains; returns one token list per output."""
        r = [[x.name] for x in self.wires if x.name in self.out]
        z = [x.name for x in self.wires]
        while True:
            k = 0
            m = 0  # set to 2 whenever a substitution happened this pass
            for a in r:
                l = 0
                for x in a:
                    if x in z:
                        q = self.wireNameToKey(r[k][l])
                        q = self.wires[q]
                        # r[k][l]=q.con
                        r[k] = r[k][:l] + q.con + r[k][l + 1:]
                        m = 2
                    l = l + 1
                k = k + 1
            if m == 0:
                break
        return r
    def addWire(self,wname):
        # Create, register and return a new wire named *wname*.
        w= wire(wname,len(self.wires))
        self.wires=self.wires+[w]
        return w
    def __str__(self):
        # Fully expanded output expressions, each joined into one string.
        result = []
        for x in self.res():
            res = ""
            for y in x:
                res = res + y
            result = result + [res]
        return str(result)
class modus:
    """Registry of modu objects; a module's key is its index in the list."""

    def __init__(self):
        self.modus = []

    def mnameToKey(self, name):
        """Return the key of the module called *name*, or None if absent."""
        for module in self.modus:
            if module.name == name:
                return module.key

    def addModu(self, name):
        """Create a modu named *name*, register it, and return its key."""
        new_key = len(self.modus)
        self.modus.append(modu(name, new_key))
        return new_key
class wire:
    """A named wire with an integer key and a parenthesised expression."""

    def __init__(self, name, key, ng=False):
        self.name = str(name)
        self.key = int(key)
        self.con = []  # expression tokens, set via content()

    def content(self, xs):
        """Store *xs* wrapped in parentheses as this wire's expression."""
        self.con = ["("] + list(xs) + [")"]

    def __str__(self):
        return str(self.con)
class graph:
    """Circuit graph for a module: gate nodes plus named vectors (wires)."""
    def __init__(self,mname):
        x=""
        x=x+mname
        self.name=x
        self.vectors=[]
        self.nodes=[]
    def addNode(self,type):
        # Create a new node of the given operator type and register it.
        temp=node(len(self.nodes),type)
        self.nodes=self.nodes+[temp]
        return temp
    def addVector(self,name,n):
        """Attach node *n* to the vector called *name*, creating the
        vector first if it does not exist yet; returns the vector."""
        if name not in [x.name for x in self.vectors]:
            v = vector(name,n,len(self.vectors))
            n.vector=n.vector+[v]
            self.vectors = self.vectors + [v]
            return v
        # vector already exists: find it and cross-link it with n
        k = 0
        for x in self.vectors:
            if x.name == name:
                break
            k = k + 1
        v = self.vectors[k]
        n.vector = n.vector + [v]
        v.node=v.node+[n]
        return v
class node:
    """Gate node in the circuit graph.

    Only the first character of the operator string is kept as the type;
    key is the node's integer id within the graph.
    """

    def __init__(self, id, type):
        self.type = type[0]
        self.key = int(id)
        self.vector = []  # vectors (wires) attached to this node
class vector:
    """Named edge (wire) in the circuit graph, initially attached to one node."""

    def __init__(self, name, n, key):
        self.name = str(name)
        self.key = int(key)
        self.node = [n]  # nodes this vector connects to
# Driver: read a Verilog-like source file, syntax-check the first module,
# build its circuit graph, and write everything into result.data.
fileName = input("Enter your file name please: ")
modules = modus()
errorFile=open("result.data","w")
createE()
lines = readFile(fileName)
string = joinLines(lines)
string=removeC(string)
code = sep(string)
code = sepModules(code)
# parse the first module's header (globals `modules` and `m` are used
# throughout the checking/graph code)
m = lineOne(code[0])
if moduleCheckEW1(code[0])!=False:
    body(code[0], m)
g=graphMaker(m)
createR()
writeGraph(g)
errorFile.close()
'''print(g.nodes)
#string=lineNumber(string)
print(code)
print(modules.modus)
print(m.out)
print(m.inp)
x = m.wires[1]
# wireContent([["mid1","=","a","|","b",";"]], m)
print(m.wires[0])
print(m.wires[1])
print(m.out)
print(m)'''
| true |
5b23bd73e00f7cb159274ee1fafaa1ded686c243 | Python | sapuri/srandom.com | /main/templatetags/custom_filter.py | UTF-8 | 1,942 | 2.515625 | 3 | [
"MIT"
] | permissive | from django import template
from main.models import Music, Medal, Bad_Count, Extra_Option
register = template.Library()
@register.filter
def get_at_index(list, index):
    """Template filter: index into a sequence with a template variable.

    (The first parameter is named ``list`` to keep the original filter
    signature, even though it shadows the builtin.)
    """
    return list[index]
@register.filter
def join_comma(var, args):
    """Template filter: join two values with a comma, working around the
    single-argument limit of Django template filters."""
    return f"{var},{args}"
@register.filter
def medal_int(medal_list: list[Medal], music: Music) -> int:
    """Template filter: the medal value recorded for *music*.

    Returns 0 when the list is empty, the song has no entry, or the
    stored medal is 12 (which means "not played").
    """
    if not medal_list:
        return 0
    for medal in medal_list:
        if medal.music.id != music.id:
            continue
        value = medal.int()  # 1-11 are real medals, 12 means unplayed
        return value if value != 12 else 0
    return 0
@register.filter
def bad_count_int(bad_count_list: list[Bad_Count], music: Music):
    """Template filter: the bad count recorded for *music*, or '-'."""
    found = next((bc for bc in bad_count_list or [] if bc.music.id == music.id), None)
    return '-' if found is None else found.int()
@register.filter
def bad_count_updated_at(bad_count_list: list[Bad_Count], music: Music):
    """Template filter: when the bad count for *music* was last updated,
    or '-' when there is no record."""
    found = next((bc for bc in bad_count_list or [] if bc.music.id == music.id), None)
    return '-' if found is None else found.updated_at
@register.filter
def is_hard(extra_option_list: list[Extra_Option], music: Music) -> bool:
    """Template filter: whether the extra option recorded for *music* is HARD.

    Bug fix: the original returned nothing (None) when the song had no
    entry in a non-empty list, despite the ``-> bool`` annotation; now it
    returns False explicitly (backward compatible -- None was falsy).
    """
    if not extra_option_list:
        return False
    for extra_option in extra_option_list:
        if extra_option.music.id == music.id:
            return bool(extra_option.is_hard())
    return False
90b5fb215b18c37b714344b3012de14b791c41df | Python | kaushikroychowdhury/Genetic-Variant-Classifications-using-Deep-Learning | /Code/clinvar_conflicting_(ML_MODEL).py | UTF-8 | 1,968 | 3.0625 | 3 | [] | no_license | # Importing relevant libraries
import numpy as np
import tensorflow as tf
# DATA
npz = np.load("clinvar_conflicting_train.npz")
train_inputs = npz["inputs"].astype(np.float)
train_targets = npz["targets"].astype(np.int)
npz = np.load("clinvar_conflicting_validation.npz")
validation_inputs = npz["inputs"].astype(np.float)
validation_targets = npz["targets"].astype(np.int)
npz = np.load("clinvar_conflicting_test.npz")
test_inputs = npz["inputs"].astype(np.float)
test_targets = npz["targets"].astype(np.int)
# Model (Outline, optimizer, loss function, Training )
input_size = 9
output_size = 2
hidden_layer_size = 200
## Outline the model
model = tf.keras.Sequential([
tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
# tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
# tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
# tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
# tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
# tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'),
tf.keras.layers.Dense(output_size, activation = 'softmax')
])
# Optimizer
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss= 'sparse_categorical_crossentropy', metrics=['accuracy'])
# Training
batch_size = 100
max_epoch = 100
early_stopping = tf.keras.callbacks.EarlyStopping(patience=2)
#fit the model
model.fit(train_inputs, train_targets, batch_size= batch_size, epochs= max_epoch,
validation_data=(validation_inputs, validation_targets),
callbacks=[early_stopping], verbose = 2)
### TEST THE MODEl
test_loss , test_accuracy = model.evaluate(test_inputs, test_targets)
print('Test loss : {0:.2f} Test accuracy : {1:.2f}%'.format(test_loss, test_accuracy*100.))
#### Accuracy 77% | true |
137e73d4ffe9c784e381b75eb9586b991b4efc91 | Python | goheea/CSOS-Algorithm | /BOJ/2908/ghwns82.py | UTF-8 | 154 | 2.96875 | 3 | [] | no_license | #풀이 1
num1, num2 = input().split()
# Reverse the digits of both numbers and print the larger reversed value.
print(max(
    int(num1[::-1]),
    int(num2[::-1])
))
# Solution 2 (one-liner alternative, kept commented out)
#print(max(*map(int, input()[::-1].split())))
| true |
3addc276badb87e854d6c3653e15981f4e1b94d5 | Python | Livcrst/PIBIC-2020 | /Algoritmos/Scale-Free.py | UTF-8 | 305 | 2.6875 | 3 | [] | no_license | #Criar Scale Free
import networkx as nx
import matplotlib.pyplot as plt
G = nx.scale_free_graph(100)
nx.draw(G)
plt.show()
#Barabasi network
n=100 #Number of nodes
m=4 #Number of initial links
seed=100
G=nx.barabasi_albert_graph(n, m, seed)
nx.draw(G)
plt.show()
kmax = 1 + (math.log(x)/Alpha)
| true |
class GradedMove:
    """A candidate move together with the states before/after it and an
    accumulating integer rank used to derive a training reward."""

    def __init__(self, state, move, initialRank, stateAfter):
        self.move = move
        self.state = state
        self.rank = initialRank
        self.stateAfter = stateAfter

    def updateRank(self, toAdd):
        """Add *toAdd* to the move's running rank."""
        self.rank += toAdd

    def getTrainingData(self):
        """Return (state, move, reward) with the rank scaled by 1/100."""
        finalReward = self.rank / 100.0
        return self.state, self.move, finalReward

    def getRank(self):
        return self.rank
2b9a9a9647fd19306ed1e22de671bb9c84ae8595 | Python | hanghang2333/disease_classify | /src/util/text_segment.py | UTF-8 | 2,270 | 2.78125 | 3 | [] | no_license | # coding=utf-8
# 这里做的工作有:合并字典,停止词,导入字典。
# 使用的时候对于路径有些要求,需要对应路径上有对应文件
import jieba, os, ConfigParser
import text
# Load the relevant configuration (home path) from the local config file
cfdic = 'config'
cf = ConfigParser.ConfigParser()
cf.read(cfdic)
home_path = cf.get('info', 'home_path')
# home_path = '/home/lihang/disease_analysis/'
rawfile = home_path + 'data/segment_data/'   # dictionaries / stop words
output = home_path + 'data_output/'          # output directory
# 合并字典和停止词到一个文件(因为只能load一个用户字典)
def mergefile(filename, original):
    """Append every line of *filename* onto the list *original* (in place).

    Uses a context manager so the file handle is closed even when reading
    fails -- the original leaked the handle on error.
    """
    with open(filename, 'r') as temp:
        for line in temp.readlines():
            original.append(line)
def treefile(treedir, filepath):
    """Merge every file under *treedir* (recursively) into *filepath*,
    de-duplicating lines.

    NOTE(review): the dedup goes through set(), so the resulting line
    order is arbitrary -- fine for dictionaries/stop-word lists.
    """
    # 'a+' so the file is created if missing and existing lines are kept
    f = open(filepath, 'a+')
    original = f.readlines()
    f.close()
    for root, dirs, files in os.walk(treedir):
        for file in files:
            filename = os.path.join(root, file)
            mergefile(filename, original)
    # rewrite the merged, de-duplicated content
    f = open(filepath, 'w')
    s = set(original)
    for i in s:
        f.write(i)
    f.close()
def merge():
    """Merge all user-dictionary files and all stop-word files into the
    single files 'dict_now' and 'stop_now' (jieba can only load one user
    dictionary), returning both paths."""
    dictfile = rawfile + 'dict/'
    stopfile = rawfile + 'stop/'
    dict_now = rawfile + 'dict_now'
    stop_now = rawfile + 'stop_now'
    treefile(dictfile, dict_now)
    treefile(stopfile, stop_now)
    return dict_now, stop_now
def is_num(num):
    """Return True when the token *num* parses as a number.

    Purely numeric tokens carry no useful signal for tf-idf / doc2vec,
    so they are filtered out during segmentation.
    """
    try:
        float(num)
    except ValueError:
        return False
    return True
def is_eng(eng):
    """Return True when the token is pure ASCII (i.e. an English word).

    NOTE(review): relies on Python 2 str.decode('ascii'); under Python 3
    str has no .decode, so this would raise AttributeError instead.
    """
    try:
        eng.decode('ascii')
        return True
    except UnicodeError:
        return False
dict_now, stop_now = merge()
jieba.load_userdict(dict_now) # this step and the next are slow, so they run only once at module load
stopwords = set(text.get_text_from_file(stop_now))
def get_dict_stop_path():
    return dict_now, stop_now
def segment(text):
    """Segment *text* with jieba; return the kept tokens joined by spaces.

    Tokens that are stopwords, pure numbers, or pure-ASCII (English)
    strings are dropped.
    NOTE(review): the parameter name shadows the `text` module imported above.
    """
    # text: the input text string
    # returns: the segmented text, tokens separated by single spaces
    seg_list = jieba.cut(text)
    result = ''
    for i in seg_list:
        if (i not in stopwords and not is_num(i) and not is_eng(i)):
            result = result + ' ' + i
    result = result.strip()
    return result
# Smoke test at import time (Python 2 print statement)
print segment(u'hunt综合征')
| true |
abfcb4bb07704e9c0859dfe3126fae11b73a6654 | Python | panayiotissoteriou/programming-meets-biology | /Week_2.py | UTF-8 | 4,491 | 3.765625 | 4 | [] | no_license | # PatternCount from Ch1
def PatternCount(Text, Pattern):
    """Count the (possibly overlapping) occurrences of Pattern in Text."""
    window = len(Pattern)
    return sum(1 for i in range(len(Text) - window + 1)
               if Text[i:i + window] == Pattern)
#symbol array: counts of e.g. A in a sliding window # ! slow algorithm won't work for big datasets
def SymbolArray(Genome, symbol):
    """Map each start position i to the count of *symbol* in the
    half-genome window beginning at i (the genome is treated as circular
    by appending its first half).

    O(n^2) reference implementation; FasterSymbolArray below is the
    sliding-window version.
    """
    half = len(Genome) // 2
    extended = Genome + Genome[:half]
    return {i: PatternCount(extended[i:i + half], symbol)
            for i in range(len(Genome))}
# This prints a dictionary with {key = index, value = no. occrances of A}
print(SymbolArray("AAAAGGGG", 'A'))
#with E.coli genome
Ecoli = open('/Users/panayiotissoteriou/Desktop/panayiotis/online courses/bioinformatics specialisation/Programming meets biology/E_coli_genome.txt', 'r')
Ecoli_genome = Ecoli.read()
print(SymbolArray('Ecoli_genome', 'C'))
# more efficient Symbol array algorithm:
def FasterSymbolArray(Genome, symbol):
    """Sliding-window count of *symbol* in each half-genome window of the
    circular genome -- the O(n) version of SymbolArray.

    Bug fix: the seed value was computed as
    PatternCount(symbol, Genome[0:n//2]), i.e. with Text and Pattern
    swapped relative to this file's PatternCount(Text, Pattern)
    signature, which made array[0] (and therefore every entry) wrong.
    str.count is equivalent for a single-character symbol.
    """
    array = {}
    n = len(Genome)
    ExtendedGenome = Genome + Genome[0:n//2]
    # seed with the count over the first half-window
    array[0] = Genome[0:n//2].count(symbol)
    for i in range(1, n):
        # start from the previous window's count...
        array[i] = array[i-1]
        # ...subtract the symbol that just left the window...
        if ExtendedGenome[i-1] == symbol:
            array[i] = array[i]-1
        # ...and add the one that entered at the right edge.
        if ExtendedGenome[i+(n//2)-1] == symbol:
            array[i] = array[i]+1
    return array
# Skew array
def SkewArray(Genome):
    """Prefix G-minus-C skew: result[i] is #G - #C over Genome[:i],
    starting from 0 for the empty prefix."""
    skew = [0]
    for base in Genome:
        delta = 1 if base == "G" else (-1 if base == "C" else 0)
        skew.append(skew[-1] + delta)
    return skew
#print(type(SkewArray("CATGGGCATCGGCCATACGCC")))
#print("0 -1 -1 -1 0 1 2 1 1 1 0 1 2 1 0 0 0 0 -1 0 -1 -2")
#minimum Skew
def MinimumSkew(Genome):
    """Return every index at which the G-C skew array reaches its minimum
    (candidate origin-of-replication positions)."""
    array = SkewArray(Genome)
    lowest = min(array)
    # collect all positions achieving the minimum skew value
    return [i for i, value in enumerate(array) if value == lowest]
print(MinimumSkew("GATACACTTCCCGAGTAGGTACTG"))
# Hamming distance
def HammingDistance(p, q):
    """Number of positions at which p and q differ (assumes len(q) >= len(p))."""
    return sum(1 for i in range(len(p)) if p[i] != q[i])
#print(HammingDistance("GGGCCGTTGGT", "GGACCGTTGAC"))
print(HammingDistance("CTACAGCAATACGATCATATGCGGATCCGCAGTGGCCGGTAGACACACGT", "CTACCCCGCTGCTCAATGACCGGGACTAAAGAGGCGAAGATTATGGTGTG"))
# approximate Pattern matching
def ApproximatePatternMatching(Text, Pattern, d):
    """All start positions where Pattern matches Text with at most d mismatches."""
    k = len(Pattern)
    return [i for i in range(len(Text) - k + 1)
            if HammingDistance(Text[i:i + k], Pattern) <= d]
print(ApproximatePatternMatching('CGCCCGAATCCAGAACGCATTCCCATATTTCGGGACCACTGGCCTCCACGGTACGGACGTCAATCAAAT','ATTCTGGA',3))
#modifying ApproximatePatternMatching to find no. of occurances of k-mers with 1 mismatch
def ApproximatePatternCount(Text, Pattern, d):
    """Number of occurrences of Pattern in Text allowing up to d mismatches."""
    k = len(Pattern)
    return sum(1 for i in range(len(Text) - k + 1)
               if HammingDistance(Text[i:i + k], Pattern) <= d)
def HammingDistance(p, q):
    """Count mismatched positions between p and q.

    NOTE(review): duplicate of the HammingDistance defined earlier in
    this file; being the later definition, this is the one that stays
    bound at module level.
    """
    diff = 0
    i = 0
    while i < len(p):
        diff += p[i] != q[i]
        i += 1
    return diff
print(ApproximatePatternCount('TTTAGAGCCTTCAGAGG','GAGG', 2))
| true |
a2b6ab3ea40f30bbbb824fbbf24937f167c2f1d2 | Python | DixitIshan/Machine_Learning | /K_means/basic_K-Means.py | UTF-8 | 838 | 3.46875 | 3 | [] | no_license | # IMPORTING ALL THE NECESSARY LIBRARIES
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.cluster import KMeans
style.use('ggplot')
# FALSIFIED DATASET: six hand-made 2-D points forming two rough groups
X = np.array([[1, 2],[1.5, 1.8],[5, 8],[8, 8],[1, 0.6],[9, 11]])
# INVOKING THE KMEANS CLUSTERING ALGORITHM (two clusters)
Kmeans = KMeans(n_clusters = 2)
# CREATING A BEST FIT ON THE DATASET
Kmeans.fit(X)
# THESE ARE THE CENTROIDS AND THE LABELS
centroids = Kmeans.cluster_centers_
print(centroids)
labels = Kmeans.labels_
print(labels)
# ITERATING THROUGH THE DATASET AND PLOTTING A SCATTERPLOT GRAPH OF DATASET AND CENTROIDS
# (one matplotlib style string per possible cluster label)
colors = ["g.","r.","c.","y."]
for i in range(len(X)):
    plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize = 10)
plt.scatter(centroids[:, 0],centroids[:, 1], marker = "x", s=150, linewidths = 5, zorder = 10)
plt.show() | true |
49fa3bcfcdc6b34881273bcb545119a496f161a2 | Python | grosenkj/ParaViewGeophysics | /src/filters/filter_points_to_tube.py | UTF-8 | 695 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | Name = 'PointsToTube'
# ParaView programmable-filter metadata: display label, category, help
# text, and the filter's input/output dataset types.
Label = 'Points To Tube'
FilterCategory = 'PVGP Filters'
Help = 'Takes points from a vtkPolyData object and constructs a line of those points then builds a polygonal tube around that line with some specified radius and number of sides.'
NumberOfInputs = 1
InputDataType = 'vtkPolyData'
OutputDataType = 'vtkPolyData'
ExtraXml = ''
# User-adjustable properties exposed in the ParaView GUI
Properties = dict(
    Number_of_Sides=20,
    Radius=10.0,
    Use_nearest_nbr=True,
)
def RequestData():
    """Filter entry point executed by ParaView: wrap the input point set
    in a polygonal tube using the Properties above.

    NOTE(review): `self` is not defined here -- it appears to be injected
    by ParaView's programmable-filter runtime; confirm before reuse.
    """
    from PVGPpy.filt import pointsToTube
    pdi = self.GetInput() # VTK PolyData Type
    pdo = self.GetOutput() # VTK PolyData Type
    pointsToTube(pdi, radius=Radius, numSides=Number_of_Sides, nrNbr=Use_nearest_nbr, pdo=pdo)
| true |
0ae0ab6c71c3d00fa13713586c5e5d2f68956cfb | Python | cristobal-vildosola/AMV-detector | /src/Main.py | UTF-8 | 1,630 | 2.625 | 3 | [] | no_license | import time
import sys
from BusquedaKNN import frames_mas_cercanos_video, agrupar_caracteristicas
from Deteccion import buscar_secuencias
from Evaluacion import evaluar_resultados
from Extraccion import caracteristicas_video
from Indices import KDTree
def buscar_clips_amv(video: str):
    """Run the full AMV clip-detection pipeline for one video.

    Steps: extract frame features from the AMV, group the pre-extracted series
    features and build a KD-tree index, find the nearest-neighbour frames,
    detect matching sequences, and evaluate the results against ground truth.
    """
    carpeta = '../videos/AMV'
    tamano = (10, 10)
    fps = 6
    # feature extraction
    caracteristicas_video(f'{carpeta}/{video}.mp4', f'{carpeta}_car_{tamano}_{fps}',
                          fps_extraccion=fps, tamano=tamano)
    # nearest-neighbour search
    t0 = time.time()
    etiquetas, caracteristicas = agrupar_caracteristicas(f'../videos/Shippuden_car_{tamano}_{fps}',
                                                         recargar=True, tamano=tamano)
    print(f'la agrupación de datos tomó {int(time.time() - t0)} segundos')
    indice = KDTree(datos=caracteristicas, etiquetas=etiquetas, trees=10)
    print(f'la construcción del índice tomó {indice.build_time:.1f} segundos')
    frames_mas_cercanos_video(f'../videos/AMV_car_{tamano}_{fps}/{video}.txt',
                              f'../videos/AMV_cerc_{tamano}_{fps}',
                              indice=indice, checks=500, k=20)
    # sequence detection
    buscar_secuencias(f'../videos/AMV_cerc_{tamano}_{fps}/{video}.txt',
                      max_errores_continuos=12, tiempo_minimo=1, max_offset=0.15)
    # evaluation
    evaluar_resultados(video)
    return
if __name__ == '__main__':
    # Take the video name from the CLI when provided; default to the demo clip.
    nombre = sys.argv[1] if len(sys.argv) > 1 else 'mushroom'
    buscar_clips_amv(nombre)
| true |
e73d4849ea3e8ea6f71cd594c190c194eca5e30c | Python | akesling/genimager | /genimagef/general-unstable/sample_genimage_script.py | UTF-8 | 899 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
# Sample script to test genetic imaging libraries
#
import genetic_imager
# --- Run configuration for genetic_imager.genimage ---
# specifies the path to the source image to approximate
image_path = '/home/ajray/images/eyekey.jpg'
# specifies the path to save the generated images to
archive_dir = '/home/ajray/images/eyekey/'
# specifies the maximum number of generations to evolve
max_generations = 10000
# specifies color mode 'RGB' = color, 'L' = black & white
color_mode = 'L'
# specifies the interval to save images at, None means don't save images
save_interval = 10
# specifies the output format, None means no output
output_type = 'XML'
# Kick off the genetic-imaging run with the settings above.
genetic_imager.genimage(image_path = image_path, \
                        archive_dir = archive_dir, \
                        max_generations = max_generations, \
                        color_mode = color_mode, \
                        save_interval = save_interval, \
                        output_type = output_type)
| true |
7444e00eae9fa6c1cac388ba17d4cfb9a148ebfa | Python | AlexandrSech/Z49-TMS | /students/klimovich/Homework15/main.py | UTF-8 | 1,888 | 3.3125 | 3 | [] | no_license | import pyodbc
from datetime import datetime
'''
Создать таблицу продуктов. Атрибуты продукта: id, название, цена, количество, комментарий.
Реализовать CRUD(создание, чтение, обновление по id, удаление по id) для продуктов.
Создать пользовательский интерфейс.
'''
class Sql:
    """Thin wrapper around a pyodbc connection to a local SQL Server instance."""

    def __init__(self, database, server="DESKTOP-OVIDSIB"):
        # Build the ODBC connection string for a trusted (Windows auth) login.
        template = ("Driver={{SQL Server Native Client 11.0}};"
                    "Server={};"
                    "Database={};"
                    "Trusted_Connection=yes;")
        self.cnxn = pyodbc.connect(template.format(server, database))
        self.cursor = self.cnxn.cursor()

    def write_query(self, query: str):
        """Execute *query*, print each returned row with its index, commit."""
        rows = list(self.cursor.execute(query))
        for index, record in enumerate(rows):
            print(index, record)
        self.cursor.commit()
# Connect once at start-up; the menu loop below reuses this connection.
try:
    sql = Sql('homework15')
    print('Подключено')
except Exception:
    print('Не подключился чел')
# Simple console menu (Russian prompts): 1 = list products, 2 = delete a row
# by id, 3 = run a free-form query, 0 = exit.
while True:
    print('1. вывести таблицу Products')
    print('2. удалить строку из таблицы')
    print('3. написать запрос')
    print('0. выход')
    sign = input('введите номер: ')
    if sign == '1':
        sql.write_query('select * from Products')
    if sign == '2':
        try:
            ss = input('Введите id строки: ')
            # NOTE(review): the id is interpolated straight into the SQL text,
            # which is vulnerable to SQL injection; prefer a parameterized query.
            sql.write_query('delete Products where id={}'.format(ss))
        except Exception:
            print('Походу такой строки нет')
    if sign == '3':
        query = input('напишите запрос: ')
        sql.write_query(query)
    if sign == '0':
        break
| true |
8f7b1004c95396445d9da6da9675e65a34f905a8 | Python | dneo007/pi4fyp | /generator.py | UTF-8 | 688 | 3.203125 | 3 | [] | no_license | import csv
import numpy as np
import sys
def main():
    """Interactively copy the first two columns of a CSV into another file.

    NOTE(review): the outer `while True` re-opens and re-reads the whole input
    file and appends its rows to the output forever — presumably intended to
    follow a growing file, but as written it duplicates rows without end.
    Confirm the intent before relying on this.
    """
    print("Starting", str(sys.argv[0]), "V1.0")
    inputfile = input('Input Filename: ')
    outputfile = input('Output Filename: ')
    print('Generating Filename:', str(inputfile))
    while True:
        with open (inputfile,'r') as csv_file:
            reader =csv.reader(csv_file)
            # next(reader) # skip first row
            #print first 30 rows
            for row in reader:
                # Append [col0, col1] of the current row to the output CSV.
                with open(outputfile, 'a') as csvnew:
                    rows = [ [row[0], row[1]]]
                    csvwriter = csv.writer(csvnew)
                    csvwriter.writerows(rows)
if __name__ == '__main__':
    main()
| true |
b8bd4803711f93ca2a9c51c06bdec8d09bd0f5d7 | Python | Shazthestorylover/Analysis-POTW--2020-2021- | /POTW-2/Secret Dates.py | UTF-8 | 1,870 | 3.265625 | 3 | [] | no_license | # Link to the Hackerrank exercise: https://www.hackerrank.com/contests/uwi-comp2211-2021-potw-02/challenges
# -----------------------------------------------
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'find_earliest' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER m
# 3. INTEGER_ARRAY enc_dates
#
# Helper Function from Dr.Fokums Project 1 - Modified
def ext_Euclid(m, n):
    """Extended Euclidean algorithm.

    Returns the multiplicative inverse of n mod m, normalized into [0, m).
    If n is not invertible mod m (gcd != 1), the gcd accumulator is returned
    instead, matching the original behavior.
    """
    a = (1, 0, m)
    b = (0, 1, n)
    while True:
        if b[2] == 0:
            return a[2]
        if b[2] == 1:
            # Normalize a negative Bezout coefficient into the range [0, m).
            if b[1] < 0:
                return int(b[1] + m)
            return int(b[1])
        # Pure integer division: exact for arbitrarily large operands, unlike
        # the original math.floor(a[2] / float(b[2])), which loses precision
        # once values exceed 2**53 — a real risk for crypto-sized moduli.
        q = a[2] // b[2]
        t = (a[0] - q * b[0], a[1] - q * b[1], a[2] - q * b[2])
        a = b
        b = t
def find_earliest(k, m, enc_dates):
    """Decipher every encoded date with the inverse of k mod m and return the
    earliest (smallest) decoded value."""
    inverse = ext_Euclid(m, k)
    decoded = [(inverse * enc) % m for enc in enc_dates]
    return min(decoded)
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    k = int(first_multiple_input[0])
    m = int(first_multiple_input[1])
    n = int(first_multiple_input[2])
    enc_dates = []
    for _ in range(n):
        enc_dates_item = int(input().strip())
        enc_dates.append(enc_dates_item)
    result = find_earliest(k, m, enc_dates)
    fptr.write(str(result) + '\n')
    fptr.close()
| true |
dc3b186e6580a34b3d8194b20169d74db20ea309 | Python | RokKrivicic/Homework-lesson8 | /guessTheSecretNumber.py | UTF-8 | 168 | 3.515625 | 4 | [] | no_license | secret = 15
guess = int(input("Your guess?"))
# Compare against the secret and report the outcome.
if guess == secret:
    print("You guessed correctly, congratulation")
else:
    print("You are dead wrong my friend")
| true |
360bb9fa0e9a34020cf7f05d72497b3a5efc4637 | Python | dawoeh/HPLC_chromatogramm | /fhplc.py | UTF-8 | 6,654 | 2.828125 | 3 | [
"MIT"
] | permissive | import sys
import os
import numpy as np
import re
if '-nograph' in sys.argv:
	pass
else:
	# matplotlib is only imported when plotting is requested, so the script
	# can still run on machines without matplotlib installed.
	import matplotlib.pyplot as plt
#########################################################################################################################################
# Script for Knauer ASCII-file conversion and plotting #
# ---------------------------------------------------- #
# #
# The script converts ASCII-files from EZCHROM into two .txt files (UV and Fluorescence) with time and volume data. Both #
# curves are plotted and saved as .png file. The script handles two curves, but can be extended to handle more. #
# #
# Dependencies: Matplotlib, use without graph output (-nograph) in case. #
# #
# Usage: Execute fhplc.py in folder with ASCII files (.asc). #
# #
# Essential arguments: -f flow rate in ml/min (i.e. "python fhplc.py -f 0.25") #
# Optional arguments: -notxt (no .txt file output) #
# -nograph (no graph output) #
# -dpi 500 (for specific resolution, 200 standard) #
# #
#########################################################################################################################################
def is_number(s):
	"""Return True if *s* parses as a float, False otherwise."""
	try:
		float(s)
	except ValueError:
		return False
	return True
# Create the output folders (png/ for plots, txt/ for tables) next to the script.
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_png = dir_path + '/png'
if not os.path.exists(dir_png):
	os.makedirs(dir_png)
dir_txt = dir_path + '/txt'
if not os.path.exists(dir_txt):
	os.makedirs(dir_txt)
i = 1
while i <= len(sys.argv)-1: ##### Flow rate input as first argument, convert to float
	# Scan argv for '-f <rate>'; abort with a usage message when missing/invalid.
	if '-f' in sys.argv[i]:
		if is_number(sys.argv[i+1]):
			flow = float(sys.argv[i+1])
			print('Flow rate: '+str(flow)+' ml/min\n')
		else:
			print("Flow rate not properly set! Usage: -f 0.5 (in ml/min)\n")
			print("Script quit\n")
			quit()
		break
	else:
		if i == len(sys.argv)-1:
			print("Flow rate not set! Usage: -f 0.5 (in ml/min)\n")
			print("Script quit\n")
			quit()
		i+=1
# Safety net: `flow` is only bound inside the loop above, so a NameError here
# means no valid -f argument was found.
try:
	flow
except NameError:
	print ("Please set flow rate properly! Usage: -f 0.5 (in ml/min)\n")
	print ('Exit!')
	quit()
if '-notxt' in sys.argv:
	print("Omit text file output.\n")
if '-nograph' in sys.argv:
	print("Omit graph output.\n")
i = 1
while i <= len(sys.argv)-1: ##### check for dpi argument
	if '-dpi' in sys.argv[i]:
		if is_number(sys.argv[i+1]):
			dpi = int(sys.argv[i+1])
		else:
			break
		break
	else:
		i+=1
files = [g for g in os.listdir('.') if os.path.isfile(g)] ##### check for files in same folder
files = np.sort(files, axis=0)
##### definitions for progress bar
number = 0
progress = 1.0
percent = 0
bar_length = 50
#####
# Count how many ASCII (.asc) files there are so the progress bar can scale.
for g in files: #### count ascii files in folder
	if '.asc' in g:
		number += 1
print('Number of ASCII files to process: %d\n' %number)
if number == 0:
	print('No files to process!\n')
	print('Exit!')
	quit()
for g in files: #### data manipulation routine
	if '.asc' in g:
		data = []
		minutes1 = []
		volume1 = []
		data1 = []
		minutes2 = []
		volume2 = []
		data2 = []
		nr = 0 # NOTE(review): unused
		f = open(g, 'r')
		# Parse the EZCHROM ASCII header fields and collect the raw data points.
		for line in f:
			line = line.replace(',','.')
			if 'Sample ID:' in line:
				splitline = line.split()
				splitline.pop(0)
				splitline.pop(0)
				sample = ' '.join(splitline)
			if 'Date and Time:' in line:
				splitline = line.split()
				date = splitline[4]
				time = splitline[5]
			if 'X Axis Title:' in line:
				splitline = line.split()
				xtitle = [splitline[3],splitline[4]]
			if 'Y Axis Title:' in line:
				splitline = line.split()
				ytitle = [splitline[3],splitline[4]]
			if 'Rate:' in line:
				rates=list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", line)))
			if 'Total Data Points:' in line:
				points=list(map(int, re.findall(r"[-+]?\d*\.\d+|\d+", line)))
			if 'X Axis Multiplier:' in line:
				xmulti=list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", line)))
			if 'Y Axis Multiplier:' in line:
				ymulti=list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", line)))
			if is_number(line):
				data = np.append(data, int(line))
		f.close()
		i=0
		# NOTE(review): `time` below overwrites the header timestamp parsed
		# above — the Date/Time value is effectively discarded.
		while i < len(data): ###### create arrays for time, volume and data
			if i < points[0]:
				time = (float(i)/float(points[0])*float(points[0])/rates[0]/60)
				minutes1 = np.append(minutes1,time)
				vol = time * flow
				volume1 = np.append(volume1,vol)
				data1 = np.append(data1,data[i]*ymulti[0])
			if i >= points[0]:
				time = ((float(i)-float(points[0]))/float(points[1])*float(points[1])/rates[1]/60)
				minutes2 = np.append(minutes2,time)
				vol = time * flow
				volume2 = np.append(volume2,vol)
				data2 = np.append(data2,data[i]*ymulti[1])
			i += 1
		if '-nograph' in sys.argv:
			pass
		else:
			plt.clf()
			fig, ax1 = plt.subplots() ###### Plotting
			ax2 = ax1.twinx()
			ax1.plot(volume1, data1, 'g-')
			ax2.plot(volume2, data2, 'b-')
			#fig.suptitle(sample, fontsize=12, fontweight='bold')
			ax1.set_xlabel('Volume (ml)')
			ax1.set_ylabel('Fluorescence'+' ('+ytitle[0]+')', color='g')
			ax2.set_ylabel('UV'+' ('+ytitle[1]+')', color='b')
			ax1.grid(b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
			# `dpi` is only bound when -dpi was given; fall back to 200 otherwise.
			try:
				dpi
				plt.savefig(dir_png+'/'+sample, dpi=dpi)
			except NameError:
				plt.savefig(dir_png+'/'+sample, dpi=200)
			plt.close()
		if '-notxt' in sys.argv:
			pass
		else:
			set1 = np.column_stack((minutes1,volume1,data1))
			set2 = np.column_stack((minutes2,volume2,data2))
			with open(dir_txt+'/'+sample+'_Fluorescence.txt', 'wb') as h: ##### Output txt files for excel import
				h.write(b'time(min), volume(ml), data('+(ytitle[0].encode("UTF-8"))+b')\n')
				np.set_printoptions(precision=3)
				np.savetxt(h, set1, fmt='%10.3f',delimiter=',')
				h.close()
			with open(dir_txt+'/'+sample+'_UV.txt', 'wb') as j: ##### Output txt files for excel import
				j.write(b'time(min), volume(ml), data('+(ytitle[1].encode("UTF-8"))+b')\n')
				np.set_printoptions(precision=3)
				np.savetxt(j, set2, fmt='%10.3f',delimiter=',')
				j.close()
	percent = (progress/number) ##### Progress bar
	hashes = '#' * int(round(percent * bar_length))
	spaces = ' ' * (bar_length - len(hashes))
	sys.stdout.write("\rProgress: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
	sys.stdout.flush()
	progress +=1
	##### End of routine
print('\n\nDone!\n')
| true |
ba20151bb20dd458bf36a13289de56bf106a8021 | Python | aam035/qbb2015-homework | /day2/filter.py | UTF-8 | 590 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
filename="/Users/cmdb/qbb2015/stringtie/SRR072893/t_data.ctab"
#check to see if this is correct by typing in terminal ls w ot w/o quotes#
f = open( filename )
#files are itterables that can be used in list, commas surpress new lines after file is printed
for line_count, line in enumerate(f): #enumerate adds the increments and does not need line_count +=1
if line_count <=10:#limits the lines to 10 to 15
pass #does nothing
elif line_count <= 15:
print line,
else:
break #to stop looking through lines
| true |
2ec345430f50001e93af8e597786d1deef00697c | Python | njkrichardson/fouriernets | /src/utils.py | UTF-8 | 7,023 | 2.796875 | 3 | [] | no_license | import numpy as np
import numpy.random as npr
from distributions import uniform, linear, semi_circular, von_mises
from builtins import range
import numpy as np
import pandas as pd
import sys, time
import matplotlib.pyplot as plt
import seaborn as sns
from math import pi
sns.set_style('white')
# Qualitative plotting palette (RGB triples) normalized to [0, 1] for
# matplotlib. NOTE(review): the division is by 256.0, not 255.0, so the
# brightest channel tops out just below 1.0 — confirm this is intentional.
COLORS = np.array([
    [106,61,154], # Dark colors
    [31,120,180],
    [51,160,44],
    [227,26,28],
    [255,127,0],
    [166,206,227], # Light colors
    [178,223,138],
    [251,154,153],
    [253,191,111],
    [202,178,214],
    ]) / 256.0
def plot_sample(inputs : np.ndarray, targets : np.ndarray, n_samples : int = 4):
    """Show one polar scatter panel per sampled example, cycling the classes.

    NOTE(review): only 4 axes are created, so n_samples > 4 would index past
    `axs`; also sample i of class (i mod 4) is drawn — confirm that's intended.
    """
    classes = {0: 'semicircular', 1:'uniform', 2:'linear', 3:'von mises'}
    n = inputs.shape[0]
    # collapse one-hot targets to class indices
    targets = one_hot_decoding(targets)
    # row indices of every example, grouped by class 0..3
    dist_by_type = [np.where(targets==0)[0], np.where(targets==1)[0], np.where(targets==2)[0], np.where(targets==3)[0]]
    j = 0
    fig, axs = plt.subplots(1, 4, figsize=(15, 6), facecolor='w', edgecolor='k', subplot_kw={'projection': 'polar'})
    plt.suptitle('samples')
    fig.subplots_adjust(hspace = .5, wspace=.001)
    axs = axs.ravel()
    for i in range(n_samples):
        idx = dist_by_type[j][i]
        name=targets[idx]
        radius, theta = unroll_distribution(inputs[idx])
        axs[i].scatter(theta, radius, 5, c=COLORS[j].reshape(-1, 1).T)
        axs[i].set_title(classes[name], pad=15)
        j+=1
        if j > 3:
            j = 0
    plt.show()
# Shadow the builtin so round() always yields an int (handy for '%d' formats).
# NOTE(review): rebinding the builtin name affects this whole module.
round = (lambda x: lambda y: int(x(y)))(round)
# NOTE: datetime.timedelta.__str__ doesn't allow formatting the number of digits
def sec2str(seconds):
    """Format a duration in seconds as 'HH:MM:SS', 'MM:SS', or 'S.SS'.

    Rounds to the nearest whole second *before* splitting into fields, so the
    seconds field can never render as 60 (the original rounded each field
    after the divmod, e.g. 119.7s displayed as '01:60' instead of '02:00').
    Durations under one minute keep two decimal places.
    """
    if seconds < 60:
        return '%0.2f' % seconds
    total = round(seconds)
    hours, rem = divmod(total, 3600)
    minutes, secs = divmod(rem, 60)
    if hours > 0:
        return '%02d:%02d:%02d' % (hours, minutes, secs)
    return '%02d:%02d' % (minutes, secs)
def progprint_xrange(*args,**kwargs):
    """Like range(*args), but wrapped in progprint for a progress display."""
    rng = range(*args)
    return progprint(rng, total=len(rng), **kwargs)
def progprint(iterator,total=None,perline=25,show_times=True):
    """Yield items from *iterator*, printing one dot per item plus a periodic
    progress/timing summary (every *perline* items) to stdout."""
    times = []
    idx = 0
    if total is not None:
        numdigits = len('%d' % total)
    for thing in iterator:
        prev_time = time.time()
        yield thing
        # time attributed to the consumer's processing of the yielded item
        times.append(time.time() - prev_time)
        sys.stdout.write('.')
        if (idx+1) % perline == 0:
            if show_times:
                avgtime = np.mean(times)
                if total is not None:
                    eta = sec2str(avgtime*(total-(idx+1)))
                    sys.stdout.write((
                        ' [ %%%dd/%%%dd, %%7.2fsec avg, ETA %%s ]\n'
                        % (numdigits,numdigits)) % (idx+1,total,avgtime,eta))
                else:
                    sys.stdout.write(' [ %d done, %7.2fsec avg ]\n' % (idx+1,avgtime))
            else:
                if total is not None:
                    sys.stdout.write((' [ %%%dd/%%%dd ]\n' % (numdigits,numdigits) ) % (idx+1,total))
                else:
                    sys.stdout.write(' [ %d ]\n' % (idx+1))
        idx += 1
        sys.stdout.flush()
    print('')
    if show_times and len(times) > 0:
        total = sec2str(seconds=np.sum(times))
        print('%7.2fsec avg, %s total\n' % (np.mean(times),total))
def make_data(n_per_class : int = 100, n_bins : int = 12, n_draws : int = 100, split : bool = False, test_proportion : float = 0.2, \
        kappa : float = 0.5, slope_mag : float = 5., bias : float = .85):
    """Generate a balanced 4-class dataset of binned circular distributions.

    One-hot target columns 0..3 map to: semicircular, uniform, linear,
    von Mises. Returns (inputs, targets), or a train/test split when
    split=True.
    """
    n_data = n_per_class * 4
    inputs, targets = np.zeros((n_data, n_bins)), np.zeros((n_data, 4))
    for idx in progprint_xrange(0, n_data-3, 4):
        # generate one sample of each class per group of four rows
        inputs[idx], targets[idx][0] = semi_circular(n_bins, n_draws, bias=bias), 1
        inputs[idx+1], targets[idx+1][1] = uniform(n_bins, n_draws), 1
        inputs[idx+2], targets[idx+2][2]= linear(n_bins, n_draws, slope_mag=slope_mag), 1
        inputs[idx+3], targets[idx+3][3] = von_mises(n_bins, n_draws, kappa=kappa), 1
    if split is True:
        return train_test_split(inputs, targets, test_proportion=test_proportion)
    return inputs, targets
def train_test_split(inputs, targets, test_proportion : float = 0.2):
    """Randomly partition (inputs, targets) into train and test splits.

    A train index set of size int(n * (1 - test_proportion)) is drawn without
    replacement; every remaining row becomes the test split. Returns
    (train_inputs, test_inputs, train_targets, test_targets).
    """
    n_data = inputs.shape[0]
    n_train = int(n_data * (1 - test_proportion))
    train_idx = npr.choice(n_data, size=n_train, replace=False)
    train_inputs = inputs[train_idx]
    train_targets = targets[train_idx]
    test_inputs = np.delete(inputs, train_idx, 0)
    test_targets = np.delete(targets, train_idx, 0)
    return train_inputs, test_inputs, train_targets, test_targets
def one_hot_decoding(oh_arr : np.ndarray) -> np.ndarray:
    """Map each one-hot row of *oh_arr* to the index of its active class."""
    return np.array([int(np.flatnonzero(row == 1)[0]) for row in oh_arr])
def plot_distribution(distribution : np.ndarray, r : int = 5, name : str = 'distribution'):
    """Render one binned circular distribution as a polar scatter plot."""
    radius, theta = unroll_distribution(distribution, r=r)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='polar')
    c = ax.scatter(theta, radius)
    ax.title.set_text(name)
    plt.show()
def unroll_distribution(distribution : np.ndarray, r : int = 5):
    """Expand a binned circular histogram into polar scatter coordinates.

    Each bin index maps to an angle around the circle; every count in a bin
    becomes one point, stacked outward from base radius *r* in 0.1 steps.
    Returns (radius, theta) as parallel lists.
    """
    radius = []
    theta = []
    n_bins = len(distribution)
    for bin_idx, count in enumerate(distribution):
        angle = (bin_idx / n_bins) * (pi * 2)
        for stack in range(int(count)):
            radius.append(0.1 * stack + r)
            theta.append(angle)
    return radius, theta
def load_maddie():
    """Load Maddie's exported test CSVs and return a train/test split.

    NOTE(review): paths are hard-coded to a local machine. Also, inputs are
    stacked in the order (linear, semicircle, uniform, von Mises) while the
    one-hot blocks start at class 0 ('semicircular' in plot_sample's mapping);
    verify that the label ordering matches the intended class encoding.
    """
    # import Maddie's data to confirm distribution discrimination
    maddie_sc = pd.read_csv('/Users/nickrichardson/Desktop/academics/2019-20/fouriernets/maddie/SemiCircleData_testing.csv', header=None).values[:, :-1]
    maddie_linear = pd.read_csv('/Users/nickrichardson/Desktop/academics/2019-20/fouriernets/maddie/LineData_testing.csv', header=None).values[:, :-1]
    maddie_uniform = pd.read_csv('/Users/nickrichardson/Desktop/academics/2019-20/fouriernets/maddie/UniformData_testing.csv', header=None).values[:, :-1]
    maddie_vonmises = pd.read_csv('/Users/nickrichardson/Desktop/academics/2019-20/fouriernets/maddie/VonMisesData_testing.csv', header=None).values[:, :-1]
    n_p_class = maddie_linear.shape[0]
    inputs = np.vstack((maddie_linear, maddie_sc, maddie_uniform, maddie_vonmises))
    targets = np.vstack((np.tile([1, 0, 0, 0], n_p_class).reshape(n_p_class, 4), np.tile([0, 1, 0, 0], n_p_class).reshape(n_p_class, 4),\
                         np.tile([0, 0, 1, 0], n_p_class).reshape(n_p_class, 4), np.tile([0, 0, 0, 1], n_p_class).reshape(n_p_class, 4)))
    return train_test_split(inputs, targets)
# df = pd.DataFrame({'radius': radius, name : theta})
# # Convert the dataframe to long-form or "tidy" format
# df = pd.melt(df, id_vars=['radius'], var_name='distribution type', value_name='theta')
# # Set up a grid of axes with a polar projection
# g = sns.FacetGrid(df, col='distribution type', hue="distribution type",
# subplot_kws=dict(projection='polar'), height=4.5,
# sharex=False, sharey=False, despine=False, margin_titles=False)
# # Draw a scatterplot
# g.map(sns.scatterplot, "theta", "radius"); | true |
30c57fda7eefc62ee414d7ebbf70f5b8d7aa3cc2 | Python | smaeland/ML-2HDM | /scan/find_parameters_for_given_theta.py | UTF-8 | 1,571 | 2.734375 | 3 | [
"BSD-3-Clause"
] | permissive | import csv
import numpy as np
def find_theta(resultsfile='results_ratio_scan_sorted.csv'):
    """Scan 2HDM parameter-scan results and print every (tanb, m12) point whose
    signal fraction theta = (xsec*BR)_A / [(xsec*BR)_A + (xsec*BR)_H] lies
    within 0.01 of the hard-coded target mytheta = 0.9. (Python 2 source.)"""
    masses = []
    br_H = []
    br_A = []
    br_ratios = []
    xsec_H = []
    xsec_A = []
    xsec_ratios = []
    total_ratios = []
    tanbs = []
    m12s = []
    # column layout of the results CSV:
    # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
    # mH,mA,m12_2,tanb,xsec-H,xsec-A,xsec-ratio,BR-H,BR-A,BR-ratio,tot-ratio
    with open(resultsfile, 'rb') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            if not len(row):
                continue
            if row[0] == 'mH':
                continue # Header
            masses.append(float(row[0]))
            m12s.append(float(row[2]))
            xsec_H.append(float(row[4]))
            xsec_A.append(float(row[5]))
            xsec_ratios.append(float(row[6]))
            br_H.append(float(row[7]))
            br_A.append(float(row[8]))
            br_ratios.append(float(row[9]))
            total_ratios.append(float(row[10]))
            tanbs.append(float(row[3]))
    # elementwise cross-section times branching ratio for H and A
    xsec_times_Br_H = np.array(br_H)*np.array(xsec_H)
    xsec_times_Br_A = np.array(br_A)*np.array(xsec_A)
    theta = xsec_times_Br_A/(xsec_times_Br_A+xsec_times_Br_H)
    def isclose(a, b):
        # tolerance-based comparison (+/- 0.01)
        return abs(a-b) < 0.01
    mytheta = 0.9
    for i in range(len(theta)):
        if isclose(theta[i], mytheta):
            print 'theta = %.4f, tanb = %.4f, m12 = %.1f' % (theta[i], tanbs[i], m12s[i])
            #raw_input('cont')
if __name__ == '__main__':
    find_theta()
| true |
1c7a7a5979a8a73cd2c4e3b63a239376e2200f8c | Python | yuc330/hindroid_replication | /src/matrix.py | UTF-8 | 5,648 | 2.90625 | 3 | [] | no_license | import numpy as np
import json
import os
import re
import pandas as pd
import scipy.sparse
from sklearn.preprocessing import MultiLabelBinarizer
def get_Xy(cat1, cat2, y_col = 'malware'):
    """
    given two lists of lists of smali files, each for different category, return a dataframe for smali files and a list of labels
    Args:
        cat1: first category of smali data with label 0
        cat2: second category of smali data with label 1
        y_col: column name for labels, default malware
    """
    df1 = pd.DataFrame(cat1)
    df1[y_col] = 0
    df2 = pd.DataFrame(cat2)
    df2[y_col] = 1
    # drop rows that are entirely NaN after stacking the two categories
    all_df = pd.concat([df1, df2]).dropna(how='all')
    # NOTE(review): the positional axis argument drop(y_col, 1) is deprecated
    # in newer pandas; drop(columns=y_col) is the forward-compatible spelling.
    smalis = all_df.drop(y_col,1)
    y = all_df[y_col]
    #store mediate files
    # cache the inputs/labels under mediate/ so later runs can reload them
    if not os.path.exists('mediate'):
        os.mkdir('mediate')
    with open('mediate/y.txt', 'w') as f:
        for item in y:
            f.write("%s\n" % item)
    smalis.to_csv('mediate/smalis.csv', index = False)
    return smalis, y
def get_Xy_fromfile():
    """
    read already saved smalis dataframe and list of labels from mediate folder
    Args:
        none
    Returns:
        (smalis, y) — note the labels come back as a list of *strings*
        (read from y.txt), not the integer labels originally written.
    """
    with open('mediate/y.txt') as f:
        y = f.read().splitlines()
    smalis = pd.read_csv('mediate/smalis.csv')
    return smalis, y
# functions for A
def find_apis(block):
    """Extract every API name invoked in a smali code block.

    Matches each `invoke-*` instruction and captures the callee descriptor up
    to the opening parenthesis of its argument list.
    """
    pattern = r'invoke-\w+ {.*}, (.*?)\('
    return re.findall(pattern, block)
def smali2apis(row):
    """
    output a set of unique apis of an app given series of smali files
    Args:
        row - series of smali files (NaN entries are dropped before joining)
    """
    smalis = '\n'.join(row.dropna())
    return set(find_apis(smalis))
def construct_A(apis):
    """
    construct A matrix (app x API incidence, sparse binary)
    Args:
        apis - a series of set of apis, each set representing the apis for an app
    Returns:
        (A, classes): the sparse incidence matrix and its API column labels
    """
    mlb = MultiLabelBinarizer(sparse_output = True)
    A = mlb.fit_transform(apis)
    return A, mlb.classes_
def construct_A_test(apis, classes):
    """
    construct A matrix for testing set, restricted to the training-time APIs
    Args:
        apis - a series of set of apis, each set representing the apis for an app
        classes - apis to check for in this series (column order from training)
    """
    mlb = MultiLabelBinarizer(sparse_output = True, classes = classes)
    A = mlb.fit_transform(apis)
    return A
# functions for B
def find_blocks(smali):
    """Return the body of every `.method ... .end method` section in *smali*.

    The non-greedy [\S\s] class spans newlines without needing re.DOTALL.
    """
    return re.findall(r'\.method([\S\s]*?)\.end method', smali)
def smali2blocks(row):
    """
    find all unique code blocks given a series of smali files
    Args:
        row - series of smali files (NaN entries are dropped before joining)
    """
    smali = '\n'.join(row.dropna())
    return list(set(find_blocks(smali)))
def construct_B(smalis):
    """
    construct B matrix: two APIs are linked when they co-occur in a code block
    Args:
        smalis - dataframe of smali files (one app per row)
    """
    B_dict = {}
    def block2apis(block):
        """
        helper method to find all apis in a block and update dictionary B_dict
        Args:
            block - string of block to find apis and update B_dict
        """
        apis = set(re.findall('invoke-\w+ {.*}, (.*?)\\(', block))
        for api in apis:
            if api not in B_dict.keys():
                B_dict[api] = apis
            else:
                B_dict[api] = B_dict[api].union(apis)
    # flatten every app's blocks into a single series, then accumulate B_dict
    blocks = smalis.apply(smali2blocks, axis = 1).explode() #get a series of blocks
    blocks.dropna().apply(block2apis) #update B_dict
    mlb = MultiLabelBinarizer(sparse_output = True)
    return mlb.fit_transform(pd.Series(B_dict))
#functions for P
def package(api):
    """Return the defining class/package descriptor of an api string.

    The descriptor is either a (possibly array-of) primitive type code
    [ZBSCFIJD] or an L<class>; reference, i.e. everything before the '->'.
    """
    match = re.search(r'(\[*[ZBSCFIJD]|\[*L[\w\/$-]+;)->', api)
    return match[1]
def construct_P(apis):
    """
    construct P matrix: two APIs are linked when they share the same package
    Args:
        apis - a series of set of apis
    """
    api_df = pd.DataFrame({'api':apis}).dropna()
    api_df['package'] = api_df.api.apply(package)
    # map each package to the set of all APIs it contains
    P_dict = api_df.groupby('package')['api'].apply(set).to_dict()
    api_df['same_package_apis'] = api_df['package'].apply(lambda x: P_dict[x])
    P_series = api_df.drop('package',axis=1).set_index('api')['same_package_apis']
    mlb = MultiLabelBinarizer(sparse_output = True)
    return mlb.fit_transform(P_series)
# construct all
def construct_matrices(app_smalis, test_app_smalis):
    """
    construct matrices A, A_test, B, and P
    Args:
        app_smalis - list of list of smali files to construct from
        test_app_smalis - list of list of testing smali fils to construct A_test
    Returns:
        (A, A_test, B, P) — A_test uses the API columns discovered while
        building A so train/test feature spaces line up.
    """
    smalis = pd.DataFrame(app_smalis)
    apis = smalis.apply(smali2apis, axis = 1)
    print('constructing A...')
    A, all_apis = construct_A(apis)
    print('finish A construction')
    test_smalis = pd.DataFrame(test_app_smalis)
    test_apis = test_smalis.apply(smali2apis, axis = 1)
    print('constructing A_test...')
    A_test = construct_A_test(test_apis, all_apis)
    print('finish A_test construction')
    print('constructing B...')
    B = construct_B(smalis)
    print('finish B construction')
    print('constructing P...')
    P = construct_P(all_apis)
    print('finish p construction')
    return A, A_test, B, P
def save_matrix_to_file(mat, path):
    """Persist *mat* to *path* as a compressed sparse-column (.npz) file."""
    scipy.sparse.save_npz(path, scipy.sparse.csc_matrix(mat))
4e0c10dc5060e8478af1e75f05a38c45a0f6745d | Python | zhexia/lncRNA-project-script | /fasta_get_position.py | UTF-8 | 1,755 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 21:44:07 2018
@author: chenw
"""
#参数1:.gff 参数2:.fasta 输出:带有位置信息的fasta
import re
import sys
transcripts = dict()
with open(sys.argv[1], 'r') as gff:
for line in gff:
item = line.strip().split('\t')
if (item[2] == "lnc_RNA") or (item[2] == "lnc_RNA"):
try:
transcript_id = re.findall('Genbank:(\S+?)\;', item[8])[0]
chrome = item[0]
start = item[3]
end = item[4]
strand = item[6]
if transcript_id in transcripts:
transcripts[transcript_id].append(chrome)
transcripts[transcript_id].append(strand)
transcripts[transcript_id].append(start)
transcripts[transcript_id].append(end)
else:
transcripts[transcript_id] = list()
transcripts[transcript_id].append(chrome)
transcripts[transcript_id].append(strand)
transcripts[transcript_id].append(start)
transcripts[transcript_id].append(end)
except:
pass
file_out = open(sys.argv[3], 'w')
with open(sys.argv[2], 'r') as fasta:
for line in fasta:
if line[0] == '>':
item = line.strip().split()
genename = item[0][1:]
info = transcripts[genename]
line = '>'+ genename + ' ' + ' '.join(info) + ' ' + item[-1] + '\n'
file_out.write(line)
gff.close()
fasta.close()
file_out.close() | true |
2a1541f658ab201f6a2c5088fdc84ab83867fc86 | Python | vqpv/stepik-course-58852 | /13 Функции/13.6 Функции с возвратом значения. Часть 3/2.py | UTF-8 | 274 | 3.703125 | 4 | [] | no_license | import math
# объявление функции
def get_circle(radius):
return 2 * math.pi * radius, math.pi * radius ** 2
# считываем данные
r = float(input())
# вызываем функцию
length, square = get_circle(r)
print(length, square)
| true |
bc9eb274b10274a5f4cdfbef298589033dff179a | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_200/4691.py | UTF-8 | 315 | 3.875 | 4 | [] | no_license |
def is_tidy(n):
n_list = list(str(n))
for i in range(len(n_list) - 1):
if int(n_list[i]) > int(n_list[i+1]):
return False
return True
T = int(input())
for t in range(1, T+1):
n = int(input())
while True:
if is_tidy(n):
print("Case #{}: {}".format(t, n))
break
else:
n -= 1 | true |
d9f8c6122d8a9b450d33df4a91773f4c6a8ecdd0 | Python | ssongkim2/algorithm | /11315_오목판정/sol1.py | UTF-8 | 1,461 | 3.0625 | 3 | [] | no_license | import sys
sys.stdin = open('sample_input.txt')
def solution(omok):
for col in range(F):
for row in range(F):
#가로판정
if omok[col][row] and omok[col][row+1] and omok[col][row+2] and omok[col][row+3] and omok[col][row+4]:
return 'YES'
#대각선 판정(왼>>오)
if omok[col][row] and omok[col+1][row+1] and omok[col+2][row+2] and omok[col+3][row+3] and omok[col+4][row+4]:
return 'YES'
if omok[col][row+4] and omok[col+1][row+3] and omok[col+2][row+2] and omok[col+3][row+1] and omok[col+4][row]:
return 'YES'
if omok[col][row] and omok[col+1][row] and omok[col+2][row] and omok[col+3][row] and omok[col+4][row]:
return 'YES'
return 'NO'
#이거 함수로 짜길 잘했다... for문 두개 나와야 하니까...
def mixit(dol):
pan = [[0]*(F+4) for _ in range(F)]
pan.append([0] * (F + 4))
pan.append([0] * (F + 4))
pan.append([0] * (F + 4))
pan.append([0] * (F + 4))
for col in range(F):
for row in range(F):
if dol[col][row] == '.':
pan[col][row] = 0
if dol[col][row] == 'o':
pan[col][row] = 1
return pan
T = int(input())
for tc in range(1,T+1):
F = int(input())
dol = []
for i in range(F):
dol.append(input())
result = mixit(dol)
print('#{} {}'.format(tc,solution(result)))
| true |
45c45df68c9996e7a9bdbe333ffb342d353a073c | Python | DeveloperJoseph/PYTHON---DE---0---10000 | /Modulo 1/ejercicio11.py | UTF-8 | 5,513 | 4.40625 | 4 | [] | no_license | ########################################
# PYTHON DICTIONARIES #
# - DEVELOPER JOSEPH - #
########################################
#Definition:
#A dictionary is unordened, changeable and indexed.
#In Python dictionaries are written with curly brackets,
#and they have keys and values.
#Example:
#Create and print a dictionary
print("\n#> Loading course of Create and print a dictionary in Python....")
thisdict = {"brand":"Ford","Model":"Mustag","year":1964}
print("> Dictionary : "+str(thisdict))
#Accesing Items:
#You can access the items of a dictonary by referring to
#its key name, inside square brackets.
#Example: "Get the value of the 'model' key":
print("\n#> Loading course of Get the Value from a dictionary in Python....")
x = thisdict["year"]
print('> Get value -> '+str(x))
# OR
x = thisdict.get("brand")
print('> Get value two -> '+str(x))
#Change values:
#You can change the value of a specific item by referring
#to its key name.
#Example: "Change the values of the 'dictionary created' ":
print("\n#> Loading course of Change the values of the dictionary in Python....")
thisdict["brand"]='Suzuki'
thisdict["Model"]="S-10"
thisdict['year'] = 2017
print("> New dictionary: "+str(thisdict))
#Loop Through a Dictonary:
#You can loop through a dictionary by using a for loop:
#When looping through a dictionary, the return values are the
#key of the dictionary, but there are method to return the
#values as well:
#Example: "Print all key names in the dictionary, one by one":
print("\n#> x")
for i in thisdict:
for j in thisdict:
print("Key values -> "+str(i)+": "+str(thisdict[j]))
break
# OR
# JOSEPH TRANSLATE #
print("\n#> Loading Joseph Translate, please wait...")
languages = {"Spanish":"Hola","English":"Hello","Russian":"привет","Chinese":"你好"}
#ADDING ITEMS:
#Adding an item to the dictionary done by using a new index key and assigning a
#values to it.
languages["Arab"]="مرحبا"
languages["Croatia"]="Bok"
languages["Hindi"]="नमस्ते"
print("# Download external files for new languages translated...")
#Check if Key Exists:
#To determine if a specified key is present in a dictionary use the
#in keyword.
if "Spanish" in languages:
print("#> State Language Spanish: OK")
if "English" in languages:
print("#> State Language English: OK")
if "Russian" in languages:
print("#> State Language Russian: OK")
if "Chinese" in languages:
print("#> State Language Chinese: OK")
if "Arab" in languages:
print("#> State Language Arab: OK")
if "Croatia" in languages:
print("#> State Language Croatia: OK")
if "Hindi" in languages:
print("#> State Language Hindi: OK")
print("> Numbers of languages translated: "+str(len(languages)))
#Loop Through a Dictonary more method languages.item():
for x, y in languages.items():
print("Language: "+str(x)+"-> "+str(y))
print("\n>#Loading more languages translated....")
#Adding an item to the dictionary is done by using a
#new index key and assigning a value to it:
languages["Italy"]="Ciao"
languages["Vietman"]="Xin chào"
print("# Download external files for new languages translated...")
#Loop Through a Dictonary more method languages.item():
for z, w in languages.items():
print("Language: "+str(z)+"= "+str(w))
print("> Languages translated: "+str(len(languages)))
#Removing Items:
#There are several methods to remove items from a dictionary:
#Example: "The pop() method removes the item with the specified
#key name."
#> Removing one Item from my languages dictionary
print("\n## Removing one Item method pop()...")
print("Item delete: "+str(languages.pop("Croatia")))
print("## Removing las Item method popitem()...")
print("#> Random Item removing is: "+str(languages.popitem()))
print("## Loading New List...")
print(">+ List of languages: "+str(languages))
# The clear() keyword empties the dictionary:
languages.clear()
#Using variable languages as constructor
languages = dict(Animal="Perro",Name="Firulay's",Age=3)
print("> New Constructor dict is: "+str(languages))
#The copy() method returns a copy of the specified dictionary.
#Copy the constructor languages in new variable languages2
print("\n#> Copy the constructor languages in new variable languages2...")
languages2 = languages.copy()
print("#> Add new item Estado in languages2....")
languages2["State"]=None
print("> Output new dictionary languages2:"+str(languages2))#print console contructor languages2
print("#> Active state of dictionary languages2...")
languages2["State"]=1
print("> Output new dictionary languages2:"+str(languages2))#print console contructor languages2
#Definition and Usage
#The fromkeys() method returns a dictionary with the specified keys and values.
print("\n>#Loading The fromkeys() method ..... ")
#create 3 variables
k = ("Animal1","Aminal2","Animal3")
new_k_dictionary = dict.fromkeys(k)
print("> New constructor 'k' "+str(new_k_dictionary))
print("## Update values from new_k_dictionary....")
new_k_dictionary["Animal1"]=3
new_k_dictionary["Aminal2"]=4
new_k_dictionary["Animal3"]=5
print("## Loading cycle for in new_k_dictionary......")
for a,b in new_k_dictionary.items():
print("> The "+str(a)+" has "+str(b)+" years old.")
print("\n> Thank you for attention..!!")
| true |
2f05be88a245cacf4ad8ef2a705d0807c0206c53 | Python | BlakeBosin/google-fit-to-influxdb | /import.py | UTF-8 | 5,960 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#MIT License
#Copyright (c) 2021 Florian Völker
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
###############################################################################
#influxdb connection inspired by Noah Crowley https://www.influxdata.com/blog/getting-started-python-influxdb/
#!!!!!!! This script assumes a modified HomeAssistant schema: https://github.com/home-assistant/core/issues/34536#issuecomment-641506373
###############################################################################
from influxdb import InfluxDBClient
import csv
###############################################################################
# Configuration
dryrun = True #To test the csv without writing to influxdb
verbose = True #Output each datapoint with date, weight and bmi
calculate_bmi = True #Don't forget to edit the height!
height = 1.49 #[m] Necessary to calc your bmi
csv_file = "all.csv"
entity_id = "my_weight" #HomeAssistant entity_id for the scale
friendly_name_str = "My Weight"
influxdb_host = "localhost"
influxdb_port = 8086 #default 8086
influxdb_db = "home_assistant"
###############################################################################
# Since the csv with all days doesn't include a timestamp we have to create our own
default_time_influx = "T12:00:00.000000Z"
default_time_ha = " 12:00:00.000000"
default_timestamp = "120000.000"
###############################################################################
datapoint_counter = 0 #Just for a statistic output at the end.
###############################################################################
client = InfluxDBClient(host=influxdb_host, port=influxdb_port)
#If there is a password protection for the database please use the following definition:
#client = InfluxDBClient(host=influxdb_host, port=influxdb_port, username='myuser', password='mypass' ssl=True, verify_ssl=True)
client.switch_database(influxdb_db)
def bmi_calc(weight, height):
bmi = weight / (height **2)
bmi = round(bmi, 2)
return bmi
def datetime_create_influx(date):
datetime = date + default_time_influx
return datetime
def datetime_create_ha(date):
datetime = date + default_time_ha
return datetime
def datetime_create_timestamp(date):
date = date.replace('-', '')
datetime = date + default_timestamp
return datetime
###############################################################################
# Start to parse the csv-file
with open(csv_file, newline='') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(filereader) #skip the first csv-line which includes the header
for row in filereader:
test_weight = row[1]
if test_weight != '': #Skip the dataset if there is no mean weight available
datapoint_counter += 1
weight = round(float(row[1]),2)
date = row[0]
if calculate_bmi:
bmi = bmi_calc(weight, height)
else:
bmi = None
influx_datetime = datetime_create_influx(date)
ha_timestamp_str = datetime_create_ha(date)
ha_timestamp = float(datetime_create_timestamp(date))
datapoint = [
{
"measurement": "sensor",
"tags":{
"unit_of_measurement": "kg",
"domain": "sensor",
"entity_id": entity_id,
"external_source_import": "google-fit"
},
"fields":{
"value": weight,
"weight": weight,
"bmi": bmi,
"timestamp": ha_timestamp,
"timestamp_str": ha_timestamp_str,
"weight_unit_str": "kg",
"friendly_name_str": friendly_name_str,
"icon_str": 'mdi:scale-bathroom',
"visceral_fat": None,
"water": None,
"lean_body_mass": None,
"metabolic_age": None,
"muscle_mass": None,
"on": None,
"protein": None,
"body_fat": None,
"body_type_str": None,
"bone_mass": None,
"device_class_str": None,
"basal_metabolism": None
},
"time": influx_datetime,
}
]
if verbose:
print(date)
print(weight)
print(bmi)
#print(ha_timestamp)
#print(ha_timestamp_str)
#print(influx_datetime)
print("----------")
if not dryrun:
client.write_points(datapoint)
print("Imported datasets: " + str(datapoint_counter))
client.close()
| true |
aa348249319de1e305621fc73386f20d1c5aa70b | Python | SamarpanCoder2002/Programming-Language-Specific-Practice-Questions | /Python/HackerRank Python Problems/Basic Data Type/List_Comphrehension.py | UTF-8 | 380 | 3.15625 | 3 | [
"MIT"
] | permissive | from itertools import permutations
if __name__ == '__main__':
x = int(input())
y = int(input())
z = int(input())
n = int(input())
take_it= []
keep=permutations((x,y,z))
count=0
for i in list(keep):
for j in i:
count+=j
if count is n:
take_it.append(i)
count=0
print(take_it)
| true |
6f2054c618f163865baa4752b70ee81cebe10eeb | Python | kalpak92/Hello_ML | /11-785 Introduction to Deep Learning CMU/HelperRepos/Introduction-to-deep-learning/Homework/hw2/hw2p1_bonus/mytorch/pool.py | UTF-8 | 5,657 | 2.78125 | 3 | [] | no_license | import numpy as np
class MaxPoolLayer():
def __init__(self, kernel, stride):
self.kernel = kernel
self.stride = stride
self.x = None
self.pidx = None
def __call__(self, x):
return self.forward(x)
def forward(self, x):
"""
Argument:
x (np.array): (batch_size, in_channel, input_width, input_height)
Return:
out (np.array): (batch_size, out_channel, output_width, output_height)
"""
self.x = x
batch_size, in_channel, input_width, input_height = x.shape
output_width = int(np.floor((input_width - self.kernel)/self.stride) + 1)
output_height = int(np.floor((input_height - self.kernel)/self.stride) + 1)
out_channel = in_channel
output = np.zeros((batch_size, out_channel, output_width, output_height))
self.pidx = np.zeros((batch_size, in_channel, output_width, output_height), dtype=np.int64)
for b in range(batch_size):
for i in range(output_height):
for j in range(output_width):
# segment: (in_channel, kernel, kernel)
h_start = i*self.stride
h_end = i*self.stride+self.kernel
w_start = j*self.stride
w_end = j*self.stride+self.kernel
segment = x[b, :, h_start:h_end, w_start:w_end]
maxele = np.amax(segment, axis=(1, 2)) # (out_channel, )
output[b, :, i, j] = maxele
flatten_seg = segment.reshape(in_channel, -1) # (in_channel, kernel*kernel)
max_idx = flatten_seg.argmax(1)
self.pidx[b, :, i, j] = max_idx
return output
# raise NotImplementedError
def backward(self, delta):
"""
Argument:
delta (np.array): (batch_size, out_channel, output_width, output_height)
Return:
dx (np.array): (batch_size, in_channel, input_width, input_height)
"""
# import pdb; pdb.set_trace()
dx = np.zeros_like(self.x)
batch_size, in_channel, output_width, output_height = delta.shape
for b in range(batch_size):
for i in range(output_height):
for j in range(output_width):
h_start = i*self.stride
h_end = i*self.stride+self.kernel
w_start = j*self.stride
w_end = j*self.stride+self.kernel
cur_dz = np.tile(delta[b, :, i, j], (self.kernel*self.kernel, 1)).T
max_idx = self.pidx[b, :, i, j]
mask = np.arange(self.kernel * self.kernel).reshape(1, -1) == max_idx.reshape(-1, 1)
cur_pidx = np.zeros((in_channel, self.kernel*self.kernel), dtype=np.int64)
cur_pidx[np.where(mask == True)] = 1
cur_dx = (cur_pidx*cur_dz).reshape(in_channel, self.kernel, self.kernel)
cur_dx[np.where(dx[b, :, h_start:h_end, w_start:w_end] != 0)] = 0
dx[b, :, h_start:h_end, w_start:w_end] += cur_dx
return dx
class MeanPoolLayer():
def __init__(self, kernel, stride):
self.kernel = kernel
self.stride = stride
self.x = None
def __call__(self, x):
return self.forward(x)
def forward(self, x):
"""
Argument:
x (np.array): (batch_size, in_channel, input_width, input_height)
Return:
out (np.array): (batch_size, out_channel, output_width, output_height)
"""
self.x = x
batch_size, in_channel, input_width, input_height = x.shape
output_width = int(np.floor((input_width - self.kernel)/self.stride) + 1)
output_height = int(np.floor((input_height - self.kernel)/self.stride) + 1)
out_channel = in_channel
output = np.zeros((batch_size, out_channel, output_width, output_height))
for b in range(batch_size):
for i in range(output_height):
for j in range(output_width):
# segment: (in_channel, kernel, kernel)
h_start = i*self.stride
h_end = i*self.stride+self.kernel
w_start = j*self.stride
w_end = j*self.stride+self.kernel
segment = x[b, :, h_start:h_end, w_start:w_end]
mean = np.mean(segment, axis=(1, 2)) # (out_channel, )
output[b, :, i, j] = mean
return output
def backward(self, delta):
"""
Argument:
delta (np.array): (batch_size, out_channel, output_width, output_height)
Return:
dx (np.array): (batch_size, in_channel, input_width, input_height)
"""
dx = np.zeros_like(self.x)
batch_size, in_channel, output_width, output_height = delta.shape
for b in range(batch_size):
for i in range(output_height):
for j in range(output_width):
h_start = i*self.stride
h_end = i*self.stride+self.kernel
w_start = j*self.stride
w_end = j*self.stride+self.kernel
cur_dz = np.tile(delta[b, :, i, j], (self.kernel*self.kernel, 1)).T
cur_pidx = np.ones((in_channel, self.kernel*self.kernel))/(self.kernel*self.kernel)
cur_dx = (cur_pidx*cur_dz).reshape(in_channel, self.kernel, self.kernel)
dx[b, :, h_start:h_end, w_start:w_end] += cur_dx
return dx
| true |
11bc628c26df43f8cb4cfbf3b7c4b2e27d378449 | Python | TearsWillFall/AnalisisSecuencias | /src/plots/multi_pie.py | UTF-8 | 3,035 | 2.6875 | 3 | [] | no_license | import pygal
from pygal.style import Style
# Creates an entry inside a collection.
def create_entry(entry, link_body, color):
return {
"value": entry['count'],
"label": entry.description,
"xlink": {"href": f"{link_body}{entry.accession}"},
"color": color
}
# Creates a list of entries inside a collection.
def create_list(source, min_support, link_body, color):
result = []
for index, entry in source.iterrows():
if (entry['count'] > min_support):
result.append(create_entry(entry, link_body, color))
return result
# Plots the multi pie chart and stats.
def plot(go_list, kegg_list, min_support, min_identity, name):
custom_style = Style(
opacity='0.8',
opacity_hover='0.5',
title_font_size=36,
tooltip_font_size=10,
inner_radius=0.75,
plot_background="rgba(249, 249, 249, 1)"
)
multi_pie = pygal.Pie(height=800, tooltip_border_radius=1, style=custom_style)
go = create_list(go_list, min_support,
"https://www.ebi.ac.uk/QuickGO/term/",
"rgba(255, 45, 20, .6)")
kegg = create_list(kegg_list, min_support,
"https://www.genome.jp/dbget-bin/www_bget?",
"rgba(68, 108, 179, .6)")
multi_pie.add('GO', go)
multi_pie.add('KEGG', kegg)
plot_file_name = f"{name}-out.svg"
multi_pie.render_to_file(plot_file_name)
html_file = open(f"{name}.html", "w")
html_file.write(f"\
<!doctype html>\
<html>\
<head>\
<meta charset=\"utf-8\">\
<title>{name}</title>\
<style>\
body {{background-color: #f9f9f9; font-family: Helvetica, Sans-Serif;}}\
a {{color: blue; text-decoration: none;}}\
</style>\
</head>\
\
<body>\
<h1 style=\"text-align: center;\">Functional annotations of {name}</h1>\
<div style=\"display: flex;\">\
<object type=\"image/svg+xml\"data=\"{plot_file_name}\" height=\"800\"></object>\
<div>\
<div>\
<h4>Minimum identity score: {min_identity}</h4>\
<h4>Minimum support score: {min_support}</h4>\
<div style=\"display: flex;\">\
<h2>GO:</h2>\
<ul>")
for go_item in go:
html_file.write(f"<li><strong>{go_item['value']}x</strong>\
<a target=\"_blank\" href=\"{go_item['xlink']['href']}\">{go_item['label']}</a></li>")
html_file.write(f"\
</ul>\
<h2>KEGG:</h2>\
<ul>")
for kegg_item in kegg:
html_file.write(f"<li><strong>{kegg_item['value']}x</strong>\
<a target=\"_blank\" href=\"{kegg_item['xlink']['href']}\">{kegg_item['label']}</a></li>")
html_file.write(f"\
</ul>\
</div>\
</div>\
</div>\
</div>\
</body>\
</html>")
| true |
d7cd55706dbbca9487c48c2e6ed51ab160e54331 | Python | leenakh/kielioppikone | /questions.py | UTF-8 | 2,128 | 2.578125 | 3 | [] | no_license | from db import db
def get_questions(course_id):
sql = "select questions.id from questions \
where questions.course_id = :course_id"
result = db.session.execute(sql, {
"course_id":course_id})
return result.fetchall()
def get_course_questions(course_id):
sql = "select count(answers.id) as answers, \
questions.id, questions.inflection, questions.course_id, \
words.lemma from questions \
join words on questions.word_id = words.id \
left join answers on questions.id = answers.question_id \
group by questions.id, questions.inflection, questions.course_id, words.lemma \
having questions.course_id = :course_id \
order by words.lemma"
result = db.session.execute(sql, {
"course_id":course_id})
return result.fetchall()
def get_question(question_id):
sql = "select definitions.definition, words.lemma, \
questions.inflection, questions.course_id, questions.id \
from questions \
inner join definitions on questions.definition_id = definitions.id \
inner join words on words.id = questions.word_id \
where questions.id = :question_id"
result = db.session.execute(sql, {
"question_id":question_id})
return result.fetchone()
def add_question(course_id, lemma_id, definition_id, inflection):
try:
sql = "insert into questions (course_id, word_id, definition_id, inflection) \
values (:course_id, :lemma_id, :definition_id, :inflection)"
db.session.execute(sql, {
"course_id":course_id,
"lemma_id":lemma_id,
"definition_id":definition_id,
"inflection":inflection})
db.session.commit()
except:
return False
return True
def remove_question(question_id):
try:
sql = "delete from questions \
where questions.id = :question_id"
db.session.execute(sql, {
"question_id":question_id})
db.session.commit()
except:
return False
return True
| true |
c99013ae4f49964a23949352161b8827cb556420 | Python | Aasthaengg/IBMdataset | /Python_codes/p02821/s322914383.py | UTF-8 | 716 | 2.59375 | 3 | [] | no_license | N,M=map(int,input().split())
A=list(map(int,input().split()))
A.sort()
def condition(num):
count=0
s=N-1
t=0
while N-1>=t and s>=0:
if num>A[s]+A[t]:
t+=1
else:
count+=N-t
s-=1
return count>=M
subans=0
start=1
end=2*A[N-1]
while end-start>1:
test=(end+start)//2
if condition(test):
start=test
else:
end=test
if condition(end):
subans=end
else:
subans=start
data=[0]*N
count=0
s=N-1
t=0
while N-1>=t and s>=0:
if subans>A[s]+A[t]:
t+=1
else:
count+=N-t
data[s]=2*(N-t)
s-=1
ans=sum(data[i]*A[i] for i in range(0,N))
if count>M:
ans-=(count-M)*subans
print(ans)
| true |
1ecdfb29a694c2e2a405bc30a987587d0c7aa028 | Python | smartsnake/PasswordGenerator | /tests/test_Generator.py | UTF-8 | 604 | 3.046875 | 3 | [
"MIT"
] | permissive | from util.Generator import Generator
import pytest
password_length = 34
invalid_password_length = -10
gen = Generator()
#Testing password generated is the correct length
def test_generate_password():
password = gen.generate_password(password_length)
assert len(password) == password_length
#Testing invalid password length
def test_invalid_len_generate_password():
with pytest.raises(SystemExit):
gen.generate_password(invalid_password_length)
#Random char come from all_charactors list
def test_random_char():
assert gen.random_char(gen.all_charactors) in gen.all_charactors | true |
81af2717f390583b146dfb60e2223721e864f396 | Python | MarlieI/Python-exercises | /fortune_cookie.py | UTF-8 | 774 | 4.0625 | 4 | [] | no_license | # This program pairs a random number between 1 and 5 with a fortune cookie.
# The fortune cookie that matches with the chosen number gets displayed to the player.
# Challenge 1, chapter 3 python for the absolute beginner
import random
messages = ["Pay attention to your family. Don't take them for granted.",
"Your home will be filled with peace and harmony.",
"Fall for someone who's not your type.",
"Somebody appreciates the unique you.",
"If you haven't got it, just fake it!"
]
print("Welcome to the 'fortune cookie game'.")
input("\nPress enter to get your fortune cookie of the day.\n")
print(messages[random.randint(0, len(messages)-1)])
input("\nPress enter to exit the program.")
| true |
0e91a84a4da91d09ea9bee030ac749e99b7f3d0a | Python | Imbruced/sentinel_models | /gis/raster_components.py | UTF-8 | 1,879 | 2.6875 | 3 | [] | no_license | import attr
from validators.validators import ispositive
import os
from meta.meta import ConfigMeta
import typing
@attr.s
class Pixel(metaclass=ConfigMeta):
x = attr.ib(default=1.0, validator=[attr.validators.instance_of(float), ispositive])
y = attr.ib(default=1.0, validator=[attr.validators.instance_of(float), ispositive])
unit = attr.ib(default='m', validator=[attr.validators.instance_of(str)])
@classmethod
def from_text(cls, text):
x, y, unit = text.split(" ")
return cls(int(x), int(y), unit)
@attr.s
class ReferencedArray:
array = attr.ib()
extent = attr.ib()
crs = attr.ib()
shape = attr.ib()
is_generator = attr.ib(default=False)
band_number = attr.ib(default=1)
@attr.s
class Path:
path_string: str = attr.ib()
def __attrs_post_init__(self):
pass
def is_file(self):
return os.path.isfile(self.path_string)
def __exists(self, path):
pass
def create(cls, path):
if not os.path.exists(os.path.split(path)[0]):
os.makedirs(os.path.split(path)[0], exist_ok=True)
@attr.s
class ArrayShape:
shape = attr.ib()
def __attrs_post_init__(self):
self.x_shape = self.shape[0]
self.y_shape = self.shape[1]
def __ne__(self, other):
return self.x_shape != other.x_shape or self.y_shape != other.y_shape
def create_chunk(iterable: typing.Iterable, chunk_size: int):
for chunk in range(0, len(iterable), chunk_size):
yield iterable[chunk: chunk + chunk_size]
def create_two_dim_chunk(iterable: typing.Iterable, chunk_size: typing.List[int]):
for el in create_chunk(iterable, chunk_size[0]):
yield from (tel.transpose(1, 0, 2) for tel in create_chunk(el.transpose([1, 0, 2]), chunk_size[1])
if tel.shape[1] == chunk_size[0] and tel.shape[0] == chunk_size[1])
| true |
ffe553b1b0d1aac3907206e845f6fcae96509b33 | Python | piochelepiotr/crackingTheCode | /chp8/ex2.py | UTF-8 | 748 | 2.984375 | 3 | [] | no_license | import collections
def robot_path(grid):
# paths hold the path to go to the init point
n_row = len(grid)
n_columns = len(grid[0])
paths = [[None for c in range(n_columns)] for r in range(n_row)]
queue = collections.deque()
queue.append((0, 0, "START", []))
while len(queue) > 0 and paths[n_row -1][n_columns-1] is None:
r,c,d,p = queue.popleft()
if r < 0 or c < 0 or r >= n_row or c >= n_columns or grid[r][c] or paths[r][c]:
continue
paths[r][c] = p + [d]
p = paths[r][c]
queue.append((r-1,c, "UP", p))
queue.append((r+1,c, "DOWN", p))
queue.append((r,c-1, "LEFT", p))
queue.append((r,c+1, "RIGHT", p))
return paths[n_row-1][n_columns-1]
| true |
d4128591714d3ad4b5ef284fcc357a407b8bc5ca | Python | atherashraf/QueryOptimization | /app/config/config.py | UTF-8 | 610 | 2.90625 | 3 | [] | no_license | import json
import os
from pathlib import Path
class ConfigUtils:
def __init__(self):
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
config_root = Path(__file__).resolve().parent
with open(os.path.join(config_root, 'app_config.json')) as secrets_file:
self.secrets = json.load(secrets_file)
def get_data(self, setting_key):
"""Get s≤ecret setting or fail with ImproperlyConfigured"""
try:
return self.secrets[setting_key]
except KeyError:
print("Set the {} setting".format(setting_key))
| true |
20a3438ab43900ff3b5b954e6636ca39eab30931 | Python | STEM-Club-WVC/finance-calculator | /Basic_Cal.py | UTF-8 | 861 | 3.140625 | 3 | [] | no_license | from tkinter import *
class Box:
def __init__(self,column,row,text,color):
self.text_lable = Label(root, text = text, bg = color,padx = 20,width = 20).grid(row = row,column = column)
self.row = row
self.column = column
self.value = Entry(root,width = 30).grid(row = row+1,column = column)
root = Tk()
root.geometry('900x200')
beg_val = Box(0,0,'Beginning Value','pink')
int_rat = Box(1,0,'Intrest Rate','green')
sav_rat = Box(2,0,'Savings Rate(Monthly','lightblue')
dur_yer = Box(3,0,'Duration (Years)','purple')
por_val = Box(4,0,'Portfollio Value','yellow')
por_inc = Box(0,3,'Portfolio Income Production','lightblue')
cop_rat = Box(1,3,'Coupon Rate','yellow')
tax_rat = Box(2,3,'Tax Rate','green')
inf_rat = Box(3,3,'Inflation','orange')
#print(beg_val.value.get())
root.mainloop()
| true |
ad6020c54a40b601cf0737944174e839da570f76 | Python | mourakf/MVCAD | /Python_II/arquivos.py | UTF-8 | 501 | 3.84375 | 4 | [] | no_license | # arquivo = open('arquivo.txt', 'r') # abrir um arquivo
# # arquivo.writelines("Hello, wie gehts dir?") #escrever no arquivo
# #
# # print(arquivo) # cria o arquivo
arquivo = open('arquivo.txt', 'r')
# for line in arquivo: #loop para ler todas as linhas do arquivo
# print(line)
#print(arquivo.read()) #printa a leitura do conteúdo do arquivo
#arquivo.close() #fecha o arquivo
with open('arquivo.txt', 'a') as file:
file.writelines("\n hallo ich bin gut")
print(arquivo.read()) | true |
b84097b71611b9711976a5a77397232aef092c6e | Python | MNikov/Python-Advanced-September-2020 | /Old/Python-Advanced-Preliminary-Homeworks/Lists As Stacks And Queues/02E. Maximum and Minimum Element.py | UTF-8 | 590 | 3.390625 | 3 | [
"MIT"
] | permissive | def solution(number):
stack = []
for _ in range(number):
line = input().split()
command_type = int(line[0])
if command_type == 1:
num = int(line[1])
stack.append(num)
elif command_type == 2:
if len(stack) > 0:
stack.pop()
elif command_type == 3:
if len(stack) > 0:
print(max(stack))
elif command_type == 4:
if len(stack) > 0:
print(min(stack))
print(', '.join(reversed([str(n) for n in stack])))
solution(int(input()))
| true |
4b03ee0ff5830dfd0940a5d9cd8f1aea300b3c9e | Python | yangsg/linux_training_notes | /python3/basic02_syntax/defining-Functions/arbitrary-argument-lists.py | UTF-8 | 482 | 3.5 | 4 | [] | no_license | #// https://docs.python.org/3.6/tutorial/controlflow.html#unpacking-argument-lists
#// demo01
def sep_str_join(separator, *args):
str = separator.join(args)
print(str)
sep_str_join('-', '1', '2', '3')
sep_str_join('-', *['1', '2', '3'])
sep_str_join('-', *('1', '2', '3'))
#//demo02
def concat(*args, sep='/'):
print(sep.join(args))
concat('1', '2', '3', sep='/')
concat('1', '2', '3', sep='-')
concat(*('1', '2', '3'), sep='-')
concat(*['1', '2', '3'], sep='-')
| true |
917e59ab342292fe63eac3cca140766537857a34 | Python | yuxinvalo/radar_find_way_to_home | /wavegraph.py | UTF-8 | 590 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python3
# Project:
# Author: syx10
# Time 2020/12/30:19:22
import numpy as np
import pyqtgraph as pg
from PyQt5.QtWidgets import QWidget
class WaveGraph(QWidget):
def __init__(self):
super().__init__()
pg.setConfigOptions(antialias=True)
self.resize(600, 1000)
self.pw = pg.PlotWidget(self)
self.pw.resize(400, 1000)
self.data = []
self.curve = self.pw.plot(pen='y')
self.curve.getViewBox().invertY(True)
def handle_data(self, data):
t = np.arange(len(data))
self.curve.setData(data, t)
| true |
94217c81263a4c2507e7792a42f56f453e6d03bd | Python | claws/gestalt | /src/gestalt/datagram/protocols/base.py | UTF-8 | 4,632 | 3.078125 | 3 | [
"MIT"
] | permissive | import asyncio
import binascii
import logging
import os
from typing import Tuple
logger = logging.getLogger(__name__)
class BaseDatagramProtocol(asyncio.DatagramProtocol):
""" Datagram protocol for an endpoint. """
def __init__(
self,
on_message=None,
on_peer_available=None,
on_peer_unavailable=None,
**kwargs,
):
"""
:param on_message: A callback function that will be passed each message
that the protocol receives.
:param on_peer_available: A callback function that will be called when
the protocol is connected with a transport. In this state the protocol
can send and receive messages.
:param on_peer_unavailable: A callback function that will be called when
the protocol has lost the connection with its transport. In this state
the protocol can not send or receive messages.
"""
self._on_message_handler = on_message
self._on_peer_available_handler = on_peer_available
self._on_peer_unavailable_handler = on_peer_unavailable
self._identity = b""
self._remote_address = None # type: Optional[Tuple[str, int]]
self._local_address = None # type: Optional[Tuple[str, int]]
self.transport = None
@property
def identity(self):
""" Return the protocol's unique identifier """
return self._identity
@property
def raddr(self) -> Tuple[str, int]:
""" Return the remote address the protocol is connected with """
return self._remote_address
@property
def laddr(self) -> Tuple[str, int]:
""" Return the local address the protocol is using """
return self._local_address
def connection_made(self, transport):
self.transport = transport
self._identity = binascii.hexlify(os.urandom(5))
self._local_address = transport.get_extra_info("sockname")
self._remote_address = transport.get_extra_info("peername")
logger.debug(f"UDP protocol connection made. id={self._identity}")
try:
if self._on_peer_available_handler:
self._on_peer_available_handler(self, self._identity)
except Exception:
logger.exception("Error in on_peer_available callback method")
def connection_lost(self, exc):
"""
Called by the event loop when the protocol is disconnected from a transport.
"""
logger.debug(f"UDP protocol connection lost. id={self._identity}")
try:
if self._on_peer_unavailable_handler:
self._on_peer_unavailable_handler(self, self._identity)
except Exception:
logger.exception("Error in on_peer_unavailable callback method")
self.transport = None
self._identity = None
self._local_address = None
def close(self):
""" Close this connection """
logger.debug(f"Closing connection. id={self._identity}")
if self.transport:
self.transport.close()
def send(self, data, addr=None, **kwargs):
"""
Send a message to a remote UDP endpoint by writing it to the transport.
:param data: a bytes object containing the message payload.
:param addr: The address of the remote endpoint as a (host, port)
tuple. If remote_addr was specified when the endpoint was created then
the addr is optional.
"""
if not isinstance(data, bytes):
logger.error(f"data must be bytes - can't send message. data={type(data)}")
return
self.transport.sendto(data, addr=addr)
def datagram_received(self, data, addr):
"""
Process a datagram received from the transport.
When passing a message up to the endpoint, the datagram protocol
passes the senders address as an extra kwarg.
:param data: The datagram payload
:param addr: A (host, port) tuple defining the source address
"""
try:
if self._on_message_handler:
self._on_message_handler(self, self._identity, data, addr=addr)
except Exception:
logger.exception("Error in on_message callback method")
def error_received(self, exc):
"""
In many conditions undeliverable datagrams will be silently dropped.
In some rare conditions the transport can sometimes detect that the
datagram could not be delivered to the recipient.
:param exc: an OSError instance.
"""
logger.error(f"Datagram error: {exc}")
| true |
935388cd0d09902300b5d363bf80f84f36eb04e9 | Python | xiejun/python | /序列/列表.py | UTF-8 | 1,129 | 4.6875 | 5 | [] | no_license | # 列表
# 列表中的数据可以是任意类型的
[100, 'about', True]
# # 存储内容可修改
[0,"abcd"]
# 想要获得列表的长度可以使用 len() 这个东西
fruits = [1,2]
len(fruits)
# # 添加列表元素
a_list=['abc','xyz']
a_list.append('x')
print(a_list)
# # 删除列表元素
a_list.remove('x')
print(a_list)
# 统计元素在列表中的个数,或者说是元素在列表中出现的次数。
numbers = [1, 2, 2, 3, 4, 5, 5, 7]
numbers.count(5)
# 向列表的任意位置插入元素
letters = ['a', 'b']
letters.insert(0, 'c')
# 列表末尾追加另一个列表的所有元素
letters = ['a', 'b']
letters.extend(['c', 'd', 'e'])
# 按索引删除元素
letters.pop(0)
# 也可以不传递索引,这样的话默认删除并返回最后一个元素。
letters.pop()
# 删除一个列表元素也可以使用 Python 中的 del 关键字
del letters[0]
# 直接删除元素
letters.remove('b')
# 清空所有元素
letters.clear()
# 通过赋值修改列表元素
letters[2] = 'd'
# 反转整个列表
letters.reverse()
# 列表元素排序
numbers.sort()
numbers.sort(reverse=True)
| true |
4f4f94e0df36cf946859ecac3d3ff11d5b4cf4ff | Python | frankzhuzi/ithm_py_hmwrk | /09_Exception/hm_04_TransAnException.py | UTF-8 | 257 | 3.671875 | 4 | [] | no_license | def input_pswd():
pwd = input("Enter your password: ")
if len(pwd) >= 8:
return pwd
print("Error")
ex = Exception("Length not enough")
raise ex
try:
print(input_pswd())
except Exception as result:
print(result)
| true |
7fad5d5ddb0261274552b3ee74cba7d242d8df07 | Python | ecbjaeger/conditioning_scripts | /tests/temp_sensor_test.py | UTF-8 | 522 | 2.75 | 3 | [] | no_license | from time import sleep, strftime, time
import board
import adafruit_pct2075
import os
i2c = board.I2C() # uses board.SCL and board.SDA
pct = adafruit_pct2075.PCT2075(i2c)
temperature_filename = input("Temperature recording name: ")
temperature_pathname = os.path.join("../drive_upload", temperature_filename)
print("Starting temperature recording")
with open(temperature_pathname, "a") as log:
while True:
temp = pct.temperature
log.write("{0},{1}\n".format(strftime("%Y-%m-%d %H:%M:%S"),str(temp)))
sleep(1)
| true |
d92974ad2cc985d3771cfe298795bec4ad4531c3 | Python | ErvinLu/GoldMiner_v2 | /GoldMiner_v5.py | UTF-8 | 13,904 | 2.8125 | 3 | [] | no_license | import random
#from Level_0 import *
#from Level_1_2 import *
#from Level_2_1 import *
from Level_2_ZeroPlus import *
#GLOBAL
size = 0 #MATRIX SIZE
move_x = 0 #PATH X
move_y = 0 #PATH Y
curr_x = 0 #CURRENT X POSITION AGENT
curr_y = 0 #CURRENT Y POSITION AGENT
gold_x = 0
gold_y = 0
start = (0,0)
end = (0,0)
pit_count = 0
pit_loc = []
beac_loc = []
maze = None #REFERENCE MAZE
init_maze_res = None #INITIALIZE MAZE RESULT
direction = 0 #AGENT DIRECTION
move_zero = 0
#SCAN
def scan_level_2(size, pawn_x, pawn_y, maze, direction):
    """Look along a straight ray from the agent and report the first marker.

    direction: 1=North, 2=South, 3=East, 4=West.  Returns 'G', 'P' or 'B'
    for the first gold/pit/beacon tile on the ray, otherwise 'N'.
    NOTE: matching the original behaviour, the South and East rays start on
    the agent's own cell while the North and West rays start one cell away.
    """
    if direction == 1:        # north: rows above, nearest first
        ray = [(row, pawn_y) for row in range(pawn_x - 1, -1, -1)]
    elif direction == 2:      # south: current row down to the bottom edge
        ray = [(row, pawn_y) for row in range(pawn_x, size)]
    elif direction == 3:      # east: current column out to the right edge
        ray = [(pawn_x, col) for col in range(pawn_y, size)]
    elif direction == 4:      # west: columns to the left, nearest first
        ray = [(pawn_x, col) for col in range(pawn_y - 1, -1, -1)]
    else:
        ray = []
    for row, col in ray:
        tile = maze[row][col]
        if tile in ('G', 'P', 'B'):
            return tile
    return 'N'
#END SCAN
#RUSH GOLD
def rush_gold(pawn_x, pawn_y, maze, N_mem, S_mem, E_mem, W_mem):
    """Walk straight toward previously-scanned gold, logging every step
    into the module-global path lists (stor_pawn_x/y/dir)."""
    global stor_pawn_x
    global stor_pawn_y
    global stor_pawn_dir
    # (scan memory, row step, col step, direction code, label), checked N/S/W/E
    headings = (
        (N_mem, -1, 0, 1, "NORTH"),
        (S_mem, 1, 0, 2, "SOUTH"),
        (W_mem, 0, -1, 4, "WEST"),
        (E_mem, 0, 1, 3, "EAST"),
    )
    for memory, row_step, col_step, dir_code, label in headings:
        if memory != 'G':
            continue
        # Step one cell at a time until the agent stands on the gold tile.
        while maze[pawn_x][pawn_y] != 'G':
            print("RUSHING GOLD " + label)
            pawn_x += row_step
            pawn_y += col_step
            stor_pawn_x.append(pawn_x)
            stor_pawn_y.append(pawn_y)
            stor_pawn_dir.append(dir_code)
        break  # only the first matching direction is followed (elif semantics)
#END RUSH GOLD
#RUSH BEACON
def rush_beacon(pawn_x, pawn_y, maze, N_mem, S_mem, E_mem, W_mem):
    """Walk straight toward a previously-scanned beacon, logging every step
    into the module-global path lists (stor_pawn_x/y/dir)."""
    global stor_pawn_x
    global stor_pawn_y
    global stor_pawn_dir
    # (scan memory, row step, col step, direction code, label), checked N/S/W/E
    headings = (
        (N_mem, -1, 0, 1, "NORTH"),
        (S_mem, 1, 0, 2, "SOUTH"),
        (W_mem, 0, -1, 4, "WEST"),
        (E_mem, 0, 1, 3, "EAST"),
    )
    for memory, row_step, col_step, dir_code, label in headings:
        if memory != 'B':
            continue
        # Step one cell at a time until the agent stands on the beacon tile.
        while maze[pawn_x][pawn_y] != 'B':
            print("RUSHING BEACON " + label)
            pawn_x += row_step
            pawn_y += col_step
            stor_pawn_x.append(pawn_x)
            stor_pawn_y.append(pawn_y)
            stor_pawn_dir.append(dir_code)
        break  # only the first matching direction is followed (elif semantics)
#END RUSH BEACON
#INITIALIZE MAZE
def init_maze(size, maze):
    """Interactively place the gold, beacons and pits on *maze*.

    Coordinates are read 1-based from the user and stored 0-based.  The
    finished grid is published through the module-global init_maze_res.
    """
    global init_maze_res
    #maze is the REFERENCE MAZE VALUE
    #maze[0][0] = '0' # STARTING POINT AT 0,0 FACING RIGHT
    #maze[0][0] = '→' #STARTING POINT AT 0,0 FACING RIGHT
    maze[0][0] = '↓' # STARTING POINT AT 0,0 FACING DOWN
    curr_x = 0
    curr_y = 0
    global gold_x
    global gold_y
    global end
    global pit_loc #FORGOT IF NEEDED 7/2/2019
    global beac_loc # FORGOT IF NEEDED 7/3/2019
    #GOLD POSITION
    gold_x = int(input("Gold X Location: "))
    gold_y = int(input("Gold Y Location: "))
    end = (gold_x - 1, gold_y - 1)
    maze[gold_x - 1][gold_y - 1] = 'G' #GOLD PLACED AT LOCATION (-1 FOR COMPENSATION)
    #END GOLD POSITION
    #BEACON POSITION
    beacon_count = int(input("Enter number of beacons: "))
    for i in range(beacon_count):
        beacon_x = int(input("Beacon[" + str(i + 1) + "] X Location: "))
        beacon_y = int(input("Beacon[" + str(i + 1) + "] Y Location: "))
        maze[beacon_x - 1][beacon_y - 1] = 'B'
        where_the_beacon = (beacon_x - 1, beacon_y - 1)
        beac_loc.append(where_the_beacon)
    #END BEACON POSITION
    # PIT POSITION
    pit_count = int(input("Enter number of pits: "))
    for i in range(pit_count):
        pit_x = int(input("Pit[" + str(i + 1) + "] X Location: "))
        pit_y = int(input("Pit[" + str(i + 1) + "] Y Location: "))
        maze[pit_x - 1][pit_y - 1] = 'P'
        where_the_pit = (pit_x - 1, pit_y - 1)
        pit_loc.append(where_the_pit)
    # END PIT POSITION
    init_maze_res = maze
#END INITIALIZE MAZE
#DISPLAY MAZE
def display_maze(size, maze):
    """Print the grid, one tab-separated row per line."""
    for row in maze:
        print("\t".join(str(cell) for cell in row))
# END DISPLAY MAZE
def main():
    """Drive one full Level-2 game: build the board, run the imported
    level_2 explorer, replay its recorded path with direction arrows, then
    repeatedly scan and rush toward beacons/gold until gold is reached."""
    global maze
    global start
    global end
    global size
    global stor_pawn_x
    global stor_pawn_y
    global stor_pawn_dir
    size = int(input("Enter playing field size: "))
    maze = [[0 for x in range(size)] for y in range(size)] #INITIALIZE MAZE
    init_maze(size, maze) #UNCOMMENT TO ASK USER INPUT
    print("INITIAL MAZE")
    display_maze(size, init_maze_res)
    # Hard-coded test scenarios; only PROBLEM 4 is active below.
    # PROBLEM 1
    # init_maze_res[4][0] = 'B'
    # init_maze_res[0][4] = 'B'
    # init_maze_res[0][3] = 'P'
    # init_maze_res[2][2] = 'P'
    # init_maze_res[2][7] = 'P'
    # init_maze_res[5][2] = 'P'
    # END PROBLEM 1
    # PROBLEM 2
    # init_maze_res[22][0] = 'B'
    # init_maze_res[31][1] = 'B'
    # init_maze_res[21][1] = 'P'
    # init_maze_res[21][1] = 'P'
    # init_maze_res[20][2] = 'P'
    # END PROBLEM 2
    # PROBLEM 3
    # init_maze_res[0][26] = 'B'
    # init_maze_res[8][20] = 'B'
    # init_maze_res[8][17] = 'P'
    # init_maze_res[9][17] = 'P'
    # init_maze_res[10][18] = 'P'
    # init_maze_res[10][19] = 'P'
    # init_maze_res[10][20] = 'P'
    # init_maze_res[1][19] = 'P'
    # END PROBLEM 3
    # PROBLEM 4
    init_maze_res[16][21] = 'B'
    init_maze_res[16][23] = 'B'
    init_maze_res[15][19] = 'P'
    init_maze_res[15][20] = 'P'
    init_maze_res[15][21] = 'P'
    init_maze_res[15][23] = 'P'
    init_maze_res[16][18] = 'P'
    init_maze_res[16][24] = 'P'
    init_maze_res[17][19] = 'P'
    init_maze_res[17][20] = 'P'
    init_maze_res[17][21] = 'P'
    init_maze_res[17][23] = 'P'
    # END PROBLEM 4
    # #LEVEL 2
    # last_x/last_y, N/S/E/W_mem, on_beacon and the stor_pawn_* path lists
    # come from the star import of Level_2_ZeroPlus (level_2 populates them).
    global last_x
    global last_y
    global N_mem
    global S_mem
    global E_mem
    global W_mem
    global on_beacon
    rushG = False
    print("*********************************")
    level_2(size, 0, 0, init_maze_res)
    # print(last_x)
    # print(last_y)
    stor_pawn_x.reverse()
    stor_pawn_y.reverse()
    stor_pawn_dir.reverse()
    # print(stor_pawn_x)
    # print(stor_pawn_y)
    # Replay the recorded path, stamping a direction arrow per step.
    for i in range(len(stor_pawn_x)):
        if stor_pawn_dir[i] == 1:
            init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↑'
        if stor_pawn_dir[i] == 2:
            init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↓'
        if stor_pawn_dir[i] == 3:
            init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '→'
        if stor_pawn_dir[i] == 4:
            init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '←'
        print("ITERATION", (i))
        display_maze(size, init_maze_res)
    print("*********************************")
    cont_i = len(stor_pawn_x) - 1
    # Scan all four directions from the agent's final recorded position.
    N_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 1)
    S_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 2)
    E_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 3)
    W_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 4)
    print("~~~WHAT I SEE MAIN~~~")
    print("N: " + N_mem + " | S: " + S_mem + " | E: " + E_mem + " | W: " + W_mem)
    if (N_mem == 'G') or (S_mem == 'G') or (E_mem == 'G') or (W_mem == 'G'):
        rushG = True
        rush_gold(stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, N_mem, S_mem, E_mem, W_mem)
        for i in range(len(stor_pawn_x)):
            if stor_pawn_dir[i] == 1:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↑'
            if stor_pawn_dir[i] == 2:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↓'
            if stor_pawn_dir[i] == 3:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '→'
            if stor_pawn_dir[i] == 4:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '←'
            print("ITERATION", (i))
            display_maze(size, init_maze_res)
            print("*********************************")
    elif (N_mem == 'B') or (S_mem == 'B') or (W_mem == 'B') or (E_mem == 'B'):
        rush_beacon(stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, N_mem, S_mem, E_mem, W_mem)
        #init_maze_res[stor_pawn_x[len(stor_pawn_x) - 1]][stor_pawn_y[len(stor_pawn_y) - 1]] = 'A' #BEACON ACTIVATED
        for i in range(len(stor_pawn_x)):
            if stor_pawn_dir[i] == 1:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↑'
            if stor_pawn_dir[i] == 2:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↓'
            if stor_pawn_dir[i] == 3:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '→'
            if stor_pawn_dir[i] == 4:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '←'
            print("ITERATION", (i))
            display_maze(size, init_maze_res)
            print("*********************************")
    # Keep scanning and rushing until the agent's last recorded cell is gold.
    while init_maze_res[stor_pawn_x[len(stor_pawn_x) - 1]][stor_pawn_y[len(stor_pawn_y) - 1]] != 'G' and not rushG:
        N_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 1)
        S_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 2)
        E_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 3)
        W_mem = scan_level_2(size, stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, 4)
        if (N_mem == 'G') or (S_mem == 'G') or (E_mem == 'G') or (W_mem == 'G'):
            rush_gold(stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, N_mem, S_mem, E_mem, W_mem)
        elif (N_mem == 'B') or (S_mem == 'B') or (W_mem == 'B') or (E_mem == 'B'):
            rush_beacon(stor_pawn_x[len(stor_pawn_x) - 1], stor_pawn_y[len(stor_pawn_y) - 1], init_maze_res, N_mem, S_mem, E_mem, W_mem)
            #init_maze_res[stor_pawn_x[len(stor_pawn_x) - 1]][stor_pawn_y[len(stor_pawn_y) - 1]] = 'A' # BEACON ACTIVATED
        for i in range(len(stor_pawn_x) - 1):
            if stor_pawn_dir[i] == 1:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↑'
            if stor_pawn_dir[i] == 2:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '↓'
            if stor_pawn_dir[i] == 3:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '→'
            if stor_pawn_dir[i] == 4:
                init_maze_res[stor_pawn_x[i]][stor_pawn_y[i]] = '←'
            print("ITERATION", (i))
            display_maze(size, init_maze_res)
            print("*********************************")
if __name__ == '__main__':
    main()
870f74eb450a4f22ad73352442707f9c3a32254b | Python | taihsuanho/KyleOthello | /KyleOthello/TaiAnimation.py | UTF-8 | 2,297 | 3.09375 | 3 | [
"MIT"
] | permissive | import pygame
from pygame.locals import *
import TaiTimer
DO_NOTHING = lambda *args: None
class ImageAnimator:
def __init__(self, img, dura, repeat = False, dimension = None):
# Argument "img" can be a list of images or an image containing a series of images of equal size changing graduately.
if type(img) is list:
self.img_list = img
self.sprite_number = len(self.img_list)
self.sprite_size = self.img_list[0].get_size()
else:
self.sprite_number = dimension[0] * dimension[1]
width, height = img.get_size()
self.sprite_size = w, h = (width // dimension[0], height // dimension[1])
self.img_list = []
for i in range(self.sprite_number):
col, row = i % dimension[0], i // dimension[0]
self.img_list.append(img.subsurface((col * w, row * h, w, h)))
self.dura = dura // self.sprite_number
self.repeat = repeat
self.index = 0
self.timer_id = -1
self.bHideWhenStop = False
self.lastDrawIndex = self.index
def get_size(self):
return self.sprite_size
def _anim_timer_proc(self):
if not self.repeat and self.index == self.sprite_number - 1:
self.Stop()
(event, param) = self.callback
if type(event) is int:
pygame.event.post(pygame.event.Event(event, param))
elif callable(event):
event(*param)
else:
self.index = (self.index + 1) % self.sprite_number
def Play(self, event = DO_NOTHING, param = None):
self.Stop()
if type(event) is int and param is None: param = {}
if callable(event) and param is None: param = ()
self.callback = (event, param)
self.timer_id = TaiTimer.CreateTimer(self.dura, self._anim_timer_proc, repeat = True)
def Stop(self):
if self.IsPlaying():
TaiTimer.KillTimer(self.timer_id)
self.timer_id = -1
self.index = 0
self.lastDrawIndex = self.index
def Pause(self):
if self.IsPlaying():
TaiTimer.KillTimer(self.timer_id)
self.timer_id = -1
def IsPlaying(self):
return self.timer_id >= 0
def GetCurrentImage(self):
return self.img_list[self.index]
def SetHideWhenStop(self, bHide):
self.bHideWhenStop = bHide
def Draw(self, surface, pos):
if self.bHideWhenStop and not self.IsPlaying():
return
surface.blit(self.img_list[self.index], pos)
self.lastDrawIndex = self.index
def NeedRedraw(self):
return self.lastDrawIndex != self.index
| true |
5e39f017bbc86c900a9566bc72c1fdc87fc20f01 | Python | narcis96/decrypting-alpha | /Population.py | UTF-8 | 6,290 | 2.71875 | 3 | [] | no_license | import os, progressbar, threading, random, time, bisect
import numpy as np
import statistics as stats
from thread import ThreadPool
from DNA import DNA
BAR_LENGTH = 5
def worker(data, encoded, wordsDict, bad=0.4, weights=None):
    """Score every DNA in *data* against the encoded text (thread target).

    Fix: the (currently commented-out) threaded path in
    Population.CalcFitness spawns this worker with five arguments
    ``(data, encoded, wordsDict, bad, weights)`` and the serial path calls
    ``dna.CalcFitness(encoded, wordsDict, bad, weights)`` — the old worker
    accepted only three arguments and forwarded only two, so re-enabling
    the threads would crash and scoring would diverge from the serial path.
    The extra parameters are now accepted (with the serial path's default
    ``bad`` threshold) and forwarded.
    """
    for dna in data:
        dna.CalcFitness(encoded, wordsDict, bad, weights)
class Population:
    """Genetic-algorithm population of DNA decryption keys.

    Holds the candidate DNAs, scores them against the encoded text and a
    word dictionary, and breeds new generations by roulette-wheel selection
    with crossover and mutation.
    """
    def __init__(self, threadsCount,data, mutationRate, encoded, wordsDict, hints):
        # Name-mangled attributes (self.__x) keep the GA state private.
        self.__data = data
        self.__matingPool = []
        self.__generation = 0
        self.__bestScore = 0
        self.__mutationRate = mutationRate
        self.__encoded = encoded
        self.__wordsDict = wordsDict
        self.__hints = hints
        self.__threadPool = ThreadPool(threadsCount)
        self.__threadsCount = threadsCount
        self.__consecutiveScores = 0  # how many generations the max score has stalled
        self.__lastScore = -1
        self.__weights = [1 for i in range(len(encoded))]  # per-position fitness weights
    @classmethod
    def Random(cls, threadsCount, count, length, mutationRate, encoded, wordsDict, hints):
        """Alternate constructor: *count* random DNAs of the given length."""
        data = [DNA.Random(length, hints) for i in range(count)]
        return cls(threadsCount, data, mutationRate, encoded, wordsDict, hints)
    @classmethod
    def FromFolder(cls, threadsCount, path, count, length, mutationRate, encoded, wordsDict, hints):
        """Alternate constructor: load saved DNAs from *path*, topping up
        with random ones until *count* samples exist."""
        data = []
        for file in os.listdir(path):
            if file.endswith('.json'):
                data.append(DNA.ReadFromJson(path + file))
        print('Loaded ', len(data), 'samples')
        if len(data) < count:
            count = count - len(data)
            print ('Adding ', count, 'random samples...')
            data = data + [DNA.Random(length, hints) for i in range(count)]
        return cls(threadsCount, data, mutationRate, encoded, wordsDict, hints)
    def Print(self, printAll, saveBest):
        """Report generation stats; optionally dump every DNA (printAll)
        and snapshot the best generation so far (saveBest)."""
        average = stats.mean(self.__scores)
        maxScore = max(self.__scores)
        self.__generation = self.__generation + 1
        os.makedirs('./generation/best/', exist_ok = True)
        if printAll:
            saveFolder = './generation/' + str(self.__generation)
            os.makedirs(saveFolder, exist_ok = True)
            scoresFile = open(saveFolder + '/scores.txt', 'w')
            for i,dna in enumerate(self.__data):
                print(i, dna.GetScore(), file = scoresFile)
            for i,dna in enumerate(self.__data):
                dna.WriteJson(saveFolder + '/' + str(i) + '.json')
            print(average, file = open(saveFolder + '/average.txt', 'w'))
        if average > self.__bestScore:
            self.__bestScore = average
            if saveBest:
                for i,dna in enumerate(self.__data):
                    dna.WriteJson('./generation/best/' + str(i) + '.json')
        # Print every DNA tied for the max score (no break below, intentionally?).
        for dna in self.__data:
            if dna.GetScore() == maxScore:
                decoded = dna.decode(self.__encoded)
                print('best match: ',decoded)
                if printAll:
                    print(decoded, file=open(saveFolder + '/best.txt', 'w'))
                #break
        print('generation: ', self.__generation, ' average score : ', average, ' max score: ', max(self.__scores),'\n')
    '''
    print('in Print')
    for dna in self.__data:
        print(dna)
    print('\n')
    '''
    def CalcFitness(self):
        """Score every DNA serially; the threaded variants are kept below
        (commented out) for reference."""
        startTime = time.time()
        bad = 0.4
        for dna in self.__data:
            dna.CalcFitness(self.__encoded, self.__wordsDict, bad, self.__weights)
        length = len(self.__data)
       # self.__threadPool.Start(lambda dna, encoded, wordsDict: dna.CalcFitness(encoded, wordsDict), list(zip(self.__data, [self.__encoded] * length, [self.__cost]*length, [self.__wordsDict]*length)))
       # self.__threadPool.Join()
        '''
        threads = []
        for threadId in range(self.__threadsCount):
            data = [self.__data[i] for i in range(length) if i % self.__threadsCount == threadId]
            thread = threading.Thread(target=worker, args = (data, self.__encoded, self.__wordsDict, bad, self.__weights, ))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        '''
        #bar = progressbar.ProgressBar(maxval=length)
        #show = [randint(0, BAR_LENGTH) for i in range(length)]
        #if show[indx] == 0:
        #    bar.update(indx + 1)
        #bar.finish()
        print("%s seconds elpased" % (time.time() - startTime))
        self.__scores = [dna.GetScore() for dna in self.__data]
    def __PickOne(self, cumulativeSums, maxSum):
        """Roulette-wheel pick: index of a DNA with probability proportional
        to its score (binary search over the cumulative sums)."""
        index = 0
        value = np.random.rand() * maxSum
        return bisect.bisect_left(cumulativeSums, value)
    def Stuck(self, maxScore):
        """True when the max score has not improved for 10 generations."""
        if maxScore == self.__lastScore:
            self.__consecutiveScores = self.__consecutiveScores + 1
        else:
            self.__consecutiveScores = 1
            self.__lastScore = maxScore
        if self.__consecutiveScores == 10:
            return True
        return False
    def NaturalSelection(self):
        """Breed the next generation; when stuck, force heavy mutation and
        re-randomize the per-position weights instead."""
        length = len(self.__data)
        maxScore = max(self.__scores)
        if self.Stuck(maxScore):
            for dna in self.__data:
                if(dna.GetScore() == maxScore):
                    mutation = 0.7
                else:
                    mutation = 0.5
                dna.Mutate(mutation, self.__hints)
            print ('Forced mutations was did...')
            # Fresh random weights, normalized so they sum to len(encoded).
            randNumbers = [np.random.random() for i in range(len(self.__encoded))]
            randSum = sum(randNumbers)
            length = len(self.__encoded)
            for i in range(length):
                self.__weights[i] = randNumbers[i]/randSum*length
            return None
        cumulativeSums = np.array(self.__scores).cumsum().tolist()
        maxSum = cumulativeSums[-1]
        newGeneration = []
        currentMutation = self.__mutationRate# + (self.__generation/1000)
        print ('mutation:', currentMutation*100, '%')
        for i in range(length):
            parent1 = self.__data[self.__PickOne(cumulativeSums, maxSum)]
            parent2 = self.__data[self.__PickOne(cumulativeSums, maxSum)]
            child = parent1.CrossOver(parent2, currentMutation, self.__hints)
            newGeneration.append(child)
        self.__data = newGeneration
| true |
# Walk the tray, announce each cupcake's slot, then frost it in place.
tray = ["vanilla cupcake", "chocolate cupcake", "chocolate cupcake", "vanilla cupcake"]
frosted = {
    "vanilla cupcake": "vanilla cupcake with vanilla frosting",
    "chocolate cupcake": "chocolate cupcake with chocolate frosting",
}
for slot, cupcake in enumerate(tray):
    print(cupcake, "is at index", slot)
    tray[slot] = frosted.get(cupcake, cupcake)
print(tray)
| true |
8896d795a4e66ba570cff6acb682c710e74fe6a5 | Python | Ran-Mewo/uwuifier | /uwuifier/uwufier.py | UTF-8 | 347 | 3.0625 | 3 | [] | no_license | import random
uwutext = input("pwease input text uwu: ")
# replacing letters uwu
# TODO add a percentage for stuttering
uwutext = (
uwutext.lower()
.replace("l", "w")
.replace("r", "w")
.replace("v", "f")
.replace("i", "i-i")
.replace("d", "d-d")
.replace("n", "n-n")
+ " >~<"
)
print(uwutext)
| true |
615cb6ba59ab66885284cfa41140ee06d85d86b9 | Python | dsrizvi/algo-interview-prep | /hacker-rank/implementation/kangroo.py | UTF-8 | 767 | 3.34375 | 3 | [] | no_license | #!/bin/python3
import sys
from operator import sub
def calculate_fine(return_date, due_date):
    """HackerRank 'Library Fine': fine for returning a book on
    *return_date* when it was due on *due_date* (both [day, month, year]).

    Same year & month but a late day -> 15 per day; same year but a late
    month -> 500 per month; a later year -> flat 10000; otherwise no fine.
    """
    ret_day, ret_month, ret_year = return_date
    due_day, due_month, due_year = due_date
    if ret_year > due_year:
        return 10000
    if ret_year == due_year:
        if ret_month > due_month:
            return 500 * (ret_month - due_month)
        if ret_month == due_month and ret_day > due_day:
            return 15 * (ret_day - due_day)
    return 0
def main():
    """Read the return and due dates from stdin and print the fine."""
    # Each line is "day month year"; first the return date, then the due date.
    d1,m1,y1 = input().strip().split(' ')
    d1,m1,y1 = [int(d1),int(m1),int(y1)]
    d2,m2,y2 = input().strip().split(' ')
    d2,m2,y2 = [int(d2),int(m2),int(y2)]
    print(calculate_fine([d1,m1,y1], [d2, m2, y2]))
if __name__ == '__main__':
    main()
def solution(numbers, hand):
    """2020 Kakao internship 'keypad presses'.

    Simulate typing *numbers* on a phone keypad: thumbs start on '*'
    (left) and '#' (right); 1/4/7 always use the left thumb, 3/6/9 the
    right, and middle-column keys go to the closer thumb, with ties
    broken by the dominant *hand*.  Returns the 'L'/'R' press sequence.
    """
    pos = {1: (0, 0), 2: (0, 1), 3: (0, 2),
           4: (1, 0), 5: (1, 1), 6: (1, 2),
           7: (2, 0), 8: (2, 1), 9: (2, 2),
           '*': (3, 0), 0: (3, 1), '#': (3, 2)}
    left_thumb, right_thumb = '*', '#'
    prefer_right = hand == 'right'
    presses = []
    for num in numbers:
        if num in (1, 4, 7):
            left_thumb = num
            presses.append('L')
        elif num in (3, 6, 9):
            right_thumb = num
            presses.append('R')
        else:
            # Manhattan distance from each thumb to the target key.
            tr, tc = pos[num]
            lr, lc = pos[left_thumb]
            rr, rc = pos[right_thumb]
            left_dist = abs(lr - tr) + abs(lc - tc)
            right_dist = abs(rr - tr) + abs(rc - tc)
            if left_dist < right_dist or (left_dist == right_dist and not prefer_right):
                left_thumb = num
                presses.append('L')
            else:
                right_thumb = num
                presses.append('R')
    return ''.join(presses)
| true |
e4dde2bacea1fbbc45c41c6bb8d3c22d97f5f028 | Python | onstash/scrapple | /scrapple/utils/exceptions.py | UTF-8 | 1,072 | 2.90625 | 3 | [
"MIT"
] | permissive | """
scrapple.utils.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~
Functions related to handling exceptions in the input arguments
"""
import re
def handle_exceptions(args):
    """
    Validate the docopt-parsed CLI arguments, raising Exception with a
    descriptive message on the first invalid value.

    :param args: The arguments passed in the CLI, parsed by the docopt module
    :return: None
    """
    name_pattern = re.compile(r'[^a-zA-Z0-9_]')
    if args['genconfig'] and args['--type'] not in ('scraper', 'crawler'):
        raise Exception("--type has to be 'scraper' or 'crawler'")
    if args['genconfig'] and args['--selector'] not in ('xpath', 'css'):
        raise Exception("--selector has to be 'xpath' or 'css'")
    if (args['generate'] or args['run']) and args['--output_type'] not in ('json', 'csv'):
        raise Exception("--output_type has to be 'json' or 'csv'")
    if args['genconfig'] or args['generate'] or args['run']:
        if name_pattern.search(args['<projectname>']) is not None:
            raise Exception("<projectname> should consist of letters, digits or _")
        if int(args['--levels']) < 1:
            raise Exception("--levels should be greater than, or equal to 1")
    return
| true |
class Solution:
    """LeetCode 65 — decide whether a string is a valid number."""

    def isNumber(self, s):
        """Return True when *s* (after trimming spaces) parses as a number,
        optionally with a single 'e' and an integer-only exponent."""
        text = s.strip()
        if not text:
            return False

        def _is_numeric(part, dot_seen=False):
            # A sign only at position 0; at most one '.', and none at all
            # when dot_seen is preset (the exponent part); must contain a digit.
            has_digit = False
            for idx, ch in enumerate(part):
                if ch in '0123456789':
                    has_digit = True
                elif ch in '-+' and idx == 0:
                    continue
                elif ch == '.':
                    if dot_seen:
                        return False
                    dot_seen = True
                else:
                    return False
            return has_digit

        if 'e' in text:
            mantissa, _, exponent = text.partition('e')
            return _is_numeric(mantissa) and _is_numeric(exponent, dot_seen=True)
        return _is_numeric(text)
# Smoke tests: every line should print True.
x = Solution()
print(x.isNumber("0") == True)
print(x.isNumber(" 0.1 ") == True)
print(x.isNumber("abc") == False)
print(x.isNumber("1 a") == False)
print(x.isNumber("2e10") == True)
print(x.isNumber("e") == False)
print(x.isNumber(".1") == True)
print(x.isNumber(".") == False)
print(x.isNumber(".e1") == False)
print(x.isNumber("3.") == True)
print(x.isNumber("-3.") == True)
print(x.isNumber("+.8") == True)
print(x.isNumber(" -.") == False)
print(x.isNumber("46.e3") == True)
print(x.isNumber("0e") == False)
print(x.isNumber("1e.") == False)
print(x.isNumber("6e6.5") == False)
| true |
bbb33e122ad5ad51ead25285dd41e0113523a867 | Python | Marcos-A/AlfredAppWorkflowsPythonScripts | /char_word_sentence_paragraph_counter.py | UTF-8 | 3,454 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
""" word_sentence_paragraph_counter 1.0
Alfred Workflow Python script that counts the number of
words, sentences and paragraphs in a given text.
"""
import json
import sys
import re
# Grab the query with the entered text and remove leading and trailing spaces
text_str = sys.argv[1].strip()
# Obtain total number of characters (includes inner whitespace)
chars_count = len(text_str)
# Prepare response
if chars_count == 1:
    chars_count_response = str(chars_count) + " character"
else:
    chars_count_response = str(chars_count) + " characters"
# Separate words and add them to list
words_list = text_str.split()
# Obtain total number of words in words list
words_count = len(words_list)
# Prepare response
if words_count == 1:
    words_count_response = str(words_count) + " word"
else:
    words_count_response = str(words_count) + " words"
# Replace question and exclamation marks with dots so every sentence ends in '.'
no_questions_text = text_str.replace("?", ".")
no_exclamations_nor_questions_text = no_questions_text.replace("!", ".")
# Remove duplicated dots (ellipses collapse to a single '.')
# NOTE(review): dots inside decimals/abbreviations still split sentences — known limitation.
clean_dots_text = re.sub(r'(\.\.)\.*|\.', r'.', no_exclamations_nor_questions_text)
# Separate sentences and add them to list
sentences_list = clean_dots_text.split(".")
# Obtain total number of sentences in sentences list
# A sentence without a final dot is still considered a sentence
if clean_dots_text.endswith("."):
    sentences_count = len(sentences_list)-1
else:
    sentences_count = len(sentences_list)
# Prepare response
if sentences_count == 1:
    sentences_count_response = str(sentences_count) + " sentence"
else:
    sentences_count_response = str(sentences_count) + " sentences"
# Remove leading, trailing and duplicated line breaks
clean_line_breaks_text = re.sub(r'(\n\n)\n*|\n', r'\n', text_str.strip('\n'))
# Separate paragraphs and add them to list
paragraph_list = clean_line_breaks_text.split("\n")
# Obtain total number of paragraphs in paragraphs list
paragraph_count = len(paragraph_list)
# Prepare response
if paragraph_count == 1:
    paragraph_count_response = str(paragraph_count) + " paragraph"
else:
    paragraph_count_response = str(paragraph_count) + " paragraphs"
# Assemble the combined summary line
words_sentences_paragraphs_count = chars_count_response +\
                                   " · " +\
                                   words_count_response +\
                                   " · " +\
                                   sentences_count_response +\
                                   " · " +\
                                   paragraph_count_response
# Alfred's JSON expected result
words_sentences_paragraphs_count_json = {"items":
                                         [
                                             {
                                                 "type": "file",
                                                 "title": words_sentences_paragraphs_count,
                                                 "subtitle": "Copied to clipboard",
                                                 "arg": words_sentences_paragraphs_count
                                             }
                                         ]
                                         }
# Convert the JSON scheme to string
words_sentences_paragraphs_count_json_string = json.dumps(words_sentences_paragraphs_count_json)
# Pass the resulting JSON string to Alfred
print(words_sentences_paragraphs_count_json_string)
| true |
class Card:
    """One face-down/face-up card in the memory game.

    Cards compare equal when their values match; str() pads single-digit
    values with a trailing space so the board columns line up.
    """
    def __init__(self, value, pos):
        self.value = value
        self.is_reveal = False  # face-down until reveal() is called
        self.pos = pos
    # Special Methods
    def __str__(self):
        padding = "" if self.value > 9 else " "
        return str(self.value) + padding
    def __eq__(self, other):
        return self.value == other.value
    # Public Methods
    def hide(self):
        self.is_reveal = False
    def reveal(self):
        self.is_reveal = True
| true |
463b7e93aa3a2bd6c2608a5948741cc2613fd80d | Python | chase001/chase_learning | /Python接口自动化/auto_test/common/MultiThread.py | UTF-8 | 8,967 | 2.984375 | 3 | [] | no_license | import threadpool, os
from common.func import *
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
def cost_time_cal(func):
    """Decorator that prints the wall-clock runtime of *func*.

    Fixes two defects of the original wrapper: it now propagates the
    wrapped function's return value (decorated methods such as
    run_concurrrent_process_thread_pool return result lists that were
    silently dropped) and it accepts keyword arguments.  functools.wraps
    preserves the wrapped function's name/docstring for logging.
    """
    from functools import wraps

    @wraps(func)
    def warp(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        print("****** Concurrrent test is end, Total cost {}S ******".format(round(time.time() - start_time, 2)))
        return result
    return warp
def cal_weight(name, weight, unit="斤"):
    """Simulate a slow task, then return a greeting with the weight."""
    time.sleep(1)  # stand-in for real work so concurrency gains are measurable
    return f"Hello,{name}{weight}{unit}"
def cal_weight_2(name_weight_list, unit="斤"):
    """Like cal_weight, but takes a (name, weight) pair as one argument
    so it can be used with Executor.map over a list of pairs."""
    time.sleep(1)  # stand-in for real work so concurrency gains are measurable
    person, kilos = name_weight_list[0], name_weight_list[1]
    return f"Hello,{person}{kilos}{unit}"
class ConcurrentTools(object):
    """
    Currently wraps two concurrency libraries: threadpool and concurrent.futures.
    For raw speed, run_concurrrent_threadpool is the recommended entry point.
    """
    def __init__(self, process_worker_quantity=None, thread_worker_quantity=None):
        # Defaults: one process per CPU core, five threads per core.
        self.process_worker_quantity = process_worker_quantity if process_worker_quantity else os.cpu_count()
        self.thread_worker_quantity = thread_worker_quantity if thread_worker_quantity else os.cpu_count() * 5
    # @cost_time_cal
    def run_thread_pool(self, threads_num, request_num, target_request):
        """
        Run *target_request* over a threadpool.ThreadPool.
        :param threads_num: number of worker threads
        :param request_num: argument list for makeRequests
        :param target_request: the function to run
        :return:
        """
        # log.info(msg="Ready for {func_name} MultiThread running!!".format(func_name=target_request.__name__))
        log.info(msg="Starting at {now}".format(now=now()))
        pool = threadpool.ThreadPool(num_workers=threads_num)
        requests = threadpool.makeRequests(target_request, request_num)
        [pool.putRequest(req) for req in requests]
        pool.wait()
        log.info(msg="End at {now}".format(now=now()))
    # @cost_time_cal
    def run_concurrrent_threadpool(self, func, ls, *args):
        """
        concurrent.futures thread-pool runner.
        :param func: function to execute for each element of *ls*
        :param ls: iterable of per-call first arguments
        :param args: extra positional arguments forwarded to every call
        :return: list of results, in completion order
        """
        result_list = []
        with ThreadPoolExecutor(thread_name_prefix="my_thread") as executor:
            future_tasks = [executor.submit(func, l, *args) for l in ls]
            for future in as_completed(future_tasks):
                result_list.append(future.result())  # collect each result as it completes
        return result_list
    # Not recommended — see notes in the body.
    @cost_time_cal
    def run_concurrrent_processpool(self, func, *args):
        """
        True parallelism: ProcessPoolExecutor hands work to multiple Python
        processes, bypassing the GIL for CPU-bound jobs and using all cores.
        :param func: function to execute
        :param args: iterables forwarded to executor.map
        :return:
        """
        # with cf.ProcessPoolExecutor() as executor:
        #     future_tasks = [executor.submit(func, l, *args) for l in ls]
        #     for f in future_tasks:
        #         print(f.result()) if f.result() else None
        result_list = []
        with ProcessPoolExecutor() as executor:
            for number in executor.map(func, *args):
                print("{}".format(1))
        # for number, prime in zip(ls, executor.map(func, ls),):
        #     print('%d is prime: %s' % (number, prime))
        # for future in as_completed(future_tasks):
        #     result_list.append(future.result()) # collect each result as it completes
        #
        # return result_list
    @cost_time_cal
    def run_concurrrent_process_thread_pool(self, func, arg_list, time_out=None):
        """
        Combined multi-process + multi-thread concurrency test driver.
        Args:
            fn : the function under test
            arg_list : iterable of arguments consumed by the function under test
            time_out : max seconds each thread may wait; unlimited when omitted
        """
        log.info("\n****** Concurrrent Test Start ******"
                 "\n***** Target Function is {} ******"
                 "\n***** Process Quanlity is {} ******"
                 "\n***** Thread Quanlity is {} ******".format(func.__name__, self.process_worker_quantity,
                                                              self.thread_worker_quantity))
        result_list = []
        # Split the arguments into one chunk per thread-pool batch.
        div_arg_list = self._div_list(arg_list, self.thread_worker_quantity)
        print(div_arg_list)
        with ProcessPoolExecutor(max_workers=self.process_worker_quantity) as e:
            process_futures = [e.submit(self._thread_worker, func, i, time_out) for i in div_arg_list]
            for process_future in process_futures:
                for thread_result in process_future.result():
                    log.info(thread_result)
                    result_list.append(thread_result)
        log.info("Concurrrent Test is END")
        return result_list
    def _thread_worker(self, func, sub_arg_list, time_out):
        # Runs inside a child process: fan the sub-list out over a thread pool.
        future_result_list = []
        with ThreadPoolExecutor(max_workers=self.thread_worker_quantity) as e:
            futures = [e.submit(func, i) for i in sub_arg_list]
            # for i in sub_arg_list:
            #     futures_dict = {str(i): future for future in [e.submit(func, i)]}
            #     futures_list.append(futures_dict)
            for future in as_completed(futures, timeout=time_out):
                if future.exception() is not None:
                    log.warning("线程报错,{msg}".format(msg=future.exception()))
                else:
                    future_result_list.append(future.result())
        return future_result_list
    def _div_list(self, init_list, childern_list_len):
        # Chunk init_list into sublists of childern_list_len items; any
        # remainder becomes a final shorter sublist.
        # result = []
        # cut = int(len(ls) / n)
        # if cut == 0:
        #     ls = [[x] for x in ls]
        #     none_array = [[] for i in range(0, n - len(ls))]
        #     return ls + none_array
        # for i in range(0, n - 1):
        #     result.append(ls[cut * i:cut * (1 + i)])
        # result.append(ls[cut * (n - 1):len(ls)])
        # return result
        list_of_groups = zip(*(iter(init_list),) * childern_list_len)
        end_list = [list(i) for i in list_of_groups]
        count = len(init_list) % childern_list_len
        end_list.append(init_list[-count:]) if count != 0 else end_list
        return end_list
# class Particle:
# def __init__(self, i):
# self.i = i
# self.fitness = None
#
# def getfitness(self):
# self.fitness = 2 * self.i
#
#
# def thread_worker(p):
# p.getfitness()
# return (p.i, p)
def proc_worker(ps):
    """Map cal_weight_2 over ``ps`` with a thread pool, timing the batch."""
    import concurrent.futures as cf
    started = time.time()
    with cf.ThreadPoolExecutor() as pool:
        mapped = [item for item in pool.map(cal_weight_2, ps)]
    print("Thread COST:{}".format(time.time() - started))
    return mapped
# @cost_time_cal
def update_fitness(INFO):
    """Fan ``INFO`` out across processes via proc_worker and print each result."""
    import concurrent.futures as cf
    with cf.ProcessPoolExecutor() as executor:
        for batch in executor.map(proc_worker, INFO):
            for entry in batch:
                print(entry)
def div_list(ls, n):
    """Split ``ls`` into ``n`` sublists.

    When len(ls) >= n: n-1 chunks of len(ls)//n items plus one remainder
    chunk. When len(ls) < n: one singleton list per element, padded with
    empty lists up to n entries.
    """
    chunk = int(len(ls) / n)
    if chunk == 0:
        wrapped = [[item] for item in ls]
        padding = [[] for _ in range(n - len(wrapped))]
        return wrapped + padding
    parts = [ls[chunk * i:chunk * (i + 1)] for i in range(n - 1)]
    parts.append(ls[chunk * (n - 1):len(ls)])
    return parts
if __name__ == '__main__':
    # Ad-hoc driver: pushes 80 (name, weight) pairs through the combined
    # process + thread pool with 13 threads per batch.
    # MultiThread.run_thread_pool(4,['xiaozi','aa','bb','cc'],sayhello)
    names = ["yuting", "shuhuai", "cuirong", "panda", "xxx"]
    weight = [150, 150, 150, 150, 150]
    # # #a = "斤"
    # MultiThread.run_concurrrent_threadpool(cal_weight, names,weight)
    # #
    # concurrenttools.run_concurrrent_processpool(cal_weight,names,weight )
    # particles = [Particle(i) for i in range(500)]
    # check all(particles[i].i == i for i in range(len(particles)))
    # check all(particles[i].i == i for i in range(len(particles)))
    # check all(p.fitness == 2 * p.i for p in particles)
    # l = div_list(list(zip(names, weight)) * 16, 4)
    l = list(zip(names, weight)) * 16
    # print(l)
    concurrenttools = ConcurrentTools(thread_worker_quantity=13)
    concurrenttools.run_concurrrent_process_thread_pool(cal_weight_2, l)
| true |
737b6fc12c9d9b5c28096920b17735b50a44c57d | Python | aneeshjain/Data-Structures-in-Python | /linked_list.py | UTF-8 | 1,745 | 4.03125 | 4 | [] | no_license |
class Node:
    """One element of a singly linked list: a payload plus a successor link."""

    def __init__(self, d, n=None):
        self.data = d   # stored payload
        self.next = n   # following node, or None at the tail

    def get_next(self):
        """Return the node linked after this one (None at the tail)."""
        return self.next

    def set_next(self, n):
        """Re-link this node so that ``n`` follows it."""
        self.next = n

    def get_data(self):
        """Return the payload held by this node."""
        return self.data

    def set_data(self, d):
        """Overwrite the payload held by this node."""
        self.data = d
class LinkedList:
    """Singly linked list with head insertion and linear search/removal."""

    def __init__(self, r=None):
        self.root = r   # head node (None when empty)
        self.size = 0   # element count

    def get_size(self):
        """Return the number of elements currently stored."""
        return self.size

    def add(self, data):
        """Insert ``data`` at the head of the list."""
        self.root = Node(data, self.root)
        self.size += 1

    def remove(self, data):
        """Unlink the first node holding ``data``; True if one was removed."""
        prev, cur = None, self.root
        while cur is not None:
            if cur.get_data() != data:
                prev, cur = cur, cur.get_next()
                continue
            if prev is None:
                self.root = cur.get_next()
            else:
                prev.set_next(cur.get_next())
            self.size -= 1
            return True
        return False

    def find(self, data):
        """Report whether ``data`` occurs in the list (prints the outcome)."""
        cur = self.root
        while cur is not None:
            if cur.get_data() == data:
                print("Data Found")
                return True
            cur = cur.get_next()
        print("Data not found")
        return False

    def print_list(self):
        """Print every payload from head to tail, one per line."""
        cur = self.root
        while cur is not None:
            print(cur.get_data())
            cur = cur.get_next()
# Demo: build 3 -> 2 -> 5 -> 1 (add() prepends), report the size,
# drop the 5, then print what remains.
myList = LinkedList()
myList.add(1)
myList.add(5)
myList.add(2)
myList.add(3)
size = myList.get_size()
print("size = ", size)
myList.remove(5)
myList.print_list()
| true |
bb9209ad08e8633ec39c28ccbb36f652b60a1781 | Python | BXSS101/KMITL_HW-Data_Structure | /01 Python 1/0103.py | UTF-8 | 891 | 3.46875 | 3 | [] | no_license | ###################
# Disclaimer part #
###################
'''
Lab#1 | Basic Python 1
Course : Data Structure & Algorithm
Instructor : Kiatnarong Tongprasert, Kanut Tangtisanon
Semester / Academic Year : 1 / 2020
Institute : KMITL, Bangkok, Thailand
Developed By : BXSS101 (Ackrawin B.)
Github URL : https://github.com/BXSS101
'''
################
# Problem part #
################
print("*** Fun with permute ***")
# Read a comma-separated list of ints, e.g. "1,2,3".
sList = list(map(int, input("input : ").split(',')))
print("Original Cofllection: ", sList)  # (the typo is in the original output string)
sList = sList[::-1]  # the permutations below are built from the reversed input
print("Collection of distinct numbers:")
print(' ', end='')
def addPermute(pos, tList) :
    """All lists made by inserting sList[pos] (module global) into every
    slot of tList, including both ends."""
    value = sList[pos]
    return [tList[:i] + [value] + tList[i:] for i in range(len(tList) + 1)]
def permute(tList) :
    """Every permutation of tList, built recursively via addPermute."""
    if not tList:
        return [[]]
    head, tail = tList[0], tList[1:]
    return [candidate
            for partial in permute(tail)
            for candidate in addPermute(head, partial)]
print(permute([i for i in range(len(sList))])) | true |
34717ac288e907f9a37ac29201639d059effb9e1 | Python | Badalmishra/mediapipe | /main.py | UTF-8 | 2,374 | 2.578125 | 3 | [] | no_license | import cv2
import time
import numpy as np
import HandTrackingModule as htm
import math
################################
wCam, hCam = 1200, 600  # requested camera capture size
################################
cap = cv2.VideoCapture(0)
cap.set(3, wCam)   # 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, hCam)   # 4 = CAP_PROP_FRAME_HEIGHT
pTime = 0
detector = htm.handDetector(detectionCon=0.7)
print(detector)
# NOTE(review): vol/volBar/volPer look like leftovers from a volume demo --
# they are never used below.
vol = 0
volBar = 400
volPer = 0
drawPoints = []  # accumulated [x, y] pencil marks
mode=''
while True:
    success, img = cap.read()
    img = detector.findHands(img, draw=False)
    lmList,bbox = detector.findPosition(img, draw=False)
    # Repaint every stored pencil mark on the fresh frame.
    for point in drawPoints:
        cv2.circle(img, (point[0], point[1]), 3, (0, 0, 255), cv2.FILLED)
    if len(lmList) > 20:
        # Landmarks 8/12: index and middle fingertips (pencil gesture).
        x1, y1 = lmList[8][1], lmList[8][2]
        x2, y2 = lmList[12][1], lmList[12][2]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        length = math.hypot(x2 - x1, y2 - y1)
        cv2.circle(img, (x1, y1), 5, (255, 0, 255), cv2.FILLED)
        cv2.circle(img, (x2, y2), 5, (255, 0, 255), cv2.FILLED)
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
        cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
        # Landmarks 16/12: ring and middle fingertips (eraser gesture).
        eraserX1, eraserY1 = lmList[16][1], lmList[16][2]
        eraserX2, eraserY2 = lmList[12][1], lmList[12][2]
        eraserCx, eraserCy = (eraserX1 + eraserX2) // 2, (eraserY1 + eraserY2) // 2
        cv2.line(img, (eraserX1, eraserY1), (eraserX2, eraserY2), (123, 0, 123), 3)
        eraserLength = math.hypot(eraserX2 - eraserX1, eraserY2 - eraserY1)
        # Pinched index+middle (and spread ring) => draw at the midpoint.
        if length < 50 and eraserLength>50:
            mode = "pencil"
            print('drawPoints', len(drawPoints))
            drawPoints.append([cx,cy])
            cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)
        if eraserLength < 50:
            mode = "eraser"
            cv2.line(img, (eraserX1, eraserY1), (eraserX2, eraserY2), (10, 10, 123), 3)
            # NOTE(review): removing from drawPoints while iterating it skips
            # the element after each removal -- iterate over a copy
            # (drawPoints[:]) to erase reliably.
            for point in drawPoints:
                print(point[0],[eraserX1,eraserX2])
                if point[0] > (eraserX1-15) and point[0] < (eraserX2+15) and point[1] < (eraserY1+15) and point[1] > (eraserY2-15) :
                    print('===')
                    drawPoints.remove(point)
    # FPS from the inter-frame delta.
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    img = cv2.flip(img, 1)  # mirror for a selfie-style view
    cv2.putText(img, f'Mode: {mode}', (40, 50), cv2.FONT_HERSHEY_COMPLEX,
                1, (255, 0, 0), 3)
    cv2.imshow("Img", img)
cv2.waitKey(1) | true |
fc2d916ed9c182ca49656ded802d4e96709bce2e | Python | lambertv/dungeon-crawl | /dungeon_crawl.py | UTF-8 | 7,628 | 3.359375 | 3 | [] | no_license | import pygame
import random
from enum import Enum
# Window / board geometry and palette. This file uses Python-2 print
# statements, so the /2 divisions below are integer division.
CAPTION = "Game game"
SCREEN_SIZE = [800,600]
BOARD_WIDTH = 8
BOARD_HEIGHT = 10
BLOCK_SIZE = 50
BOARD_X = (SCREEN_SIZE[0]-BOARD_WIDTH*BLOCK_SIZE)/2
BOARD_Y = (SCREEN_SIZE[1]-BOARD_HEIGHT*BLOCK_SIZE)/2
BACKGROUND_COLOR = (0,0,0)
CORRIDOR_COLOR = (100,100,100)
PLAYER_COLOR = (50, 50, 200)
ENEMY_COLOR = (200, 50, 50)
class Movement(Enum):
    # Each member's .value is a (dx, dy) grid delta; with the stdlib enum the
    # members themselves are not subscriptable -- the tuple lives in .value.
    up = (0,-1)
    down = (0,1)
    left = (-1,0)
    right = (1,0)
    stay = (0,0)
class Gamestate(Enum):
    # High-level phases of the game loop.
    dungeon = 0    # free movement on the grid
    battle = 1     # turn-based fight against battle_enemy
    game_over = 2  # player health reached zero
class Dungeon():
    """Game model: the walkable board, the player, enemies and the phase flag."""
    def __init__(self, width, height):
        # Placeholder all-walkable board; immediately replaced by the
        # hand-authored layout below (1 = corridor, 0 = wall).
        self.board = [[True for i in range(height)] for j in range(width)]
        self.width = width
        self.height = height
        self.player = Player(0,0)
        self.enemies = []
        self.battle_enemy = None   # enemy currently engaged in battle
        self.gamestate = Gamestate.dungeon
        self.board = [ [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
                       [0, 1, 0, 0, 1, 1, 1, 0, 0, 1],
                       [0, 1, 1, 1, 1, 0, 1, 1, 0, 1],
                       [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],
                       [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       [1, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                       [1, 1, 1, 0, 1, 1, 1, 1, 1, 0] ]
        self.enemies = [Enemy(5,5), Enemy(7,0), Enemy(0,7)]
    def update_movement(self):
        # Enemies only take a turn when the player actually moved.
        if self.player.move(self):
            for enemy in self.enemies:
                enemy.move(self)
    def update_battle(self):
        # Resolve a battle round: player death ends the game, enemy death
        # returns to dungeon mode and removes the enemy from the board.
        if self.player.health <= 0:
            self.gamestate = Gamestate.game_over
        elif self.battle_enemy.health <= 0:
            self.gamestate = Gamestate.dungeon
            self.enemies.remove(self.battle_enemy)
class Player():
    """The player-controlled character: grid position, queued move, combat stats."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.movement = Movement.stay   # move queued for the next turn
        self.health = 100
        self.attack = 5
    def attacked(self, enemy):
        """Resolve an enemy's attack on the player (8-in-11 chance to hit).

        Single-argument prints use the parenthesised form, which behaves
        identically under Python 2 and also parses under Python 3."""
        if random.randint(0,10) < 8:
            self.health -= enemy.attack
            print("ENEMY HIT!")
        else:
            print("ENEMY MISS!")
    def move(self, dungeon):
        """Apply the queued move; return 1 on a successful step, 0 otherwise.

        Walking into an enemy switches the game into battle mode instead."""
        # BUG FIX: Enum members are not subscriptable -- the (dx, dy) tuple
        # is the member's .value, so index that instead of the member.
        new_x = self.x + self.movement.value[0]
        new_y = self.y + self.movement.value[1]
        for enemy in dungeon.enemies:
            if enemy.x == new_x and enemy.y == new_y:
                dungeon.gamestate = Gamestate.battle
                dungeon.battle_enemy = enemy
                print("BATTLE")
                return 0
        if 0 <= new_x < len(dungeon.board) and 0 <= new_y < len(dungeon.board[0]) and dungeon.board[new_x][new_y]:
            self.x = new_x
            self.y = new_y
            return 1
        else:
            return 0
class Enemy():
    """A wandering monster: grid position, random movement, combat stats."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.movement = Movement.stay
        self.health = 30
        self.attack = 3
    def attacked(self, player):
        """Resolve the player's attack on this enemy (9-in-11 chance to hit)."""
        if random.randint(0,10) < 9:
            self.health -= player.attack
            print("PLAYER HIT!")
        else:
            print("PLAYER MISS!")
    def move(self, dungeon):
        """Pick a random direction and step if the target tile is free.

        Stepping onto the player starts a battle and returns 0; otherwise 1
        is returned (walls or occupied tiles simply leave the enemy put)."""
        movement_list = [Movement.right, Movement.left, Movement.down, Movement.up, Movement.stay]
        self.movement = random.choice(movement_list)
        # BUG FIX: index the member's .value -- Enum members themselves are
        # not subscriptable.
        new_x = self.x + self.movement.value[0]
        new_y = self.y + self.movement.value[1]
        if dungeon.player.x == new_x and dungeon.player.y == new_y:
            dungeon.gamestate = Gamestate.battle
            dungeon.battle_enemy = self
            print("BATTLE")
            return 0
        if 0 <= new_x < len(dungeon.board) and 0 <= new_y < len(dungeon.board[0]) and dungeon.board[new_x][new_y]:
            # Don't stack on another enemy: fall back to the current tile.
            for enemy in dungeon.enemies:
                if enemy.x == new_x and enemy.y == new_y:
                    new_x = self.x
                    new_y = self.y
            self.x = new_x
            self.y = new_y
        return 1
class Graphics():
    """Renders the dungeon model onto a pygame surface in BLOCK_SIZE tiles."""
    def __init__(self):
        self.board_x = BOARD_X   # top-left pixel corner of the board
        self.board_y = BOARD_Y
    def coordinate_to_pixel(self, x, y):
        # Grid coordinates -> top-left pixel of the corresponding tile.
        return (x*BLOCK_SIZE+self.board_x, y*BLOCK_SIZE+self.board_y)
    def draw_block(self, screen, x, y, color):
        # Fill one grid tile with a solid colour.
        pixel_x, pixel_y = self.coordinate_to_pixel(x, y)
        draw_rect = pygame.Rect(pixel_x, pixel_y, BLOCK_SIZE, BLOCK_SIZE)
        pygame.draw.rect(screen, color, draw_rect)
    def draw_dungeon(self, screen, dungeon):
        # Paint every walkable (truthy) board cell as a corridor tile.
        for x in range(dungeon.width):
            for y in range(dungeon.height):
                if dungeon.board[x][y]:
                    self.draw_block(screen, x, y, CORRIDOR_COLOR)
    def draw_player(self, screen, player):
        self.draw_block(screen, player.x, player.y, PLAYER_COLOR)
    def draw_enemies(self, screen, enemies):
        for enemy in enemies:
            self.draw_block(screen, enemy.x, enemy.y, ENEMY_COLOR)
    def draw(self, screen, dungeon):
        # Full-frame redraw: background, corridors, then the actors on top.
        screen.fill(BACKGROUND_COLOR)
        self.draw_dungeon(screen, dungeon)
        self.draw_player(screen, dungeon.player)
        self.draw_enemies(screen, dungeon.enemies)
class Game():
    """Top-level controller wiring pygame input/output to the Dungeon model."""
    def __init__(self):
        self.dungeon = Dungeon(BOARD_WIDTH, BOARD_HEIGHT)
        self.graphics = Graphics()
        self.screen = pygame.display.get_surface()
        self.done = False
        # Draw the initial frame before the first turn is taken.
        self.graphics.draw(self.screen, self.dungeon)
        pygame.display.update()
    def game_loop(self):
        # A turn (and redraw) only happens when input produced an action.
        if self.key_presses():
            self.update()
            self.graphics.draw(self.screen, self.dungeon)
            pygame.display.update()
    def key_presses(self):
        # Translate pygame events into model changes; returns True when the
        # pressed keys should advance the turn.
        take_turn = False
        if self.dungeon.gamestate == Gamestate.dungeon:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.done = True
                elif event.type == pygame.KEYDOWN:
                    take_turn = True
                    # Any key queues a turn; only arrows change direction.
                    self.dungeon.player.movement = Movement.stay
                    if event.key == pygame.K_RIGHT:
                        self.dungeon.player.movement = Movement.right
                    elif event.key == pygame.K_LEFT:
                        self.dungeon.player.movement = Movement.left
                    elif event.key == pygame.K_UP:
                        self.dungeon.player.movement = Movement.up
                    elif event.key == pygame.K_DOWN:
                        self.dungeon.player.movement = Movement.down
        elif self.dungeon.gamestate == Gamestate.battle:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.done = True
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        # Space exchanges one round of blows.
                        take_turn = True
                        self.dungeon.player.attacked(self.dungeon.battle_enemy)
                        self.dungeon.battle_enemy.attacked(self.dungeon.player)
                        print "PLAYER:", self.dungeon.player.health
                        print "ENEMY:", self.dungeon.battle_enemy.health
        return take_turn
    def update(self):
        # Advance the model according to the current game phase.
        if self.dungeon.gamestate == Gamestate.dungeon:
            self.dungeon.update_movement()
        elif self.dungeon.gamestate == Gamestate.battle:
            self.dungeon.update_battle()
        else:
            print "GAME OVER"
            self.done = True
if __name__ == '__main__':
    # Bootstrap pygame, run the blocking turn loop, then shut down cleanly.
    pygame.init()
    pygame.display.set_caption(CAPTION)
    pygame.display.set_mode(SCREEN_SIZE)
    game_instance = Game()
    while not game_instance.done:
        game_instance.game_loop()
    pygame.quit()
| true |
f7e51d72059f02840ceffb0445972fbc014c18f4 | Python | elParaguayo/football_notifications | /main.py | UTF-8 | 4,687 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
"""Live Football Scores Notification Service
by elParaguayo
The script checks football scores and sends updates to a notifier which
can be handled by the user as they see fit (e.g. notifications for goals).
An Issues/To Do list will be maintained separately on GitHub at this address:
https://github.com/elParaguayo/football_notifications/issues
Any errors can be discussed on the Raspberry Pi forum at this address:
https://www.raspberrypi.org/forums/viewtopic.php?f=41&t=118203
Version: 0.1
"""
import logging
from service.scoresservice import ScoreNotifierService
from notifiers.notifier_autoremote import AutoRemoteNotifier
from notifiers.notifier_email import EmailNotifier
##############################################################################
# USER SETTINGS - CHANGE AS APPROPRIATE                                      #
##############################################################################
# myTeam: Name of the team for which you want to receive updates.
# NB the team name needs to match the name used by the BBC
myTeam = "Chelsea"
# LIVE_UPDATE_TIME: Time in seconds until data refreshes while match is live
# NON_LIVE_UPDATE_TIME: Time in seconds until data refreshes after match or
#                       when there is no match on the day
# NB. Once a match is found, the script will try to sleep until 5 minutes
#     before kick-off
LIVE_UPDATE_TIME = 30
NON_LIVE_UPDATE_TIME = 60 * 60
# DETAILED - Request additional information on match (e.g. goalscorers)
# Should be updated to reflect the needs of the specific notifier
DETAILED = True
# LOGFILE:
LOGFILE = "/home/pi/service.log"
# DEBUG_LEVEL: set the log level here
# logging.DEBUG: Very verbose. Will provide updates about everything. Probably
#                best left to developers
# logging.INFO: Reduced info. Just provides updates for errors and
#               notification events
# logging.ERROR: Just provide log info when an error is encountered.
DEBUG_LEVEL = logging.ERROR
##############################################################################
# NOTIFIERS - You should only initialise one notifier and comment out the    #
#             other. Future versions may allow for multiple notifiers        #
##############################################################################
# E-MAIL #####################################################################
# FROMADDR - string representing sender
FROMADDR = 'Football Score Service'
# TOADDR - list of recipients
TOADDR = ['foo@bar.com']
# USER - username for mail account
USER = 'foobar@gmail.com'
# PWD - password
# NOTE(review): real credentials should never be committed to source control;
# prefer an environment variable or a config file outside the repository.
PWD = 'password'
# SERVER - address of mail server
SERVER = 'smtp.gmail.com'
# PORT - mail server port number
PORT = 587
# TITLE - optional prefix for email subject line
TITLE = ""
notifier = EmailNotifier(SERVER, PORT, USER, PWD, FROMADDR, TOADDR, TITLE)
# AUTOREMOTE #################################################################
# myAutoRemoteKey - long string key used in web requests for AutoRemote
myAutoRemoteKey = ""
# prefix - single word used by AutoRemote/Tasker to identify notifications
prefix = "scores"
# (AutoRemote is the alternative notifier; uncomment it and comment out the
# EmailNotifier line above to switch.)
# notifier = AutoRemoteNotifier(myAutoRemoteKey, prefix)
##############################################################################
# DO NOT CHANGE ANYTHING BELOW THIS LINE                                     #
##############################################################################
# Create a logger object for providing output.
logger = logging.getLogger("ScoresService")
logger.setLevel(DEBUG_LEVEL)
# Tell the logger to use our filepath
fh = logging.FileHandler(LOGFILE)
# Set the format for our output
formatter = logging.Formatter('%(asctime)s: '
                              '%(levelname)s: %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug("Logger initialised.")
if __name__ == "__main__":
    try:
        logger.debug("Initialising service...")
        service = ScoreNotifierService(myTeam,
                                       notifier=notifier,
                                       livetime=LIVE_UPDATE_TIME,
                                       nonlivetime=NON_LIVE_UPDATE_TIME,
                                       logger=logger,
                                       detailed=DETAILED)
        logger.debug("Starting service...")
        service.run()
    except KeyboardInterrupt:
        logger.error("User exited with ctrl+C.")
    except:
        # We want to catch error messages
        # (bare except is deliberate here: everything is logged and then
        # re-raised, so no failure is swallowed.)
        logger.exception("Exception encountered. See traceback message.\n"
                         "Please help improve development by reporting"
                         " errors.")
        raise
| true |
f1d6c0f0ebdccc3c0b7166523a087b18e9d1c578 | Python | hyejun18/daily-rosalind | /prepare/template_scripts/bioinformatics-textbook-track/BA9A.py | UTF-8 | 1,068 | 3.4375 | 3 | [] | no_license | ##################################################
# Construct a Trie from a Collection of Patterns
#
# http://rosalind.info/problems/BA9A/
#
# Given: A collection of strings Patterns.
#
# Return: The adjacency list corresponding to Trie(Patterns),
# in the following format. If Trie(Patterns) has
# n nodes, first label the root with 1 and then
# label the remaining nodes with the integers 2
# through n in any order you like. Each edge of
# the adjacency list of Trie(Patterns) will be
# encoded by a triple: the first two members of
# the triple must be the integers labeling the
# initial and terminal nodes of the edge, respectively;
# the third member of the triple must be the symbol
# labeling the edge.
#
# AUTHOR : dohlee
##################################################
# Your imports here
# Your codes here
if __name__ == '__main__':
    # Template skeleton: the trie construction itself is still to be written;
    # both `pass` statements are placeholders.
    # Load the data.
    with open('../../datasets/rosalind_BA9A.txt') as inFile:
        pass
    # Print output
    with open('../../answers/rosalind_BA9A_out.txt', 'w') as outFile:
        pass
| true |
869682c3a3f2d1ee90cb51530d067534aea1b65c | Python | fiboc/what-are-you-doing- | /Dars/dars1.py | UTF-8 | 258 | 3.1875 | 3 | [] | no_license | # def son_daraja(son, daraja= 2):
# print(son**daraja)
#
# son_daraja(25)
def juftmi(num):
    """Return True when num is even ("juft"), False when odd."""
    return num % 2 == 0
def musbatmi(num):
    """Return True when num is strictly positive ("musbat")."""
    return num > 0
def tubmi(num):
    """Return True when num is a prime number ("tub"), False otherwise.

    BUG FIX: the original def had no body (only unindented comment lines
    followed it), which is a SyntaxError; this implements the prime test
    the function name implies.
    """
    if num < 2:
        return False
    for divisor in range(2, int(num ** 0.5) + 1):
        if num % divisor == 0:
            return False
    return True
| true |
898a79086b60c8c6d52acfe7546d050b66c7b7d0 | Python | ebbitten/ScratchEtc | /nexus_calc.py | UTF-8 | 502 | 3.75 | 4 | [] | no_license | def main():
    # Interactive loop: keep prompting for deck parameters and print the
    # probability of drawing at least one nexus. There is no exit condition;
    # the user stops it with Ctrl+C (or non-integer input raising ValueError).
    while True:
        nexuses_left = int(input("Nexuses: \n"))
        cards_left = int(input("cards_left: \n"))
        number_viewed = int(input("number_viewed: \n"))
        print(calc_nexus(nexuses_left, cards_left, number_viewed))
def calc_nexus(nexuses_left, cards_left, cards_viewing=4):
    """Probability of seeing at least one nexus among the next
    ``cards_viewing`` cards drawn without replacement from ``cards_left``
    cards containing ``nexuses_left`` nexuses."""
    miss_all = 1
    for offset in range(cards_viewing):
        remaining = cards_left - offset
        miss_all *= (remaining - nexuses_left) / remaining
    return 1 - miss_all
main() | true |
e1dac66e8b88427252152f638edd26216a6dd26d | Python | jbro321/Python_Basic_Enthusiastic | /Python_Basic_Enthusiastic/Chapter_05/P_05_2_3.py | UTF-8 | 83 | 3.015625 | 3 | [] | no_license | # Enthusiastic_Python_Basic #P_05_2_3
# Assigning an empty list to a slice deletes those elements in place
# (indexes 1-3 here), leaving [1, 5].
st = [1, 2, 3, 4, 5]
st[1:4] = []
print(st) | true |
7bc9f93ea072b127e41662c7ce5fb18fa2e324c1 | Python | gabriel-bettanin/MinicursoPython | /parte-1/hello.py | UTF-8 | 63 | 3.359375 | 3 | [] | no_license | print('Insira o seu nome')
nome = input()
print('Ola ' + nome ) | true |
85288978ab150612ff1d0217c401309f7f948098 | Python | victorylau/LeetcodeJS | /Leetcode-PY/74. 搜索二维矩阵.py | UTF-8 | 793 | 3.390625 | 3 | [
"MIT"
] | permissive | import math
class Solution:
    def searchMatrix(self, matrix, target):
        """Return True if target occurs in the matrix, where every row is
        sorted ascending (LeetCode 74 layout).

        Scans rows for one whose [first, last] range covers target, then
        binary-searches inside that single row.
        """
        if not matrix:
            return False
        for row in matrix:
            if row[0] == target or row[-1] == target:
                return True
            if row[0] < target < row[-1]:
                lo, hi = 0, len(row) - 1
                while lo <= hi:
                    mid = (lo + hi) // 2
                    if row[mid] == target:
                        return True
                    if row[mid] < target:
                        lo = mid + 1
                    else:
                        hi = mid - 1
                # Target fell inside this row's range but is absent; no other
                # row can contain it.
                break
        return False
print(Solution().searchMatrix([
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
],13)) | true |
4317d66c77d2bc543ddac524989cb9ae68c2116a | Python | ucaiado/IdentifyingFraud | /featureSelection.py | UTF-8 | 7,217 | 2.984375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Select features to be used by ML algorithms
Created on 07/09/2015
'''
__author__='ucaiado'
import pandas as pd
import numpy as np
import sys
from sklearn.feature_selection import SelectPercentile, f_classif
'''
Begin of Help Functions
'''
def selectFeatures(features, labels, features_list, percentile= 20):
    '''
    Select features according to the given percentile of the highest scores
    (ANOVA F-test). Return a list of the selected feature names and a
    dataframe ranking every feature by its normalized -log10 p-value.
    features: numpy array with the features to be used to test sklearn models
    labels: numpy array with the real output
    features_list: a list of names of each feature
    percentile: int with percentile to be used to select features
    '''
    #feature selection
    selector = SelectPercentile(f_classif, percentile=percentile)
    selector.fit(features, labels)
    #filter names to be returned: keep only columns the selector retained
    l_rtn = [x for x, t in zip(features_list,
        list(selector.get_support())) if t]
    #calculate scores: higher means a smaller p-value; normalized to [0, 1]
    scores = -np.log10(selector.pvalues_)
    scores /= scores.max()
    df_rtn = pd.DataFrame(pd.Series(dict(zip(features_list,scores))))
    df_rtn.columns = ["pValue_Max"]
    # BUG FIX: DataFrame.sort was removed from pandas; sort_values is the
    # supported equivalent.
    df_rtn = df_rtn.sort_values("pValue_Max", ascending=False)
    return l_rtn, df_rtn
'''
End of Help Functions
'''
class Features(object):
    '''
    Test features and create new ones
    '''
    def __init__(self):
        '''
        Initialize a Features instance
        '''
        # Feature-name groups used throughout the class (Enron dataset style).
        self.payments_features = ['bonus', 'deferral_payments',
        'deferred_income', 'director_fees', 'expenses','loan_advances',
        'long_term_incentive', 'other','salary']
        self.stock_features = ['exercised_stock_options','restricted_stock',
        'restricted_stock_deferred']
        self.email_features = ['from_messages', 'from_poi_to_this_person',
        'from_this_person_to_poi','shared_receipt_with_poi', 'to_messages']
        self.new_features = ['biggest_expenses', 'percentual_exercised']
        self.total_features = ['total_payments', 'total_stock_value']
    def getFeaturesList(self, o_dataset, o_eda, f_validNumMin = 0.):
        '''
        Return a list of columns names from the self data
        f_validNumMin: float with the minimum percentual of valid numbers in
        each feature to be tested
        o_dataset: an object with the dataset loaded
        o_eda: an object with the eda methods
        '''
        l_columns = self.payments_features + self.stock_features
        l_columns+= self.email_features + self.new_features
        # NOTE(review): the transpose/loc dance below assumes
        # notValidNumbersTable returns a one-column frame indexed by feature
        # name -- confirm against the o_eda implementation.
        df_rtn = o_eda.notValidNumbersTable(o_dataset)
        na_exclude = (df_rtn.T<f_validNumMin).values
        l_exclude = list(df_rtn.loc[list(na_exclude)[0]].index)
        l_rtn = [ x for x in l_columns if x not in l_exclude]
        return l_rtn
    def getFeaturesAndLabels(self, o_dataset,o_eda = None, l_columns = False,
        scaled = False, f_validNumMin = 0.):
        '''
        Return two numpy arrays with labels and features splitted
        scaled: boolean. should return scaled features?
        f_validNumMin: float with the minimum percentual of a valid number from
        a feature to be tested
        l_columns: target features to be filtered. If any, use all.
        o_dataset: an object with the dataset loaded
        '''
        #load data needed
        df = o_dataset.getData(scaled = scaled)
        if not l_columns:
            l_columns = self.getFeaturesList(o_dataset, o_eda, f_validNumMin)
        #split data
        na_labels = df.poi.values.astype(np.float32)
        na_features = df.loc[:,l_columns].values.astype(np.float32)
        return na_labels, na_features
    def createNewFeatures(self, o_dataset):
        '''
        create the features biggest_expenses and percentual_exercised. Save them
        as new columns in df attribute in o_dataset
        o_dataset: an object with the dataset loaded
        '''
        #get a copy of the data
        df = o_dataset.getData()
        #compare the expenses to the biggest one scaling it
        # f_min = df.expenses.astype(float).min()
        # f_max = df.expenses.astype(float).max()
        # df_t2 = (df.expenses.astype(float) - f_min)/(f_max - f_min)
        # Zero salaries become NaN so the ratio below doesn't divide by zero.
        df_aux = df.salary.astype(float)
        df_aux[df_aux==0]=None
        df_t2 = df.expenses.astype(float)/df_aux
        df_t2 = pd.DataFrame(df_t2)
        df_t2.columns = ["biggest_expenses"]
        # df_t2 = df_t2.fillna(df_t2.mean())
        df_t2["poi"]=df.poi
        # df_t2 = df_t2.fillna(0)
        #scale the new feature
        # (min/max are per column, so biggest_expenses is scaled by its own
        # range; the scaled poi column is discarded below.)
        f_min = df_t2.min()
        f_max = df_t2.max()
        df_t2 = (df_t2-f_min)/ (f_max - f_min)
        #compare the exercised stock options to total payment
        df_aux = df.total_payments.astype(float)
        df_aux[df_aux==0]=None
        df_t3 = df.exercised_stock_options.astype(float)/df_aux
        df_t3 = pd.DataFrame(df_t3)
        #scale the new feature
        f_min = df_t3.min()
        f_max = df_t3.max()
        df_t3 = (df_t3-f_min)/ (f_max - f_min)
        # df_t3 = df_t3.fillna(df_t3.mean())
        # df_t3 = df_t3.fillna(0)
        #exclude some outliers just to this plot
        df_t3.columns = ["percentual_exercised"]
        df_t3["poi"]=df.poi
        #include the new features in the original dataset
        df['biggest_expenses'] = df_t2['biggest_expenses']
        df["percentual_exercised"] = df_t3["percentual_exercised"]
        o_dataset.setData(df)
    def scallingAll(self, o_dataset):
        '''
        Scale each group of features, keep the result as an attribute
        '''
        #load data
        df = o_dataset.getData()
        l_payment = self.payments_features
        l_stock = self.stock_features
        l_email = self.email_features
        #scale money related features
        # (single global min/max over all money columns, so relative
        # magnitudes between payment and stock features are preserved)
        df_aux = df.loc[:,l_payment + l_stock]
        f_max = df_aux.max().max()
        f_min = df_aux.min().min()
        df_aux = (df_aux - f_min) * 1./(f_max - f_min)
        df.loc[:,l_payment + l_stock] = df_aux.values
        #scale email features
        df_aux = df.loc[:,l_email ]
        f_max = df_aux.max().max()
        f_min = df_aux.min().min()
        df_aux = (df_aux - f_min) * 1./(f_max - f_min)
        df.loc[:,l_email ] = df_aux.values
        #keep results and show description
        o_dataset.df_scaled = df
    def select(self, features, labels, features_list, percentile= 20):
        '''
        Select features using selectFeatures function. Return a list with the
        features selected and a p-values ranking.
        features: numpy array with the features to be used to test sklearn models
        labels: numpy array with the real output
        features_list: a list of names of each feature
        '''
        l_rtn, df_rtn = selectFeatures(features, labels, features_list,
                                       percentile= percentile)
        return l_rtn, df_rtn
| true |
4cb21192e5147c996d3eb631a7fc4a2495d0a0dd | Python | AlexLi-98/misc | /quaternion/test.py | UTF-8 | 276 | 2.59375 | 3 | [] | no_license | import quatlib as ql
import numpy as np
def test1():
    """Check that rotating a vector by a quaternion matches applying the
    equivalent rotation matrix (90 degrees about the y axis)."""
    q = ql.rot2Quat(np.pi/2, [0, 1, 0])
    v = np.asarray([1, 2, 3])
    a = ql.rotate(q, v)          # quaternion rotation of v
    m = ql.quat2RotMatrix(q)     # same rotation as a 3x3 matrix
    b = m.dot(v)
    # Both paths must agree to within floating-point tolerance.
    assert np.all(np.abs(a - b) <= 1E-10)
if __name__ == '__main__':
test1() | true |
466d2331745447d9fc9607e42289327ff60935eb | Python | radityagumay/BenchmarkSentimentAnalysis_2 | /com/radityalabs/Python/origin7_textblob.py | UTF-8 | 1,889 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | # http://stevenloria.com/how-to-build-a-text-classification-system-with-python-and-textblob/
import random
from nltk.corpus import movie_reviews
from textblob.classifiers import NaiveBayesClassifier
from textblob import TextBlob
random.seed(1)  # make the shuffle below reproducible
# Tiny hand-labelled seed corpora for the Naive Bayes classifier.
train = [
    ('I love this sandwich.', 'pos'),
    ('This is an amazing place!', 'pos'),
    ('I feel very good about these beers.', 'pos'),
    ('This is my best work.', 'pos'),
    ("What an awesome view", 'pos'),
    ('I do not like this restaurant', 'neg'),
    ('I am tired of this stuff.', 'neg'),
    ("I can't deal with this", 'neg'),
    ('He is my sworn enemy!', 'neg'),
    ('My boss is horrible.', 'neg')
]
test = [
    ('The beer was good.', 'pos'),
    ('I do not enjoy my job', 'neg'),
    ("I ain't feeling dandy today.", 'neg'),
    ("I feel amazing!", 'pos'),
    ('Gary is a friend of mine.', 'pos'),
    ("I can't believe I'm doing this.", 'neg')
]
cl = NaiveBayesClassifier(train)
# Grab some movie review data
reviews = [(list(movie_reviews.words(fileid)), category)
           for category in movie_reviews.categories()
           for fileid in movie_reviews.fileids(category)]
random.shuffle(reviews)
# NOTE(review): slices 0:100 and 101:200 skip the review at index 100 --
# 100:200 was probably intended for the test slice.
new_train, new_test = reviews[0:100], reviews[101:200]
# Update the classifier with the new training data
cl.update(new_train)
#Classify some text
#print("Their burgers are amazing.", cl.classify("Their burgers are amazing.")) # "pos"
print("I don't like their pizza.", cl.classify("I don't like their pizza.")) # "neg"
# Classify a TextBlob
#blob = TextBlob("The beer was amazing. But the hangover was horrible. My boss was not pleased.", classifier=cl)
#print(blob)
#print(blob.classify())
# for sentence in blob.sentences:
#     print(sentence, sentence.classify())
# Compute accuracy
print("Accuracy: {0}".format(cl.accuracy(test)))
# Show 5 most informative features
cl.show_informative_features(5)
| true |
7de44b792255e0d9ef78a6bdfd4a957fc122ff97 | Python | LipinskiyL0/text_clastering | /clastering.py | UTF-8 | 1,721 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 27 13:19:55 2021
@author: Leonid
"""
import pickle
import numpy as np
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, ward, linkage
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
# Load the survey dataframe from a local pickle.
# NOTE(review): pickle.load must only be used on trusted files -- it can
# execute arbitrary code.
with open('rez_table_text.pkl', 'rb') as f:
    df = pickle.load(f)
# Free-text answers of question Q19, with missing values dropped.
Result=list(df['Q19_txt'].dropna())
# count_vectorizer = CountVectorizer()
# bag_of_words = count_vectorizer.fit_transform(Result)
# feature_names = count_vectorizer.get_feature_names()
# df_rez=pd.DataFrame(bag_of_words.toarray(), columns = feature_names)
tfidf_vectorizer = TfidfVectorizer()
values = tfidf_vectorizer.fit_transform(Result)
# Show the Model as a pandas DataFrame
# NOTE(review): get_feature_names was removed in newer scikit-learn; the
# replacement is get_feature_names_out -- confirm the pinned version.
feature_names = tfidf_vectorizer.get_feature_names()
df_rez=pd.DataFrame(values.toarray(), columns = feature_names)
feature_names = pd.DataFrame(feature_names, columns=['words'])
feature_names.to_excel('feature_names.xlsx')
# Silhouette sweep over k = 2..9 to pick a cluster count for KMeans.
stat=[]
for n_clusters in range(2, 10):
    clusterer = KMeans(n_clusters=n_clusters)
    cluster_labels = clusterer.fit_predict(df_rez)
    silhouette_avg = silhouette_score(df_rez, cluster_labels)
    stat.append([n_clusters,silhouette_avg ])
stat=pd.DataFrame(stat, columns=['n_clusters', 'silhouette_avg'])
# Plot silhouette score vs. cluster count and save it to disk.
plt.figure()
plt.plot(stat['n_clusters'], stat['silhouette_avg'])
plt.title('Мешок слов')
plt.xlabel('n_clusters')
plt.ylabel('silhouette_avg')
plt.savefig('Мешок слов.png')
| true |
9d2b753490187e9c31cfd39a0bc7191af15e6e67 | Python | moderngl/moderngl | /examples/ported/hello_program.py | UTF-8 | 1,088 | 2.5625 | 3 | [
"MIT"
] | permissive | import numpy as np
import _example
class Example(_example.Example):
    """Minimal moderngl sample: draws one solid-colour triangle each frame."""
    title = 'Hello Program'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pass-through vertex shader + constant-colour fragment shader.
        self.prog = self.ctx.program(
            vertex_shader='''
                #version 330

                in vec2 in_vert;

                void main() {
                    gl_Position = vec4(in_vert, 0.0, 1.0);
                }
            ''',
            fragment_shader='''
                #version 330

                out vec4 f_color;

                void main() {
                    f_color = vec4(0.2, 0.4, 0.7, 1.0);
                }
            ''',
        )

        # Three 2D vertices (NDC) forming the triangle.
        vertices = np.array([
            0.0, 0.8,
            -0.6, -0.8,
            0.6, -0.8,
        ])

        # Upload as 32-bit floats and bind to the shader's in_vert attribute.
        self.vbo = self.ctx.buffer(vertices.astype('f4').tobytes())
        self.vao = self.ctx.vertex_array(self.prog, self.vbo, 'in_vert')

    def render(self, time: float, frame_time: float):
        # Clear to white, then draw the triangle.
        self.ctx.screen.clear(color=(1.0, 1.0, 1.0))
        self.vao.render()
if __name__ == '__main__':
Example.run()
| true |
02f777a13fec370e4627247fecdbeac79fa7b66d | Python | davidlu2002/AID2002 | /PycharmProjects/Stage 2/day08/demo01.py | UTF-8 | 1,421 | 4.46875 | 4 | [] | no_license | """
读文件操作
"""
"""
# 一、读文本信息
# 1 打开文件
a = open("abc", mode="r")
# 2 读取文件中的数据
data = a.read()
print(data)
# 3 关闭文件
a.close()
"""
"""
# 二、读图片信息(要用字节串的格式读取)
# 1 打开文件(rb:字节串格式)
b = open("def.jpg", mode="rb")
# 2 读取文件中的数据
data = b.read()
print(data)
# 3 关闭
b.close()
"""
"""
# 三、按指定字符/字节读取文件
# 1 打开文件
c = open("abc", mode="r")
# 2 读取文件中的数据
while True:
data = c.read(11)
print(data)
if data == "":
break
# 3 关闭
c.close()
"""
"""
# 四、readline
# 1 打开文件
d = open("abc", mode="r")
# 2 使用readline方法读取文件
# (1) 不加参数,默认每次读一行
# while True:
# data = d.readline()
# print(data, end="")
# (2) 加参数,赋值这一行的前n个字符
data = d.readline(5)
print(data)
# 3 关闭
d.close()
"""
"""
# 五、readlines
# 将文件内容整合成列表并打印
# 1 打开文件
e = open('abc',mode="r")
# 2 读取数据
# 不加参数,列表包含文件内所有元素
# data = e.readlines()
# print(data)
# 加参数,与readline用法类似
data = e.readlines(101)
for i in data:
print(i,end="")
print(len(i))
# 3 关闭
e.close()
"""
""""""
# 六
# 1 打开文件
f = open("abc", mode="r")
for i in f:
print(i, end="")
# 4 关闭
f.close() | true |
54d35ab25e3b94bf613e182866b3c80bf267832e | Python | Siriussee/PythonSpider | /get_data.py | UTF-8 | 3,657 | 2.703125 | 3 | [] | no_license | import urllib2
import re
import json
import csv
import time
class JSONObject:
    """Wrap a dict as an object whose attributes alias the dict's keys.

    NOTE(review): appears unused below -- the json.loads() result is consumed
    as a plain dict instead.
    """
    def __init__(self, d):
        # Aliases (does not copy) d: attribute writes mutate the caller's dict.
        self.__dict__ = d
# Output CSV named like 20180131_Science_statistic_a.csv (date-stamped).
file_name = time.strftime("%Y%m%d") + '_Science_statistic_a.csv'
# CSV header row.  NOTE(review): 'catagory'/'Mentionded'/'discreet' typos are
# kept: they only affect header text, and 'catagory' matches the variable
# name used below.
first_list = [
    'catagory','title','doi',
    'published date','Altmetric score',
    'Score change in 1 year','Score change in 6 months','Score change in 3 months','Score change in 1 months',
    'Score change in 1 week','Score change in 5d','Score change in 3d','Score change in 1d',
    'readers count','Shared on Facebook','Mentionded in blogs',
    'Shared on G+','Mentionded in news','Number of discreet mentions',
    'Reddit posts','Tweeted','Number of the Youtube/Vimeo channels',
    ]
# Python 2: csv files are opened in binary mode ('wb'/'ab').
with open(file_name, 'wb') as file:
    writer = csv.writer(file)
    writer.writerow(first_list)
# Input: one "url#category" entry per line.
with open('API_url_9a.txt', 'r') as f:
    datas = f.read().split('\n')
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
# Fetch each Altmetric API url ("url#category" lines) and append one CSV row
# per article.  Python 2 code: print statements, dict.has_key, urllib2.
for data in datas:
    try:
        url = data.split('#')[0]
        catagory = data.split('#')[1]
        # Be polite to the API: at most one request per second.
        time.sleep(1)
        try:print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + 'getting ' + url
        except:pass
        request = urllib2.Request(url, headers = headers)
        response = urllib2.urlopen(request, timeout = 10)
        # print response.read()
        html_text = response.read()
        data = json.loads(html_text)
    except:
        # NOTE(review): bare except hides *all* failures (even KeyboardInterrupt);
        # the failing url is logged and the entry skipped.
        with open('log3.txt', 'ab') as f:
            f.write(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                ' an error occurred when requesting *' + url + '\n'
            )
        continue
    # Default any missing counters so the row construction below cannot KeyError.
    if not data.has_key('published_on'):
        data['published_on'] = 1
    if not data.has_key('cited_by_fbwalls_count'):
        data['cited_by_fbwalls_count'] = 0
    # NOTE(review): 'tweters' is a typo and this default is never read; the
    # correctly spelled 'cited_by_tweeters_count' (used below) is defaulted
    # further down.
    if not data.has_key('cited_by_tweters_count'):
        data['cited_by_tweters_count'] = 0
    if not data.has_key('cited_by_feeds_count'):
        data['cited_by_feeds_count'] = 0
    if not data.has_key('cited_by_gplus_count'):
        data['cited_by_gplus_count'] = 0
    if not data.has_key('cited_by_msm_count'):
        data['cited_by_msm_count'] = 0
    if not data.has_key('cited_by_posts_count'):
        data['cited_by_posts_count'] = 0
    if not data.has_key('cited_by_rdts_count'):
        data['cited_by_rdts_count'] = 0
    if not data.has_key('cited_by_tweeters_count'):
        data['cited_by_tweeters_count'] = 0
    if not data.has_key('cited_by_videos_count'):
        data['cited_by_videos_count'] = 0
    try:
        data_list = [
            catagory,data['title'].encode('UTF-8'),data['doi'],
            time.strftime("%Y-%m-%d",time.localtime(data['published_on'])),data['score'],
            data['history']['1y'],data['history']['6m'],data['history']['3m'],data['history']['1m'],
            data['history']['1w'],data['history']['5d'],data['history']['3d'],data['history']['1d'],
            data['readers_count'],data['cited_by_fbwalls_count'],data['cited_by_feeds_count'],
            data['cited_by_gplus_count'],data['cited_by_msm_count'],data['cited_by_posts_count'],
            data['cited_by_rdts_count'],data['cited_by_tweeters_count'],data['cited_by_videos_count'],
        ]
    except:
        with open('log3.txt', 'ab') as f:
            f.write(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                ' an error occurred when saving data from *' + url + '\n'
            )
        continue
    # Append the row to the date-stamped CSV created above.
    with open(file_name, 'ab') as file:
        writer = csv.writer(file)
        writer.writerow(data_list)
| true |
a6e1ab80c6f5d23d299760b7f4d321c2c3d88c49 | Python | ovr1/test | /Прочее2/test16_cekundomer.py | UTF-8 | 364 | 3.234375 | 3 | [] | no_license | import time
# Micro-benchmark: time 1,000,000 empty loop iterations, once and then
# averaged over 10 runs.  time.perf_counter() is used instead of time.time()
# because it is monotonic and has the highest available resolution, so it is
# the correct clock for measuring short intervals (time.time() can jump with
# system clock adjustments and may have coarse resolution).
t0 = time.perf_counter()
for j in range(1000000):
    pass
t1 = time.perf_counter()
print("Выполнение заняло %.5f секунд" % (t1 - t0))
# Average the same measurement over 10 independent runs to smooth out noise.
avg = 0
for _ in range(10):
    t0 = time.perf_counter()
    for j in range(1000000):
        pass
    t1 = time.perf_counter()
    avg += (t1 - t0)
avg /= 10
print("Выполнение заняло %.5f секунд" % avg)
7a213f54d8e8674e118d20e68eb9ad006eddad53 | Python | rfbr/Reinforcement_Learning_Project | /main/__main__.py | UTF-8 | 11,351 | 3.15625 | 3 | [] | no_license | import os
import torch
from main.agents.eps_greedy import EpsGreedyAgent
from main.agents.sarsa import SARSA
from main.agents.q_learning import QLearning
from main.agents.expected_sarsa import ExpectedSARSA
from main.env.tic_tac_toe import TicTacToe
from main.agents.alphazero.net import Net
if __name__ == '__main__':
players = {}
os.system('clear')
while True:
try:
print('Welcome to the tic-tac-toe RL game!')
possible_agents = '''
- 0 to play with the random algorithm;
- 1 to play with the epsilon-greedy algorithm;
- 2 to play with the SARSA algorithm;
- 3 to play with the Q Learning algorithm;
- 4 to play with the Expected SARSA algorithm;
- 5 to play with the AlphaZero algorithm.
'''
possible_choices = [0, 1, 2, 3, 4, 5]
# -- Choices of players agents
while True:
try:
player_1_value = int(
input("Choose player 1 agent:" + possible_agents +
"\n"))
if player_1_value not in possible_choices:
raise ValueError
else:
break
except ValueError:
print('Player 1 agent must be in ', possible_choices)
while True:
try:
player_2_value = int(
input("Choose player 2 agent:" + possible_agents +
"\n"))
if player_2_value not in possible_choices:
raise ValueError
else:
break
except ValueError:
print('Player 2 agent must be in ', possible_choices)
# -- Player initialisation
p1_need_training = False
p2_need_training = False
# - Player 1
# Random algorithm
if player_1_value == 0:
os.system('clear')
players[1] = EpsGreedyAgent(name=1, epsilon=1)
# Epsilon-greedy algorithm
if player_1_value == 1:
os.system('clear')
while True:
try:
eps_1 = float(
input('Player 1: EpsGreedy epsilon value?\n'))
if eps_1 < 0 or eps_1 >= 1:
raise ValueError
else:
players[1] = EpsGreedyAgent(name=1, epsilon=eps_1)
p1_policy_name = 'p1_epsilon_' + str(eps_1)
try:
players[1].load_policy("main/policies/" +
p1_policy_name)
except (OSError, IOError) as e:
p1_need_training = True
env1 = TicTacToe(
players[1],
EpsGreedyAgent(name=-1, epsilon=eps_1))
break
except ValueError:
print('Epsilon must be in [0,1[')
# SARSA algorithm
if player_1_value == 2:
os.system('clear')
while True:
try:
eps_1 = float(
input('Player 1: SARSA epsilon value?\n'))
if eps_1 < 0 or eps_1 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[1] = SARSA(name=1, epsilon=eps_1)
p1_need_training = True
env1 = TicTacToe(players[1], SARSA(name=-1, epsilon=eps_1))
# Q Learning algorithm
if player_1_value == 3:
os.system('clear')
while True:
try:
eps_1 = float(
input('Player 1: QLearning epsilon value?\n'))
if eps_1 < 0 or eps_1 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[1] = QLearning(name=1, epsilon=eps_1)
p1_need_training = True
env1 = TicTacToe(players[1], QLearning(name=-1, epsilon=eps_1))
# Expected SARSA algorithm
if player_1_value == 4:
os.system('clear')
while True:
try:
eps_1 = float(
input('Player 1: ExpectedSARSA epsilon value?\n'))
if eps_1 < 0 or eps_1 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[1] = ExpectedSARSA(name=1, epsilon=eps_1)
p1_need_training = True
env1 = TicTacToe(players[1],
ExpectedSARSA(name=-1, epsilon=eps_1))
# AlphaZero algorithm
if player_1_value == 5:
net = Net(name=1)
if torch.cuda.is_available():
net.cuda()
net.eval()
best_net = './main/agents/alphazero/data/model_data/BestNet.pt'
checkpoint = torch.load(best_net, map_location='cpu')
net.load_state_dict(checkpoint['state_dict'])
players[1] = net
p1_need_training = False
# - Player 2
# Random algorithm
if player_2_value == 0:
os.system('clear')
players[2] = EpsGreedyAgent(name=-1, epsilon=1)
# Epsilon-greedy algorithm
if player_2_value == 1:
os.system('clear')
while True:
try:
eps_2 = float(
input('Player 2: EpsGreedy epsilon value?\n'))
if eps_2 < 0 or eps_2 >= 1:
raise ValueError
else:
players[2] = EpsGreedyAgent(name=-1, epsilon=eps_2)
p2_policy_name = 'p2_epsilon_' + str(eps_2)
try:
players[2].load_policy("main/policies/" +
p2_policy_name)
except (OSError, IOError) as e:
p2_need_training = True
env2 = TicTacToe(
EpsGreedyAgent(name=1, epsilon=eps_2),
players[2])
break
except ValueError:
print('Epsilon must be in [0,1[')
# SARSA algorithm
if player_2_value == 2:
os.system('clear')
while True:
try:
eps_2 = float(
input('Player 2: SARSA epsilon value?\n'))
if eps_2 < 0 or eps_2 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[2] = SARSA(name=-1, epsilon=eps_2)
p2_need_training = True
env2 = TicTacToe(SARSA(name=1, epsilon=eps_2), players[2])
# Q Learning algorithm
if player_2_value == 3:
os.system('clear')
while True:
try:
eps_2 = float(
input('Player 2: QLearning epsilon value?\n'))
if eps_2 < 0 or eps_2 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[2] = QLearning(name=-1, epsilon=eps_2)
p2_need_training = True
env2 = TicTacToe(QLearning(name=1, epsilon=eps_2), players[2])
# Expected SARSA algorithm
if player_2_value == 4:
os.system('clear')
while True:
try:
eps_2 = float(
input('Player 2: ExpectedSARSA epsilon value?\n'))
if eps_2 < 0 or eps_2 > 1:
raise ValueError
else:
break
except ValueError:
print('Epsilon must be in [0,1]')
players[2] = ExpectedSARSA(name=-1, epsilon=eps_2)
p2_need_training = True
env2 = TicTacToe(ExpectedSARSA(name=1, epsilon=eps_2),
players[2])
# AlphaZero algorithm
if player_2_value == 5:
net = Net(name=-1)
if torch.cuda.is_available():
net.cuda()
net.eval()
best_net = './main/agents/alphazero/data/model_data/BestNet.pt'
checkpoint = torch.load(best_net, map_location='cpu')
net.load_state_dict(checkpoint['state_dict'])
players[2] = net
p1_need_training = False
# -- Number of game to play
os.system('clear')
while True:
try:
nb_games = int(
input('How many games you want them to play?\n'))
if nb_games <= 0:
raise ValueError
else:
break
except ValueError:
print('Oops wrong input!')
except ValueError:
print("Invalid input :'( Try again")
# -- Training
print("Training in progress...")
if p1_need_training:
env1.train(2000)
if player_1_value == 1:
env1.player_1.save_policy("main/policies/" + p1_policy_name)
players[1].load_policy("main/policies/" + p1_policy_name)
if p2_need_training:
env2.train(2000)
if player_2_value == 1:
env2.player_2.save_policy("main/policies/" + p2_policy_name)
players[2].load_policy("main/policies/" + p2_policy_name)
# -- Playing
print("Playing games...")
if player_1_value in [1, 2, 3, 4]:
players[1].epsilon = 0
if player_2_value in [1, 2, 3, 4]:
players[2].epsilon = 0
environment = TicTacToe(players[1], players[2])
environment.simulation(nb_games)
break
| true |
63232e52f02d15b53b0cc0fec2d282b3fe554c6d | Python | ai-is-awesome/tradingview_stocks_scraper | /utils.py | UTF-8 | 439 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 01:37:47 2020
@author: Piyush
"""
def get_text_or_none(tagOrNone):
    """Extract the text of a parsed tag, tolerating missing input.

    - Falsy input (None, empty result set, ...) -> None.
    - Objects exposing a ``.text`` attribute (e.g. BeautifulSoup tags)
      -> that text.
    - Anything else is returned unchanged (the original fallback behaviour).
    """
    if not tagOrNone:
        return None
    try:
        return tagOrNone.text
    # Narrowed from a bare `except:` -- only "object has no .text" is an
    # expected failure here; anything else should surface to the caller.
    except AttributeError:
        return tagOrNone
def url_formatter(ticker):
    """Build the TradingView technical-analysis page URL for *ticker*."""
    return f'https://www.tradingview.com/symbols/{ticker}/technicals/'
3c551a7d86e20b692e1e95abe635fa49ba32f465 | Python | renuka-fernando/project-euler-answers | /12.py | UTF-8 | 471 | 3.03125 | 3 | [] | no_license | Divisor_Count = 1
n = 0
while Divisor_Count <= 500:
Divisor_Count = 1
n += 1
i = 1
Num = (n + 1)*n/2
PrimeList = []
while Num != 1:
i += 1
PrimeCount = 0
while Num % i == 0:
Num /= i
PrimeCount += 1
PrimeList.append(PrimeCount)
for p in PrimeList:
Divisor_Count *= (p + 1)
if n%1000==0:
print 'please wait. checking ' + str(n) + 'th number...'
print (n + 1)*n/2
| true |
3062eab1bfc56ef94e47f82ee6a36a079ebce1c1 | Python | Andrew-Finn/Daily-Coding-Problems | /2021/03 March/10th.py | UTF-8 | 1,018 | 3.796875 | 4 | [] | no_license | # Good morning! Here's your coding interview problem for today.
# This problem was asked by Netflix.
# Given a sorted list of integers of length N, determine if an element x is in the list without performing any
# multiplication, division, or bit-shift operations.
# Do this in O(log N) time.
def binary_search(l, s):
if len(l) == 1:
return True if l[0] == s else False
elif l[len(l) // 2] == s:
return True
elif l[len(l) // 2] < s:
return binary_search((l[len(l) // 2:]), s)
return binary_search((l[:len(l) // 2]), s)
if __name__ == "__main__":
import random
for i in range(500):
l = sorted([random.randint(1, 999) for x in range(99)])
for i in range(10):
s = random.choice(l)
assert binary_search(l, s) == True
for _ in range(10):
while True:
n = random.randint(1, 999)
if n not in l:
assert binary_search(l, n) == False
break
| true |
42e57510dd1e5d907313bce5b3e24732b99cf783 | Python | fmacrae/Roland_Robot | /Panning.py | UTF-8 | 1,656 | 3.3125 | 3 | [] | no_license | #!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
import time
# ===========================================================================
# Example Code
# ===========================================================================
# Initialise the PWM device using the default address
pwm = PWM(0x41)
# Note if you'd like more debug output you can instead run:
#pwm = PWM(0x40, debug=True)
servoMin = 250 # Min pulse length out of 4096
servoMax = 500 # Max pulse length out of 4096
servoMid = 375 # Max pulse length out of 4096
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.setPWM(channel, 0, pulse)
def scanLeftToRight():
pwm.setPWMFreq(60) # Set frequency to 60 Hz
while (True):
# Change speed of continuous servo on channel O
pwm.setPWM(0, 0, servoMin)
time.sleep(2)
pwm.setPWM(0, 0, servoMid)
time.sleep(2)
pwm.setPWM(0, 0, servoMax)
time.sleep(2)
pwm.setPWM(0, 0, servoMid)
time.sleep(2)
def Look():
degree = 250/120
pwm.setPWMFreq(60)
while(True):
f = open('viewAngle.txt', 'r')
angle = f.readline()
#print "%s Angle Read" % angle
servoSetting = servoMid+(degree*int(angle))
f.close()
#print "%d servoSetting" % servoSetting
pwm.setPWM(0, 0, servoSetting)
time.sleep(1)
if __name__ == "__main__":
#scanLeftToRight()
Look()
| true |
c01fb40f152277ab74d9299c0dd93e32321aabb1 | Python | pranavreddym/-Contact-search | /test_users_contact.py | UTF-8 | 2,317 | 2.828125 | 3 | [] | no_license | import unittest
import json
import users_contact
import random
import string
import requests
class Test_Users_Contact(unittest.TestCase):
    """Integration tests for the users_contact API helpers.

    NOTE(review): test_add_contact needs the HTTP service on localhost:8080;
    the other tests call users_contact functions directly.
    """
    def test_add_contact(self):
        print(">>>>Testing add contact method<<<<<")
        # Random 5-char name so repeated runs don't collide with existing users.
        random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
        data = {}
        data['name'] = random_string
        data['contact'] = 12323222
        response = requests.post("http://localhost:8080/contact", json=data)
        print(response.text)
        success_sting = json.dumps({'success': True})
        self.assertEqual(response.text, success_sting)
    def test_get_contact(self):
        print(">>>>Testing get contact method<<<<<<")
        # NOTE(review): json.dumps() takes keyword-only options in Python 3,
        # so passing 200 and a headers dict positionally raises TypeError at
        # runtime (compare `successful_user` below, which builds a
        # (body, code, headers) tuple) -- these expected values look broken.
        users_exists_string = json.dumps({'success': False, 'message': 'Sorry, the user already exists'}, 200,
                                         {'ContentType': 'application/json'})
        successful_user = json.dumps({'success': True}), 200, {'ContentType': 'application/json'}
        user_not_found = json.dumps({'Success':False, 'message': 'User not found'}, 200, {'Content-Type':False})
        randomName = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
        response = json.loads(users_contact.get_contact("Pranav"))
        self.assertEqual(users_contact.get_contact(randomName), user_not_found)
        self.assertTrue(response)
    def test_update_contact(self):
        print(">>>>>Testing update contact method<<<<<<")
        users_exists_string = json.dumps({'success': False, 'message': 'Sorry, the requested user doesn\'t exists'}, 200,
                                         {'ContentType': 'application/json'})
        randomName = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
        self.assertEqual(users_contact.update_contact(randomName), users_exists_string)
    def test_delete_contact(self):
        # NOTE(review): no print banner here, unlike the sibling tests.
        user_exits_string = json.dumps({'success': False, 'message': 'Sorry, the requested user doesn\'t exists'}, 200,
                                       {'ContentType': 'application/json'})
        randomName = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
        self.assertEqual(users_contact.delete_contact(randomName), user_exits_string)
# Run the suite directly (the add-contact test needs the API on localhost:8080).
if __name__ == '__main__':
    unittest.main()
acebe0b762871a34bf43c79a630a268df78a71e0 | Python | pravarmahajan/hashembedding | /HashEmbedding/example.py | UTF-8 | 4,305 | 2.71875 | 3 | [] | no_license | import string
from layers import HashEmbedding, ReduceSum
from keras.layers import Input, Dense, Activation, Embedding
from keras.models import Model
import hashlib
import nltk
import keras
import numpy as np
from keras.callbacks import EarlyStopping
import dataloader
import random
use_hash_embeddings = True  # False -> plain keras Embedding baseline
embedding_size = 20
num_buckets = 10**6 # number of buckets in second hashing layer (hash embedding)
max_words = 10**7 # number of buckets in first hashing layer
max_epochs = 50
num_hash_functions = 2
max_len = 150  # documents are padded/truncated to this many bigram ids
num_classes = 4
def get_model(embedding, num_classes):
    """Build the classifier: embed bigram ids, pool them per document, softmax.

    NOTE(review): written against the legacy Keras 1.x functional API
    (Model(input=..., output=...)); modern Keras expects inputs=/outputs=.
    """
    input_words = Input([None], dtype='int32', name='input_words')
    x = embedding(input_words)
    # ReduceSum (project layer) presumably sums embeddings over the sequence,
    # using the raw ids for padding masking -- confirm in layers.py.
    x = ReduceSum()([x, input_words])
    #x = Dense(50, activation='relu')(x)
    #x = Dense(num_classes)(x)
    x = Activation('softmax')(x)
    model = Model(input=input_words, output=x)
    return model
def word_encoder(w, max_idx):
    """Deterministically hash word *w* into the range [1, max_idx - 1].

    SHA-1 is used rather than the builtin hash() so ids are stable across
    interpreter runs; 0 is never produced (it is reserved for padding).
    """
    digest = hashlib.sha1(w.encode('utf-8')).digest()
    return int.from_bytes(digest, 'big') % (max_idx - 1) + 1
def remove_punct(in_string):
    """Lowercase *in_string*, replacing every ASCII punctuation char with a space."""
    table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    return in_string.translate(table).lower()
def bigram_vectorizer(documents):
    """Encode each document as the hashed ids of its adjacent token pairs.

    Tokens are split on single spaces; each "tokA_tokB" bigram is mapped to
    an id in [1, max_words) via word_encoder.  Returns one id list per
    document (length = number of tokens - 1).
    """
    encoded = []
    for document in documents:
        tokens = document.split(' ')
        encoded.append([word_encoder(tokens[k] + "_" + tokens[k + 1], max_words)
                        for k in range(len(tokens) - 1)])
    return encoded
# In[4]:
def input_dropout(docs_as_ids, min_len=4, max_len=100):
dropped_input = [None]*len(docs_as_ids)
for i, doc in enumerate(docs_as_ids):
random_len = random.randrange(min_len, max_len+1)
idx = max(len(doc)-random_len, 0)
dropped_input[i] = doc[idx:idx+random_len]
return dropped_input
def create_dataset():
    """Load, clean, bigram-encode and pad the train/val/test splits.

    Returns (train_x, train_y, val_x, val_y, test_x, test_y) where x are
    lists of fixed-length (max_len) bigram-id lists, 0-padded, and y are
    zero-based class labels.
    """
    dl_obj = dataloader.UniversalArticleDatasetProvider(1, valid_fraction=0.05)
    dl_obj.load_data()
    # Title + body, punctuation stripped/lowercased; labels shifted to 0-based.
    train_documents = [remove_punct(sample['title'] + " " + sample['text']) for sample in dl_obj.train_samples]
    train_targets = [sample['class'] - 1 for sample in dl_obj.train_samples]
    val_documents = [remove_punct(sample['title'] + " " + sample['text']) for sample in dl_obj.valid_samples]
    val_targets = [sample['class'] - 1 for sample in dl_obj.valid_samples]
    test_documents = [remove_punct(sample['title'] + " " + sample['text']) for sample in dl_obj.test_samples]
    test_targets = [sample['class'] - 1 for sample in dl_obj.test_samples]
    train_docs2id = bigram_vectorizer(train_documents)
    val_docs2id = bigram_vectorizer(val_documents)
    test_docs2id = bigram_vectorizer(test_documents)
    # Random cropping is applied to the training split only (augmentation).
    train_docs2id = input_dropout(train_docs2id)
    # Pad with id 0 / truncate so every document has exactly max_len ids.
    train_docs2id = [d+[0]*(max_len-len(d)) if len(d) <= max_len else d[:max_len] for d in train_docs2id]
    val_docs2id = [d+[0]*(max_len-len(d)) if len(d) <= max_len else d[:max_len] for d in val_docs2id]
    test_docs2id = [d+[0]*(max_len-len(d)) if len(d) <= max_len else d[:max_len] for d in test_docs2id]
    #val_docs2id = input_dropout(val_docs2id)
    #train_docs2id = train_docs2id % max_words
    #val_docs2id = val_docs2id % max_words
    return train_docs2id, train_targets, val_docs2id, val_targets, test_docs2id, test_targets
if __name__ == '__main__':
    # Choose the hash embedding or a plain dense Embedding baseline.
    if use_hash_embeddings:
        embedding = HashEmbedding(max_words, num_buckets, embedding_size, num_hash_functions=num_hash_functions)
    else:
        embedding = Embedding(max_words, embedding_size)
    train_data, train_targets, val_data, val_targets, test_data, test_targets = create_dataset()
    model = get_model(embedding, num_classes)
    metrics = ['accuracy']  # NOTE(review): unused; the literal below is passed instead
    loss = 'sparse_categorical_crossentropy'
    model.compile(optimizer=keras.optimizers.Adam(),loss=loss, metrics=['accuracy'])
    print('Num parameters in model: %i' % model.count_params())
    # NOTE(review): nb_epoch is the Keras 1.x spelling (epochs in Keras 2+).
    model.fit(train_data, train_targets, nb_epoch=max_epochs, validation_data = (val_data, val_targets),
              callbacks=[EarlyStopping(patience=5)], batch_size=1024)
    test_result = model.test_on_batch(test_data, test_targets)
    print(test_result)
#for i, (name, res) in enumerate(zip(model.metrics_names, test_result)):
#print('%s: %1.4f' % (name, res))
| true |
ce5788330fa612c28faaaf3ad2942bcac03afdef | Python | kirillr123/Homework-repo | /completed_hw9/completed_hw9(problem9).py | UTF-8 | 199 | 2.65625 | 3 | [] | no_license | import math
# Project Euler 9: find the Pythagorean triplet a < b < c with
# a + b + c == 1000 and print a * b * c.
# Rewritten from a nested list comprehension abused for its print() side
# effect into explicit loops, and the fragile float comparison
# (1000-a-b == math.sqrt(a**2 + b**2)) replaced with exact integer
# arithmetic (a*a + b*b == c*c).
for a in range(1, 1000):
    for b in range(a + 1, 1000):
        c = 1000 - a - b
        # c must be the strictly-largest side (the hypotenuse).
        if b < c and a * a + b * b == c * c:
            print("Project Euler problem 9 solution:", a * b * c)
| true |
6848e59542877fe3f65a13d1040930c4402cfc40 | Python | Vishwajeetiitb/Autumn-of-Automation | /OpenCV/task4.py | UTF-8 | 1,664 | 2.609375 | 3 | [
"MIT"
] | permissive | import numpy as np
import cv2
from time import sleep
# Detect and label triangles / squares / rhombi / rectangles among the
# bright contours of the input image.
im = cv2.imread('test_rect.jpg')
# im = cv2.imread('shapes.png')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
# Binarise: pixels brighter than 180 become white (255).
ret,thresh = cv2.threshold(imgray,180,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# print(contours)
# im = cv2.drawContours(im, contours, -1, (0,255,0), 3)
i = 0
for cnt in contours:
    area = cv2.contourArea(cnt)
    # Ignore small / noisy contours.
    if area > 10**3:
        # Polygonal approximation, tolerance = 1% of the perimeter.
        # (identifier 'episolon' is a typo for epsilon, kept as-is)
        episolon = 0.01*cv2.arcLength(cnt,True)
        poly = cv2.approxPolyDP(cnt,episolon,True)
        im = cv2.drawContours(im, [poly],0, (0,255,0), 3)
        print(poly.shape)
        if len(poly)==3:
            # Label triangles at the contour centroid (from image moments).
            M = cv2.moments(cnt)
            x = int(M['m10']/M['m00'])
            y = int(M['m01']/M['m00'])
            cv2.putText(im,"Triangle",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0))
        if len(poly)==4:
            M = cv2.moments(poly)
            x = int(M['m10']/M['m00'])
            y = int(M['m01']/M['m00'])
            # Quadrilateral corner coordinates, flattened as x1,y1,...,x4,y4.
            x1 = poly.ravel()[0]
            y1 = poly.ravel()[1]
            x2 = poly.ravel()[2]
            y2 = poly.ravel()[3]
            x3 = poly.ravel()[4]
            y3 = poly.ravel()[5]
            x4 = poly.ravel()[6]
            y4 = poly.ravel()[7]
            # d1, d2: sides adjacent to corner 2; d3: diagonal from corner 2.
            d1 = ((x1-x2)**2 + (y1-y2)**2)**0.5
            d2 = ((x3-x2)**2 + (y3-y2)**2)**0.5
            d3 = ((x4-x2)**2 + (y4-y2)**2)**0.5
            # Equal adjacent sides -> square (right angle checked via
            # Pythagoras on the diagonal) or rhombus; otherwise rectangle.
            if abs(d1 - d2) < 2:
                if abs(d3 - (d1**2 + d2**2)**0.5) < 2:
                    cv2.putText(im,"Square",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0))
                else:
                    cv2.putText(im,"Rhombus",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0))
            else:
                cv2.putText(im,"Rectangle",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0))
        # cv2.putText(im,"Rectangle"+ str(i),(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0))
        # i+=1
cv2.imshow("test",im)
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
821edf09d8c1a623f662e0cfaa6af1712a5bb38b | Python | vyrist13/Basic-Python | /Latihan/function.py | UTF-8 | 400 | 3.90625 | 4 | [] | no_license | def function():
print("Saya makan")
print("Saya minum")
print("Saya tidur")
function()
def nama(name):
    """Print a greeting with the supplied name (Indonesian: 'My name is ...')."""
    print(f"Nama saya {name}")
nama("Ali")
nama("Badru")
def method(name="Tidak Tahu"):
    """Print the name, defaulting to 'Tidak Tahu' ('don't know') when omitted."""
    print(f"Nama saya {name}")
method()
def fungsi(nama, tinggi, umur):
    """Print name, height and age, one labelled line each."""
    for label, value in (("Nama", nama), ("Tinggi", tinggi), ("Umur", umur)):
        print(f"{label} saya {value}")
fungsi(umur=36,nama="Thoma",tinggi=172) | true |
e5fbbf15062d1e1c6db69832a99e9033cb4622a0 | Python | jackZesus/altCode | /practice.py | UTF-8 | 591 | 2.734375 | 3 | [] | no_license | Mydict = {
"apple": "wine",
"cigar": "cuban",
"winston": "churchuil",
"leo": "tolstoy",
"copydict": {"welcome": "indian"},
"ar": [12, 34, 66, 67]
}
# BUG FIX: `list[Mydict]` subscripted the built-in list *type* (a typo) --
# it raises TypeError on older Pythons and prints a meaningless GenericAlias
# on 3.9+.  Calling list(Mydict) yields the dict's keys, matching the
# print(list(dict3)) usage later in this file.
print(list(Mydict))
print(Mydict["winston"])
# Nested access: Mydict["copydict"] is itself a dict.
print(Mydict["copydict"]["welcome"])
# Rebind (not mutate) the list stored under "ar".
Mydict["ar"] = [12, 45, 66]
print(Mydict["ar"])
# A second dict with the same contents (values are sample data; the
# "churchuil" spelling is the original data, kept as-is).
dict3 = {
    "apple": "wine",
    "cigar": "cuban",
    "winston": "churchuil",
    "leo": "tolstoy",
    "copydict": {"welcome": "indian"},
    "ar": [12, 34, 66, 67]
}
copy = {
    "a": "ball",
    "b": "catch"
}
# Merge `copy` into dict3 in place, then list the resulting keys.
dict3.update(copy)
print(list(dict3))
# NOTE(review): the tuple() wrapper is redundant -- list(dict3.keys())
# (or just list(dict3)) gives the same result.
print(list(tuple(dict3.keys())))
| true |
53b82083b21dfd1f96a4b53484367bfbb59b066b | Python | Tarunkumar2498/skillrack_codes | /removingduplicates_in_string.py | UTF-8 | 567 | 3.328125 | 3 | [] | no_license | """ REMOVING DUPLICATES IN A STRING
This code removes duplicates in a string
Input:
contains a string
Output:
string that contains only unique characters
Example:
i/p:
"thisiscodedinpython3"
o/p:
thiscodenpy3
This code was created by TARUN KUMAR on 18/06/2018
for further queries reach me at tarunkumar2498[at]gmail[dot]com
coded on python3
"""
#code begins here
# OrderedDict.fromkeys keeps the first occurrence of each character, in input
# order, so joining its keys drops duplicates while preserving order.
from collections import OrderedDict
ip=input()
print ("".join(OrderedDict.fromkeys(ip)))
##end##
370cc408de2db3a1f8a45995431dfd58599ca38e | Python | chengxinlun/sdss-dp | /fe2_vs_o3.py | UTF-8 | 2,334 | 2.65625 | 3 | [] | no_license | import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from code.core.location import Location
# Read from lightcurve
def read_lc(rmid, line):
fd = np.loadtxt(os.path.join(Location.root, Location.lightcurve, str(rmid),
line + ".txt"))
mjd_list = fd[:, 0]
flux = fd[:, 1]
error = fd[:, 2]
return [mjd_list, flux, error]
# Filter out invalid result
def filt(mjd_list, flux, error):
n0i = np.nonzero(flux)
norm_mjdl = mjd_list[n0i]
norm_flux = flux[n0i]
norm_error = error[n0i]
if len(norm_flux) < 0.5 * len(mjd_list):
return [[], [], []]
else:
return [norm_mjdl, norm_flux, norm_error]
# Intersection
def inter(mjdl_list, fl_list, err_list):
mjd_list = mjdl_list[0]
for each in mjdl_list:
mjd_list = np.intersect1d(mjd_list, each)
flux = []
error = []
for each in range(len(mjdl_list)):
inter_i = np.nonzero(np.in1d(mjdl_list[each], mjd_list))
flux.append(fl_list[inter_i])
error.append(err_list[inter_i])
flux = np.array(flux)
error = np.array(error)
return [mjd_list, flux, error]
# Get hb, o3
def ave(rmid):
hbl = filt(*read_lc(rmid, "hbeta"))
o3l = filt(*read_lc(rmid, "o3"))
fel = filt(*read_lc(rmid, "fe2"))
a_hb = np.mean(hbl[1])
e_hb = np.mean(hbl[2])
a_o3 = np.mean(o3l[1])
e_o3 = np.mean(o3l[2])
a_fe = np.mean(fel[1])
e_fe = np.mean(fel[2])
r_o3 = a_o3 / a_hb
r_fe = a_fe / a_hb
r_o3_e = (a_o3 * e_hb + e_o3 * a_hb) / (a_hb * a_hb)
r_fe_e = (a_fe * e_hb + e_fe * a_hb) / (a_hb * a_hb)
return [r_o3, r_fe, r_o3_e, r_fe_e]
if __name__ == "__main__":
f = open(os.path.join(Location.root, "data/source_list.pkl"), "rb")
source_list = pickle.load(f)
f.close()
data_list = []
for each in source_list:
try:
temp = ave(each)
print(temp)
data_list.append(temp)
except Exception:
continue
data_list = np.array(data_list)
plt.errorbar(data_list[:, 0], data_list[:, 1], xerr=data_list[:, 2],
yerr=data_list[:, 3], linestyle='none', color='blue', fmt='o')
plt.xlim([0.0, 2.5])
plt.ylim([0.0, 25.0])
plt.xlabel("Relative OIII")
plt.ylabel("Relative FeII")
plt.show()
| true |
88c12f04c200ba104c2bc48a672d7ce73abc2250 | Python | kinggodhj/python_coding_test | /baekjoon/samsungA/14500.py | UTF-8 | 1,031 | 2.65625 | 3 | [] | no_license | N, M=map(int, input().rstrip().split(' '))
array=[list(map(int, input().rstrip().split(' '))) for _ in range(N)]
case=[[(1,0),(2,0),(3,0)],[(0,1), (0,2), (0,3)], [(1,0),(2,0),(2,1)], [(1,0), (0,1), (0,2)],\
[(0,1), (1,1), (2,1)],[(1,-2), (1,-1), (1,0)], [(2,-1),(2, 0), (1, 0)], [(1,0),(0,1),(1,1)], \
[(1,0),(1,1),(2,1)], [(1,-1),(1,0),(0,1)], [(2,-1),(1,-1),(1,0)], [(0,1),(0,2),(1,1)], \
[(0,1),(-1,1),(1,1)], [(0,1),(0,2),(-1,1)], [(1,0),(1,1),(2,0)],\
[(2,0),(1,0),(0,1)], [(1,2),(1,1),(1,0)], [(1,2),(1,1),(0,1)], [(1,2),(0,2),(0,1)]]
result=0
def check_array(array, x, y):
global result
for c in case:
tmp=array[x][y]
for idx in c:
if 0<=x+idx[0]<N and 0<=y+idx[1]<M:
array[x+idx[0]][y+idx[1]]
tmp+=array[x+idx[0]][y+idx[1]]
else:
tmp=-1
break
result=max(result, tmp)
return result
for i in range(N):
for j in range(M):
result=check_array(array, i, j)
print(result) | true |
8d4f5e799db8d189a4036c943335f789161feaf3 | Python | kir-dev/printer_client | /src/printer.py | UTF-8 | 1,624 | 3.171875 | 3 | [] | no_license | # encoding: utf-8
import errors
class User(object):
"""
State object of the application: stores the name, printers,
user's status and errors.
"""
def __init__(self, name="None", initialized=True):
self.error = None
self.requiredUpdate = False
self.initialized = initialized
self.name = name
self.printers = list()
self.status = True
def CopyFrom(self, other):
"""Clones the the target object"""
self.error = other.error
self.requiredUpdate = other.requiredUpdate
self.initialized = other.initialized
self.name = other.name
self.printers = other.printers
self.status = other.status
def AddPrinter(self, printer):
self.printers.append(printer)
def __str__(self):
return 'User "%s" having %d printer(s)' % (self.name, len(self.printers))
def GetPrinter(self, index):
return self.printers[index]
def GetPrinterFromId(self, id):
for p in self.printers:
if p.id == id:
return p
raise errors.UnknownError, "Hibás nyomtató-azonosító"
def GetPrinters(self):
return self.printers.__iter__()
def GetPrinterCount(self):
return len(self.printers)
class Printer(object):
"""Represents one printer"""
def __init__(self, id, name, status):
self.id = id
self.name = name
self.status = status
def IsOn(self):
return self.status == "on"
def __str__(self):
return 'Printer "%s", id="%s", status=%s' % (self.name, self.id, self.status)
| true |
ad3574dd5f669dc8889a36db08587fcc26202d15 | Python | Aasthaengg/IBMdataset | /Python_codes/p02721/s763714878.py | UTF-8 | 715 | 2.6875 | 3 | [] | no_license | n,k,c = map(int,input().split())
s = input()
# Greedy two-pass scheme: l[i] / r[i] hold the earliest / latest possible
# day (1-based) for the i-th of k chosen 'o' days, honouring a gap of more
# than c days between consecutive choices.
l = [0]*k
r = [0]*k
count = 0
bef = 0
# Left-to-right pass: earliest feasible day for each pick.
for i,j in enumerate(s):
    if j == 'o':
        if count == 0:
            l[0] = i+1
            count += 1
            bef = i
        elif i > bef + c:
            l[count] = i+1
            count += 1
            bef = i
    if count == k:
        break
count = 0
bef = 0
# Right-to-left pass over the reversed string: latest feasible days.
for i,j in enumerate(s[::-1]):
    if j == 'o':
        if count == 0:
            r[0] = n-i
            count += 1
            bef = i
        elif i > bef + c:
            r[count] = n-i
            count += 1
            bef = i
    if count == k:
        break
r = r[::-1]
# A day is forced (appears in every valid schedule) exactly when its
# earliest and latest slots coincide.
for i in range(k):
    if l[i] != 0 and l[i] == r[i]:
        print(l[i])