blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6058125e251a589ce4173bb32cf9e49daafba5c3 | Python | chinmay81098/Dataset-Dashboard | /plots.py | UTF-8 | 592 | 2.65625 | 3 | [] | no_license | import streamlit as st
import plotly.express as px
import matplotlib.pyplot as plt
import numpy as np
def plot_histogram(df, xcol=None, y_col=None, color=None):
    """Render a Plotly histogram of ``xcol`` in the Streamlit app.

    The colour grouping defaults to the x column itself.  ``y_col`` is
    accepted for signature compatibility but is not used by the histogram.
    """
    if color is None:
        color = xcol
    figure = px.histogram(
        df,
        x=xcol,
        title='Target column Histogram',
        color=color,
        color_discrete_sequence=px.colors.sequential.Viridis,
    )
    st.plotly_chart(figure)
def plot_scatterplot(df, xcol=None, ycol=None, color=None):
    """Render a Plotly scatter plot of ``ycol`` against ``xcol`` in Streamlit."""
    figure = px.scatter(
        df, xcol, ycol, color,
        color_discrete_sequence=px.colors.sequential.Viridis,
    )
    st.plotly_chart(figure)
ca63b6052d8275592f5ec355da7bc50b05299317 | Python | anmolagarwal999/Precog_Recruitment_Tasks | /stackoverflow.com/convert.py | UTF-8 | 896 | 3.140625 | 3 | [] | no_license | import xmltodict
# a=xmltodict.parse(GzipFile("Badges2.xml"))
# print(a)
# Parse the whole XML badge dump into a nested dict/OrderedDict structure.
with open("Badges2.xml") as xml_file:
    a = xmltodict.parse(xml_file.read())
    # pass
# NOTE: redundant — the `with` block above already closed the file;
# calling close() again on a closed file object is a harmless no-op.
xml_file.close()
print(a)
# Python code to illustrate
# inserting data in MongoDB
from pymongo import MongoClient
def part():
    """Connect to MongoDB, insert the parsed XML document ``a``, and return
    the collection used (or None when the connection fails).

    Fixes two defects in the original:
    * the bare ``except:`` swallowed the connection error and then fell
      through to use the unbound name ``conn`` (NameError);
    * the trailing top-level code referenced ``collection``, which was
      local to this function.
    """
    print("------------------------------------------------")
    try:
        conn = MongoClient()
        print("Connected successfully!!!")
    except Exception:  # narrowed from a bare except; abort on failure
        print("Could not connect to MongoDB")
        return None
    # database
    # Creates a connection to a MongoDB instance and returns the reference
    # to the database.
    db = conn.database
    print(db)
    # Created or Switched to collection names:
    collection = db.badges_2
    # Insert Data (``a`` is the dict parsed from Badges2.xml above).
    rec_id1 = collection.insert_one(a)
    print("Data inserted with record ids", rec_id1)
    return collection


collection = part()
# Printing the data inserted (only when the connection succeeded).
if collection is not None:
    cursor = collection.find()
    for record in cursor:
        print(record)
5ff49ce8cb9d017832251fc25bfde1e441e2c18e | Python | Xelanos/Intro | /ex7/ex7.py | UTF-8 | 9,236 | 3.71875 | 4 | [] | no_license | EMPTY_STRING = ''
SPACE = ' '
def print_to_n(n):
    """Recursively print the integers 1..n ascending, one per line.

    Prints nothing when n < 1.
    """
    if n < 1:
        return
    if n > 1:
        print_to_n(n - 1)
    print(n)
def print_reversed(n):
    """Recursively print the integers n..1 descending, one per line.

    Prints nothing when n < 1.
    """
    if n < 1:
        return
    print(n)
    if n > 1:
        print_reversed(n - 1)
def has_divisor_smaller_then(n, i):
    """Return True when n has a divisor d with 2 <= d <= i.

    The trivial divisor 1 is never considered.  Iterative form of the
    original recursion, checking candidates from i down to 2.
    """
    while i != 2:
        if n % i == 0:
            return True
        i -= 1
    return n % 2 == 0
def is_prime(n):
    """Return True when n is a prime number.

    Trial division up to ~sqrt(n) (the helper recursion is inlined as a
    generator expression; same candidate range as the original).
    """
    if n <= 1:
        return False
    if n == 2:
        return True
    # first divisor possible is sqrt(n), rounded up for safety
    limit = int((n ** 0.5) + 1)
    return all(n % candidate != 0 for candidate in range(2, limit + 1))
def divisors_rec(n, i, list_divisors=None):
    """Collect the natural divisors of n that are <= i, ascending.

    Walks i down to 1, prepending each divisor found so the result ends
    up sorted ascending.  ``list_divisors`` is the recursion accumulator.
    """
    if list_divisors is None:
        list_divisors = []
    if i == 1:
        list_divisors.insert(0, i)
        return list_divisors
    if n % i == 0:
        list_divisors.insert(0, i)
    return divisors_rec(n, i - 1, list_divisors)
def divisors(n):
    """Return all natural divisors of n in ascending order.

    0 has no divisors; negative n is handled via its absolute value.
    """
    if n == 0:
        return []
    return divisors_rec(n, abs(n))
def factorial(n):
    """Return n! (1*2*3*...*n) for n >= 0.

    Fixes the original's missing base case: factorial(0) recursed forever
    (RecursionError); 0! is defined as 1.  Negative input now raises
    instead of recursing without bound.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    if n <= 1:
        return 1
    return factorial(n - 1) * n
def exp_n_x(n, x):
    """Approximate e**x with the Taylor terms k = 0..n.

    Larger n gives a closer approximation.  Iterative form of the original
    recursion; the factorial is accumulated as an exact integer so each
    term ``x**k / k!`` is numerically identical to the recursive version.
    """
    total = 1
    fact = 1
    for k in range(1, n + 1):
        fact *= k
        total = total + (x ** k) / fact
    return total
def play_hanoi(hanoi, n, src, dest, temp):
    """Solve the Towers of Hanoi by recursion.

    Moves n discs from ``src`` to ``dest`` using ``temp`` as scratch,
    emitting each move through ``hanoi.move(from_peg, to_peg)``.
    """
    if n <= 0:
        return
    play_hanoi(hanoi, n - 1, src, temp, dest)
    hanoi.move(src, dest)
    play_hanoi(hanoi, n - 1, temp, dest, src)
def print_binary_sequences_with_prefix(prefix, n, binary_list=None):
    """Build all binary sequences of length n starting with ``prefix``.

    :param prefix: the required first symbol (1 or 0)
    :param n: desired sequence length
    :param binary_list: recursion accumulator
    :return: list of lists of '0'/'1' strings, each a full sequence
    """
    if binary_list is None:
        binary_list = []
    if n < 1:
        return print(EMPTY_STRING)
    if n == 1:
        binary_list.append([str(prefix)])
        return binary_list
    # extend every length n-1 sequence with '1' and with '0'
    shorter = print_binary_sequences_with_prefix(prefix, n - 1, binary_list)
    extended = []
    for sequence in shorter:
        extended.append(sequence + ['1'])
        extended.append(sequence + ['0'])
    return extended
def print_list_sequces(chr_list):
    """Print each inner list of strings joined into one line.

    Example: [['1','2'], ['2','1']] prints '12' then '21'.
    """
    for sequence in chr_list:
        print(''.join(sequence))
def print_binary_sequences(n):
    """Print every binary sequence of length n, one per line."""
    if n <= 0:
        return print(EMPTY_STRING)
    # sequences starting with 0 first, then those starting with 1
    sequences = (print_binary_sequences_with_prefix(0, n)
                 + print_binary_sequences_with_prefix(1, n))
    print_list_sequces(sequences)
def print_char_sequences_with_prefix(prefix, char_list, n, cr_list=None):
    """Build every length-n sequence over ``char_list`` starting with ``prefix``.

    :param prefix: required first character
    :param char_list: alphabet of characters
    :param n: desired sequence length
    :param cr_list: recursion accumulator
    :return: list of lists, each one full sequence of characters
    """
    if cr_list is None:
        cr_list = []
    if n < 1:
        return print(EMPTY_STRING)
    if n == 1:
        cr_list.append([prefix])
        return cr_list
    # extend every length n-1 sequence with each alphabet character
    shorter = print_char_sequences_with_prefix(prefix, char_list, n - 1,
                                               cr_list)
    extended = []
    for sequence in shorter:
        for char in char_list:
            extended.append(sequence + [char])
    return extended
def print_sequences(char_list, n):
    """Print all length-n sequences over ``char_list``, one per line."""
    if n <= 0:
        return print(EMPTY_STRING)
    all_sequences = []
    for char in char_list:
        all_sequences.extend(
            print_char_sequences_with_prefix(char, char_list, n))
    print_list_sequces(all_sequences)
def no_repetition_prefix(prefix, char_list, n, cr_list=None):
    """Build all length-n sequences over ``char_list`` starting with
    ``prefix`` in which no character appears twice.

    Bug fix: the original recursed into print_char_sequences_with_prefix,
    which permits repeated characters in the first n-1 positions, so for
    n >= 3 the result could contain sequences with duplicates.  Recursing
    into this function itself keeps every intermediate sequence
    repetition-free.

    :param prefix: required first character
    :param char_list: alphabet of characters
    :param n: desired sequence length
    :param cr_list: recursion accumulator
    :return: list of lists, each one repetition-free sequence
    """
    if cr_list is None:
        cr_list = []
    if n < 1:
        return print(EMPTY_STRING)
    if n == 1:
        cr_list.append([prefix])
        return cr_list
    # all repetition-free sequences of length n-1 with the same prefix
    cr_list = no_repetition_prefix(prefix, char_list, n - 1, cr_list)
    final_list = []
    for seq in cr_list:
        for char in char_list:
            # skip characters already used in this sequence
            if char in seq:
                continue
            new_sequence = seq + [char]
            if new_sequence not in final_list:
                final_list.append(new_sequence)
    return final_list[:]
def no_repetition_sequences_list(char_list, n):
    """Return all length-n strings over ``char_list`` without repeated chars.

    Builds the full (with-repetition) sequence list first, then filters
    out any sequence containing a duplicated character.
    """
    if n == 0:
        return [EMPTY_STRING]
    # all sequences, repetitions included
    candidates = []
    for char in char_list:
        candidates.extend(
            print_char_sequences_with_prefix(char, char_list, n))
    # drop sequences with any duplicated character
    kept = candidates[:]
    for seq in candidates:
        for char in seq:
            if seq.count(char) > 1 and seq in kept:
                kept.remove(seq)
    # join each surviving character list into a string
    return [''.join(seq) for seq in kept]
def print_no_repetition_sequences(char_list, n):
    """Print every length-n repetition-free sequence over ``char_list``."""
    if n == 0:
        return print(EMPTY_STRING)
    for sequence in no_repetition_sequences_list(char_list, n):
        print(sequence)
| true |
b163e624f5ea7615aad072135b4f9e5fd7d4ba22 | Python | burokoron/StaDeep | /Image_classification/simple_cnn_classifier/pred.py | UTF-8 | 3,485 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from PIL import Image
from sklearn.metrics import classification_report
from tqdm import tqdm
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Input, MaxPool2D
from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation
# Build the 10-layer CNN
def cnn(input_shape, classes):
    """Build the 10-layer CNN classifier.

    :param input_shape: (height, width) of the RGB input images
    :param classes: number of output classes (width of the softmax)
    :return: an uncompiled Keras Model

    The repeated Conv(3x3, same) -> BatchNorm -> ReLU stanzas of the
    original are factored into a nested helper; the wiring, layer order
    and all hyper-parameters are unchanged.
    """
    def conv_bn_relu(x, filters, repeats=1):
        # One or more Conv -> BatchNorm -> ReLU stages at a given width.
        for _ in range(repeats):
            x = Conv2D(filters, (3, 3), strides=(1, 1), padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
        return x

    # input layer
    inputs = Input(shape=(input_shape[0], input_shape[1], 3))
    # layers 1-4: double the channels, halve the spatial size each block
    x = conv_bn_relu(inputs, 32)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = conv_bn_relu(x, 64)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = conv_bn_relu(x, 128)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = conv_bn_relu(x, 256)
    x = MaxPool2D(pool_size=(2, 2))(x)
    # layers 5-6
    x = conv_bn_relu(x, 512, repeats=2)
    x = MaxPool2D(pool_size=(2, 2))(x)
    # layers 7-8, then global average pooling instead of flattening
    x = conv_bn_relu(x, 1024, repeats=2)
    x = GlobalAveragePooling2D()(x)
    # layers 9-10: classification head
    x = Dense(256, kernel_initializer='he_normal')(x)
    x = Dense(classes, kernel_initializer='he_normal')(x)
    outputs = Activation('softmax')(x)
    return Model(inputs=inputs, outputs=outputs)
def main():
    """Run inference over the test set and print a classification report."""
    directory = 'img' # folder containing the images
    df_test = pd.read_csv('test.csv') # DataFrame describing the test data
    label_list = ['AMD', 'DR_DM', 'Gla', 'MH', 'Normal', 'RD', 'RP', 'RVO'] # label names
    image_size = (224, 224) # input image size
    classes = len(label_list) # number of classification classes
    # build the network and load the trained weights
    model = cnn(image_size, classes)
    model.load_weights('model_weights.h5')
    # inference
    X = df_test['filename'].values
    y_true = list(map(lambda x: label_list.index(x), df_test['label'].values))
    y_pred = []
    for file in tqdm(X, desc='pred'):
        # resize/convert the image to match the training-time conditions
        img = Image.open(f'{directory}/{file}')
        img = img.resize(image_size, Image.LANCZOS)
        img = np.array(img, dtype=np.float32)
        img *= 1./255
        img = np.expand_dims(img, axis=0)
        y_pred.append(np.argmax(model.predict(img)[0]))
    # evaluation
    print(classification_report(y_true, y_pred, target_names=label_list))
if __name__ == "__main__":
main()
| true |
0cd525f791630200eb2dea92cf4438bcfac0139a | Python | zoulala/CaptchaRec | /libs/noise_prc.py | UTF-8 | 3,559 | 3.109375 | 3 | [] | no_license | import os
import numpy as np
from PIL import Image
class NoiseDel():
    """Cleanup helpers for captcha images: noise removal, grayscale
    conversion and binarization."""

    def noise_del(self, img):
        """Remove isolated dark noise pixels from an RGB image, in place.

        A pixel is treated as noise when its R and G channels are both 0.
        Border noise is blanked to white; interior noise is replaced by
        white or by the mean of its non-extreme neighbours.
        """
        height = img.shape[0]
        width = img.shape[1]
        channels = img.shape[2]
        # clear noise on the top/bottom rows and left/right columns
        for row in [0, height - 1]:
            for column in range(0, width):
                if img[row, column, 0] == 0 and img[row, column, 1] == 0:
                    img[row, column, 0] = 255
                    img[row, column, 1] = 255
        for column in [0, width - 1]:
            for row in range(0, height):
                if img[row, column, 0] == 0 and img[row, column, 1] == 0:
                    img[row, column, 0] = 255
                    img[row, column, 1] = 255
        # clear noise in the interior region
        for row in range(1, height - 1):
            for column in range(1, width - 1):
                if img[row, column, 0] == 0 and img[row, column, 1] == 0:
                    a = img[row - 1, column]  # above
                    b = img[row + 1, column]  # below
                    c = img[row, column - 1]  # left
                    d = img[row, column + 1]  # right
                    # neighbours that are neither pure black nor pure white
                    ps = [p for p in [a, b, c, d] if 1 < p[1] < 255]
                    # if vertical or horizontal neighbours are white, set white
                    if (a[1] == 255 and b[1] == 255) or (c[1] == 255 and d[1] == 255):
                        img[row, column, 0] = 255
                        img[row, column, 1] = 255
                    # otherwise average the informative neighbours (grayish fill)
                    elif len(ps) > 1:
                        kk = np.array(ps).mean(axis=0)
                        img[row, column, 0] = kk[0]
                        img[row, column, 1] = kk[1]
                        img[row, column, 2] = kk[2]
                    else:
                        img[row, column, 0] = 255
                        img[row, column, 1] = 255
        return img

    def convert2gray(self, img):
        """Return a grayscale version of ``img``.

        Multi-channel input is averaged over the channel axis (a fast
        approximation of the 0.2989/0.5870/0.1140 luma weights); 2-D input
        is returned unchanged.
        """
        if len(img.shape) > 2:
            return np.mean(img, -1)
        return img

    def binarizing(self, img, threshold, cov=False):
        """Threshold ``img`` in place to the two values {0, 255}.

        cov=False: pixels below ``threshold`` become 0, others 255.
        cov=True: inverted mapping (pixels above ``threshold`` become 0).

        Replaces the original per-pixel Python loops with a single
        vectorized np.where pass — identical results, and it now works
        for arrays of any shape, not just 2-D.
        """
        if cov:
            img[:] = np.where(img > threshold, 0, 255)
        else:
            img[:] = np.where(img < threshold, 0, 255)
        return img
if __name__=="__main__":
    # Batch-clean every PNG in data/png and save the results to data/png_a2.
    filepath = 'data/png'
    savepath = 'data/png_a2'
    if not os.path.exists(savepath):
        os.mkdir(savepath)
    filenames = os.listdir(filepath)
    nd = NoiseDel()
    for file in filenames:
        openname = os.path.join(filepath,file)
        image = Image.open(openname)
        image = np.array(image)
        # denoise, grayscale, then binarize
        image = nd.noise_del(image)
        image = nd.convert2gray(image)
        image = nd.binarizing(image,threshold=190, cov=True)
        #
        image = Image.fromarray(image).convert('L') # Image.fromarray(image) defaults to mode 'F' (float); 'L' is 8-bit integer
        savename = os.path.join(savepath,file)
        image.save(savename)
| true |
e3e4453ca9c34602d8fce9aeaeeb6162923b3da0 | Python | RafaelHuang87/Leet-Code-Practice | /517.py | UTF-8 | 306 | 2.765625 | 3 | [
"MIT"
] | permissive | class Solution:
def findMinMoves(self, machines: List[int]) -> int:
n=len(machines)
sm=sum(machines)
if sm%n !=0: return -1
target=sm//n
cur=res=0
for m in machines:
res=max(res,m-target,abs(cur))
cur+=m-target
return res
| true |
42880609ed606140c9a70734b53864cc656d2342 | Python | yoyoraso/Python_3 | /problem1_lab3.py | UTF-8 | 128 | 3.578125 | 4 | [] | no_license | list = []
def fg(a, b):
    """Print the concatenation of a and b, then return a unchanged."""
    combined = a + b
    print(combined)
    return a
a = [1, 2, 3, 0]
b = ['Red', 'Green', 'Black']
z=fg(a,b) | true |
a2ddfa29a6930d165bdee42f054eb9e7703d1b20 | Python | yangzhangalmo/pytorch-examples | /ae_cnn.py | UTF-8 | 3,184 | 2.96875 | 3 | [] | no_license | ''' This is a pytorch implementaion on CNN autoencoder
where the embedding layer is represented as a vector.
The code is mostly based on the implemenation of https://gist.github.com/okiriza/16ec1f29f5dd7b6d822a0a3f2af39274
'''
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
class ae(nn.Module):
    """Convolutional autoencoder for 28x28 single-channel images.

    The encoder compresses an image to a vector of length ``emb_size``;
    the decoder mirrors it with transposed convolutions and max-unpooling
    (reusing the pooling indices) to reconstruct the input.
    """

    def __init__(self, emb_size):
        super(ae, self).__init__()
        self.emb_size = emb_size
        # encoder components
        self.enc_cnn_1 = nn.Conv2d(1, 10, kernel_size=5)
        self.enc_cnn_2 = nn.Conv2d(10, 20, kernel_size=5)
        self.enc_linear_1 = nn.Linear(4 * 4 * 20, 100)
        self.enc_linear_2 = nn.Linear(100, self.emb_size)
        # decoder components
        self.dec_linear_1 = nn.Linear(self.emb_size, 100)
        self.dec_linear_2 = nn.Linear(100, 20 * 4 * 4)
        self.dec_de_cnn_1 = nn.ConvTranspose2d(20, 10, kernel_size=5)
        self.dec_de_cnn_2 = nn.ConvTranspose2d(10, 1, kernel_size=5)

    def forward(self, images):
        """Encode then decode; returns (reconstruction, embedding)."""
        # ---- encoder ----
        code = F.relu(self.enc_cnn_1(images))
        # keep the pooling indices so the decoder can unpool exactly
        code, pool1_idx = F.max_pool2d(code, 2, return_indices=True)
        code = F.relu(self.enc_cnn_2(code))
        code, pool2_idx = F.max_pool2d(code, 2, return_indices=True)
        code = code.view([images.size(0), -1])  # flatten to (batch, 320)
        code = F.relu(self.enc_linear_1(code))
        code = F.relu(self.enc_linear_2(code))
        # ---- decoder ----
        recon = F.relu(self.dec_linear_1(code))
        recon = F.relu(self.dec_linear_2(recon))
        recon = recon.view([code.shape[0], 20, 4, 4])  # back to feature maps
        recon = F.max_unpool2d(recon, pool2_idx, 2)
        recon = F.relu(self.dec_de_cnn_1(recon))
        recon = F.max_unpool2d(recon, pool1_idx, 2)
        recon = F.relu(self.dec_de_cnn_2(recon))
        return recon, code
# Hyper-parameters and MNIST data pipeline.
m = n = 28
emb_size = 100
num_epochs = 5
batch_size = 128
lr = 0.002
train_data = datasets.MNIST('~/data/mnist/', train=True , transform=transforms.ToTensor())
test_data = datasets.MNIST('~/data/mnist/', train=False, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True)
# Instantiate model
autoencoder = ae(emb_size)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(autoencoder.parameters(), lr=lr)
# Training loop: reconstruct each batch and minimise pixel-wise MSE.
for epoch in range(num_epochs):
    print("Epoch %d" % epoch)
    for i, (images, _) in enumerate(train_loader): # Ignore image labels
        out, emb = autoencoder(Variable(images))
        optimizer.zero_grad()
        loss = loss_fn(out, images)
        loss.backward()
        optimizer.step()
    # reports the loss of the last batch only
    print("Loss = %.3f" % loss.item())
# Try reconstructing on test data
test_image = random.choice(test_data)[0]
test_image = Variable(test_image.view([1, 1, m, n]))
test_reconst, emb = autoencoder(test_image)
torchvision.utils.save_image(test_image.data, 'orig.png')
torchvision.utils.save_image(test_reconst.data, 'reconst.png')
| true |
471053ac581cca514ea76298e00a16874efd6403 | Python | strexof/kalkulator | /kakultor2.py | UTF-8 | 167 | 3.34375 | 3 | [] | no_license | tambah1 = int(input("ketikan angka ke 1 : "))
tambah2 = int(input("ketikan angka ke 2 : "))
# NOTE(review): `hasil` is computed but the print below recomputes the sum
# instead of using it — presumably intentional, but worth confirming.
hasil = tambah1+tambah2
print(tambah1," + ",tambah2," = ",tambah1+tambah2)
| true |
0e243115bb8e4342fd038dba3587fb4c22bc78e5 | Python | ginak329/python-challenge | /pybank.py | UTF-8 | 1,766 | 2.8125 | 3 | [] | no_license | import os
import csv
# Read the monthly profit/loss data.
budget_data=os.path.join("budget_data.csv")
total_months = []
total_PL = []
monthly_PL_change = []
with open(budget_data, newline='') as csvfile:
    csvreader = csv.reader(csvfile,delimiter=",")
    header = next(csvreader)
    for row in csvreader:
        total_months.append(row[0])
        total_PL.append(int(row[1]))
# Month-over-month change in profit/loss.
for i in range(len(total_PL)-1):
    monthly_PL_change.append(total_PL[i+1]-total_PL[i])
max_increase_value = max(monthly_PL_change)
max_decrease_value = min(monthly_PL_change)
# change at index i happens between month i and month i+1, hence the +1
max_increase_month = monthly_PL_change.index(max(monthly_PL_change)) + 1
max_decrease_month = monthly_PL_change.index(min(monthly_PL_change)) + 1
# Console report.
print("Financial Analysis")
print("----------------------------")
print(f"Total Months: {len(total_months)}")
print(f"Total: ${sum(total_PL)}")
print(f"Average Change: ${round(sum(monthly_PL_change)/len(monthly_PL_change),2)}")
print(f"Greatest Increase in Profits: {total_months[max_increase_month]} (${(str(max_increase_value))})")
print(f"Greatest Decrease in Profits: {total_months[max_decrease_month]} (${(str(max_decrease_value))})")
# Write the same report to a text file.
write_file = f"pybank_analysis.txt"
filewriter = open(write_file, mode = 'w')
filewriter.write("Financial Analysis\n")
filewriter.write("--------------------------\n")
filewriter.write(f"Total Months: {len(total_months)}\n")
filewriter.write(f"Total: ${sum(total_PL)}\n")
filewriter.write(f"Average Change:${round(sum(monthly_PL_change)/len(monthly_PL_change),2)}\n")
filewriter.write(f"Greatest Increase in Profits: {total_months[max_increase_month]} (${(str(max_increase_value))})\n")
filewriter.write(f"Greatest Decrease in Profits: {total_months[max_decrease_month]} (${(str(max_decrease_value))})\n")
filewriter.close()
b96642701e6b645ee02151b3f57783dcb51c108d | Python | ailomani/manikandan | /mani.89.py | UTF-8 | 49 | 2.515625 | 3 | [] | no_license | m=input()
n=sorted(m)
# Fixed NameError: the original printed the undefined name `o`;
# the sorted characters are in `n`.
print(''.join(map(str,n)))
| true |
0faeac5d4ecdbec82c28bd1edf1f3dcf1cd1c813 | Python | HarshilModi10/MCP_Competition | /medium/question973.py | UTF-8 | 636 | 3.015625 | 3 | [] | no_license | class Solution(object):
def kClosest(self, points, K):
"""
:type points: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
heap = []
output = []
val = 0
for x, y in points:
distance = -1 * (x * x + y * y)
heapq.heappush(heap,(distance, [x, y]))
if len(heap) > K:
heapq.heappop(heap)
while heap:
local, point = heapq.heappop(heap)
output.append(point)
return output
| true |
7e615b3397d460f0057b3b0933a0d151544b1549 | Python | tbweng/C-PAC | /CPAC/utils/strategy.py | UTF-8 | 2,708 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import six
import warnings
import logging
logger = logging.getLogger('workflow')
class Strategy(object):
    """One branch of a forked C-PAC pipeline.

    Tracks the resources produced so far (``resource_pool``), the current
    leaf node of the workflow graph, and the ordered list of node names,
    each suffixed with a numeric id (e.g. ``'nodename_0'``).
    """

    def __init__(self):
        self.resource_pool = {}
        self.leaf_node = None
        self.leaf_out_file = None
        self.name = []

    def append_name(self, name):
        """Record another '<nodename>_<id>' entry on this strategy."""
        self.name.append(name)

    def get_name(self):
        return self.name

    def set_leaf_properties(self, node, out_file):
        """Remember the workflow node/output that later nodes connect to."""
        self.leaf_node = node
        self.leaf_out_file = out_file

    def get_leaf_properties(self):
        return self.leaf_node, self.leaf_out_file

    def get_resource_pool(self):
        return self.resource_pool

    def get_nodes_names(self):
        """Return the node names with their trailing numeric id stripped."""
        pieces = [n.split('_') for n in self.name]
        assert all(p[-1].isdigit() for p in pieces)
        return ['_'.join(p[:-1]) for p in pieces]

    def get_node_from_resource_pool(self, resource_key):
        # narrowed from a bare except: dict lookup only raises KeyError here
        try:
            return self.resource_pool[resource_key]
        except KeyError:
            logger.error('No node for output: %s', resource_key)
            raise

    def update_resource_pool(self, resources, override=False):
        """Merge ``resources`` into the pool.

        Raises when a key already exists unless ``override`` is True.
        """
        for key, value in resources.items():
            if key in self.resource_pool and not override:
                # Fixed message: the original said "replacing with" even
                # though this branch refuses to replace and raises.
                raise Exception(
                    'Key %s already exists in resource pool; '
                    'refusing to overwrite with %s (override=False)' % (key, value)
                )
            self.resource_pool[key] = value

    def __getitem__(self, resource_key):
        assert isinstance(resource_key, six.string_types)
        try:
            return self.resource_pool[resource_key]
        except KeyError:
            logger.error('No node for output: %s', resource_key)
            raise

    def __contains__(self, resource_key):
        assert isinstance(resource_key, six.string_types)
        return resource_key in self.resource_pool

    def fork(self):
        """Return a shallow copy of this strategy for a new pipeline branch."""
        fork = Strategy()
        fork.resource_pool = dict(self.resource_pool)
        fork.leaf_node = self.leaf_node
        # NOTE(review): `out_file` duplicates `leaf_out_file`; kept because
        # external code may read it — confirm before removing.
        fork.out_file = str(self.leaf_out_file)
        fork.leaf_out_file = str(self.leaf_out_file)
        fork.name = list(self.name)
        return fork

    @staticmethod
    def get_forking_points(strategies):
        """For each strategy, list the node names not shared with every
        other strategy (i.e. where the pipelines diverged)."""
        forking_points = []
        for strat in strategies:
            strat_node_names = set(strat.get_nodes_names())
            strat_forking = []
            for counter_strat in strategies:
                counter_strat_node_names = set(counter_strat.get_nodes_names())
                strat_forking += list(strat_node_names - counter_strat_node_names)
            strat_forking = list(set(strat_forking))
            forking_points += [strat_forking]
        return forking_points
| true |
d3f682f67ed35db4c902e532b314952995b773e7 | Python | bewithforce/NumericalMethods | /task1.py | UTF-8 | 1,577 | 3.484375 | 3 | [] | no_license | def main():
a = [
[2, -1, 0, 0, 0],
[-1, 2, -1, 0, 0],
[0, -1, 2, -1, 0],
[0, 0, -1, 2, -1],
[0, 0, 0, -1, 2]
]
b = [-4 / 25, 2 / 25, 2 / 25, 2 / 25, 2 / 25]
l, u1, u2 = lu3(a)
print(lu3_solve(l, u1, u2, b))
print(lu3_determinant(u1))
# LU decomposition of a tridiagonal matrix
def lu3(a):
    """LU-factorise the tridiagonal matrix ``a``.

    Returns (l, u1, u2) where ``l`` is the sub-diagonal of L (whose main
    diagonal is implicitly 1), ``u1`` the main diagonal of U and ``u2``
    its super-diagonal.
    """
    size = len(a)
    l = []
    u1 = []
    u2 = []
    for i in range(size):
        if i == 0:
            u1.append(a[0][0])
        else:
            u1.append(a[i][i] - l[i - 1] * u2[i - 1])
        if i < size - 1:
            u2.append(a[i][i + 1])
            l.append(a[i + 1][i] / u1[i])
    return l, u1, u2
# Solve the linear system Ax=b via the LU decomposition:
# 1) Let Ux = y and solve Ly = b, i.e. find y
# 2) Solve Ux = y
def lu3_solve(l, u1, u2, b):
    """Solve A x = b given the lu3 factors of tridiagonal A.

    Forward-substitutes L y = b (L has a unit diagonal and sub-diagonal
    ``l``), then back-substitutes the bidiagonal system U x = y.
    """
    size = len(b)
    # forward substitution
    y = []
    for i in range(size):
        value = b[i]
        if i > 0:
            value -= l[i - 1] * y[i - 1]
        y.append(value)
    # back substitution
    x = [None] * size
    for i in reversed(range(size)):
        value = y[i]
        if i < size - 1:
            value -= u2[i] * x[i + 1]
        x[i] = value * (1 / u1[i])
    return x
# compute the determinant of the matrix from the main-diagonal entries of U in the LU decomposition
def lu3_determinant(u1):
    """det(A) = product of U's diagonal, since L has a unit diagonal."""
    det = 1
    for pivot in u1:
        det *= pivot
    return det
main()
| true |
9d7491fb98a0a750d00d012684bcaec47cbc80b8 | Python | guyBy/PythonWEB | /Lesson1/TestByRef.py | UTF-8 | 449 | 3.53125 | 4 | [] | no_license | list1 = [1, 2, 3, 'string1', (1, 2, 3)]
# list2 is a second reference to the SAME list object, not a copy.
list2 = list1
print('==> starting values:')
print(f'list 1: {list1}')
print(f'list 2: {list2}')
print('==> after slice set value:')
# mutating through list2 is visible through list1 as well
list2[-1] = (1, 2, 3, 4)
print(f'list 1: {list1}')
print(f'list 2: {list2}')
# making a copy before changing
print('==> after making copy '
      '(note slicer assignment result):')
list3 = list(list1)
# slice assignment splices the tuple's ELEMENTS in, replacing the last item
list3[-1:] = (1, 2, 3)
print(f'list 1: {list1}')
print(f'list 3: {list3}')
| true |
5814283a4a4cf73507b169450be7ebabf3c542b5 | Python | G0PALAKRISHNAN/python | /SelenProjects2/SelenLocationByXpath.py | UTF-8 | 411 | 2.8125 | 3 | [] | no_license | from selenium import webdriver
import time
# Open the demo site and locate the login link by XPath.
driver = webdriver.Chrome()
driver.get("https://demo.actitime.com")
time.sleep(5)
Login = driver.find_element_by_xpath("//a[@class='initial']")
# element.location is a dict with pixel coordinates {'x': ..., 'y': ...}
print("Co ordinates of login : " , Login.location)
print("X co ordinate of Login Button is : ", Login.location.get('x'))
print("Y co ordinate of Login Button is : ", Login.location.get('y'))
time.sleep(3)
driver.close()
7f6b0f7bbba0b6f7c74ec8c2a86da5c73f8262af | Python | IgaoGuru/TicTacToe- | /nntraining.py | UTF-8 | 4,130 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import pickle
lr = 0.01
momentum = 0.5
epochs = 20
device = "cpu"
log_interval = 100
class TictactoeNet(nn.Module):
    """Fully-connected policy network mapping a 3x3 board (flattened to 9
    inputs) to log-probabilities over the 9 board cells."""

    def __init__(self):
        super(TictactoeNet, self).__init__()
        self.fc0 = nn.Linear(3 * 3, 9)
        self.fc1 = nn.Linear(9, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, 100)
        self.fc4 = nn.Linear(100, 3 * 3)

    def forward(self, x):
        # x has shape (batch, 9); apply every linear layer followed by
        # ReLU, then normalise to log-probabilities.
        for layer in (self.fc0, self.fc1, self.fc2, self.fc3, self.fc4):
            x = F.relu(layer(x))
        return F.log_softmax(x, dim=1)
def train(model, device, X, y, optimizer, epoch):
    """Run one training epoch over (X, y) with batch size 1."""
    # switch layers like dropout/batchnorm into training mode
    model.train()
    # iterate the training set batch by batch
    batch_size = 1
    for idx in range(int(X.shape[0]/batch_size)):
        begin = idx * batch_size
        data = X[begin:begin + batch_size, :]
        target = y[begin : begin + batch_size]
        # flatten targets to shape (batch,) as required by nll_loss
        target = target.reshape((target.shape[0], ))
        # move the batch to the configured device (cpu or gpu)
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # forward pass
        output = model(data)
        # negative log-likelihood loss (model outputs log-probabilities)
        loss = F.nll_loss(output, target)
        # backward propagation
        loss.backward()
        # apply the gradient step to all parameters
        optimizer.step()
        # periodic progress log every `log_interval` batches
        # NOTE(review): the percentage divides the batch index by the
        # sample count, so the printed percent looks off — confirm intent.
        if idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, idx * len(data), X.shape[0],
                100. * idx / X.shape[0], loss.item()))
def test(model, device, X, y):
    """Count correct argmax predictions of ``model`` over (X, y).

    Evaluates in batches of 250 and returns the total number of correct
    predictions.  Bug fix: the original compared predictions against the
    module-global ``y_torch`` instead of the ``y`` argument, so the
    function silently ignored its own labels parameter.
    """
    # iterate the evaluation set batch by batch
    batch_size = 250
    num_corrects = 0
    for idx in range(int(X.shape[0]/batch_size)):
        begin = idx * batch_size
        data = X[begin:begin + batch_size, :]
        target = y[begin : begin + batch_size]
        target = target.reshape((target.shape[0], ))
        # move the batch to the configured device (cpu or gpu)
        data, target = data.to(device), target.to(device)
        # forward pass
        output = model(data)
        output = output.cpu().detach().numpy()
        y_pred = np.argmax(output, axis=1).flatten()
        # use the y passed in, not the module-global y_torch
        y_true = y[begin:begin + batch_size].cpu().detach().numpy().flatten()
        correct = y_pred == y_true
        num_corrects += np.sum(correct)
        # periodic progress log every `log_interval` batches
        if idx % log_interval == 0:
            print('Test: [{}/{} ({:.0f}%)]'.format(
                idx * len(data), X.shape[0],
                100. * idx / X.shape[0]))
    return num_corrects
# Load the exported game-tree dataset: board cells S0..S8 as features,
# "optimal_play" (a cell index, or " None") as the label.
y_col = ["optimal_play"]
tictactoe_data_filepath = "tree_exported.txt"
df_tictactoe = pd.read_csv(tictactoe_data_filepath)
X_col = ["S" + str(i) for i in range(9)]
X = df_tictactoe[X_col].values
y = df_tictactoe[y_col].values
# mark unlabeled rows with -1 so they can be filtered out below
y[y == " None"] = -1
# NOTE(review): np.int is removed in modern NumPy — use int instead.
y = y.astype(np.int)
valid_idxs = y != -1
valid_idxs = valid_idxs.reshape((-1, ))
y = y[valid_idxs]
X = X[valid_idxs, :]
# one-hot target buffer (unused — the filling loop below is commented out)
Y = np.zeros((y.shape[0], 9))
# for x_index in range(Y.shape[0]):
#     Y[x_index, y[x_index]] = 1
X_torch = torch.tensor(X).float()
y_torch = torch.tensor(y).long()
# Y_torch = torch.tensor(Y).long()
# optionally resume from a previously pickled model
load_model = False
if load_model:
    model = pickle.load(open("TictactoeNet.p", "rb"))
else:
    model = TictactoeNet().to(device)
output_y = model.forward(X_torch)
optimizer = optim.Adam(model.parameters())
# train, checkpoint and evaluate accuracy each epoch
for epoch in range(1, epochs + 1):
    train(model, device, X_torch, y_torch, optimizer, epoch)
    pickle.dump(model, open("TictactoeNet.p", "wb"))
    num_corrects = test(model, device, X_torch, y_torch)
    print(num_corrects/X_torch.shape[0])
837f146863ee80c71562d3d82642c8489ff00bcf | Python | Lain-progressivehouse/atCoder | /atCoder/japan_contest.py | UTF-8 | 1,320 | 3.0625 | 3 | [
"MIT"
] | permissive | def p_a():
M, D = map(int, input().split())
ans = 0
for m in range(M + 1):
for d in range(D + 1):
d1 = d // 10
d2 = d - 10 * d1
if d1 >= 2 and d2 >= 2 and m == d1 * d2:
ans += 1
print(ans)
def p_b():
    """Read N, K and array A; print the contest answer modulo 1e9+7.

    For each element, counts how many smaller elements sit to its left
    and right, then combines the counts with the arithmetic series
    K*(K+1)//2 over the K repetitions.
    """
    N, K = map(int, input().split())
    A = list(map(int, input().split()))
    mod = 10 ** 9 + 7
    smaller_left = [0] * N
    smaller_right = [0] * N
    for i in range(N):
        for j in range(N):
            if A[i] > A[j]:
                if j < i:
                    smaller_left[i] += 1
                else:
                    smaller_right[i] += 1
    series = K * (K + 1) // 2 % mod
    ans = 0
    for i in range(N):
        ans += series * smaller_right[i] + (series - K) * smaller_left[i]
        ans %= mod
    print(ans)
def p_c():
    """Read N and a colour string S of length 2*N; print a product over
    the non-'W' positions based on their distance to the nearest 'W'."""
    N = int(input())
    S = input()
    ans = 2
    for i in range(2 * N):
        if S[i] == "W":
            continue
        # scan left to the previous 'W' (or past the start)
        l = i - 1
        while l >= 0 and S[l] != "W":
            l -= 1
        # scan right to the next 'W' (or past the end)
        # NOTE(review): this bound is N while the string has length 2*N and
        # the check below uses r < 2*N — suspected off-by-factor bug; confirm
        # against the original problem statement.
        r = i + 1
        while r < N and S[r] != "W":
            r += 1
        if l >= 0:
            ans *= i - l + 1
        if r < 2 * N:
            ans *= r - i + 1
    print(ans)
def p_d():
    """Unfinished stub: reads N but computes nothing yet."""
    N = int(input())
if __name__ == '__main__':
p_d()
| true |
cf6fb179127be0e85219c249480da8eb6e226a95 | Python | ale748/reserva | /reservations/models.py | UTF-8 | 996 | 2.515625 | 3 | [] | no_license | from django.db import models
from datetime import datetime, timedelta
from django.contrib.auth.models import User
class Reservation(models.Model):
    """A single-day reservation; each date can be reserved at most once."""
    user = models.ForeignKey(User)
    # unique=True enforces one reservation per calendar date
    date = models.DateField(blank=True, null=True, unique=True)
    qty = models.IntegerField(default=8)
    # place = models.ForeignKey(Place)
    paid = models.BooleanField(default=False)
    reservation_date = models.DateTimeField(auto_now=True)
    payment_confirmation = models.CharField(null=True, blank=True, max_length=200)
    def __unicode__(self):
        # Python 2 style string representation (uses the builtin unicode())
        return unicode(self.user)
    @staticmethod
    def get_ocuped_dates(month, year):
        """Return 'YYYY-MM-DD' strings for future reservations in the given
        month/year (both passed as strings, zero-padded month).

        NOTE(review): only UNPAID reservations (paid == False) are reported
        as occupied — confirm this is intended and not inverted.
        """
        reservations = Reservation.objects.filter(date__gte=datetime.now())
        occupied = []
        for reservation in reservations:
            if reservation.paid==False and reservation.date.strftime("%m")==month and reservation.date.strftime("%Y")==year:
                occupied.append(reservation.date.strftime("%Y-%m-%d"))
        return occupied
| true |
7b2940e4bf4885bfb1930938303d961d6578df6c | Python | lkavanau/Practice_Scripts | /complementDNA.py | UTF-8 | 299 | 4.03125 | 4 | [] | no_license | ##sequence
# The template DNA sequence to complement.
dna = "ACTGATCGATTACGTATAGTATTTGCTATCATACATATATATCGATGCGTTCAT"

# Build the complement in a single pass; lowercase marks substituted bases,
# exactly mirroring the original chained replace() steps (A->t, T->a, C->g, G->c).
dna4 = dna.translate(str.maketrans("ATCG", "tagc"))
print(dna4)

# Restore uppercase for the final complementary strand.
complement = dna4.upper()
print(complement)
8c4ed52d8716ca93c4464f757a901976a170d72d | Python | Alekceyka-1/algopro21 | /part1/Lections/lection-04-строки/ПИб-1/04.py | UTF-8 | 159 | 3.421875 | 3 | [] | no_license | # речь Йоды
# Yoda speech: reverse the word order of the sentence prefix.
prefix = "Я изучаю Python"
postfix = ", мой юный падаван."

words = prefix.split(' ')
new_pr = ' '.join(reversed(words))
print(new_pr + postfix)
c1cdb01e9d600ca4a75fcb96e9adba42213c0276 | Python | Xenolithes/Algorithims | /python/test_is_divisible.py | UTF-8 | 657 | 3.078125 | 3 | [] | no_license | import unittest
from algos.is_divisible import is_divisible
class isDivisible (unittest.TestCase):
    """Tests for algos.is_divisible.is_divisible(n, a, b).

    The cases imply is_divisible(n, a, b) is True iff n is divisible by
    BOTH a and b (12 and 48 pass for (3, 4); 3 and 8 fail) — TODO confirm
    against the implementation.
    NOTE(review): class name is not PascalCase; kept for compatibility.
    """
    def test_True(self):
        # 12 % 3 == 0 and 12 % 4 == 0
        data = [12,3,4]
        result = is_divisible(*data)
        self.assertEqual(result, True)
    def test_False(self):
        # 3 is divisible by 3 but not by 4
        data = [3,3,4]
        result = is_divisible(*data)
        self.assertEqual(result, False)
    def test_True_Two(self):
        # 48 % 3 == 0 and 48 % 4 == 0
        data = [48,3,4]
        result = is_divisible(*data)
        self.assertEqual(result, True)
    def test_False_Two(self):
        # 8 is divisible by 4 but not by 3
        data = [8,3,4]
        result = is_divisible(*data)
        self.assertEqual(result, False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true |
c4db9efae3661328ea7189e15fb702070b336084 | Python | valleyceo/code_journal | /1. Problems/g. Heap/Template/c. Heap - Online Median.py | UTF-8 | 1,087 | 3.984375 | 4 | [] | no_license | # Compute the Median of Online Data
'''
- Given stream of numbers
- Design a running median of sequence
Note: You cannot back up to read earlier value
'''
# O(logn) insertion time | O(n) space
def online_median(sequence: Iterator[int]) -> List[float]:
    """Running median: return the median of the stream after each element.

    The larger half of the values seen so far lives in a min-heap, the
    smaller half (stored negated) in a max-heap; the two heap tops bracket
    the median.  O(log n) per element.
    """
    upper: List[int] = []  # min-heap holding the larger half
    lower: List[int] = []  # max-heap (negated values) holding the smaller half
    medians: List[float] = []
    for value in sequence:
        # Route the new value through the upper half; whatever it displaces
        # (its smallest element, or the value itself) sinks into the lower half.
        displaced = heapq.heappushpop(upper, value)
        heapq.heappush(lower, -displaced)
        # Rebalance so that upper has the same count as lower, or one more.
        if len(lower) > len(upper):
            heapq.heappush(upper, -heapq.heappop(lower))
        if len(upper) == len(lower):
            medians.append(0.5 * (upper[0] - lower[0]))
        else:
            medians.append(upper[0])
    return medians
return result
def online_median_wrapper(sequence):
    """Convenience wrapper: accept any iterable (e.g. a list) instead of an iterator."""
    return online_median(iter(sequence))
| true |
f9b7876c19b6dc8941ff7920856866c746e56587 | Python | raul-arrabales/BigData-Hands-on | /Spark/Datio/session-1.py | UTF-8 | 1,503 | 3.421875 | 3 | [] | no_license | # RDD Data Set load example (karma)
! ls -la
! head -10 movies.dat
! head -10 ratings_verysmall.dat
# Loading CSV files from file into RDDs in cluster memory
moviesRDD = sc.textFile('movies.dat')
ratingsRDD = sc.textFile('ratings_verysmall.dat')
# See what we've got in the RDDs
print('--- Movies:')
print(moviesRDD.take(4))
print('--- Ratings:')
print(ratingsRDD.take(4))
# Current data format in the RDD
ratingsRDD.take(6)
# Split fields using a map transformation
SplittedRatingsRDD = ratingsRDD.map(lambda l : l.split('::'))
# See what we've got now:
SplittedRatingsRDD.take(6)
# Create pairs M/R style for the counting task (Mapper):
RatingCountsRDD = SplittedRatingsRDD.map( lambda (uId, mId, r, ts) : (int(uId), 1))
RatingCountsRDD.count()
# Taking a sample of our partial counts
Rsample = RatingCountsRDD.sample(False, 0.001)
# See how big the sample is and inspect
Rsample.count()
# See how big the sample is and inspect
Rsample.take(6)
# Aggregate counts by user (Reducer)
RatingsByUserRDD = RatingCountsRDD.reduceByKey(lambda r1, r2 : r1 + r2)
# Inspect:
RatingsByUserRDD.takeSample(False, 10)
# Get the top 10 users by the number of ratings:
RatingsByUserRDD.takeOrdered(10, key=lambda (uId, nr): -nr)
# Nested version of the same using a "karma" RDD:
karma = (
sc.textFile('ratings_verysmall.dat')
.map(lambda l : l.split('::'))
.map(lambda (uId, mId, r, ts) : (int(uId), 1))
.reduceByKey(lambda r1, r2 : r1 + r2)
)
karma.takeOrdered(10, key=lambda (uId, nr): -nr)
| true |
e53a90c1c641b23a96010763a26a80747d012e3f | Python | Abel2Code/CerealDispenserSystem | /PythonScripts/Testing/spinMotor.py | UTF-8 | 635 | 2.9375 | 3 | [] | no_license | from __future__ import division
import time
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
# Configure min and max servo pulse lengths
servo_min = 150  # Min pulse length out of 4096
servo_max = 600  # Max pulse length out of 4096
# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)
print('Moving servo on channel 0, press Ctrl-C to quit...')
# Sweep loop: drive BOTH channels 1 and 0 between the extremes, pausing 1 s
# between moves.  NOTE(review): the banner says "channel 0" only, and the
# comment below has a typo ('channel O'); runtime strings kept as-is.
while True:
    # Move servo on channel O between extremes.
    pwm.set_pwm(1, 0, servo_min)
    time.sleep(1)
    pwm.set_pwm(0, 0, servo_min)
    time.sleep(1)
    pwm.set_pwm(1, 0, servo_max)
    time.sleep(1)
    pwm.set_pwm(0, 0, servo_max)
    time.sleep(1)
| true |
c3be779ba0b9945f63bfd036d1ce6f84947361c7 | Python | zswin/test1 | /gc_test.py | UTF-8 | 407 | 2.875 | 3 | [] | no_license | # coding=utf-8
__author__ = 'zs'
import gc
def dump_garbage():
    """Trigger a collection and print everything recorded in gc.garbage.

    (The __main__ block below enables gc.DEBUG_LEAK, which makes members of
    collected reference cycles land in gc.garbage instead of being freed.)
    """
    # NOTE(review): "BARBAGE" is a typo in the runtime banner; left as-is.
    print("\nBARBAGE:")
    gc.collect()
    print("\nGarbage objects:")
    for x in gc.garbage:
        s=str(x)
        if len(s)>80:
            # truncate long reprs so one object stays on one line
            s=s[:77]
        print(type(x), ':', s)
if __name__ == '__main__':
    gc.enable()
    # DEBUG_LEAK keeps otherwise-collectable cycle members in gc.garbage
    # so dump_garbage() can show them.
    gc.set_debug(gc.DEBUG_LEAK)
    # Build a self-referential list (a reference cycle)...
    l=[ ]
    l.append(l)
    # ...then drop the only external reference, making the cycle garbage.
    del l
    dump_garbage()
    # print how many unreachable objects the final collection finds
    print(gc.collect())
f3fb6235e04c266e342eb5225f0fff5900715815 | Python | ascheman/eumel | /tools/convertCharset.py | UTF-8 | 2,279 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Convert file ZEICHENSATZ from graphics package to PNG files
"""
from eumel import *
class ZeichensatzDataspace(Dataspace):
    """EUMEL dataspace for the graphics package's ZEICHENSATZ (character set):
    255 character entries parsed as text records, followed by the heap."""
    TYPE = 0x44c  # dataspace type constant — presumably validated by the base class
    def __init__ (self, fd):
        Dataspace.__init__ (self, fd)
        # just an array with 255 elements (one entry per character code 1..255;
        # the __main__ block notes there is no character with code 0)
        self.rows = []
        for i in range (255):
            self.rows.append (self.parseText ())
        self.parseHeap ()
if __name__ == '__main__':
import argparse, sys, cairo, math
def transform (w, h, x, y):
return ((2+x), (11-y))
parser = argparse.ArgumentParser(description='Convert ZEICHENSATZ dataspace to PNG')
parser.add_argument ('-v', '--verbose', help='Enable debugging messages', action='store_true')
parser.add_argument ('file', help='Input file')
parser.add_argument ('prefix', help='Output prefix')
args = parser.parse_args ()
if args.verbose:
logging.basicConfig (level=logging.DEBUG)
else:
logging.basicConfig (level=logging.WARNING)
m = []
with open (args.file, 'rb') as fd:
ds = ZeichensatzDataspace (fd)
# no character with code 0
for (j, r) in zip (range (1, len (ds.rows)+1), ds.rows):
if len (r) == 0:
continue
out = '{}{:03d}.png'.format (args.prefix, j)
logging.info ('Converting character {} to {}'.format (j, out))
w, h = 1024, 1024
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
ctx = cairo.Context(surface)
ctx.scale (64, 64)
ctx.set_line_width (0.1)
ctx.set_source_rgb (1, 0, 0)
r = bytes (r)
lastxy = (0, 0)
for i in range (0, len (r), 4):
x0, y0, x1, y1 = struct.unpack ('<bbbb', r[i:i+4])
m.extend ([x0, y0, x1, y1])
if (x0, y0) != lastxy:
ctx.move_to (*transform (w, h, x0, y0))
if (x0, y0) != (x1, y1):
ctx.line_to (*transform (w, h, x1, y1))
else:
x1, y1 = transform (w, h, x1, y1)
ctx.arc (x1, y1, 0.1, 0, 2*math.pi)
lastxy = (x1, y1)
ctx.stroke ()
surface.write_to_png (out)
| true |
e8e3db3ae94aee63a749e5ca1855f7862845ad7c | Python | jgainerdewar/job-manager | /servers/dsub/jobs/controllers/utils/job_ids.py | UTF-8 | 2,079 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | from providers import ProviderType
from werkzeug.exceptions import BadRequest
def api_to_dsub(api_id, provider_type):
    """Convert an API ID and provider type to dsub project, job, and task IDs

    The API ID is the '+'-joined quadruple
    '<project-id>+<job-id>+<task-id>+<attempt>'.  Components may be empty,
    except that google providers require a non-empty project ID.

    Args:
        api_id (str): The API ID corresponding to a particular dsub task.
        provider_type (ProviderType): The dsub provider currently being
            used (google, google-v2, local, or stub).

    Returns:
        dsub project ID, job ID, task ID, and attempt number, respectively
        (returned as the 4-element list produced by the split). The job ID
        will never be empty, but project ID, task ID, and attempt number may
        be.

    Raises:
        BadRequest if the api_id format is invalid for the given provider
    """
    id_split = api_id.split('+')
    # exactly four '+'-separated components are required
    if len(id_split) != 4:
        raise BadRequest(
            'Job ID format is: <project-id>+<job-id>+<task-id>+<attempt>')
    # google-based providers always need the project component
    google_providers = [ProviderType.GOOGLE, ProviderType.GOOGLE_V2]
    if not id_split[0] and provider_type in google_providers:
        raise BadRequest(
            'Job ID is missing project ID component with google provider')
    return id_split
def dsub_to_api(proj_id, job_id, task_id, attempt):
    """Compose the API ID '<proj>+<job>+<task>+<attempt>' from dsub IDs.

    Args:
        proj_id (str): dsub Google cloud project ID (google provider only)
        job_id (str): dsub job ID (all providers); must be non-empty
        task_id (str): dsub task ID (if the job has multiple tasks)
        attempt: attempt number, if any

    Returns:
        (str): the '+'-joined API ID; falsy components become empty strings.

    Raises:
        BadRequest if no job_id is provided
    """
    if not job_id:
        raise BadRequest('Invalid dsub ID format, no job_id was provided')
    components = [proj_id or '', job_id or '', task_id or '', attempt or '']
    return '+'.join(str(part) for part in components)
| true |
e3636eada82d33f38e7e3974f3a8657078ebe806 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_200/3748.py | UTF-8 | 646 | 2.953125 | 3 | [] | no_license | def lister(a):
b=[]
for i in str(a):
b.append(int(i))
return b
# Google Code Jam 'Tidy Numbers'-style solver (presumably): repair each
# decreasing digit pair by borrowing from the left digit and filling the
# tail with 9s, then print the result per case.
n = int(input())
for i in range(n):
    no = int(input())
    j = int(no)
    while j>0:
        lst = lister(j)
        le = len(lst)
        fl =True  # NOTE(review): unused flag
        # scan digit pairs right-to-left; on a descent, decrement the left
        # digit and set every digit from k onward to 9
        for k in range(le-1,0,-1):
            if lst[k]<lst[k-1]:
                if lst[k-1]!=0:
                    lst[k],lst[k-1] = 9,lst[k-1]-1
                    for f in range(k,le):
                        lst[f] = 9
        j = int(''.join(str(e)for e in lst ))
        # NOTE(review): `if True: ... break` means the while loop runs once.
        if True:
            print('Case #'+str(i+1)+':',j)
            break
| true |
0af22af9108e9e977290fd6629ac8b7b13daa0e7 | Python | asmundur31/Kattis | /catalan.py | UTF-8 | 302 | 3.765625 | 4 | [] | no_license | fact = []
# Precompute factorials 0! .. 10000! into the fact list (created on the
# preceding line): fact[i] == i!.
fact.append(1)
for i in range(1,10001):
    fact.append(fact[i-1]*i)
def choose(n,k):
    """Exact binomial coefficient C(n, k) from the precomputed fact table.

    The sequential floor divisions are exact: fact[n]/fact[k] and the final
    binomial value are both integers.
    """
    res=fact[n]
    res //= fact[k]
    res //= fact[n - k]
    return res;
def catalan(n):
    """n-th Catalan number: C(2n, n) // (n + 1)."""
    return choose(2*n,n)//(n+1)
# Read the number of queries, then print the Catalan number for each input value.
x=int(input())
for i in range(1,x+1):
    y=int(input())
    print(catalan(y))
3713afca2ef55f60bb64835459162d2df7c55535 | Python | persiandog/notes_codeme | /03/Homework/summary3.py | UTF-8 | 492 | 3.8125 | 4 | [] | no_license | """W podanym przez użytkownika ciągu wejściowym policz wszystkie małe litery,
wielkie litery, cyfry i znaki specjalne."""
# Task (per the module docstring): count lowercase letters, uppercase
# letters, digits and special characters in a user-supplied string.
txt = input("Enter your birthday and birth place: ")

print("Capital Letters: ", sum(1 for c in txt if c.isupper()))
print("Lowercase Letters: ", sum(1 for c in txt if c.islower()))
print("Digits: ", sum(1 for c in txt if c.isdigit()))
# A "special" character is anything that is neither alphanumeric nor
# whitespace (replaces the broken commented-out attempt that was here).
print("Special characters: ", sum(1 for c in txt if not c.isalnum() and not c.isspace()))
78366b859d4fac48c5f2d341e8ab69513750dd68 | Python | demianAdli/python_dsa | /pythonic_dsa_chapter_02/range_class.py | UTF-8 | 1,077 | 3.765625 | 4 | [] | no_license | """
15 August, 2020
I have upgraded the book code and eliminated an error which was
producing too large a length for negative steps.
I solved this problem by taking the absolute values of the step
and of the 'stop - start' term in the book's length formula (code line 16).
(Range class is in the page 81 code fragment 2.6)
"""
class Range:
    """A re-implementation of the built-in ``range`` sequence protocol.

    Fixes the length computation: the previous ``abs()``-based formula
    reported a non-zero length for mixed-sign cases such as Range(1, 10, -1)
    or Range(10, 1, 2), which the built-in range treats as empty.  The
    sign-aware ceiling division below matches builtin semantics for both
    positive and negative steps.
    """
    def __init__(self, start, stop=None, step=1):
        if step == 0:
            raise ValueError('Step cannot be equal to zero')
        if stop is None:
            # single-argument form: Range(stop) counts from 0
            start, stop = 0, start
        # Ceiling division toward the step's direction; negative spans clamp
        # to an empty range instead of wrapping.
        if step > 0:
            span = stop - start + step - 1
        else:
            span = stop - start + step + 1
        self.__length = max(0, span // step)
        self.start = start
        # NOTE: attribute name 'stop' is kept for backward compatibility,
        # but (as in the original) it actually stores the STEP.
        self.stop = step

    def __len__(self):
        """Number of elements in the range."""
        return self.__length

    def __getitem__(self, ind):
        """Return the ind-th element; negative indices count from the end."""
        if ind < 0:
            ind += len(self)
        if not 0 <= ind < self.__length:
            raise IndexError('Out of Range')
        return self.start + ind * self.stop
if __name__ == '__main__':
    # demo: prints [1, 3, 5, 7, 9]
    my_range = Range(1, 10, 2)
    print(list(my_range))
| true |
52fb139625b03a6b8f645965136b275760b6cdc3 | Python | Amonteverde04/Babies-Program | /babySortColumn/babySortColumn.py | UTF-8 | 881 | 3.5 | 4 | [] | no_license | import sqlite3
import random as r
def main():
    """Prompt until the user picks 1 (boy) or 2 (girl), then print one
    random name from the database.

    NOTE: the original loop condition `choice != 1 or choice != 2` was a
    tautology (always True) and only worked because of the `break`s;
    `not in` expresses the intended "keep asking until 1 or 2".
    """
    choice = 0
    while choice not in (1, 2):
        choice = int(input("Would you like a boy name or girl name?\nPress 1 for boy or 2 for girl!\n"))
    if choice == 1:
        print(rBoy())
    else:
        print(rGirl())
def rBoy():
    """Return a one-element list holding a random boys' name, or None if the
    table is empty.

    The connection is now always closed: in the original, ``conn.close()``
    sat after the in-loop ``return`` and never ran when a row was found,
    leaking the connection.
    """
    conn = sqlite3.connect('babyNamesDB_2column.db')
    try:
        cur = conn.cursor()
        for rowB in cur.execute('SELECT Boys FROM babyNames ORDER BY RANDOM() LIMIT 1;'):
            return list(rowB)
        return None
    finally:
        conn.close()
def rGirl():
    """Return a one-element list holding a random girls' name, or None if the
    table is empty.

    The connection is now always closed: in the original, ``conn.close()``
    sat after the in-loop ``return`` and never ran when a row was found,
    leaking the connection.
    """
    conn = sqlite3.connect('babyNamesDB_2column.db')
    try:
        cur = conn.cursor()
        for rowG in cur.execute('SELECT Girls FROM babyNames ORDER BY RANDOM() LIMIT 1;'):
            return list(rowG)
        return None
    finally:
        conn.close()
# Run the interactive prompt only when executed directly.
if __name__ == "__main__":
    main()
7951b4e2c808f53b7a5cb4e9916ec357b22d8c9f | Python | bsextion/CodingPractice_Py | /Misc/Learning/dictionary.py | UTF-8 | 222 | 2.859375 | 3 | [] | no_license | monthConversions = {
"Jan" : "January",
"Feb" : "Februrary",
"Mar" : "March",
"Apr" : "April",
}
# print(monthConversions["Jan"])
# print(monthConversions.get("Jan"))
# print(monthConversions.get("Luv", "Not valid")) | true |
b070d2a012c468b0779d7bdf7978272fb66ede11 | Python | aidanby/493_GAN | /RenyiGan-TensorFlow2/model.py | UTF-8 | 3,887 | 3 | 3 | [
"MIT"
] | permissive | # Functions to create discriminator and generator
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization, \
LeakyReLU, Conv2DTranspose, Conv2D, Dropout, Flatten, Reshape
def get_generator():
inputs = tf.keras.Input(shape=(100,))
generator = layers.Dense(7*7*256, use_bias=False)(inputs)
generator = layers.BatchNormalization()(generator)
generator = layers.LeakyReLU()(generator)
generator = layers.Reshape((7, 7, 256))(generator)
generator = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(generator)
generator = layers.BatchNormalization()(generator)
generator = layers.LeakyReLU()(generator)
generator = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(generator)
generator = layers.BatchNormalization()(generator)
generator = layers.LeakyReLU()(generator)
out = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(generator)
return tf.keras.Model(inputs=inputs, outputs=out)
def get_discriminator():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
input_shape=[28, 28, 1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
# HIMESH MODELS
# much more stable
def build_generator():
with tf.name_scope('generator') as scope:
model = Sequential(name=scope)
model.add(Dense(7 * 7 * 256, use_bias=False, kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01), input_shape=(28 * 28,)))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size
model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01)))
assert model.output_shape == (None, 7, 7, 128)
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01)))
assert model.output_shape == (None, 14, 14, 64)
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', activation='tanh', use_bias=False,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.01)))
assert model.output_shape == (None, 28, 28, 1)
return model
def build_discriminator():
with tf.name_scope('discriminator') as scope:
model = Sequential(name=scope)
model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01)))
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01)))
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid', kernel_initializer=
RandomNormal(mean=0.0, stddev=0.01)))
return model
| true |
107e479994285064dffbe517e7ebc8d39b39db39 | Python | dario-passarello/dbsched | /tests/schedule_test.py | UTF-8 | 953 | 2.5625 | 3 | [] | no_license | import unittest
from dbsched.schedule import Schedule
cases = {
'w0(x) w1(x) w2(y) r2(y) w3(x)': True,
'w0(x) r2(x) r1(x) w2(x) w2(z)': True,
'r1(x) r1(y) r2(z) r2(y) w2(y) w2(z) r1(z)': False,
'r1(x) r2(x) w2(x) r1(x)': False,
'r1(x) r2(x) w1(x) w2(x)': False,
'w0(x) r1(x) w0(z) r1(z) r2(x) w0(y) r3(z) w3(z) w2(y) w1(x) w3(y)': True,
'r1(x) w2(x) w1(x) w3(x)': True,
'r5(x) r3(y) w3(y) r6(t) r5(t) w5(z) w4(x) r3(z) w1(y) r6(y) w6(t) w4(z) w1(t) w3(x) w1(x) r1(z) w2(t) w2(z)': False
}
class ScheduleTest(unittest.TestCase):
    """Checks Schedule.VSR() against the expected flags in `cases`
    (True apparently means the schedule is view-serializable, i.e. VSR()
    returns a non-None witness).

    NOTE(review): test_CSR only prints CSR() results — it asserts nothing.
    """
    def test_VSR(self):
        for test, result in cases.items():
            self.assertEqual(result, Schedule(sched_str=test).VSR() is not None)
    def test_CSR(self):
        for test, result in cases.items():
            print(Schedule(sched_str=test).CSR())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
fc4ab4bbc151f03865f57f5e88ad4634b4fa144c | Python | kseniajasko/Python_study_beetroot | /hw13/hw13_task1.py | UTF-8 | 794 | 5.0625 | 5 | [] | no_license | # Create a base class named Animal with a method called talk and then create two subclasses:
# Dog and Cat, and make their own implementation of the method talk be different.
# For instance, Dog’s can be to print ‘woof woof’, while Cat’s can be to print ‘meow’.
# # Also, create a simple generic function, which takes as input instance of a Cat or Dog classes
# and performs talk method on input parameter.
class Animal:
    """Base class for the talk() demo: subclasses override talk() with
    their own sound."""

    def __init__(self, name):
        # remember the animal's given name
        self.name = name

    def talk(self):
        """Default voice: silent — subclasses provide the actual sound."""
        return None

    def simple_talk(self):
        """Generic entry point that defers to the subclass's talk()."""
        self.talk()
class Cat(Animal):
    """Cat: talk() prints 'Meow!'."""
    def talk(self):
        print('Meow!')
class Dog(Animal):
    """Dog: talk() prints 'Woof!'."""
    def talk(self):
        print('Woof!')
# Polymorphism demo: each animal's own talk() runs through the shared
# simple_talk() interface (prints 'Meow!' then 'Woof!').
a = Cat('Sirko')
b = Dog('Bob')
list1 = [a, b]
for animal in list1:
    animal.simple_talk()
| true |
210ad85100ad1e8f42b6bf1851e74fdeb67d462d | Python | AalauraaA/P5 | /Supplerende_materiale/Scripts/N_and_p_analysis.py | UTF-8 | 8,867 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
This Python file is been made by the project group Mattek5 C4-202
This is the analysis of the model order p and lenght N.
"""
from __future__ import division
import os
import sys
lib_path = '\\Scripts\\libs'
cwd = os.getcwd()[:-8]
sys.path.insert(0, cwd + lib_path)
Stemt = 1
if Stemt == True:
data_path = "\\Lydfiler\\Sound\\Stemt\\"
else:
data_path = "\\Lydfiler\\Sound\\Ustemt\\"
import scipy.io.wavfile as wav
import numpy as np
import matplotlib.pyplot as plt
import voiced_unvoiced as vu
import LP_speech as lps
import windowfunctions as win
#==============================================================================
# Optimized p and N for voiced files
#==============================================================================
""" Import data """
def downsample(d, dc, filename, data_path):
"""
Input:
d: if equal to 1 downsampling will happen. Othervise, the data
will only be imported.
dc: downsamplingconstant, which the data will be downsampled by.
filename: the filename of the data as a string.
Returns:
The (possibly downsampled) data in a numpy-array with dtype = float64.
"""
if d == False:
fs, data = wav.read(cwd + data_path + filename)
data = np.array(data, dtype = "float64")
return fs, data
else:
fullfs, fulldata = wav.read(cwd + data_path + filename)
data = np.array([fulldata[i] for i in range(0,len(fulldata),dc)], dtype = "float64")
fs = int(fullfs/dc)
return fs, data
data = {}
for filename in os.listdir(cwd + data_path):
f, data[filename] = downsample(0,1,filename, data_path)
N = int(0.02*f)
p_max = 101
""" Run the for loop if you have time enough """
E_list_total = np.zeros(p_max)
for key in data.keys(): # Run only if you have a lot of time
M = len(lps.LP_parameters(data[key],N,5,.5,window_arg = win.Hamming))-3
E_list = np.zeros((p_max,M))
for p in range(1,p_max):
if p % 10 == 0:
print "First loop. p = %d, key: %s." % (p,key)
parameters = lps.LP_parameters(data[key],N,p,.5,window_arg = win.Hamming)
for i in range(M):
E_list[p][i] = parameters[i]['gain']**2
E_list = E_list/E_list[1]
E_list_total += np.average(E_list,axis=1)
np.save("npy-files/E_list_total", E_list_total) # Save file for later use
""" Plot the error for p """
plt.figure(1)
plt.plot(E_list_total[1:])
plt.xlabel(r"$p$")
plt.ylabel(r"$\barE_p$")
plt.savefig("figures/E_p.png", dpi = 500)
Error_time = np.zeros(p_max)
Error_freq = np.zeros(p_max)
""" Run if you have enough time """
for p in range(1,p_max): # Run only if you have a lot of time
if p % 10 == 0:
print "Second loop. p = %d." % p
for key in data.keys():
Parameters = lps.LP_parameters(data[key],N,p,0.5,window_arg = win.Hamming)
Prediction = lps.LP_predict(Parameters)
amp_data = np.abs(np.fft.fft(data[key]))
amp_pred = np.abs(np.fft.fft(Prediction))
length = int(len(Prediction))
Error_time[p] += np.linalg.norm(Prediction - data[key][:length])
Error_freq[p] += np.linalg.norm(amp_pred[:int(length/2)] - amp_data[:int(length/2)])
np.save("npy-files/Error_time.npy", Error_time)
np.save("npy-files/Error_freq.npy", Error_freq)
np.save("npy-files/amp_data.npy", amp_data)
np.save("npy-files/amp_pred.npy", amp_pred)
""" Calculate the normalized amplitudes of the errors in time and frequency """
for p in range(1,p_max):
Error_freq[p] /= len(data.keys())
Error_time[p] /= len(data.keys())
amp_data_norm = amp_data/np.max([amp_data[:len(amp_pred)],amp_pred])
amp_pred_norm = amp_pred/np.max([amp_data[:len(amp_pred)],amp_pred])
""" Plot the errors for p """
plt.figure(2)
plt.plot(amp_data_norm[:int(len(amp_data)/2)], label = r"$|\mathcal{F}[s[n]](e^{j\omega)})|$")
plt.plot(amp_pred_norm[:int(len(amp_pred)/2)], label = r"$|\mathcal{F}[\hat{s}[n]](e^{j\omega)})|$")
plt.xlabel(r"$\omega$")
plt.title("Amplituderespons for de originale og praedikterede data")
plt.legend()
plt.savefig("figures/amp_data.png", dpi = 500)
plt.figure(3)
plt.plot(Error_time[1:95])
plt.xlabel(r"$p$")
plt.title("Gennemsnitlige fejl i tidsdomaenet")
plt.savefig("figures/resi_p_time.png", dpi = 500)
plt.figure(4)
plt.plot(Error_freq[1:95])
plt.xlabel(r"$p$")
plt.title("Gennemsnitlige fejl i frekvensdomaenet")
plt.savefig("figures/resi_p_freq.png", dpi = 500)
""" Finding errors for N """
Nlist = np.arange(50,400,10)
datF = {key: np.abs(np.fft.fft(data[key])) for key in data.keys()}
E_list_time = np.zeros(len(Nlist))
E_list_freq = np.zeros(len(Nlist))
p = 39 # Optimized p
""" Run if you have enough time """
for i in range(len(Nlist)):
if Nlist[i] % 10 == 0:
print "Third loop. N = %d." % Nlist[i]
for key in data.keys():
N = Nlist[i]
p = 40
parameters = lps.LP_parameters(data[key],N,p,.5,window_arg = win.Rectangular)
prediction = lps.LP_predict(parameters)
M = len(prediction)
M2 = int(M/2.)
predictF = np.abs(np.fft.fft(prediction))
E_list_time[i] += np.linalg.norm(prediction-data[key][:M])
E_list_freq[i] += np.linalg.norm(predictF[:M2]-datF[key][:M2])
for i in range(1,len(Nlist)):
E_list_freq[i] /= len(data.keys())
E_list_time[i] /= len(data.keys())
np.save("npy-files/E_list_time.npy", E_list_time)
np.save("npy-files/E_list_freq.npy", E_list_freq)
""" Plots of the errors for N in time and frequency """
plt.figure(5)
plt.title("Gennemsnitlige fejl i tidsdomaenet")
plt.xlabel(r"$N$")
plt.plot(Nlist[1:],E_list_time[1:])
plt.savefig("figures/resi_N_time.png", dpi = 500)
plt.figure(6)
plt.title("Gennemsnitlige fejl i frekvensdomaenet")
plt.xlabel(r"$N$")
plt.plot(Nlist[1:],E_list_freq[1:])
plt.savefig("figures/resi_N_freq.png", dpi = 500)
#==============================================================================
# Optimized p and N for all voiced, unvoiced and sentence files
#==============================================================================
""" Setting path up """
data_path_stemt = "\\Lydfiler\\Sound\\Stemt\\"
data_path_ustemt = "\\Lydfiler\\Sound\\Ustemt\\"
data_path_blandet = "\\Lydfiler\\Sound\\Saetning\\"
""" Data import - manual choose between voiced, unvoiced and sentences """
dat = {} # Voiced
for filename in os.listdir(cwd + data_path_stemt):
f, dat[filename] = downsample(0,1,filename,data_path_stemt)
#dat = {} # Unvoiced
#for filename in os.listdir(cwd + data_path_ustemt):
# f, dat[filename] = downsample(0,1,filename,data_path_ustemt)
#dat = {} # Sentence (Mixed)
#for filename in os.listdir(cwd + data_path_blandet):
# if os.path.isdir(cwd + data_path_blandet + filename) == False:
# f, dat[filename] = downsample(1,2,filename,data_path_blandet)
""" The found p values """
msek = 0.01 # Number of milliseconds
N = int(msek*f)
p1 = 12
p2 = 39
""" Run if you have enough time, else load the npy files """
#Error_time = np.zeros(2)
#Error_freq = np.zeros(2)
#i = 0
#Parameters = {}
#Prediction = {}
#amp_data = {}
#amp_pred = {}
#for p in [p1,p2]:
# for key in dat.keys():
# if len(dat[key]) >= 2*N:
# print "Starting %s." % key
# Parameters = lps.LP_parameters(dat[key],N,p,0.5)
# Prediction = lps.LP_predict(Parameters)
# print "Prediction done."
# amp_data = np.abs(np.fft.fft(dat[key]))
# amp_pred = np.abs(np.fft.fft(Prediction))
# print "FFT done."
# length = int(len(Prediction))
# Error_time[i] += np.linalg.norm(Prediction - dat[key][:length])/len(Prediction)
# Error_freq[i] += np.linalg.norm(amp_pred[:int(length/2)] - amp_dat[:int(length/2)])/len(amp_pred)
# i += 1
#
#for p in range(2):
# Error_freq[p] /= len(data.keys())
# Error_time[p] /= len(data.keys())
#
#""" Save the errors """
#os.chdir(cwd + "\\Scripts\\npy-files") # The right path to save the npy files
#savedir = os.getcwd()
#np.save("Error_time_%d_mix" % N, Error_time_gen)
#np.save("Error_freq_%d_mix" % N, Error_freq_gen)
""" Calculate the error in time and frequency """
os.chdir(cwd + "\\Scripts\\npy-files\\Time_errors_gen")
loaddir1 = os.getcwd()
Time_errors = {}
for filename in os.listdir(loaddir1):
Time_errors[filename] = np.load(filename)
os.chdir(cwd + "\\Scripts\\npy-files\\Freq_errors_gen")
loaddir2 = os.getcwd()
Freq_errors = {}
for filename in os.listdir(loaddir2):
Freq_errors[filename] = np.load(filename)
""" Normalized the errors in time and frequency """
normal_time = Time_errors['Error_time_160_voi.npy'][0]
normal_freq = Freq_errors['Error_freq_160_voi.npy'][0]
""" Caluclate the errors """
for key in Time_errors.keys():
Time_errors[key] /= normal_time
for key in Freq_errors.keys():
Freq_errors[key] /= normal_freq | true |
c1b6203f70a4943af33d5b05260836c302313306 | Python | C-LLOYD/DataProcessingLDA | /code/movingAverageFilterReynoldsStresses.py | UTF-8 | 3,519 | 2.96875 | 3 | [] | no_license | #Currently written as a script but will be made into a function at a later date ..
#
# Script is used to identify and remove spikes in a given data set by using the method
# of Goring and Nikora (2002)
#
#
## Initialise python
import numpy as np
import matplotlib.pyplot as plt
#
########################################################################################################################
##
## MOVING AVERAGE FILTER
##
## Define the filtering function:
## Input: velocity and the averaging window (multiple of 2)
## Output: index of spikes after several loops.
##
## Define calculation of moving average mean
def movingWeightedAverage(win, phi, tau):
    """Tau-weighted moving average of phi.

    Args:
        win: HALF of the averaging window; interior point i averages
            phi[i-win:i+win] weighted by tau[i-win:i+win].
        phi: sampled variable (1-D numpy array).
        tau: per-sample weights (same length as phi).

    Returns:
        float64 numpy array of len(phi); the first and last `win` entries
        are flat-filled with the weighted mean of the first/last full window.
    """
    # np.full(..., np.nan) replaces np.zeros + np.NAN: the np.NAN alias was
    # removed in NumPy 2.0, so the original raised AttributeError there.
    Phi = np.full(len(phi), np.nan)
    # Edge handling: flat-fill both ends with the nearest full-window mean.
    Phi[0:win] = np.sum(phi[0:2*win] * tau[0:2*win]) / np.sum(tau[0:2*win])
    Phi[-win:] = np.sum(phi[-2*win:] * tau[-2*win:]) / np.sum(tau[-2*win:])
    # Interior sliding window.  (The i == len(phi)-win step recomputes the
    # same value as the right-edge fill, matching the original behavior.)
    for i in range(win, len(phi) - win + 1):
        seg = slice(i - win, i + win)
        Phi[i] = np.sum(phi[seg] * tau[seg]) / np.sum(tau[seg])
    return Phi
def movingAverageFilterReynoldsStresses(u,v,resT,window,Nstds):
# Half the window for consistency
W = int(window/2)
N = np.linspace(1,len(u),len(u))
U = movingWeightedAverage(W,u,resT)
V = movingWeightedAverage(W,v,resT)
ruu = (u-U)**2
rvv = (v-V)**2
ruv = (u-U)*(v-V)
Ruu = movingWeightedAverage(W,ruu,resT)
Rvv = movingWeightedAverage(W,rvv,resT)
Ruv = movingWeightedAverage(W,ruv,resT)
varRuu = movingWeightedAverage(W,(ruu-Ruu)**2,resT)
varRvv = movingWeightedAverage(W,(rvv-Rvv)**2,resT)
varRuv = movingWeightedAverage(W,(ruv-Ruv)**2,resT)
tol = Nstds
spikes = ( (u < U - tol*np.sqrt(Ruu)) + (u > U + tol*np.sqrt(Ruu)) +
(v < V - tol*np.sqrt(Rvv)) + (v > V + tol*np.sqrt(Rvv)) +
(ruu < Ruu - tol*np.sqrt(varRuu)) + (ruu > Ruu + tol*np.sqrt(varRuu)) +
(rvv < Rvv - tol*np.sqrt(varRvv)) + (rvv > Rvv + tol*np.sqrt(varRvv)) +
(ruv < Ruv - tol*np.sqrt(varRuv)) + (ruv > Ruv + tol*np.sqrt(varRuv)) )
#
plot = False
if plot == True:
plt.subplot(2,3,1)
plt.plot(N[~spikes],u[~spikes],color='k')
plt.plot(N[spikes],u[spikes],color='r',linestyle=' ',marker='.')
plt.plot(N,U)
plt.plot(N,U - tol*np.sqrt(Ruu))
plt.plot(N,U + tol*np.sqrt(Ruu))
plt.xlabel('N')
plt.ylabel('u')
plt.subplot(2,3,4)
plt.plot(N[~spikes],v[~spikes],color='k')
plt.plot(N[spikes],v[spikes],color='r',linestyle=' ',marker='.')
plt.plot(N,V)
plt.plot(N,V - tol*np.sqrt(Rvv))
plt.plot(N,V + tol*np.sqrt(Rvv))
plt.xlabel('N')
plt.ylabel('v')
plt.subplot(2,3,2)
plt.plot(N[~spikes],ruu[~spikes],color='k')
plt.plot(N[spikes],ruu[spikes],color='r',linestyle=' ',marker='.')
plt.plot(N,Ruu)
plt.plot(N,Ruu - tol*np.sqrt(varRuu))
plt.plot(N,Ruu + tol*np.sqrt(varRuu))
plt.xlabel('N')
plt.ylabel('ruu')
plt.subplot(2,3,5)
plt.plot(N[~spikes],rvv[~spikes],color='k')
plt.plot(N[spikes],rvv[spikes],color='r',linestyle=' ',marker='.')
plt.plot(N,Rvv)
plt.plot(N,Rvv - tol*np.sqrt(varRvv))
plt.plot(N,Rvv + tol*np.sqrt(varRvv))
plt.xlabel('N')
plt.ylabel('rvv')
plt.subplot(2,3,3)
plt.plot(N[~spikes],ruv[~spikes],color='k')
plt.plot(N[spikes],ruv[spikes],color='r',linestyle=' ',marker='.')
plt.plot(N,Ruv)
plt.plot(N,Ruv - tol*np.sqrt(varRuv))
plt.plot(N,Ruv + tol*np.sqrt(varRuv))
plt.xlabel('N')
plt.ylabel('ruv')
plt.show()
plt.close()
return spikes
########################################################################################################################
| true |
86e075e9861b972e9e82c1d135f919953e914c45 | Python | OrderFromChaos/ICPC | /Codeforces/preapp/668/B.py | UTF-8 | 419 | 2.84375 | 3 | [] | no_license | T = int(input())
# For each test case: keep a running balance; incomes add to it, debts are
# paid from it without letting it go negative; the leftover balance after
# processing the whole sequence is printed.
for t in range(T):
    _ = input()   # the length line is consumed and ignored
    a = [int(x) for x in input().split()]
    # Greedy cancel all positives on right negatives
    # Remaining pos sum is number of needed coins
    bank = 0
    # bankhist = [0]
    for i in a:
        if i > 0:
            bank += i
        elif i < 0:
            # debt: pay what we can, clamp the balance at zero
            bank = max([0, bank+i])
        # bankhist.append(bank)
    # print(bankhist)
    print(bank)
9c57e9c58b9c3415ca8f23e75174f595f1e16675 | Python | ChoSangwoo/Python_study | /Acmicpc/1449_acmicpc_그리디.py | UTF-8 | 280 | 2.953125 | 3 | [] | no_license | # 1449 수리공 항승 그리디
# Greedy tape covering (BOJ 1449-style): sort the leak positions, and start
# a new tape of length l whenever the next leak falls outside the current
# tape's half-open span [s, e).
n, l = map(int, input().split())
m = list(map(int, input().split()))
m.sort()
s = m[0]      # left edge of the current tape
e = m[0] + l  # one past the right edge of the current tape
c = 1         # number of tapes used so far
for i in range(n):
    if s <= m[i] and m[i] < e:
        continue
    else:
        # leak not covered: lay a fresh tape starting here
        s = m[i]
        e = m[i] + l
        c += 1
print(c)
26be09c732d287554adc210fff44500a1c9c761c | Python | netantho/mes_presentations | /python/decembre_11/src/demo_tests.py | UTF-8 | 180 | 2.703125 | 3 | [] | no_license | import re
# Minimal email shape: local@domain.tld — at least one dot after the '@',
# no whitespace and no extra '@'.  The previous pattern r'[\S.]+@[\S.]+'
# matched ANY non-space text around '@' ('[\S.]' is just "non-whitespace"),
# so its own negative test ('test@where') failed.
EMAIL_REGEX = r'[^@\s]+@[^@\s]+\.[^@\s]+'


class testEmail:
    def test_email_regex(self):
        assert re.match(EMAIL_REGEX, 'test@mail.ru')
        assert not re.match(EMAIL_REGEX, 'test@where')
| true |
8f39b631b188047746138da502d67897be6cbd8c | Python | daniel-momot/Design-IS | /task4.1/task2.py | UTF-8 | 887 | 4.25 | 4 | [] | no_license |
string = input("Введите строку: ")
nums_raw = input("Введите позиции в строке (разделенные пробелом, нумерация с 0): ")
numbers = list(map(int, nums_raw.split()))
to_print = "Cимволы, находящихся на заданных позициях:"
try:
# with list comprehensions
letters_str = [string[i] for i in numbers]
print(to_print, ''.join(letters_str))
# without list comprehensions 1
letters_str = []
for i in numbers:
letters_str.append(string[i])
print(to_print, ''.join(letters_str))
# without list comprehensions 2
letters_str = map(lambda x: string[x], numbers)
print(to_print, ''.join(letters_str))
except IndexError:
print("Некорректный номер позиции присутствует в списке")
| true |
f6cdbbad44ebd5703a7281865c03b952d77e4929 | Python | memcock/memcock | /lib/images.py | UTF-8 | 1,422 | 2.640625 | 3 | [] | no_license | from app import db
from models.images import Image
from lib.query import getImages as GetImagesFromReddit
from lib.database import getImages as GetImagesFromDB
class ImagePool:
def __init__(self, subreddit, used = []):
self._subreddit = subreddit
self._pool = []
self._low_watermark = 10
self._pulledFromDB = False
self._usedImages = used[:]
def _checkPoolLevel(self):
return len(self._pool) > self._low_watermark
def _getFromPool(self):
self._fillPool()
if self._pool:
item = self._pool.pop()
return item
return self._getFromPool()
def _fillPool(self, chunk = 10):
if not self._pulledFromDB:
self._fillFromDB()
self._pulledFromDB = True
if not self._checkPoolLevel():
self._fillFromReddit(chunk)
def _fillFromReddit(self, chunk):
for image in GetImagesFromReddit(self._subreddit, chunk, self._usedImages):
self._pool.append(image)
chunk = chunk - 1
if chunk == 0:
break
def _fillFromDB(self):
for image in GetImagesFromDB(self._subreddit):
self._pool.append(image)
def _getImage(self):
image = self._getFromPool()
if not image.id in self._usedImages:
self._usedImages.append(image.id)
image.updateLastUsed()
return image
return self._getImage()
def get(self, count):
while count > 0:
yield self._getImage()
count = count - 1
def getList(self, count):
urls = []
for u in self.get(count):
urls.append(u)
return urls
| true |
8bc4df0001358b41ef0a6083500847293b218a6d | Python | anuragc10/Algorithim-HackerRank | /Service Lane.py | UTF-8 | 176 | 2.75 | 3 | [] | no_license | n,m=map(int,input().split())
arr=list(map(int,input().split()))
arr1=list()
for i in range(m):
p,q=map(int,input().split())
arr1=arr[p:q+1]
print(min(arr1))
| true |
1da1011ff0343dd9e08e291df541299ff1fdde7b | Python | tedyeung/Python- | /Bootcamp/prime_number.py | UTF-8 | 385 | 3.734375 | 4 | [] | no_license | import math
question = input('Please add number and check if the number is Prime? ')
num_input = int(question)
def prime_number(number):
if (number % 2 == 0):
return False
for i in range(3, int(number**0.5) + 1, 2):
if (number % 1 == 0):
return False
return True
prime_number(num_input)
l = list(range(3, int(5**0.5)+1))
print (l)
| true |
681ca06e51b29882c31f61d12298b0903f47dcbb | Python | Wizmann/ACM-ICPC | /Leetcode/Algorithm/python/1000/00003-Longest Substring Without Repeating Characters.py | UTF-8 | 481 | 2.984375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | class Solution:
# @return an integer
def lengthOfLongestSubstring(self, s):
st = set()
(p, q) = (0, 0)
ans = 0
for c in s:
if c not in st:
st.add(c)
q += 1
ans = max(ans, q - p)
else:
while p < q and c in st:
st.remove(s[p])
p += 1
st.add(c)
q += 1
return ans
| true |
f29fcfbe334ea78d539c91599f90cf1ae46f1dab | Python | huihui7987/LagouSpider | /src/main.py | UTF-8 | 4,731 | 2.515625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from src.https import Http
from src.parse import Parse
from src.setting import headers
from src.setting import cookies
import time,random
import logging
import codecs
import sqlite3
logging.basicConfig(level=logging.ERROR,
format='%(asctime)s Process%(process)d:%(thread)d %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='diary.log',
filemode='a')
def getInfo(url, para):
"""
获取信息
"""
generalHttp = Http()
htmlCode = generalHttp.post(url, para=para, headers=headers, cookies=cookies)
generalParse = Parse(htmlCode)
pageCount = generalParse.parsePage()
print("总页数:{0}".format(pageCount))
info = []
for i in range(1, pageCount + 1):
print('第%s页' % i)
para['pn'] = str(i)
time.sleep(random.randint(1,5))
try:
htmlCode = generalHttp.post(url, para=para, headers=headers, cookies=cookies)
generalParse = Parse(htmlCode)
info = info + getInfoDetail(generalParse)
'''
每50页向文件写一次
'''
dd = 5
if(i % dd == 0):
flag = processInfo(info, para)
if flag:
print("文件写{0}~{1}页的信息".format((i-dd),i))
info=[]
if (pageCount-i) < 50:
flag = processInfo(info, para)
time.sleep(5)
if flag:
print("文件写{0}页的信息".format(i))
info = []
except Exception as e:
print(e)
time.sleep(2)
return flag
def getInfoDetail(generalParse):
"""
信息解析
"""
info = generalParse.parseInfo()
return info
def processInfo(info, para):
"""
信息存储
"""
logging.error('Process start')
try:
title = '公司名称\t公司类型\t融资阶段\t标签\t公司规模\t公司所在地\t职位类型\t' \
'学历要求\t福利\t薪资\t城市\tbusinessZones\t公司简称\t是否校招\tjobNature\t' \
'positionLables\tpositionName\tresumeProcessDay\tresumeProcessRate\t' \
'skillLables\tthirdType\tlatitude\tlongitude\tlinestaion\t工作经验\tcreateTime\n'
file = codecs.open('%s职位.xls' % para['city'], 'a+', 'utf-8')
file.write(title)
for p in info:
line = str(p['companyName']) + '\t' + \
str(p['companyType']) + '\t' + \
str(p['companyStage']) + '\t' + \
str(p['companyLabel']) + '\t' + \
str(p['companySize']) + '\t' + \
str(p['companyDistrict']) + '\t' + \
str(p['positionType']) + '\t' + \
str(p['positionEducation']) + '\t' + \
str(p['positionAdvantage']) + '\t' + \
str(p['positionSalary']) + '\t' + \
str(p['city']) + '\t' + \
str(p['businessZones']) + '\t' + \
str(p['companyShortName']) + '\t' + \
str(p['isSchoolJob']) + '\t' + \
str(p['jobNature']) + '\t' + \
str(p['positionLables']) + '\t' + \
str(p['positionName']) + '\t' + \
str(p['resumeProcessDay']) + '\t' + \
str(p['resumeProcessRate']) + '\t' + \
str(p['skillLables']) + '\t' + \
str(p['thirdType']) + '\t' + \
str(p['latitude']) + '\t' + \
str(p['longitude']) + '\t' + \
str(p['linestaion']) + '\t' + \
str(p['positionWorkYear']) + '\t' + \
str(p['createTime']) + '\n'
file.write(line)
file.close()
return True
except Exception as e:
print(e)
return None
def main(url, para):
"""
主函数逻辑
"""
logging.error('Main start')
if url:
flag = getInfo(url, para) # 获取信息
#flag = processInfo(info, para) # 信息储存
return flag
else:
return None
if __name__ == '__main__':
kdList = [u'数据',u'算法',u'数据挖掘',u'数据分析',u'大数据']
cityList = [u'北京']
url = 'https://www.lagou.com/jobs/positionAjax.json'
for city in cityList:
print('爬取%s' % city)
para = {'first': 'true', 'pn': '1', 'kd': kdList[0], 'city': city}
flag = main(url, para)
if flag:
print('%s爬取成功' % city)
else:
print('%s爬取失败' % city)
| true |
f53326b889a1537a35e07eb73361e54b73c243d8 | Python | nimasmi/buckinghamshire-council | /bc/area_finder/utils.py | UTF-8 | 1,230 | 3.046875 | 3 | [
"BSD-3-Clause"
] | permissive | import re
from django.core.exceptions import ValidationError
def validate_postcode(postcode):
"""A UK postcode validator that also formats.
A vanilla Django validator would return None for a valid value. Here we also return
a nicely-formatted version of the postcode. This can be ignored if only form
validation is needed.
"""
postcode = postcode.strip()
pcre = re.compile(
r"^(?P<outward>[A-Za-z][A-Ha-hJ-Yj-y]?[0-9][A-Za-z0-9]?)(?P<space> ?)(?P<inward>[0-9][A-Za-z]{2}|[Gg][Ii][Rr] ?0[Aa]{2})$" # noqa
)
match = pcre.match(postcode)
if not match:
raise ValidationError("Invalid Postcode")
else:
# The postcode matches a UK postcode regex. Format nicely.
return f"{match.group('outward')} {match.group('inward')}".upper()
def area_from_district(district_name):
"""Strip a trailing " District Council" from a string."""
return district_name.strip().split(" District Council")[0]
def clean_escaped_html(s):
"""
Remove ASCII from HTML string.
"""
htmlCodes = [
"'",
""",
">",
"<",
"&",
]
for code in htmlCodes:
s = s.replace(code, "")
return s
| true |
f9daba9440dcc9c1a7694565d40d26a2649b969b | Python | roaet/amadeus | /amadeus/datasources/base.py | UTF-8 | 4,578 | 2.578125 | 3 | [] | no_license | import hashlib
import logging
import numpy as np
import pandas as pd
from amadeus import constants
from amadeus.datasources import cache
from amadeus import utils
from amadeus import yaml_object as yo
LOG = logging.getLogger(__name__)
TOP_LEVEL_KEY = 'datasource'
class BaseDatasource(yo.YAMLBackedObject):
def __init__(self, yaml_obj, conf, connection_manager):
super(BaseDatasource, self).__init__(yaml_obj, conf, TOP_LEVEL_KEY)
self.connection_manager = connection_manager
self.defaults = self._gather_defaults()
self.dtypes = self.yaml_obj.get('types', {})
self.do_cache = True
def _gather_defaults(self):
return self.yaml_obj.get('defaults', {})
@property
def _cache_dir(self):
name = "%s_%s" % (self.name, self.type)
cache_dir = utils.path_join(constants.CACHE_DIR, name)
return cache_dir
def _create_cache_directory(self):
if not utils.does_directory_exist(self._cache_dir):
utils.make_directory(self._cache_dir)
LOG.debug("Made cache directory %s" % self._cache_dir)
def _hash_seed(self, **conf):
return str(conf)
def _generate_df_suffix_from_conf(self, **conf):
if not conf:
return "0" * 32
hash_arg = self._hash_seed(**conf)
suffix = hashlib.md5(hash_arg).hexdigest()
return suffix
def _generate_cache_filename(self, **conf):
suffix = self._generate_df_suffix_from_conf(**conf)
return "cache_%s" % suffix
def _target_cache_file(self, **conf):
filename = self._generate_cache_filename(**conf)
abs_filepath = utils.path_join(self._cache_dir, filename)
return abs_filepath
def _set_types(self, df_in):
df = df_in.copy()
for col in df.columns:
target_type = self.dtypes.get(col, 'string')
if target_type == 'string':
df[col] = df[col].astype(str)
if target_type == 'date':
df[col] = pd.to_datetime(df[col])
if target_type == 'int':
df[col] = df[col].astype(np.int64)
if target_type == 'float':
df[col] = df[col].astype(np.float64)
return df
def _write_cache(self, filename, df):
cache_obj = cache.CacheObject(filename)
cache_obj.write(df)
def _read_cache(self, filename):
cache_obj = cache.CacheObject(filename)
df = cache_obj.read()
return df
def _load_from_cache(self, filename):
df = self._read_cache(filename)
LOG.debug("Loaded types: %s" % df.dtypes)
df = self._set_types(df)
LOG.debug("Set types: %s" % df.dtypes)
return df
def _create_target_filename(self, **conf):
self._create_cache_directory()
return self._target_cache_file(**conf)
def _hascache(self, **conf):
if not self.do_cache:
return False
filename = self._create_target_filename(**conf)
cache_obj = cache.CacheObject(filename)
return cache_obj.exists()
def _precache(self, **conf):
filename = self._create_target_filename(**conf)
return self._load_from_cache(filename)
def _postcache(self, df, **conf):
if df is None or len(df) == 0:
return None
filename = self._create_target_filename(**conf)
self._write_cache(filename, df)
return self._load_from_cache(filename)
def _get_data(self, **conf):
return pd.DataFrame([])
def _cached_as_dataframe(self, **conf):
if self._hascache(**conf):
return self._precache(**conf)
df = self._get_data(**conf)
return self._postcache(df, **conf)
def _merge_defaults(self, conf):
final = self.defaults.copy()
for k, v in conf.iteritems():
final[k] = v
return final
def __repr__(self):
return "DS(%s:%s)" % (self.type, self.name)
def purge_cache(self):
cache_dir = self._cache_dir
try:
utils.rmtree(cache_dir)
except OSError:
LOG.info("Nothing happened")
return False
return True
def set_cache(self, flag):
self.do_cache = flag
def as_dataframe(self, conf):
final = self._merge_defaults(conf)
return self._cached_as_dataframe(**final)
@staticmethod
def check(yaml_obj, yaml_file, TOP_LEVEL_KEY):
return yo.YAMLBackedObject.check(
yaml_obj, yaml_file, TOP_LEVEL_KEY, constants.DATASOURCE_TYPES)
| true |
ad5b96ffcb0c427cdc77be11aa82ce1fe65749ff | Python | google/deepvariant | /third_party/nucleus/io/sam.py | UTF-8 | 13,415 | 2.578125 | 3 | [
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=line-too-long
"""Classes for reading and writing SAM and BAM files.
The SAM/BAM/CRAM formats are described at
https://samtools.github.io/hts-specs/SAMv1.pdf
https://samtools.github.io/hts-specs/CRAMv3.pdf
API for reading:
```python
from third_party.nucleus.io import sam
with sam.SamReader(input_path) as reader:
for read in reader:
print(read)
```
where `read` is a `nucleus.genomics.v1.Read` protocol buffer. input_path will
dynamically decode the underlying records depending the file extension, with
`.sam` for SAM files, `.bam` for BAM files, and `.cram` for CRAM files. It will
also search for an appropriate index file to use to enable calls to the
`query()` method.
API for writing SAM/BAM:
```python
from third_party.nucleus.io import sam
# reads is an iterable of nucleus.genomics.v1.Read protocol buffers.
reads = ...
with sam.SamWriter(output_path, header=header) as writer:
for read in reads:
writer.write(read)
```
API for writing CRAM:
```python
# ref_path is required for writing CRAM files. If embed_ref, the output CRAM
# file will embed reference sequences.
with sam.SamWriter(output_path, header=header, ref_path=ref_path,
embed_ref=embed_ref) as writer:
for read in reads:
writer.write(read)
```
For both reading and writing, if the path provided to the constructor contains
'.tfrecord' as an extension, a `TFRecord` file is assumed and attempted to be
read or written. Otherwise, the filename is treated as a true SAM/BAM/CRAM file.
For `TFRecord` files, ending in a '.gz' suffix causes the file to be treated as
compressed with gzip.
Notes on using CRAM with SamReader
--------------------------------
Nucleus supports reading from CRAM files using the same API as for SAM/BAM:
```python
from third_party.nucleus.io import sam
with sam.SamReader("/path/to/sample.cram") as reader:
for read in reader:
print(read)
```
There is one type of CRAM file, though, that has a slightly more complicated
API. If the CRAM file uses read sequence compression with an external reference
file, and this reference file is no longer accessible in the location specified
by the CRAM file's "UR" tag and cannot be found in the local genome cache, its
location must be passed to SamReader via the ref_path parameter:
```python
from third_party.nucleus.io import sam
cram_path = "/path/to/sample.cram"
ref_path = "/path/to/genome.fasta"
with sam.SamReader(cram_path, ref_path=ref_path) as reader:
for read in reader:
print(read)
```
Unfortunately, htslib is unable to load the ref_path from anything other than a
POSIX filesystem. (htslib plugin filesystems like S3 or GCS buckets won't work).
For that reason, we don't recommend the use of CRAM files with external
reference files, but instead suggest using read sequence compression with
embedded reference data. (This has a minor impact on file size, but
significantly improves file access simplicity and safety.)
For more information about CRAM, see:
* The `samtools` documentation at http://www.htslib.org/doc/samtools.html
* The "Global Options" section of the samtools docs at http://www.htslib.org/doc/samtools.html#GLOBAL_OPTIONS
* How reference sequences are encoded in CRAM at http://www.htslib.org/doc/samtools.html#REFERENCE_SEQUENCES
* Finally, benchmarking of different CRAM options http://www.htslib.org/benchmarks/CRAM.html
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.nucleus.io import genomics_reader
from third_party.nucleus.io import genomics_writer
from third_party.nucleus.io.python import sam_reader
from third_party.nucleus.io.python import sam_writer
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.util import ranges
from third_party.nucleus.util import utils
class NativeSamReader(genomics_reader.GenomicsReader):
"""Class for reading from native SAM/BAM/CRAM files.
Most users will want to use SamReader instead, because it dynamically
dispatches between reading native SAM/BAM/CRAM files and TFRecord files based
on the filename's extensions.
"""
def __init__(self,
input_path,
ref_path=None,
read_requirements=None,
parse_aux_fields=False,
hts_block_size=None,
downsample_fraction=None,
random_seed=None,
use_original_base_quality_scores=False,
aux_fields_to_keep=None):
"""Initializes a NativeSamReader.
Args:
input_path: str. A path to a resource containing SAM/BAM/CRAM records.
Currently supports SAM text format, BAM binary format, and CRAM.
ref_path: optional str or None. Only used for CRAM decoding, and only
necessary if the UR encoded path in the CRAM itself needs to be
overridden. If provided, we will tell the CRAM decoder to use this FASTA
for the reference sequence.
read_requirements: optional ReadRequirement proto. If not None, this proto
is used to control which reads are filtered out by the reader before
they are passed to the client.
parse_aux_fields: optional bool, defaulting to False. If False, we do not
parse the auxiliary fields of the SAM/BAM/CRAM records (see SAM spec for
details). Parsing the aux fields is unnecessary for many applications,
and adds a significant parsing cost to access. If you need these aux
fields, set parse_aux_fields to True and these fields will be parsed and
populate the appropriate Read proto fields (e.g., read.info).
hts_block_size: int or None. If specified, this configures the block size
of the underlying htslib file object. Larger values (e.g. 1M) may be
beneficial for reading remote files. If None, the reader uses the
default htslib block size.
downsample_fraction: float in the interval [0.0, 1.0] or None. If
specified as a positive float, the reader will only keep each read with
probability downsample_fraction, randomly. If None or zero, all reads
are kept.
random_seed: None or int. The random seed to use with this sam reader, if
needed. If None, a fixed random value will be assigned.
use_original_base_quality_scores: optional bool, defaulting to False. If
True, quality scores are read from OQ tag.
aux_fields_to_keep: None or list[str]. If None, we keep all aux fields if
they are parsed. If set, we only keep the aux fields with the names in
this list.
Raises:
ValueError: If downsample_fraction is not None and not in the interval
(0.0, 1.0].
ImportError: If someone tries to load a tfbam file.
"""
if input_path.endswith('.tfbam'):
# Delayed loading of tfbam_lib.
try:
from tfbam_lib import tfbam_reader # pylint: disable=g-import-not-at-top
self._reader = tfbam_reader.make_sam_reader(
input_path,
read_requirements=read_requirements,
unused_block_size=hts_block_size,
downsample_fraction=downsample_fraction,
random_seed=random_seed)
except ImportError:
raise ImportError(
'tfbam_lib module not found, cannot read .tfbam files.')
else:
aux_field_handling = reads_pb2.SamReaderOptions.SKIP_AUX_FIELDS
if parse_aux_fields:
aux_field_handling = reads_pb2.SamReaderOptions.PARSE_ALL_AUX_FIELDS
# We make 0 be a valid value that means "keep all reads" so that proto
# defaults (=0) do not omit all reads.
if downsample_fraction is not None and downsample_fraction != 0:
if not 0.0 < downsample_fraction <= 1.0:
raise ValueError(
'downsample_fraction must be in the interval (0.0, 1.0]',
downsample_fraction)
if random_seed is None:
# Fixed random seed produced with 'od -vAn -N4 -tu4 < /dev/urandom'.
random_seed = 2928130004
self._reader = sam_reader.SamReader.from_file(
input_path.encode('utf8'),
ref_path.encode('utf8') if ref_path is not None else '',
reads_pb2.SamReaderOptions(
read_requirements=read_requirements,
aux_field_handling=aux_field_handling,
aux_fields_to_keep=aux_fields_to_keep,
hts_block_size=(hts_block_size or 0),
downsample_fraction=downsample_fraction,
random_seed=random_seed,
use_original_base_quality_scores=use_original_base_quality_scores)
)
self.header = self._reader.header
super(NativeSamReader, self).__init__()
def iterate(self):
"""Returns an iterable of Read protos in the file."""
return self._reader.iterate()
def query(self, region):
"""Returns an iterator for going through the reads in the region."""
return self._reader.query(region)
def __exit__(self, exit_type, exit_value, exit_traceback):
self._reader.__exit__(exit_type, exit_value, exit_traceback)
class SamReader(genomics_reader.DispatchingGenomicsReader):
"""Class for reading Read protos from SAM/BAM/CRAM or TFRecord files."""
def _native_reader(self, input_path, **kwargs):
return NativeSamReader(input_path, **kwargs)
def _record_proto(self):
return reads_pb2.Read
class NativeSamWriter(genomics_writer.GenomicsWriter):
"""Class for writing to native SAM/BAM/CRAM files.
Most users will want SamWriter, which will write to either native SAM/BAM/CRAM
files or TFRecords files, based on the output filename's extensions.
"""
def __init__(self, output_path, header, ref_path=None, embed_ref=False):
"""Initializer for NativeSamWriter.
Args:
output_path: str. A path where we'll write our SAM/BAM/CRAM file.
ref_path: str. Path to the reference file. Required for CRAM file.
embed_ref: bool. Whether to embed the reference sequences in CRAM file.
Default is False.
header: A nucleus.SamHeader proto. The header is used both for writing
the header, and to control the sorting applied to the rest of the file.
"""
super(NativeSamWriter, self).__init__()
self._writer = sam_writer.SamWriter.to_file(
output_path,
ref_path.encode('utf8') if ref_path is not None else '', embed_ref,
header)
def write(self, proto):
self._writer.write(proto)
def __exit__(self, exit_type, exit_value, exit_traceback):
self._writer.__exit__(exit_type, exit_value, exit_traceback)
class SamWriter(genomics_writer.DispatchingGenomicsWriter):
"""Class for writing Read protos to SAM or TFRecord files."""
def _native_writer(self, output_path, **kwargs):
return NativeSamWriter(output_path, **kwargs)
class InMemorySamReader(object):
"""Python interface class for in-memory SAM/BAM/CRAM reader.
Attributes:
reads: list[nucleus.genomics.v1.Read]. The list of in-memory reads.
is_sorted: bool, True if reads are sorted.
"""
def __init__(self, reads, is_sorted=False):
self.replace_reads(reads, is_sorted=is_sorted)
def replace_reads(self, reads, is_sorted=False):
"""Replace the reads stored by this reader."""
self.reads = reads
self.is_sorted = is_sorted
def iterate(self):
"""Iterate over all records in the reads.
Returns:
An iterator over nucleus.genomics.v1.Read's.
"""
return self.reads
def query(self, region):
"""Returns an iterator for going through the reads in the region.
Args:
region: nucleus.genomics.v1.Range. The query region.
Returns:
An iterator over nucleus.genomics.v1.Read protos.
"""
# TODO: Add a faster query version for sorted reads.
return (
read for read in self.reads if utils.read_overlaps_region(read, region))
| true |
3590a5e88b26e4c5f11eb15c1690c331ddec2a59 | Python | sidpremkumar/PiBox | /PiBox-Client/PiBox-Daemon/PiBoxDaemon/Daemon/events/on_created.py | UTF-8 | 1,900 | 2.890625 | 3 | [] | no_license | import requests
from watchdog.events import DirCreatedEvent, FileCreatedEvent
import os
from urllib.parse import urljoin
from PiBoxDaemon.config import SERVER_URL, DIRECTORY
from PiBoxDaemon.Daemon import utils
def on_created(event):
# Extract our path
fullPath = os.path.relpath(event.src_path, DIRECTORY) # i.e. sid/somefolder/test.txt
if (type(event) == DirCreatedEvent):
# A Directory has been created
# Make a call to our server to create the folder
responseFolderCreated = requests.post(urljoin(SERVER_URL, "createFolder"), data={'path': fullPath})
if (responseFolderCreated.status_code != 200):
print("Error creating folder")
return
elif (type(event) == FileCreatedEvent):
# A File has been created
# Make a call to our server to check if the file exists/last modified time
responseFileLastModified = requests.get(urljoin(SERVER_URL, "retriveFileLastModified"), data={'path': fullPath})
if (responseFileLastModified.status_code == 400):
# The file does not exist. Grab the base path
files = {'file': open(event.src_path, 'rb')}
# Upload the file
utils.uploadFile(files, fullPath)
elif (responseFileLastModified.status_code == 200):
# We have the file uploaded, compare the timestamps to see if we need to reupload
timestamp = responseFileLastModified.json()['timestamp']
if (timestamp < os.path.getmtime(event.src_path)):
# We need to reupload. Grab the base path
files = {'file': open(event.src_path, 'rb')}
# Upload the file
utils.uploadFile(files, fullPath)
print(f"Uploaded/Updated {fullPath}") | true |
ae0359d6cdf7cd188e10de0c8e63cbf812e76000 | Python | NandhiniManohar22/python | /5-7.py | UTF-8 | 102 | 2.984375 | 3 | [] | no_license | nan=int(input())
nan1=list(map(int,input().split()))
if(len(nan1)==nan):
print(min(nan1),max(nan1))
| true |
d4b14dad76f3c9b2a458b9a9be967a3d33576e09 | Python | matty-boy79/DevNet_SAUI | /09 Umbrella Reporting/03_security_activity.py | UTF-8 | 1,938 | 3.203125 | 3 | [] | no_license |
import sys
import requests
import json
from datetime import datetime
API_KEY = "663a91da0a2545e6ba17acf83ef01b06"
API_SECRET = "4833be4f51ff4b398ebcbe144b9239c8"
def getSecurityActivity(start_time):
url = "https://reports.api.umbrella.com/v1/organizations/2353515/security-activity"
# params
params = {
'start': start_time
}
# do GET request for the domain status and category
req = requests.get(url, params=params, auth = (API_KEY, API_SECRET))
# error handling if true then the request was HTTP 200, so successful
if (req.status_code != 200):
print("An error has ocurred with the following code %s" % req.status_code)
sys.exit(0)
output = req.json()
print('{:^30}{:^20}{:^15}{:^20}{:^25}{:^10}'.format(
"Date Time",
"Origin Type",
"Origin Label",
"External IP",
"Destination",
"Action"
))
for item in output["requests"]:
origin_type = item['originType']
external_ip = item['externalIp']
destination = item['destination']
origin_label = item['originLabel']
action_taken = item['actionTaken']
datetime = item['datetime']
print('{:^30}{:^20}{:^15}{:^20}{:^25}{:^10}'.format(
datetime,
origin_type,
origin_label,
external_ip,
destination,
action_taken
))
def main():
# Print the menu
print("""
Umbrella - Retrieve Security Activity Report
ACME Inc, IT Security Department
""")
value = input(" Enter the Start Time in Unix-Epoch timestamp(https://www.epochconverter.com)\n"
" (Enter 0 to get all the Security Activities)\n"
" Start Time : ")
value = value.strip()
if not value:
sys.exit()
getSecurityActivity(value)
if __name__ == '__main__':
main()
| true |
0aa52b8d47422cea4cd0f754f7831702c70adb1f | Python | mhhoban/dukedoms.account_service | /account_service/shared/account_operations.py | UTF-8 | 2,223 | 2.578125 | 3 | [] | no_license | import json
from account_service.shared.db import get_new_db_session
from account_service.models.account import Account
from sqlalchemy.exc import SQLAlchemyError
from account_service.exceptions.account_service_exceptions import (
NoSuchAccountException
)
def check_account_id_exists(account_id):
"""
checks that given account id exists in db
"""
session = get_new_db_session()
try:
account = session.query(Account).filter(Account.id == account_id).first()
if account:
return True
else:
return False
except SQLAlchemyError:
raise SQLAlchemyError
finally:
session.close()
def retrieve_account_id_from_db(email):
"""
lookup and return a given account id from the database
"""
session = get_new_db_session()
try:
account = session.query(Account).filter(Account.email == email).first()
if account:
return account.id
else:
raise NoSuchAccountException
except SQLAlchemyError:
raise SQLAlchemyError
finally:
session.close()
def retrieve_account_email(account_id):
"""
retreives and returns account email for given account_id
"""
session = get_new_db_session()
try:
account = session.query(Account).filter(Account.id == account_id).first()
if account:
return account.email
else:
raise NoSuchAccountException
except SQLAlchemyError:
raise SQLAlchemyError
finally:
session.close()
def game_invite(account_id=None, game_id=None):
"""
invites given account to given game
Returns True if success, False if failure
"""
session = get_new_db_session()
try:
account = session.query(Account).filter(Account.id == account_id).first()
if not account:
return False
game_invitations = json.loads(account.game_invitations)
game_invitations['game_invitation_ids'].append(game_id)
account.game_invitations = json.dumps(game_invitations)
session.commit()
return True
except SQLAlchemyError:
raise SQLAlchemyError
finally:
session.close()
| true |
5409d400e741221b8ed0a92dcea736b40ba40dcc | Python | sonnbon/coinwar | /coinwar.py | UTF-8 | 11,386 | 4.3125 | 4 | [] | no_license | # HW6: Coin War
# Connor Williams 2021
import sys
import random
# This program plays the game, Coin War. You can select your army
# manually, randomly, or by reading a text file from the command
# line. The program then returns a winner or a tie.
# Every army in this game has exactly five coins; initial_size is the
# shared iteration range used when building and walking the armies.
initial_size = range(5)

# Mutable module-level state: each player's current army, stored as a
# list of single-character coin faces ('H'/'T'), filled in later by
# random_select() or position_select().
player1_army = []
player2_army = []

# The two faces of a coin, grouped into a list so random.choice() can
# pick between them.
heads = 'H'
tails = 'T'
coin_side = [heads, tails]

# Coins captured from the opponent during battle (prisoners1 holds
# Player 1's captures, prisoners2 holds Player 2's).
prisoners1 = []
prisoners2 = []
# Function defined for user to choose whether to select player
# armies randomly or "positionally" (manually or by reading a
# text file on the command line).
def army_selection():
    """Read the selection mode from standard input and dispatch on it.

    Consumes lines from stdin (typed interactively or piped in from a
    text file on the command line) until one reads 'random' or 'position'
    (case-insensitive). 'random' hands off to random_select(), 'position'
    to position_select(). Blank lines are skipped silently; any other
    non-blank line prompts the user to try again.
    """
    for raw_line in sys.stdin:
        choice = raw_line.rstrip()
        if not choice:
            # Skip blank lines without re-prompting.
            continue
        mode = choice.lower()
        if mode == "random":
            print("Random selected.\n")
            random_select()
            return
        if mode == "position":
            print("Position selected.\n")
            position_select()
            return
        # Unrecognized keyword: ask again and keep reading input.
        print("Try again. Enter random or position.")
# Function defined for user to "positionally" choose each player's
# army (manually or by reading a text file on the command line).
def position_select():
# Player 1 and Player 2 are assigned users input - or
# the next two lines of a text file are inputted from
# the command line - to their respective armies
# (position1 for Player 1 and position2 for Player 2).
position1 = input()
position2 = input()
# Both positions entered get a list length split at any spaces
# assigned to these variables (army_size1 for Player 1
# and army_size2 for Player 2). This is to check that the list
# length is 1.
army_size1 = len(position1.split(' '))
army_size2 = len(position2.split(' '))
# Game start message and players starting positions printed.
print("------------------ Begin Battle ------------------\n")
print(position1.upper())
print(position2.upper())
# For loop appends every nth index of Player 1's and Player 2's
# respective inputted position to their respective armies.
for n in initial_size:
# If the positions entered are not blank.
if position1 != "" and position2 != "":
# If the lengths of the lists created from the positions
# are also equal to 1.
if army_size1 == 1 and army_size2 == 1:
player1_army.append(position1[n].upper())
player2_army.append(position2[n].upper())
# Otherwise, the positions entered are blank
# making the armies incorrectly positioned.
else:
break
# Function defined to randomly select armies for both
# Player 1 and Player 2.
def random_select():
# For loop creates a random army for Player 1 and Player 2
# of range(5) (initial_size).
for n in initial_size:
# coin_side list element randomly chosen and
# appended to each players army.
player1_army.append(random.choice(coin_side))
player2_army.append(random.choice(coin_side))
# Game start message and players starting positions printed.
print("------------------ Begin Battle ------------------\n")
print(''.join(player1_army))
print(''.join(player2_army))
# Function defined to take 'army' as a parameter, plays out the
# Coin War game up until every possible comparative iteration
# has been made, and finalizes players army and prisoner lists.
def coinwar(army):
# While loop true as long as army lists are not empty.
while player1_army != [] and player2_army != []:
# Alphabetical comparison. 'H' is checked by being
# less than 'T' alphabetically. Alphabetically less than
# means Player 1 wins the battle round.
if player1_army[0] < player2_army[0]:
# Player 1 first takes Player 2's 0th army list index
# and adds it to the end of Player 1's army list, then
# takes Player 1's 0th army list index and moves it to
# the end of Player 1's army list.
player1_army.append(player2_army.pop(0))
player1_army.append(player1_army.pop(0))
# As long as Player 2's prisoners list is not empty,
# for loop iterates through the prisoners max possible
# list length (initial_size) and appends each prisoner
# to the end of Player 1's army list.
for n in initial_size:
if prisoners2 != []:
player1_army.append(prisoners2.pop(0))
# As long as Player 1's prisoners list is not empty,
# for loop iterates through the prisoners max possible
# list length (initial_size) and appends each prisoner
# to the end of Player 1's army list.
for n in initial_size:
if prisoners1 != []:
player1_army.append(prisoners1.pop(0))
# Alphabetical comparison. 'H' is checked by being
# less than 'T' alphabetically. Alphabetically less than
# means Player 2 wins the battle round.
elif player2_army[0] < player1_army[0]:
# Player 2 first takes Player 1's 0th army list index
# and adds it to the end of Player 2's army list, then
# takes Player 2's 0th army list index and moves it to
# the end of Player 2's army list.
player2_army.append(player1_army.pop(0))
player2_army.append(player2_army.pop(0))
# As long as Player 1's prisoners list is not empty,
# for loop iterates through the prisoners max possible
# list length (initial_size) and appends each prisoner
# to the end of Player 2's army list.
for n in initial_size:
if prisoners1 != []:
player2_army.append(prisoners1.pop(0))
# As long as Player 2's prisoners list is not empty,
# for loop iterates through the prisoners max possible
# list length (initial_size) and appends each prisoner
# to the end of Player 2's army list.
for n in initial_size:
if prisoners2 != []:
player2_army.append(prisoners2.pop(0))
# Alphabetical comparison. Alphabetically equal
# means the players tie during battle round.
elif player1_army[0] == player2_army[0]:
# If the length of either players army list is less
# than 2, then only one army element can be popped and
# appended to the prisoners list rather than two elements.
if len(player1_army) < 2 or len(player2_army) < 2:
# Player 1's 0th army index gets popped and
# appended to prisoner1's list.
prisoners1.append(player1_army.pop(0))
# Player 2's 0th army index gets popped and
# appended to prisoner2's list.
prisoners2.append(player2_army.pop(0))
break
else:
# For loop pops and appends from each players 0th army
# list index to their respective prisoner lists, twice.
for i in range(2):
# Player 1's 0th army index gets popped and
# appended to prisoner1's list.
prisoners1.append(player1_army.pop(0))
# Player 2's 0th army index gets popped and
# appended to prisoner2's list.
prisoners2.append(player2_army.pop(0))
# Otherwise, assert False to check if their is any
# improper code.
else:
assert False
# Function defined to take 'result' as a parameter and calculate
# which player has an army leftover or, if no army elements
# remain, which player has more coins with side 'heads' in
# prisoners list, determining the winner of Coin War.
def game_result(result):
# Variables set to 0 for calculating how many 'heads' are
# in each respective prisoner list.
nheads_prisoners1 = 0
nheads_prisoners2 = 0
# For loop checks that a list is not empty and how many
# 'heads' are in each list, adding the number to their
# respective variable counters.
for i in initial_size:
if prisoners1 != [] and len(prisoners1) - 1 >= i:
if prisoners1[i] == heads:
nheads_prisoners1 = nheads_prisoners1 + 1
if prisoners2 != [] and len(prisoners2) - 1 >= i:
if prisoners2[i] == heads:
nheads_prisoners2 = nheads_prisoners2 + 1
# If only Player 1 has an army, then Player 1 wins.
if player1_army != [] and player2_army == []:
return 1
# Else If only Player 2 has an army, then Player 2 wins.
elif player2_army != [] and player1_army == []:
return 2
# Else If neither player has an army and Player 1 has
# more 'heads' in their prisoners list, then
# Player 1 wins.
elif nheads_prisoners1 > nheads_prisoners2:
return 1
# Else If neither player has an army and Player 2 has
# more 'heads' in their prisoners list, then
# Player 2 wins.
elif nheads_prisoners2 > nheads_prisoners1:
return 2
# Else If one or both armies are returned empty during a
# manually entered game, the prisoners lists will also be
# empty and the game will not operate correctly. This
# prevents the program from calling it a tie.
elif prisoners1 == [] and prisoners2 == []:
return "Error: Teams not entered correctly. Game Over."
# Otherwise it must be a tie.
else:
return 0
# Welcome message and prompt printed.
print("************** Welcome to Coin Wars **************\n")
print("How would you like to select each player's army?")
print("Random or Position?")
# game_result() function called with coinwar() function as its
# 'result' parameter called with army_selection() function as its
# 'army' parameter, all assigned to coinwar_game.
coinwar_game = game_result(coinwar(army_selection()))
# Print coinwar_game to see which result is returned.
# (Tie == 0, Player 1 wins == 1, Player 2 wins == 2)
print(coinwar_game)
# Exiting message printed.
print("\n****************** Exiting Game ******************")
| true |
807e9f495c8e3d06f3816232665fe4ae574f26dc | Python | Abhishek19009/Algorithms_and_Data_structures | /Miscellaneous/Finding maximum sum subarray.py | UTF-8 | 588 | 4.0625 | 4 | [] | no_license | '''
The best way to perform this operation in linear time complexity is to add subsequent
terms at each index of the array, and compare it to element corresponding to current
index. Find the max of both these.
Then use another variable ('best' in this case) to compare the current and previous sums.
'''
def FindMax(arr):
sum = 0
best = 0
for i in range(len(arr)):
sum = max(arr[i], sum+arr[i])
best = max(sum, best)
return best
if __name__ == "__main__":
arr = list(map(int, input().rstrip().split(" ")))
result = FindMax(arr)
print(result)
| true |
3257e3b2b4013feab870d44d50d304bd8b5207a8 | Python | durbanie/ProjectEuler | /src/Common/stopwatch.py | UTF-8 | 1,404 | 3.6875 | 4 | [] | no_license | '''
My stopwatch implementation.
'''
#from datetime import datetime
import time
class StopWatch:
'''
Stop watch used for timing algorithms.
'''
__start = 0
@classmethod
def start(cls):
'''
Starts the clock.
'''
cls.__start = time.clock()
@classmethod
def get_time(cls):
'''
Gets the time as a timespan object.
'''
return time.clock() - cls.__start
@classmethod
def print_time(cls):
'''
Prints the time difference, usually in milliseconds, but in
microseconds if less than 1 ms.
'''
_td = cls.get_time()
_tdms = _td * 1000
if (_tdms > 1000):
print round(_tdms) / 1000, "s"
elif (int(_tdms) > 0):
print int(round(_tdms)), "ms"
else:
cls.print_time_us(_td)
@classmethod
def print_time_us(cls, td = None):
'''
Prints the time in microseconds.
'''
if (td):
_tdms = td * 1000000
else:
_td = cls.get_time()
_tdms = _td * 1000000
print int(round(_tdms)), "us"
def main():
print "start"
StopWatch.start()
for i in range(0, 100000000):
if i % 10000000 == 0:
print i
StopWatch.print_time()
print "done."
if __name__ == "__main__": main() | true |
1304a0602e2789a4f170c9d0946c231659cb1e5d | Python | Stealthbird97/PHYS2022 | /5.7.py | UTF-8 | 946 | 3.265625 | 3 | [] | no_license | from numpy import zeros, random
m1=zeros(100, int)
m2=zeros(100, int)
m3=zeros(100, int)
m4=zeros(100, int)
for i in range(1000):
a=random.normal();b=random.normal();c=random.normal();d=random.normal()
print a, b, c, d
avg1=int(10/1*a)
avg2=int(10/2*(a+b))
avg3=int(10/3*(a+b+c))
avg4=int(10/4*(a+b+c+d))
m1[avg1]=m1[avg1]+1
m2[avg2]=m2[avg2]+1
m3[avg3]=m3[avg3]+1
m4[avg4]=m4[avg4]+1
print m1, m2, m3, m4
from pylab import *
subplot(4,1,1)
title("1 Random Numbers")
bar(arange(0,100,0.1),m1,width=0.1)
ylabel("Frequency")
subplot(4,1,2)
title("2 Random Numbers")
bar(arange(0,100,0.1),m2,width=0.1)
ylabel("Frequency")
subplot(4,1,3)
title("3 Random Numbers")
bar(arange(0,100,0.1),m3,width=0.1)
ylabel("Frequency")
subplot(4,100,4)
title("4 Random Numbers")
bar(arange(0,1,0.1),m4,width=0.1)
ylabel("Frequency")
xlabel("Average of Random Numbers")
subplots_adjust(hspace=0.6)
savefig("5_7.png", dpi=300)
| true |
0d7e61bd2ec504c89378612db5a82db49488d3f1 | Python | bijitchakraborty12/MyProjects01 | /20180609/python_lines_04.py | UTF-8 | 144 | 2.859375 | 3 | [
"MIT"
] | permissive |
f=open('C:\\Python Practice\\MyProjects01\\MyProjects01\\20180609\\poem_01.txt')
for k in f.readlines():
print(k.strip().split())
f.close()
| true |
bb7963b48e6a342a7ce8b50e1975e4863509aa12 | Python | mickey1233/maxmum_subarray | /maxmum_subarray.py | UTF-8 | 560 | 3.265625 | 3 | [] | no_license | def maxsubarray(nums):
maxmun = max(nums)
if maxmun < 0:
return maxmun
maxmun = 0
temp = 0
for i in nums:
if temp + i < 0:
temp = 0
if temp + i >= 0:
temp = temp + i
if temp > maxmun:
maxmun = temp
return maxmun
def main():
x = []
x.append(maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
x.append(maxsubarray([1]))
x.append(maxsubarray([5, 4, -1, 7, 8]))
print(x)
if __name__ == "__main__":
main()
| true |
79a9a834169b43c6e851811fb2ebc4611eba374a | Python | miikanissi/python_course_summer_2020 | /week5_nissi_miika/week5_ass1_2_3_nissi_miika.py | UTF-8 | 802 | 4.1875 | 4 | [] | no_license | import random
randomArray = []
for i in range(30):
randomArray.append(random.randint(1,100))
def Average(lst):
return sum(lst)/len(lst)
print("Sum of 30 random numbers between 1-100: ", sum(randomArray))
print("Average of 30 random numbers between 1-100: ", round(Average(randomArray), 2))
print("Largest number in array: ", max(randomArray))
while True:
finder = input("Enter a number (1-100), to find it in array, 0 to exit: ")
try:
x = int(finder)
if x < 0 or x > 100: raise ValueError("Number not between 1-100")
if x == 0: break
try:
print(x, " was found in array at index: ", randomArray.index(x))
except ValueError:
print(x, " was not found in array.")
except ValueError:
print("Try entering again")
| true |
2a99fc2ae49770a2866848d3946dd15a1df6036e | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/arc046/B/3814567.py | UTF-8 | 111 | 3.078125 | 3 | [] | no_license | n=int(input())
a,b=map(int,input().split())
print(["Aoki","Takahashi"][(a==b and n%(a+1)!=0) or a>b or n<=a]) | true |
bb3d88c2ee13f8f94147ebf74c492ebe62d57f25 | Python | Onikore/KVStorage | /kvstorage/defrag.py | UTF-8 | 2,059 | 2.875 | 3 | [] | no_license | from pathlib import Path
from typing import NoReturn
from kvstorage.consts import KEY_BLOCK_LEN, \
KEY_LEN, VALUE_OFFSET_LEN, KEY_OFFSET_LEN
class Defragmentation:
def __init__(self, key_file: str, value_file: str):
self.key_file = key_file
self.value_file = value_file
self.data = {}
@staticmethod
def to_bytes(value: int, length: int) -> bytes:
return int.to_bytes(value, length, byteorder='big')
@staticmethod
def from_bytes(value: bytes) -> int:
return int.from_bytes(value, byteorder='big')
def prepare(self) -> NoReturn:
with open(self.key_file, 'rb') as f:
while True:
packet = f.read(KEY_BLOCK_LEN)
key = packet[:KEY_LEN]
offset = self.from_bytes(packet[KEY_LEN:])
if key == b'':
print('Скан закончен')
break
with open(self.value_file, 'rb') as a:
a.seek(offset)
length = self.from_bytes(a.read(VALUE_OFFSET_LEN))
value = a.read(length)
self.data[key] = value
def start(self) -> NoReturn:
print('Начало дефрагментации')
self.prepare()
key_path = Path(self.key_file)
key_path.unlink(True)
val_path = Path(self.value_file)
val_path.unlink(True)
temp_key = Path(self.key_file)
temp_key.touch()
temp_key.rename(self.key_file)
temp_val = Path(self.value_file)
temp_val.touch()
temp_val.rename(self.value_file)
for i in self.data:
with open(temp_val, 'ab') as f:
pos = f.tell()
f.write(self.to_bytes(len(self.data[i]), VALUE_OFFSET_LEN))
f.write(self.data[i])
with open(temp_key, 'ab') as f:
f.write(i)
f.write(self.to_bytes(pos, KEY_OFFSET_LEN))
print('Дефрагментация завершена')
| true |
57919a046e796ac0e2433986481c3fcd1d96e23a | Python | shardsofblue/pinellas-scraper | /python/utils.py | UTF-8 | 2,795 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 11:26:41 2020
@author: shardsofblue
"""
# Built and tested in Spyder via Anaconda
from os import path
import csv
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup # HTML parsing
# Lower the case, strip blanks, strip multiple spaces, and remove linebreaks
def clean(str):
str = str.lower()
str = str.strip()
str = " ".join(str.split())
str = str.replace('\n', ' ')
return str
# Pull and return all html as a string from the current page of an active driver
# Requires the clean() function
def pull_all_html(driver):
html = BeautifulSoup(driver.page_source, 'lxml')
return(clean(str(html)))
# Save all html from the current page of an active driver
# Requires the pull_all_html() function
def save_all_html(driver, file_name = 'source.html'):
with open(file_name, 'w') as f:
f.write(pull_all_html(driver))
# Select options from a listbox
# Takes a driver, the id of the containing listbox, and a list of desired option display texts
def fast_multiselect(driver, element_id, labels):
try:
select = Select(driver.find_element_by_id(element_id))
select.deselect_all()
for label in labels:
select.select_by_visible_text(label)
except:
print("Error.")
# Check for sign on error
def sign_on_error():
try:
error_message = WebDriverWait(driver, short_timeout).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="Login"]/table/tbody/tr[2]/td/table/tbody/tr[1]/td[2]/table/tbody/tr[7]/td/p')))
if error_message == "Sign on failed. Please submit an email to publicview@mypinellasclerk.org for assistance.":
return(True)
else:
return(False)
except:
return(False)
# Check whether at login screen (returns boolean)
# Useful to check for auto-logout (system inconsistently returns users to login or home screens)
def is_login_screen(driver):
try:
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.NAME, 'SignOn')))
return(True)
except:
return(False)
# Check whether at home screen and logged out (returns boolean)
# Useful to check for auto-logout (system inconsistently returns users to login or home screens)
# FLAG not locating element
def is_home_screen(driver):
try:
WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.LINK_TEXT, 'Registered User Login')))
return(True)
except:
return(False) | true |
31ce4789a55b3d4a1533fd240d2592c952f76d5e | Python | ngreenlaw/simpleftp | /ftclient.py | UTF-8 | 4,503 | 2.90625 | 3 | [] | no_license | #Nathan Greenlaw
#CS 372
#ONID: greenlan
#chatserve.py
from socket import *
import sys
import os
def sendMessage(cSocket, msg, msgLength):
    """Send exactly msgLength bytes of msg over cSocket.

    A single send() call may transmit only part of the buffer, so keep
    sending the unsent tail until everything has gone out.
    (Pattern from https://docs.python.org/2/howto/sockets.html)

    Raises RuntimeError if the peer closes the connection mid-send.
    """
    sent_so_far = 0
    while sent_so_far < msgLength:
        n = cSocket.send(msg[sent_so_far:])
        if n == 0:
            raise RuntimeError("socket connection broken")
        sent_so_far += n
def recvMessage(cSocket, recvLength):
    """Receive exactly recvLength bytes/characters from cSocket.

    Loops until the requested amount has arrived, reading at most 2048
    bytes per recv() call.
    (Pattern from https://docs.python.org/2/howto/sockets.html)

    Raises RuntimeError if the peer closes the connection before
    recvLength bytes were received.
    """
    chunks = []
    bytes_recd = 0
    while bytes_recd < recvLength:
        chunk = cSocket.recv(min(recvLength - bytes_recd, 2048))
        # FIX: the original tested `chunk == ''`, which is always False for
        # the bytes objects a Python 3 socket returns, so a closed
        # connection spun forever instead of raising.  An empty chunk of
        # either type is falsy, so test truthiness instead.
        if not chunk:
            raise RuntimeError("Socket connection Broken")
        chunks.append(chunk)
        bytes_recd = bytes_recd + len(chunk)
    # NOTE(review): under Python 3 recv() yields bytes, so this str join
    # assumes a Python-2-style str socket -- confirm before porting.
    return ''.join(chunks)
def recvMessageAt(cSocket):
    """Read one character at a time until two '@' sentinel characters have
    been seen, then return the accumulated text with the '@' markers
    stripped out.

    The ft protocol terminates length fields with '@@', so this is used to
    read a variable-width number off the control connection.

    Raises RuntimeError if the peer closes the connection before both
    sentinels arrive.
    """
    chunks = []
    at_count = 0
    while at_count < 2:
        chunk = cSocket.recv(1)
        # FIX: the original tested `chunk == ''`, which is always False for
        # the bytes objects a Python 3 socket returns, so a closed
        # connection spun forever instead of raising.
        if not chunk:
            raise RuntimeError("Socket connection Broken")
        if '@' in chunk:
            at_count += 1
        chunks.append(chunk)
    # NOTE(review): under Python 3 recv() yields bytes, so the '@'
    # membership test and str join assume a Python-2-style str socket --
    # confirm before porting.
    return ''.join(chunks).replace('@', '')
def sendCommand(cName, lName, sPort, com, fName, cPort):
    """Open a TCP control connection to the ft server, send one command,
    and display/save the response.

    cName: this client's name (identification only; not sent)
    lName: server host name to connect to
    sPort: server port number
    com:   command string -- '-l' (list directory) or '-g' (get file)
    fName: file name for '-g', or None for '-l'
    cPort: client-side data port (accepted but unused by this implementation)

    Protocol (each length field is '@@'-terminated on the wire):
      1. send command length, 2. await "ready", 3. send command,
      4. receive response length, 5. send "ready", 6. receive response.
    """
    clientSocket = socket(AF_INET, SOCK_STREAM);
    serverAddress = (lName, sPort);
    clientSocket.connect(serverAddress);
    #print(cName, lName, sPort, com, fName, cPort);
    # Build the full command ('-g filename' when a file name was given) and
    # compute its length for the length-prefix handshake below.
    if fName == None:
        comLength = len(com);
    else:
        cF = com+" "+fName;
        comLength = len(cF);
    # The '@@' sentinel tells the server where the length digits end.
    cLen = str(comLength)+'@'+'@';
    #print(cLen, len(cLen));
    #First send the command length
    sendMessage(clientSocket, cLen, (len(cLen)));
    #receive the ready from the server
    ready = "ready";
    msgR = recvMessage(clientSocket, len(ready));
    #print(msgR);
    #send the command
    if fName == None:
        sendMessage(clientSocket, com, comLength);
    else:
        sendMessage(clientSocket, cF, comLength);
    #receive the length of the response (read up to the '@@' sentinel)
    rLength = int(recvMessageAt(clientSocket));
    #print(rLength);
    #send a ready
    sendMessage(clientSocket, ready, len(ready));
    #receive response and display based on code
    responseMessage = recvMessage(clientSocket, rLength);
    if com == '-l': #directory
        print("Receiving directory structure from " + lName + ":" + str(sPort));
        print(responseMessage);
    elif com == '-g' and responseMessage != "File does Not Exist Error": #get file
        cwd = os.getcwd();
        #handle duplicate names
        #found here: https://stackoverflow.com/questions/12375612/avoid-duplicate-file-names-in-a-folder-in-python
        path = cwd+'/'+fName;
        uniq = 1;
        finalFileName = fName;
        print("Receiving file " +fName);
        # Prefix an incrementing counter ("1_", "2_", ...) until the name
        # no longer collides with an existing file in the working directory.
        while os.path.isfile(path):
            path = cwd+'/'+str(uniq)+"_"+fName;
            finalFileName = str(uniq)+"_"+fName;
            uniq+=1;
        #Create the text file
        text_file = open(finalFileName, "w");
        text_file.write(responseMessage);
        text_file.close();
        print("Wrote to file: " +finalFileName);
    else: #error Message
        print(responseMessage);
    #close the socket
    clientSocket.close();
#Main function from Lecture 15 slide 9 Python TCP server
def main():
    """Parse the command line and dispatch one request to the ft server.

    Usage:
        ftclient <server_host> <server_port> -l <data_port>
        ftclient <server_host> <server_port> -g <filename> <data_port>

    The two accepted forms were previously handled by two near-identical
    branches; the shared argument parsing and length guard are now done
    once, with only the optional file name differing.
    """
    numArgs = len(sys.argv)
    #wrong number of arguments
    if numArgs != 6 and numArgs != 7:
        print("Incorrect Arguments entered")
        return
    clientName = str(sys.argv[1])     #ftclient
    locationName = str(sys.argv[2])   #flip
    serverPort = int(sys.argv[3])     #port number for ft server
    command = str(sys.argv[4])        #argument given (-l or -g)
    #filename only present in the 7-argument (-g) form
    fileName = str(sys.argv[5]) if numArgs == 7 else None
    clientPort = int(sys.argv[numArgs - 1])  #port number for the client
    # Guard against a command that would overflow the server's 2048-byte
    # receive buffer.
    if len(command) > 2048:
        print("Too long of command")
        return
    sendCommand(clientName, locationName, serverPort, command, fileName, clientPort)
    return

if __name__ == "__main__":
    main()
| true |
da7fae2ade9db4f60353954841359bfbe105466d | Python | medifle/python_6.00.1x | /PSet6/ps6_recursion.py | UTF-8 | 2,124 | 4.53125 | 5 | [
"MIT"
] | permissive | # 6.00x Problem Set 6
#
# Part 2 - RECURSION
#
# Problem 3: Recursive String Reversal
#
def reverseString(aStr):
    """
    Given a string, recursively returns a reversed copy of the string.
    For example, if the string is 'abc', the function returns 'cba'.
    The only string operations you are allowed to use are indexing,
    slicing, and concatenation.

    aStr: a string
    returns: a reversed string
    """
    # Base case: the empty string and a single character are their own
    # reversal.  FIX: the original only tested len == 1, so the empty
    # string fell through to aStr[-1] and raised IndexError.
    if len(aStr) <= 1:
        return aStr
    # Move the last character to the front and recurse on the rest.
    return aStr[-1] + reverseString(aStr[:-1])
#
# Problem 4: X-ian
#
def x_ian(x, word):
    """
    Given a string x, returns True if all the letters in x are
    contained in word in the same order as they appear in x.

    >>> x_ian('eric', 'meritocracy')
    True
    >>> x_ian('eric', 'cerium')
    False
    >>> x_ian('john', 'mahjong')
    False

    x: a string
    word: a string
    returns: True if word is x_ian, False otherwise

    FIX: the original compared word.index() of the first occurrences only,
    which (a) raised ValueError when a letter of x was absent from word,
    (b) raised IndexError for len(x) < 2, and (c) returned wrong answers
    when a letter of x must match a later, repeated occurrence.  This
    version is a standard greedy subsequence check.
    """
    # Base case: every letter of x has been matched.
    if not x:
        return True
    # Match x[0] against its first occurrence in word; the remaining
    # letters must then appear, in order, strictly after that position.
    for i, ch in enumerate(word):
        if ch == x[0]:
            return x_ian(x[1:], word[i + 1:])
    # x[0] never appears in word, so word cannot be x-ian.
    return False
#
# Problem 5: Typewriter
#
def insertNewlines(text, lineLength):
    """
    Given text and a desired line length, wrap the text as a typewriter would.
    Insert a newline character ("\n") after each word that reaches or exceeds
    the desired line length.

    text: a string containing the text to wrap.
    line_length: the number of characters to include on a line before wrapping
        the next word.
    returns: a string, with newline characters inserted appropriately.

    FIX: the original chopped the text every lineLength characters,
    splitting words in half and appending a stray trailing newline when
    len(text) was an exact multiple of lineLength -- contradicting the
    documented "after each word" behaviour implemented here.
    """
    # Base case: the remaining text fits on one line.
    if len(text) <= lineLength:
        return text
    # The word that reaches/exceeds lineLength ends at the first space at
    # or after that position; break the line there.
    break_at = text.find(' ', lineLength)
    if break_at == -1:
        # One long unbroken word remains -- nothing sensible to wrap.
        return text
    # Replace the breaking space with a newline and wrap the rest.
    return text[:break_at] + '\n' + insertNewlines(text[break_at + 1:], lineLength)
7f3cad1db6ae6c40a4e32697f00c34a5272c5975 | Python | christianb93/async-web-container | /test/test_protocol_http.py | UTF-8 | 16,189 | 2.625 | 3 | [
"MIT"
] | permissive | import warnings
import asyncio
import pytest
import unittest.mock
import httptools
import aioweb.protocol
###############################################
# Some helper classes
###############################################
class ParserHelper:
    """Callback target handed to httptools' HttpResponseParser.

    Collects the body chunks the parser delivers via on_body so tests can
    inspect the parsed response payload afterwards.
    """

    def __init__(self):
        self._headers = None
        self._body = None

    def on_body(self, data):
        """Append one chunk of response body data, lazily creating the buffer."""
        if self._body is None:
            self._body = bytearray()
        self._body += data
class DummyTransport:
    """In-memory stand-in for an asyncio transport.

    Records the last payload passed to write(), tracks the closing flag,
    and can be armed (via fail_next) so that the next write() raises.
    """

    def __init__(self):
        self._data = b""
        self._is_closing = False
        self._fail_next = False

    def write(self, data):
        """Store *data*, or raise once if a failure was armed."""
        if not self._fail_next:
            self._data = data
            return
        # Consume the armed failure so only one write fails.
        self._fail_next = False
        raise BaseException()

    def is_closing(self):
        """Report whether close() has been called."""
        return self._is_closing

    def close(self):
        """Mark the transport as closing; no further effect."""
        self._is_closing = True

    def fail_next(self):
        """Arm the transport so the next write() raises BaseException."""
        self._fail_next = True
class DummyContainer:
    """Minimal request-handler container used in place of the real aioweb
    container.

    Records the request it was given and returns the canned body b"abc".
    Can be configured to raise a stored exception once (set_exception) or
    to return no response at all (_no_response).
    """

    def __init__(self):
        self._request = None
        self._handle_request_called = False
        self._exc = None
        self._no_response = False

    async def handle_request(self, request):
        """Record *request*; raise a stored exception once, return None
        when no response is configured, else return b"abc"."""
        self._handle_request_called = True
        self._request = request
        if self._no_response:
            return None
        if self._exc is not None:
            # Raise the stored exception exactly once.
            pending, self._exc = self._exc, None
            raise pending
        return b"abc"

    def set_exception(self, exc):
        """Store *exc* to be raised by the next handle_request() call."""
        self._exc = exc
@pytest.fixture
def transport():
    """Provide a fresh DummyTransport test double per test."""
    return DummyTransport()

@pytest.fixture
def container():
    """Provide a fresh DummyContainer test double per test."""
    return DummyContainer()
##############################################################
# These test cases test individual callbacks
##############################################################
def test_on_header():
    """on_header() must record every header pair and leave the connection
    in the HEADER state."""
    proto = aioweb.protocol.HttpProtocol(container=None, loop=unittest.mock.Mock())
    for name, value in ((b"Host", b"127.0.0.1"), (b"A", b"B")):
        proto.on_header(name, value)
    collected = proto.get_headers()
    assert collected.get("Host") == b"127.0.0.1"
    assert collected.get("A") == b"B"
    assert proto.get_state() == aioweb.protocol.ConnectionState.HEADER
def test_on_headers_complete():
    """Completing the headers must move the state machine to BODY and
    schedule a request object on the internal queue."""
    with unittest.mock.patch("aioweb.protocol.httptools.HttpRequestParser") as mock:
        with unittest.mock.patch("aioweb.protocol.asyncio.Queue") as Queue:
            protocol = aioweb.protocol.HttpProtocol(container=None, loop=unittest.mock.Mock())
            #
            # Simulate data to make sure that the protocol creates a parser
            #
            protocol.data_received(b"X")
            protocol.on_header(b"Host", b"127.0.0.1")
            protocol.on_headers_complete()
            queue = Queue.return_value
            #
            # Verify the state
            #
            assert protocol.get_state() == aioweb.protocol.ConnectionState.BODY
            #
            # Check that we have added something to the queue
            #
            queue.put_nowait.assert_called()
def test_on_message_complete():
    """on_message_complete() must leave the connection in the PENDING state."""
    with unittest.mock.patch("aioweb.protocol.httptools.HttpRequestParser"):
        protocol = aioweb.protocol.HttpProtocol(container=None, loop=unittest.mock.Mock())
        protocol.on_message_complete()
        #
        # Verify the state
        #
        assert protocol.get_state() == aioweb.protocol.ConnectionState.PENDING
##############################################################
# Test some error cases
##############################################################
#
# Transport is already closing when we try to write a response
#
def test_transport_is_closing(transport):
    """If the transport is already closing, the worker coroutine must
    finish (StopIteration) without attempting to write a response."""
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol = aioweb.protocol.HttpProtocol(container=None, loop=unittest.mock.Mock())
        protocol.connection_made(transport)
        # connection_made schedules the worker loop; grab the coroutine it
        # handed to create_task so we can drive it manually.
        coro = mock.call_args.args[0]
        #
        # Close transport
        #
        transport.close()
        #
        # Simulate data to make sure that the protocol creates a parser
        #
        request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3

XYZ'''.replace(b'\n', b'\r\n')
        protocol.data_received(request)
        #
        # We now have added a request object to the queue. Invoke the
        # worker loop. This should return as the transport is already closed
        #
        raised = False
        try:
            coro.send(None)
        except StopIteration:
            raised = True
        assert raised
#
# Write into transport fails
#
def test_transport_fails(container, transport):
    """A write error raised by the transport must be tolerated: the worker
    still invokes the handler and keeps running."""
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol = aioweb.protocol.HttpProtocol(container=container, loop=unittest.mock.Mock())
        protocol.connection_made(transport)
        # Grab the worker coroutine scheduled via create_task.
        coro = mock.call_args.args[0]
        #
        # Ask the transport to raise an error
        #
        transport.fail_next()
        #
        # Simulate data to make sure that the protocol creates a parser
        #
        request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3

XYZ'''.replace(b'\n', b'\r\n')
        protocol.data_received(request)
        #
        # We now have added a request object to the queue. Invoke the
        # worker loop which should proceed right into our handler but
        # ignore the error
        #
        coro.send(None)
        assert container._request is not None
#
# Handler returns not a sequence of bytes
#
def test_handler_returntypemismatch(container, transport):
    """If the handler returns something other than bytes (here: None), the
    worker must still complete the request without raising."""
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol = aioweb.protocol.HttpProtocol(container=container, loop=unittest.mock.Mock())
        protocol.connection_made(transport)
        # Grab the worker coroutine scheduled via create_task.
        coro = mock.call_args.args[0]
        #
        # Ask the handler to return None
        #
        container._no_response = True
        #
        # Simulate data
        #
        request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3

XYZ'''.replace(b'\n', b'\r\n')
        protocol.data_received(request)
        #
        # We now have added a request object to the queue. Invoke the
        # worker loop which should proceed right into our handler
        #
        coro.send(None)
        assert container._request is not None
#
# Coroutine is cancelled while we are waiting for a new entry in the queue
#
def test_coroutine_cancelled_waitingforqueue(transport):
    """Cancelling the worker while it waits on the (empty) request queue
    must propagate the CancelledError out of the coroutine."""
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol = aioweb.protocol.HttpProtocol(container=None, loop=unittest.mock.Mock())
        protocol.connection_made(transport)
        # Grab the worker coroutine scheduled via create_task.
        coro = mock.call_args.args[0]
        #
        # Invoke the worker loop. The loop should then wait on the queue
        #
        coro.send(None)
        #
        # Now simulate that the task is cancelled. In this case, the event loop
        # would throw a CancelledError into the coro, so we do this as well
        #
        raised = False
        try:
            coro.throw(asyncio.exceptions.CancelledError())
        except asyncio.exceptions.CancelledError:
            raised = True
        assert raised
#
# Coroutine is cancelled while we are waiting for the handler
#
def test_coroutine_cancelled_waitingforbody(container, transport):
    """Cancelling the worker while the handler awaits the (incomplete)
    request body must propagate the CancelledError out of the coroutine."""
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol = aioweb.protocol.HttpProtocol(container=container, loop=unittest.mock.Mock())
        protocol.connection_made(transport)
        # Grab the worker coroutine scheduled via create_task.
        coro = mock.call_args.args[0]
        #
        # Invoke the worker loop. The loop should then wait on the queue
        #
        coro.send(None)
        #
        # Simulate data to make sure that the protocol creates a parser.
        # Note the body is only 1 of the advertised 3 bytes, so the body
        # future cannot complete.
        #
        request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3

X'''.replace(b'\n', b'\r\n')
        protocol.data_received(request)
        #
        # Now we should have written something into the queue. If we now
        # resume the coroutine, it should proceed into our handler and wait
        # for the body
        #
        future = coro.send(None)
        #
        # Throw a CancelledError
        #
        raised = False
        try:
            coro.throw(asyncio.exceptions.CancelledError())
        except asyncio.exceptions.CancelledError:
            raised = True
        assert raised
##############################################################
# The test cases below this line simulate a full roundtrip
# using a "real" parser instead of calling the callbacks
##############################################################
def test_full_request_lifecycle_http11(transport, container):
    """End-to-end HTTP/1.1 round trip: parse a request in two chunks, run
    the handler, validate the response bytes on the wire, and verify the
    keep-alive behaviour (transport stays open for HTTP/1.1)."""
    protocol = aioweb.protocol.HttpProtocol(container=container)
    with unittest.mock.patch("asyncio.create_task") as mock:
        protocol.connection_made(transport)
        # Grab the worker coroutine scheduled via create_task.
        coro = mock.call_args.args[0]
        #
        # When we now start our coroutine, it should suspend and wait
        #
        coro.send(None)
        #
        # Feed some data and complete the headers (body is only 1 of the
        # advertised 3 bytes at this point)
        #
        request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3

X'''
        protocol.data_received(request.replace(b'\n', b'\r\n'))
        assert protocol.get_state() == aioweb.protocol.ConnectionState.BODY
        #
        # When we now call send on the coroutine to simulate that the event
        # loop reschedules it, it should invoke our handler function
        #
        coro.send(None)
        #
        # Make sure that the handler has been called
        #
        assert container._request is not None
        #
        # Verify some attributes of the request object
        #
        request = container._request
        assert isinstance(request, aioweb.request.Request)
        headers = request.headers()
        assert headers is not None
        assert isinstance(headers, dict)
        assert "Host" in headers
        assert headers["Host"] == b"example.com"
        assert request.http_version() == "1.1"
        #
        # Get the future to wait for completion of the body
        #
        future = request.body().send(None)
        #
        # In our case, the body should not be complete yet
        #
        assert not future.done()
        #
        # complete it
        #
        request = b'YZ'
        protocol.data_received(request)
        assert protocol.get_state() == aioweb.protocol.ConnectionState.PENDING
        #
        # At this point, our future should be complete
        #
        body = future.result()
        assert body == b"XYZ"
        #
        # Verify that we have written back something into the transport
        #
        assert len(transport._data) > 0
        #
        # Now let us try to parse the response data
        #
        parser_helper = ParserHelper()
        parser = httptools.HttpResponseParser(parser_helper)
        parser.feed_data(transport._data)
        #
        # If we get to this point, this is a valid HTTP response
        #
        assert parser.get_status_code() == 200
        assert parser_helper._body == b"abc"
        #
        # Finally check that the transport is not closed
        #
        assert not transport._is_closing
#
# We now use HTTP 1.0 and verify that we get the same version back
# and do not use keep alive
#
def test_full_request_lifecycle_http10(transport, container):
protocol = aioweb.protocol.HttpProtocol(container=container)
with unittest.mock.patch("asyncio.create_task") as mock:
protocol.connection_made(transport)
coro = mock.call_args.args[0]
coro.send(None)
#
# Feed some data
#
request = b'''GET / HTTP/1.0
Host: example.com
Content-Length: 3
123'''
protocol.data_received(request.replace(b'\n', b'\r\n'))
#
# When we now call send on the coroutine to simulate that the event
# loop reschedules it, it should invoke our handler function
#
coro.send(None)
#
# Make sure that the handler has been called
#
assert container._request is not None
#
# Verify some attributes of the request object
#
request = container._request
assert isinstance(request, aioweb.request.Request)
headers = request.headers()
assert headers is not None
assert isinstance(headers, dict)
assert "Host" in headers
assert headers["Host"] == b"example.com"
assert request.http_version() == "1.0"
assert not request.keep_alive()
#
# Verify that we have written back something into the transport
#
assert len(transport._data) > 0
#
# Now let us try to parse the response data
#
parser_helper = ParserHelper()
parser = httptools.HttpResponseParser(parser_helper)
parser.feed_data(transport._data)
#
# If we get to this point, this is a valid HTTP response
#
assert parser.get_status_code() == 200
assert parser_helper._body == b"abc"
#
# Finally check that the transport is closed
#
assert transport._is_closing
#
# Finally we test a few error cases. We start with the case
# that the handler raises a HTTP exception
#
def test_full_request_lifecycle_handler_httpexception(transport, container):
protocol = aioweb.protocol.HttpProtocol(container=container)
with unittest.mock.patch("asyncio.create_task") as mock:
protocol.connection_made(transport)
coro = mock.call_args.args[0]
#
# When we now start our coroutine, it should suspend and wait
#
coro.send(None)
#
# Feed some data
#
request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3
XYZ'''
protocol.data_received(request.replace(b'\n', b'\r\n'))
#
# When we now call send on the coroutine to simulate that the event
# loop reschedules it, it should invoke our handler function. We instruct
# the dummy handler to raise an exception
#
container.set_exception(aioweb.exceptions.HTTPException())
coro.send(None)
#
# Make sure that the handler has been called
#
assert container._request is not None
#
# Verify some attributes of the request object
#
request = container._request
assert isinstance(request, aioweb.request.Request)
headers = request.headers()
assert headers is not None
assert isinstance(headers, dict)
assert "Host" in headers
assert headers["Host"] == b"example.com"
assert request.http_version() == "1.1"
#
# Verify that we have written back something into the transport
#
assert len(transport._data) > 0
#
# Now let us try to parse the response data
#
parser_helper = ParserHelper()
parser = httptools.HttpResponseParser(parser_helper)
parser.feed_data(transport._data)
#
# If we get to this point, this is a valid HTTP response
#
assert parser.get_status_code() == 500
#
# Finally check that the transport is not closed
#
assert not transport._is_closing
#
# Test the behaviour of the worker loop when a handler returns
# an exception different from HTTPException
#
def test_full_request_lifecycle_handler_baseexception(transport, container):
protocol = aioweb.protocol.HttpProtocol(container=container)
with unittest.mock.patch("asyncio.create_task") as mock:
protocol.connection_made(transport)
coro = mock.call_args.args[0]
#
# When we now start our coroutine, it should suspend and wait
#
coro.send(None)
#
# Feed some data
#
request = b'''GET / HTTP/1.1
Host: example.com
Content-Length: 3
XYZ'''
protocol.data_received(request.replace(b'\n', b'\r\n'))
#
# When we now call send on the coroutine to simulate that the event
# loop reschedules it, it should invoke our handler function. We instruct
# the dummy handler to raise an exception
#
container.set_exception(BaseException())
coro.send(None)
#
# Make sure that the handler has been called
#
assert container._request is not None
#
# Verify some attributes of the request object
#
request = container._request
assert isinstance(request, aioweb.request.Request)
headers = request.headers()
assert headers is not None
assert isinstance(headers, dict)
assert "Host" in headers
assert headers["Host"] == b"example.com"
assert request.http_version() == "1.1"
#
# Verify that we have written back something into the transport
#
assert len(transport._data) > 0
#
# Now let us try to parse the response data
#
parser_helper = ParserHelper()
parser = httptools.HttpResponseParser(parser_helper)
parser.feed_data(transport._data)
#
# If we get to this point, this is a valid HTTP response
#
assert parser.get_status_code() == 500
#
# Finally check that the transport is not closed
#
assert not transport._is_closing
| true |
196c6cc3c3bcc3fd42efdc95ff87ec78a1e873db | Python | CarloColumna/student-discord-bot | /record.py | UTF-8 | 6,403 | 2.90625 | 3 | [] | no_license | # Sample static data
command_list = {'my': {'grades':'Retrieve your current course grades', 'study':'Retrieve your Study details',
'review':'Have a practice review before your actual exam', 'mates':'Retrieve your classmates details',
'dates':'Retrieve your upcoming important deadlines'}, 'bot':{'help':'list all the commands available'}}
grade_list = ({'IT Systems':{'Workshop': '85', 'Project': '90', 'Presentation': '80', 'Exam': '92'}},
{'Data Handling': {'Project 1': '80', 'Project 2': '86', 'Task 1': '91', 'Task 2': '90', 'Exam': '82'}},
{'Professional Practice': {'Workshop': '85', 'Project': '84', 'Task': '90', 'Exam': '92'}},
{'Programming Principles': {'Workshop': '83', 'Project': '80', 'Task 1': '83', 'Task 2': '90', 'Exam': '92'}},
{'Computer Servicing': {'Workshop': '92', 'Project': '85', 'Presentation': '89', 'Exam': '83'}},
{'Operating Systems': {'Project': '86', 'Task': '90', 'Exam': '88'}},
{'Networking': {'Workshop': '80', 'Project': '85', 'Exam': '77'}},
{'System Administration': {'Task': '81', 'Presentation': '75', 'Exam': '83'}})
date_list = ({'IT Systems':{'Workshop': '18 Apr 2018 0900H', 'Project': '18 Apr 2018', 'Presentation': '18 Apr 2018 0900H', 'Exam': '18 Apr 2018 0900H'}},
{'Data Handling': {'Project 1': '18 Apr 2018', 'Project 2': '18 Apr 2018','Exam': '18 Apr 2018 0900H'}},
{'Professional Practice': {'Workshop': '18 Apr 2018 0900H', 'Project': '18 Apr 2018', 'Exam': '18 Apr 2018 0900H'}},
{'Programming Principles': {'Workshop': '18 Apr 2018 0900H', 'Project': '18 Apr 2018', 'Exam': '18 Apr 2018 0900H'}},
{'Computer Servicing': {'Workshop': '18 Apr 2018 0900H', 'Project': '18 Apr 2018', 'Presentation': '18 Apr 2018 0900H', 'Exam': '18 Apr 2018 0900H'}},
{'Operating Systems': {'Project': '18 Apr 2018','Exam': '18 Apr 2018 0900H'}},
{'Networking': {'Workshop': '18 Apr 2018 0900H', 'Project': '18 Apr 2018', 'Exam': '18 Apr 2018 0900H'}},
{'System Administration': {'Presentation': '18 Apr 2018 0900H', 'Exam': '18 Apr 2018 0900H'}})
period_list = ({'IT Systems': '29 Jan 2018 - 12 Feb 2018'},
{'Data Handling': '29 Jan 2018 - 12 Feb 2018'},
{'Professional Practice': '29 Jan 2018 - 12 Feb 2018'},
{'Programming Principles': '29 Jan 2018 - 12 Feb 2018'},
{'Computer Servicing': '29 Jan 2018 - 12 Feb 2018'},
{'Operating Systems': '29 Jan 2018 - 12 Feb 2018'},
{'Networking': '29 Jan 2018 - 12 Feb 2018'},
{'System Administration': '29 Jan 2018 - 12 Feb 2018'})
student_list = ({'Tom Sawyer' : {'House':'Gryffindor','Study':'Computer Science', 'Email':'tom@gryffindor.com','Club': 'Book Club'}},
{'Huckleberry Finn': {'House':'Slytherin','Study':'Information Technology','Email':'huckleberry@slytherin.com','Club': 'Music Club'}},
{'Hannibal Lecter': {'House':'Hufflepuff','Study':'Computer Science', 'Email':'hannibal@hufflepuff.com','Club': 'Sports Club'}},
{'Scarlett O\'Hara': {'House':'Ravenclaw ','Study':'Network Engineering','Email':'scarlett@ravenclaw.com', 'Club': 'Book Club'} },
{'Jay Gatsby': {'House':'Gryffindor','Study':'Information Technology', 'Email':'jay@gryffindor.com', 'Club': 'Sports Club'} })
inst_date_list = ({'February 2018':{'14 Feb 0900H': {'Workshop': 'Build a Desktop PC'}, '21 Feb 1400H': {'Presentation':'Network Security'}, '27 Feb 1500H': {'Workshop':'Build a Desktop PC'}}},
{'March 2018': {'03 Mar 0800H': {'Workshop': 'Build a Desktop PC'}, '18 Mar 1300H': {'Presentation':'Network Security'}}},
{'April 2018': {'21 Feb 1400H': {'Presentation':'Network Security'}, '10 Apr 1000H': {'Workshop': 'Build a Desktop PC'}}})
Q1 = 'You are creating a custom Distance class. You want to ease the conversion from your Distance class to a double. What should you add? \n' \
'A. Nothing; this is already possible. \n' \
'B. An implicit cast operator. \n' \
'C. An explicit cast operator. \n' \
'D. A static Parse method.'
Q1E = 'A. Incorrect: A conversion between a custom class and a value type does not exist by default. \n' \
'B. Correct: Adding an implicit operator will enable users of your class to convert between Distance and double without any extra work. \n' \
'C: Incorrect: Although adding an explicit cast operator will enable users of the class to convert from Distance to double, they will still need to explicitly cast it. \n' \
'D: Incorrect: A Parse method is used when converting a string to a type. It doesn\'t add conversions from your type to another type. \n'
Q2 = 'You are creating a new collection type and you want to make sure the elements in it can be easily accessed. What should you add to the type? \n' \
'A. Constructor \n' \
'B. Indexer property \n' \
'C. Generic type parameter \n' \
'D. Static property'
Q2E = 'A: Incorrect: A constructor is used to create an instance of a new type \n' \
'B. Correct: An indexer property enables the user of the type to easily access a type that represents an array-like collection. \n' \
'C. Incorrect: Making the type generic enables you to store multiple different types inside your collection. \n' \
'D. Incorrect: A static property cannot access the instance data of the collection.\n'
Q3 = 'You are creating a generic class that should work only with reference types. Which type constraint should you add? \n' \
'A: where T: class \n' \
'B. where T: struct \n' \
'C. where T: new() \n' \
'D. where T: IDisposable'
Q3E = 'A. Correct: Constraining your generic type parameter to class allows the class to be used only with reference type. \n' \
'B. Incorrect: This will constrain the class to be used with a value type, not a reference type \n' \
'C. Incorrect: This will constrain the class to be used with a type that has an empty default constructor. It can be both a value and a reference type. \n' \
'D. Incorrect: This constrain the class to be used with a type that implements the IDisposable interface. \n'
question_list = ({'Q1' : { 'Question': Q1, 'B':Q1E }},
{'Q2' : { 'Question': Q2, 'B':Q2E }},
{'Q3' : { 'Question': Q3, 'A':Q3E }}) | true |
88001ef335b7596bec6dedfd33b588ad2d971290 | Python | David199926/ClassMood | /deteccion/Video/VideoCamera.py | UTF-8 | 711 | 2.59375 | 3 | [] | no_license | import cv2
class VideoCamera:
def startCapture(self):
self.video = cv2.VideoCapture(0)
def release(self):
try:
self.video.release()
except AttributeError:
print('video no definido')
def getFrame(self):
success, image = self.video.read()
if not success: raise ExternalCameraUsageError()
return image
def getBytes(self, img):
ret, jpeg = cv2.imencode('.jpg', img)
return jpeg.tobytes()
#Excepciones
class ExternalCameraUsageError(Exception):
def __init__(self, message = 'Could not open video source, another app is probably using camera',*args, **kwargs):
super().__init__(*args, **kwargs) | true |
501e1028ab2dab8641fe7cdbb6b90945cc78a4d6 | Python | stufit/pycharm_test | /연습모드.py | UTF-8 | 554 | 3.171875 | 3 | [] | no_license | class1_students = ["김철수", "홍길동", "문재인", "김정은", "트럼프", "성춘향"]
class2_students = ["손흥민", "이강인", "권창훈", "정우영", "김진수", "김민재"]
def check_list(paramList, nameStr):
result = False
for item in paramList:
if nameStr == item:
result = True
return result
print(check_list(class1_students, '홍길동'))
print(check_list(class2_students, '손흥민'))
print(check_list(class1_students, '박신웅'))
print(check_list(class2_students, '박신웅'))
| true |
e8de6c43209c662a68489e2fd9795872a3d0c370 | Python | naturkach/botgame | /board.py | UTF-8 | 8,412 | 3.046875 | 3 | [] | no_license | #! /usr/bin/env python3
from math import sqrt
from element import Element
from point import Point
import re
class Board:
COUNT_LAYERS = 3
INPUT_REGEX = "(.*),\"layers\":\[(.*)\](.*)"
def __init__(self, input):
matcher = re.search(Board.INPUT_REGEX, input)
board_string = matcher.group(2).replace('\n', '').replace(',', '').replace('\"', '') # one line board
self._board = []
_layer_len = int(len(board_string) / Board.COUNT_LAYERS)
for i in range(Board.COUNT_LAYERS):
_layer = []
for j in range(_layer_len):
_layer.append(board_string[j + (i * _layer_len)])
self._board.append(_layer)
self._layer_size = int(sqrt(_layer_len))
def _find_all(self, element):
_points = []
_a_char = element.get_char()
for i in range(len(self._board)):
for j in range(len(self._board[i])):
if self._board[i][j] == _a_char:
_points.append(self._strpos2pt(j))
return _points
def _strpos2pt(self, strpos):
return Point(*self._strpos2xy(strpos))
def _strpos2xy(self, strpos):
return strpos % self._layer_size, strpos // self._layer_size
def get_at(self, x, y):
_strpos = self._xy2strpos(x, y)
_elements = []
for i in range(len(self._board)):
_elements.append(Element(self._board[i][_strpos]))
return _elements
def _xy2strpos(self, x, y):
return self._layer_size * y + x
def is_at(self, x, y, element_object):
return element_object in self.get_at(x, y)
def is_barrier_at(self, x, y):
points = set()
points.update(self.get_floors())
points.update(self.get_starts())
points.update(self.get_exits())
points.update(self.get_golds())
points.update(self.get_holes())
points.update(self.get_lasers())
points.add(self.get_hero())
points.update(self.get_other_heroes())
return Point(x, y) not in list(points)
def get_hero(self):
points = set()
points.update(self._find_all(Element('ROBO_FALLING')))
points.update(self._find_all(Element('ROBO_LASER')))
points.update(self._find_all(Element('ROBO')))
points.update(self._find_all(Element('ROBO_FLYING')))
assert len(points) <= 1, "There should be only one robo"
return list(points)[0]
def is_me_alive(self):
points = set()
points.update(self._find_all(Element('ROBO_FALLING')))
points.update(self._find_all(Element('ROBO_LASER')))
return list(points) == 0
def get_other_heroes(self):
points = set()
points.update(self._find_all(Element('ROBO_OTHER_FALLING')))
points.update(self._find_all(Element('ROBO_OTHER_LASER')))
points.update(self._find_all(Element('ROBO_OTHER')))
points.update(self._find_all(Element('ROBO_OTHER_FLYING')))
return list(points)
def get_empty(self):
return self._find_all(Element('EMPTY'))
def get_zombies(self):
points = set()
points.update(self._find_all(Element('FEMALE_ZOMBIE')))
points.update(self._find_all(Element('MALE_ZOMBIE')))
points.update(self._find_all(Element('ZOMBIE_DIE')))
return list(points)
def get_laser_machines(self):
points = set()
points.update(self._find_all(Element('LASER_MACHINE_CHARGING_LEFT')))
points.update(self._find_all(Element('LASER_MACHINE_CHARGING_RIGHT')))
points.update(self._find_all(Element('LASER_MACHINE_CHARGING_UP')))
points.update(self._find_all(Element('LASER_MACHINE_CHARGING_DOWN')))
points.update(self._find_all(Element('LASER_MACHINE_READY_LEFT')))
points.update(self._find_all(Element('LASER_MACHINE_READY_RIGHT')))
points.update(self._find_all(Element('LASER_MACHINE_READY_UP')))
points.update(self._find_all(Element('LASER_MACHINE_READY_DOWN')))
return list(points)
def get_lasers(self):
points = set()
points.update(self._find_all(Element('LASER_LEFT')))
points.update(self._find_all(Element('LASER_RIGHT')))
points.update(self._find_all(Element('LASER_UP')))
points.update(self._find_all(Element('LASER_DOWN')))
return list(points)
def get_boxes(self):
return self._find_all(Element('BOX'))
def get_floors(self):
return self._find_all(Element('FLOOR'))
def get_holes(self):
points = set()
points.update(self._find_all(Element('HOLE')))
points.update(self._find_all(Element('ROBO_FALLING')))
points.update(self._find_all(Element('ROBO_OTHER_FALLING')))
return list(points)
def get_exits(self):
return self._find_all(Element('EXIT'))
def get_starts(self):
return self._find_all(Element('START'))
def get_golds(self):
return self._find_all(Element('GOLD'))
def get_walls(self):
points = set()
points.update(self._find_all(Element('ANGLE_IN_LEFT')))
points.update(self._find_all(Element('WALL_FRONT')))
points.update(self._find_all(Element('ANGLE_IN_RIGHT')))
points.update(self._find_all(Element('WALL_RIGHT')))
points.update(self._find_all(Element('ANGLE_BACK_RIGHT')))
points.update(self._find_all(Element('WALL_BACK')))
points.update(self._find_all(Element('ANGLE_BACK_LEFT')))
points.update(self._find_all(Element('WALL_LEFT')))
points.update(self._find_all(Element('WALL_BACK_ANGLE_LEFT')))
points.update(self._find_all(Element('WALL_BACK_ANGLE_RIGHT')))
points.update(self._find_all(Element('ANGLE_OUT_RIGHT')))
points.update(self._find_all(Element('ANGLE_OUT_LEFT')))
points.update(self._find_all(Element('SPACE')))
return list(points)
def get_perks(self):
points = set()
points.update(self._find_all(Element('UNSTOPPABLE_LASER_PERK')))
points.update(self._find_all(Element('DEATH_RAY_PERK')))
points.update(self._find_all(Element('UNLIMITED_FIRE_PERK')))
return list(points)
def is_near(self, x, y, elem):
_is_near = False
if not Point(x, y).is_bad(self._layer_size):
_is_near = (self.is_at(x + 1, y, elem) or
self.is_at(x - 1, y, elem) or
self.is_at(x, 1 + y, elem) or
self.is_at(x, 1 - y, elem))
return _is_near
def count_near(self, x, y, elem):
_near_count = 0
if not Point(x, y).is_bad(self._layer_size):
for _x, _y in ((x + 1, y), (x - 1, y), (x, 1 + y), (x, 1 - y)):
if self.is_at(_x, _y, elem):
_near_count += 1
return _near_count
def to_string(self):
return ("Board:\n{brd}\nHero at: {hero}\nOther Heroes "
"at: {others}\nZombies at: {zmb}\nLasers at:"
" {lsr}\nHoles at : {hls}\nGolds at: "
"{gld}\nPerks at: {prk}".format(brd=self._line_by_line(),
hero=self.get_hero(),
others=self.get_other_heroes(),
zmb=self.get_zombies(),
lsr=self.get_lasers(),
hls=self.get_holes(),
gld=self.get_golds(),
prk=self.get_perks())
)
def _line_by_line(self):
_string_board = ' '
for i in range(self._layer_size * Board.COUNT_LAYERS):
_string_board += str(i % 10)
if (i + 1) % self._layer_size == 0:
_string_board += '\t'
_string_board += '\n'
for i in range(self._layer_size):
_string_board += str(i % 10) + ' '
for j in range(Board.COUNT_LAYERS):
for k in range(self._layer_size):
_string_board += self._board[j][k + (i * self._layer_size)]
_string_board += '\t'
_string_board += '\n'
return _string_board
if __name__ == '__main__':
raise RuntimeError("This module is not designed to be ran from CLI")
| true |
69026338fa872871485e0b57df45089ca2c9a6d6 | Python | iCodeIN/materials | /ch3_collections_and_functions/problem_5.py | UTF-8 | 360 | 2.921875 | 3 | [] | no_license | from problem_4 import taxes_owed
import csv
def total_impact(strategy):
'''
This function takes in a tax strategy as a string and
returns a tuple of tax_revenue and poverty_burden.
Inputs:
tax strategy (string)
Output:
tax_revenue and poverty_burden (tuple, float and integer)
'''
pass | true |
8727b1af5920e84c6d7350d445e31ef0a06a0276 | Python | taylanaltin/pyWorks | /t2_Server.py | UTF-8 | 745 | 3.328125 | 3 | [] | no_license | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((socket.gethostname(), 1234))
s.listen(5)
print("Waiting connection from client")
while True:
clientsocket, adress = s.accept()
"""Client is connected"""
print(f"Connection from {adress} has been established")
print("Input 'q' for stop the program")
clientsocket.send(bytes("Welcome to the server", "utf-8"))
while True:
"""Gets input from user until the q key is pressed."""
inputstring = input("Enter the string \n")
clientsocket.send(bytes(inputstring, "utf-8"))
if inputstring == "q":
print("q pressed, ending loop")
clientsocket.close()
break
break
| true |
5e2a5a647a4ff1fd1ce0a12fff56826f765a36c3 | Python | justfollowthesun/laser_control | /storage/database.py | UTF-8 | 4,595 | 2.984375 | 3 | [] | no_license | import os
import logging
import sqlite3
# import pandas as pd
from config import DB_PATH, DB_DIR
class Database():
tablename: str = 'authorization'
connection = None
def __init__(self) -> None:
if not os.path.exists(DB_DIR):
os.mkdir(DB_DIR)
self.connection = sqlite3.connect(DB_PATH)
logging.info("Successfully connect to database")
self.create_table()
#self.create_testing_login()
# self.put_login_password_to_db()
logging.info("Successfully load environment")
def authorization_check(self, login:str, password:str) -> bool:
cursor: sqlite3.cursor = self.connection.cursor()
result = cursor.execute(f"select login, password from {self.tablename} where login = ? and password = ? ",(login,password))
return bool(cursor.fetchone())
def close(self) -> None:
if self.connection:
self.connection.close()
logging.info("Database connection was closed")
def create_table(self) -> None:
"""
Create table with name self.tablename in selected database
"""
cursor:sqlite3.cursor = self.connection.cursor()
cursor.execute(f'drop table {self.tablename} ')
# Есть один мастер-аккаунт, который пользователь создаёт при
# первом запуске программы.
# Мастер аккаунт может быть создан лишь единожды
cursor.execute(f"""create table if not exists {self.tablename}
(
id integer primary key AUTOINCREMENT,
login string,
password string,
is_master bool,
is_authorized bool
)""")
def create_testing_login(self):
cursor: sqlite3.cursor = self.connection.cursor()
insert_line = f'insert into {self.tablename} (login, password, is_master, is_authorized) values(?, ?, ?, ?)'
cursor.execute(insert_line, ('login', 'password', False, False))
self.connection.commit()
logging.info(f'Have inserted {cursor.rowcount} records to the table.')
# def put_login_password_to_db(self):
# cursor: sqlite3.cursor = self.connection.cursor()
# keys=pd.read_excel('Keys.xlsx', names=['Name','Login','Password', 'Master'])
# for i in range(0, keys.shape[0]):
# insert_line = f'insert into {self.tablename} (login, password, is_master, is_authorized) values(?, ?, ?, ?)'
# cursor.execute(insert_line, (str(keys.Login[i]), str(keys.Password[i]), bool(keys.Master[i]), True))
# self.connection.commit()
# logging.info(f'Have inserted {cursor.rowcount} records to the table.')
def check_if_master_exists(self) -> bool:
cursor: sqlite3.cursor = self.connection.cursor()
result = cursor.execute(f"select count(*) from {self.tablename} where is_master = true")
return bool(cursor.fetchone())
# def initiate_month(self) -> None:
# """Checks if days of the current month are
# inserted into database already.
# Inserts them if cannot find.
# """
#
# today = datetime.now()
#
# cursor: sqlite3.Cursor = self.connection.cursor()
# stored_days_list = self.get_checkboxes( today, cursor=cursor)
#
# if not stored_days_list:
#
# insert_line = f'insert into {self.tablename} (id, checked, day, month, full_date) values(?, ?, ?, ?, ?)'
# today = today.date()
# days_list = Helper.GetMonthDays()
# cursor.executemany(insert_line, ((None, day < today, day.day, day.month, day) for day in days_list))
# self.connection.commit()
# logging.info(f'Have inserted {cursor.rowcount} records to the table.')
#
# def get_checkboxes(self, d: Union[datetime, datetime.date], cursor: Optional[sqlite3.Cursor] = None) -> List[DataBaseCheckBox]:
# cursor = cursor or self.connection.cursor()
# days_list = cursor.execute(f"SELECT * from {self.tablename} where month = ?", (d.month, )).fetchall()
# return days_list
#
# def save_changes(self, boxes: Dict[int, QtWidgets.QCheckBox]):
#
# cursor: sqlite3.Cursor = self.connection.cursor()
# month = datetime.now().month
# cursor.executemany(f'update {self.tablename} set checked = ? where day = ? and month = ?', ((box.isChecked(), index, month) for index, box in boxes.items()))
# self.connection.commit()
| true |
f4670ce4c2fed4e5828dd9bdc9008b9f89e6c020 | Python | KD4N13-L/Data-Structures-Algorithms | /Data Structures/stack.py | UTF-8 | 1,609 | 3.609375 | 4 | [] | no_license | from abc import ABC, abstractmethod
class StackADT(ABC):
@abstractmethod
def push(self, data):
pass
@abstractmethod
def pop(self):
pass
@abstractmethod
def top(self):
pass
@abstractmethod
def empty(self):
pass
@abstractmethod
def is_empty(self):
pass
@abstractmethod
def size(self):
pass
class Node:
def __init__(self, data, next_node, previous=None):
self.data = data
self.next = next_node
self.previous = previous
class LListStack(StackADT):
def __init__(self):
self.first = None
self.last = None
self.size = 0
def push(self, data):
node = Node(data, None)
if self.last is None:
self.first = node
self.last = node
self.size += 1
return
self.last.next = node
self.last = node
self.size += 1
def pop(self):
if self.size == 0:
return
if self.size == 1:
self.first = None
self.last = None
self.size -= 1
return
tmp = self.first
while tmp.next != self.last:
tmp = tmp.next
tmp.next = None
self.last = tmp
self.size -= 1
def size(self):
return self.size
def top(self):
return self.last
def is_empty(self):
if self.size == 0:
return True
else:
return False
def empty(self):
self.first = None
self.last = None
self.size = 0 | true |
5fc58da1cdb04f99968ff8c89876fdfb7a944683 | Python | ebonnecab/MS-Herd-Immunity | /Herd_Immunity_Project/simulation.py | UTF-8 | 5,892 | 3.453125 | 3 | [] | no_license | import random
import sys
random.seed(42)
from person import Person
from logger import Logger
from virus import Virus
class Simulation(object):
''' Main class that will run the herd immunity simulation program.
Expects initialization parameters passed as command line arguments when file is run.
Simulates the spread of a virus through a given population. The percentage of the
population that are vaccinated, the size of the population, and the amount of initially
infected people in a population are all variables that can be set when the program is run.
'''
def __init__(self, pop_size, vacc_percentage, initial_infected=1, virus=None):
self.logger = Logger("interactions.txt")
self.population = [] # List of Person objects
self.pop_size = pop_size # Int
self.next_person_id = 0
self.virus = virus
self.initial_infected = initial_infected # Int
# FIXME: Use the variables below
self.total_infected = 0
self.vacc_percentage = vacc_percentage # float between 0 and 1
self.total_dead = 0 # Int
self.newly_infected = []
def _create_population(self):
is_vacc_options = [True, False]
start = 0
first_id = 0
while start <= self.pop_size:
person = Person(first_id, random.choice(is_vacc_options))
self.population.append(person)
start += 1
first_id += 1
self.set_infected()
print (self.population)
def set_infected(self):
infected = random.sample(self.population, self.initial_infected)
for sick_people in infected:
sick_people.infection = self.virus
def _simulation_should_continue(self):
while self.pop_size > 0 or not self.vacc_percentage == 1:
return True
else:
return False
def run(self):
''' This method should run the simulation until all requirements for ending
the simulation are met.
'''
# TODO: Finish this method. To simplify the logic here, use the helper method
# _simulation_should_continue() to tell us whether or not we should continue
# the simulation and run at least 1 more time_step.
# TODO: Keep track of the number of time steps that have passed.
# HINT: You may want to call the logger's log_time_step() method at the end of each time step.
# TODO: Set this variable using a helper
time_step_counter = 0
should_continue = None
while should_continue:
# TODO: for every iteration of this loop, call self.time_step() to compute another
# round of this simulation.
# print('The simulation has ended after {time_step_counter} turns.'.format(time_step_counter))
pass
def choose_infected(self):
return random.choice(self.newly_infected)
# Test later today in an index.py file
def time_step(self):
total_interactions = 0
# calling get_random_person method to randomly choose person from total population
rand_person = random.choice(self.population)
# looping through population to find infected person
for person in self.population:
if person.infection == virus:
# creates loop for sick person to interact with 100 randos
while total_interactions <= 100:
# checking if rando is alive and calling interaction method
if rand_person.is_alive:
self.interaction(person, rand_person)
total_interactions += 1
else:
# if they're dead the method starts over
self.time_step()
def append_newly_infected(self, random_person):
if random_person.is_vaccinated() == False:
num = random.randint(0, 1)
if num < self.virus.repro_rate:
self.newly_infected.append(random_person._id)
random_person.infection = virus
def interaction(self, person, random_person):
# Assert statements are to check if
assert person.is_alive == True
assert random_person.is_alive == True
if person.infection == virus and random_person.infection == virus:
self.logger.log_interaction(person, random_person)
self.check_dead(random_person)
elif person.infection == virus and random_person.is_vaccinated == True:
self.logger.log_interaction(person, random_person)
self.check_dead(random_person)
elif person.infection == virus and random_person.is_vaccinated == False:
self.logger.log_interaction(person, random_person)
self.check_dead(random_person)
else:
pass
def _infect_newly_infected(self):
for person in self.newly_infected:
self.total_infected += 1
person.infection = self.virus
self.newly_infected = list()
def check_dead(self, rand_person):
if not rand_person.is_alive:
self.total_dead += 1
else:
pass
if __name__ == "__main__":
pop_size = 150
vacc_percentage = 0.3
virus = Virus("Ebola", 0.2, 0.4)
initial_infected = 3
sim = Simulation(pop_size, vacc_percentage, initial_infected, virus)
sim._create_population()
sim.set_infected()
# params = sys.argv[1:]
# virus_name = str(params[0])
# repro_num = float(params[1])
# mortality_rate = float(params[2])
# pop_size = int(params[3])
# vacc_percentage = float(params[4])
# if len(params) == 6:
# initial_infected = int(params[5])
# virus = Virus(virus_name, repro_num, mortality_rate)
# sim = Simulation(pop_size, vacc_percentage, initial_infected, virus)
# sim.run()
| true |
c07f0f45fcc2dd6769102ddb152e327b78803e18 | Python | tumrod/cs373-netflix | /TestNetflix.py | UTF-8 | 5,141 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
# ---------------------------
# tumrod/cs373-netflix/TestNetflix.py
# Copyright (C) 2015
# Tipparat Umrod
# ---------------------------
# https://docs.python.org/3.4/reference/simple_stmts.html#grammar-token-assert_stmt
# -------
# imports
# -------
from io import StringIO
from unittest import main, TestCase
from Netflix import netflix_read, netflix_eval, netflix_print, netflix_solve, netflix_init, netflix_rmse
# Module-level lookup tables (movie/viewer averages and expected answers).
# NOTE(review): none of these are referenced anywhere in the visible test
# methods — presumably leftovers from an earlier revision; confirm nothing
# imports them from this module before removing.
movie_avg = {}
viewer_avg = {}
expected = {}
# -----------
# TestNetflix
# -----------
class TestNetflix (TestCase) :
    """Unit tests for the Netflix rating-prediction module.

    Covers netflix_read (input parsing), netflix_eval (rating prediction),
    netflix_rmse (error metric), netflix_print (output formatting) and
    netflix_solve (end-to-end driver).
    """
    # ----
    # init
    # ----
    # Runs once, at class-definition time, before any test executes.
    netflix_init()
    # ----
    # read
    # ----
    # A line ending in ':' is a movie id; any other line is a viewer id.
    def test_read_1 (self) :
        s = "15:\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 15)
        self.assertEqual(j, 0)
        s = "1234\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 15)
        self.assertEqual(j, 1234)
        s = "1467\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 15)
        self.assertEqual(j, 1467)
    def test_read_2 (self) :
        s = "30:\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 30)
        self.assertEqual(j, 0)
        s = "5466\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 30)
        self.assertEqual(j, 5466)
        s = "6788\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 30)
        self.assertEqual(j, 6788)
        s = "12:\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 12)
        self.assertEqual(j, 0)
        s = "3444\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 12)
        self.assertEqual(j, 3444)
    def test_read_3 (self) :
        s = "2044:\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 2044)
        self.assertEqual(j, 0)
        s = "345667\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 2044)
        self.assertEqual(j, 345667)
        s = "4521\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 2044)
        self.assertEqual(j, 4521)
        s = "2212:\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 2212)
        self.assertEqual(j, 0)
        s = "3411\n"
        i, j = netflix_read(s)
        self.assertEqual(i, 2212)
        self.assertEqual(j, 3411)
    # ----
    # eval
    # ----
    # netflix_eval(movie_id, viewer_id) -> predicted rating.
    # NOTE(review): expected values depend on the cache files loaded by
    # netflix_init() -- confirm they are available in the test environment.
    def test_eval_1 (self) :
        v = netflix_eval(10035, 1651047)
        self.assertEqual(v, 3.4)
    def test_eval_2 (self) :
        v = netflix_eval(2043, 2312054)
        self.assertEqual(v, 4.3)
    def test_eval_3 (self) :
        v = netflix_eval(10851, 1050707)
        self.assertEqual(v, 3.8)
    def test_eval_4 (self) :
        v = netflix_eval(10851, 514376)
        self.assertEqual(v, 3.8)
    def test_eval_5 (self) :
        v = netflix_eval(14961, 1143187)
        self.assertEqual(v, 5.0)
    # ------------
    # netflix_rmse
    # ------------
    # Input is any iterable of (movie, viewer, prediction) triples; the
    # returned RMSE is a string rounded to two decimals.
    def test_rmse_1 (self) :
        num_list = [(10005, 793736, 3.3), (10005, 926698, 3.3), (10006, 0, 0), (10006, 1093333, 3.6), (10006, 1982605, 3.3)]
        rmse = netflix_rmse(num_list)
        self.assertEqual(rmse, str(0.95))
    def test_rmse_2 (self) :
        num_list = ((10008, 1813636, 4.3), (10008, 2048630, 3.5), (10008, 930946, 3.7), (1001, 1050889, 4.0))
        rmse = netflix_rmse(num_list)
        self.assertEqual(rmse, str(0.71))
    def test_rmse_3 (self) :
        num_list = {(1006, 0, 0), (1006, 1004708, 4.1), (1006, 762076, 4.2), (1006, 1403722, 3.8)}
        rmse = netflix_rmse(num_list)
        self.assertEqual(rmse, str(0.54))
    def test_rmse_4 (self) :
        num_list = [(10035, 1651047, 3.4), (10035, 811486, 4.4), (10059, 962754, 2.1)]
        rmse = netflix_rmse(num_list)
        self.assertEqual(rmse, str(0.49))
    # -----
    # print
    # -----
    # A viewer id of 0 makes netflix_print emit the "<movie>:" header line,
    # otherwise it emits the prediction.
    def test_print_1 (self) :
        w = StringIO()
        netflix_print(w, 1, 10234, 4.6)
        self.assertEqual(w.getvalue(), "4.6\n")
    def test_print_2 (self) :
        w = StringIO()
        netflix_print(w, 10851, 0, 32)
        self.assertEqual(w.getvalue(), "10851:\n")
    def test_print_3 (self) :
        w = StringIO()
        netflix_print(w, 2041, 0, 32)
        self.assertEqual(w.getvalue(), "2041:\n")
    # -----
    # solve
    # -----
    # End to end: reads the probe-format input, writes per-viewer predictions
    # and a trailing RMSE line.
    def test_solve_1 (self) :
        r = StringIO("10035:\n1651047\n811486\n10059:\n962754\n")
        w = StringIO()
        netflix_solve(r, w)
        self.assertEqual(w.getvalue(), "10035:\n3.4\n4.4\n10059:\n2.1\nRMSE: 0.49\n")
    def test_solve_2 (self) :
        r = StringIO("10008:\n1813636\n2048630\n930946\n1001:\n1050889\n67976\n1025642\n")
        w = StringIO()
        netflix_solve(r, w)
        self.assertEqual(w.getvalue(), "10008:\n4.3\n3.5\n3.7\n1001:\n4.0\n3.5\n3.3\nRMSE: 0.93\n")
    def test_solve_3 (self) :
        r = StringIO("1006:\n1004708\n762076\n1403722\n")
        w = StringIO()
        netflix_solve(r, w)
        self.assertEqual(w.getvalue(), "1006:\n4.1\n4.2\n3.8\nRMSE: 0.54\n")
# ----
# main
# ----
# Run the unit-test suite when this file is executed directly.
if __name__ == "__main__" :
    main()
| true |
94cc694268f71d7eca79e3410766b8f86ebe32f7 | Python | nrichgels/ClassCode | /C152/sourcecode/ch12/Engine.py | UTF-8 | 3,719 | 3.78125 | 4 | [] | no_license | # Program: Engine.py
# Authors: Michael H. Goldwasser
# David Letscher
#
# This example is discussed in Chapter 12 of the book
# Object-Oriented Programming in Python
#
from ourStrip import ourStrip
from TextIndex import TextIndex
class Engine:
    """Support word searches within a collection of text documents."""

    def __init__(self):
        """Create a new search engine.

        By default, the initial corpus is empty.
        """
        self._corpus = {}   # maps each document label to the associated index
        self._hasWord = {}  # maps each word to a set of document labels

    def addDocument(self, contents, sourceLabel):
        """Add the given document to the corpus (if not already present).

        contents     a single string representing the complete contents
        sourceLabel  a string which identifies the source of the contents
        """
        if sourceLabel not in self._corpus:
            newIndex = TextIndex(contents, sourceLabel)
            self._corpus[sourceLabel] = newIndex
            # Register every indexed word under this document's label.
            for word in newIndex.getWords():
                if word in self._hasWord:
                    self._hasWord[word].add(sourceLabel)
                else:
                    self._hasWord[word] = set([sourceLabel])  # new set with one entry

    def lookup(self, term):
        """Return a set of labels for those documents containing the search term."""
        term = ourStrip(term)
        if term in self._hasWord:
            return set(self._hasWord[term])  # intentionally return a copy
        else:
            return set()

    def getContext(self, term, docLabel, maxOccur=10):
        """Search a single document for a word, returning a string demonstrating context.

        term      the word of interest
        docLabel  the name of the underlying document to search
        maxOccur  maximum number of distinct occurrences to display (default 10)
        """
        # BUG FIX: the original signature omitted `self`, so the reference to
        # self._corpus raised an error on every invocation of this method.
        return self._corpus[docLabel].getContext(term, maxOccur)

    def makeReport(self, term, maxDocuments=10, maxContext=3):
        """Produce a formatted report about the occurrences of a term within the corpus.

        Return a string summarizing the results. This will include names of all
        documents containing the term as well as a demonstration of the context.

        term          the word of interest
        maxDocuments  the maximum number of files to report (default 10)
        maxContext    maximum number of occurrences to show per document (default 3)
        """
        output = []  # lines of output
        sources = self.lookup(term)
        num = min(len(sources), maxDocuments)
        labels = list(sources)[:num]  # choose first so many labels
        for docLabel in labels:
            output.append('Document: ' + docLabel)
            context = self._corpus[docLabel].getContext(term, maxContext)
            output.append(context)
            output.append('=' * 40)
        return '\n'.join(output)
# Interactive driver.
# NOTE: this section uses Python 2 syntax (`print` statements, `raw_input`,
# the `file()` builtin) and will not run under Python 3.
if __name__ == '__main__':
    wizard = Engine()
    # Phase 1: load original files
    print 'Enter filenames to catalog, one per line.'
    print '(enter a blank line when done)'
    filename = raw_input('File: ')
    while filename:
        try:
            # NOTE(review): the file handle opened here is never closed.
            source = file(filename)
            wizard.addDocument(source.read(), filename)
        except IOError:
            print 'Sorry. Unable to open file', filename
        filename = raw_input('File: ')
    # Phase 2: let user enter queries
    print
    print 'Ready to search. Enter search terms, one per line.'
    print 'Enter a blank line to end.'
    term = raw_input('Term: ')
    while term:
        documents = wizard.lookup(term)
        if documents: # found the term
            print 'Containing files are:'
            print '\n'.join(documents)
            report = wizard.makeReport(term)
            print
            print 'Sample report:'
            print report
        else:
            print 'Term not found'
        term = raw_input('Term: ')
| true |
0d8c79a9ae2365ad712f584760b731ea2032345c | Python | daveredrum/PyViz3D | /pyviz3d/lines.py | UTF-8 | 1,243 | 3.34375 | 3 | [
"MIT"
] | permissive | # Lines class i.e. normals.
import numpy as np
class Lines:
    """A batch of line segments (e.g. normals) with per-endpoint colors.

    Positions and colors are stored interleaved (start, end, start, end, ...)
    because that is the layout the WebGL front-end consumes.
    """

    def __init__(self, lines_start, lines_end, colors_start, colors_end, visible):
        self.num_lines = lines_start.shape[0]
        doubled = self.num_lines * 2
        # Even rows hold segment starts, odd rows the matching ends.
        self.positions = np.empty((doubled, 3), dtype=lines_start.dtype)
        self.positions[::2] = lines_start
        self.positions[1::2] = lines_end
        # Colors follow the same interleaved layout, as 8-bit RGB.
        self.colors = np.empty((doubled, 3), dtype=np.uint8)
        self.colors[::2] = colors_start
        self.colors[1::2] = colors_end
        self.visible = visible

    def get_properties(self, binary_filename):
        """Return a dict of object properties.

        The dict is written into json and interpreted by javascript.
        """
        return {
            'type': 'lines',
            'visible': self.visible,
            'num_lines': self.num_lines,
            'binary_filename': binary_filename,
        }

    def write_binary(self, path):
        """Write raw position bytes followed by raw color bytes to *path*."""
        with open(path, "wb") as handle:
            handle.write(self.positions.tobytes())
            handle.write(self.colors.tobytes())
| true |
d28d1a85f95e6dd65f43d9f193168ce624590f85 | Python | etamponi/eole | /core/centroid_picker.py | UTF-8 | 1,037 | 2.921875 | 3 | [] | no_license | import numpy
from scipy.spatial import distance
__author__ = 'Emanuele'
class RandomCentroidPicker(object):
    """Selects centroids uniformly at random (with replacement)."""

    def __init__(self):
        pass

    def pick(self, instances, labels, n_centroids):
        """Return `n_centroids` rows drawn uniformly, with replacement, from `instances`."""
        indices = numpy.random.choice(len(instances), size=n_centroids, replace=True)
        return instances[indices]
class AlmostRandomCentroidPicker(object):
    """Random centroid selection where each pick re-weights the sampling
    distribution by (log-)distance from the previously chosen centroid."""

    def __init__(self, dist_measure=distance.euclidean):
        # Distance function used to re-weight probabilities between picks.
        self.dist_measure = dist_measure

    def pick(self, instances, labels, n_centroids):
        """Return an (n_centroids, n_features) array of rows chosen from `instances`."""
        count = len(instances)
        weights = numpy.ones(count) / count  # start from a uniform distribution
        centroids = numpy.zeros(shape=(n_centroids, instances.shape[1]))
        centroids[0] = instances[numpy.random.multinomial(1, weights).argmax()]
        for idx in range(1, n_centroids):
            previous = centroids[idx - 1]
            dists = numpy.asarray([self.dist_measure(sample, previous) for sample in instances])
            # Scale by log-distance from the last centroid and renormalize,
            # favoring instances far from what was just picked.
            weights = weights * numpy.log(1.0 + dists)
            weights = weights / weights.sum()
            centroids[idx] = instances[numpy.random.multinomial(1, weights).argmax()]
        return centroids
| true |
536fb50d6e8c1602bb9e71f57c18b68f2185b3e4 | Python | young31/Algorithm | /Basic/11.KMP.py | UTF-8 | 882 | 3.53125 | 4 | [] | no_license | # 문자열 매칭 판단 알고리즘
## 하나하나 확인하지 말고 겹치는 부분 끼리 점프해서 확인하자
## 이 때 겹치는 부분정도에 따라서 점프할 거리를 미리 계산(table)
## 백준: 1786
def make_table(s):
    """Return the KMP failure table for `s`.

    table[i] is the length of the longest proper prefix of s[:i + 1] that is
    also a suffix of it; it tells the matcher how far to fall back on a
    mismatch.
    """
    table = [0] * len(s)
    matched = 0  # current prefix-suffix match length
    for idx in range(1, len(s)):
        while matched > 0 and s[idx] != s[matched]:
            matched = table[matched - 1]
        if s[idx] == s[matched]:
            matched += 1
        table[idx] = matched
    return table
def KMP(s, p):
    """Scan text `s` with the KMP automaton, printing 'find' once for every
    (possibly overlapping) occurrence of pattern `p`."""
    table = make_table(p)
    pat_end = len(p) - 1
    j = 0  # number of pattern characters currently matched
    for ch in s:
        # Fall back through the failure table until ch can extend a match.
        while j > 0 and ch != p[j]:
            j = table[j - 1]
        if ch == p[j]:
            if j == pat_end:
                # Complete match: report it, then resume from the longest
                # border so overlapping occurrences are still detected.
                j = table[j]
                print('find')
            else:
                j += 1
# Demo inputs: the pattern is searched for in the sample text below.
s = 'ABAABACABAACCABACABACABAACABACABAAC'
p = 'ABACABAAC'
KMP(s, p) | true |
c9e0ebb11f7cf0e05d7a70cf995a27fa0e40e8ec | Python | lkitty0302/Algorithm | /BOJ/1766.py | UTF-8 | 744 | 3.09375 | 3 | [] | no_license | import sys
import heapq
# BOJ 1766: order the problems so every prerequisite comes first and, among
# the currently available problems, the smallest number is solved first.
# This is a topological sort driven by a min-heap.
input = sys.stdin.readline  # rebound for faster line reads
n, m = map(int, input().split())
graph = [[] for _ in range(n+1)]   # graph[a]: problems requiring a first
check = [0 for _ in range(n+1)]    # in-degree (number of unmet prerequisites)
for i in range(m):
    a, b = map(int, input().split())
    graph[a].append(b)
    check[b] += 1
q = []
result = []
# Seed the heap with every problem that has no prerequisites.
for i in range(1, n+1):
    if check[i] == 0:
        heapq.heappush(q, i)
# NOTE(review): the outer loop is redundant -- the inner `while q` already
# drains the heap completely on its first iteration, so later iterations
# are no-ops.
for i in range(1, n+1):
    while q:
        num = heapq.heappop(q)
        check[num] = -1
        result.append(num)
        for j in range(len(graph[num])):
            check[graph[num][j]] -= 1
            # A successor becomes available once all its prerequisites are met.
            if graph[num][j] != 0 and check[graph[num][j]] == 0:
                heapq.heappush(q, graph[num][j])
            graph[num][j] = 0
print(*result) | true |
1fb8780c9aa79242844f6995841e02db5aaaacaa | Python | kwongjose/CodingChallenges | /Binary Search/python/solution1.py | UTF-8 | 695 | 3.765625 | 4 | [] | no_license | class Solution:
def search(self, nums: List[int], target: int) -> int:
start = 0
end = len(nums)-1
while start < end:
# get the mid value
mid = ((end - start+1) // 2) + start
# if the mid value is greater than the index the target is at least 1 to the left
# the target is between the start and the mid point so we can end at the mid
if nums[mid] > target:
end = mid - 1
else: # if the mid value is less than the target is between mid and end so we can start at the mid point
start = mid
return start if nums[start] == target else -1 | true |
0b8332e794a99111baf053025d9f77b3c5731a67 | Python | KennethJacobsen/EEGANN-NTNU | /ANN/dNN.py | UTF-8 | 13,824 | 3.03125 | 3 | [] | no_license | """
Created on 22 Mar 2019
@author: Christian Ovesen, KSVJ
"""
# Data structure [[Train data], [Train answer], [Test data], [Test answer]
from time import time
import timeit
import logging
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, CuDNNLSTM, Conv2D, BatchNormalization, Flatten, Conv1D, \
MaxPooling2D, Activation
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import load_model
import numpy as np
from collections import Counter
class DynamicNeuralNet(object):
    """
    classdocs
    Creates and trains a neural net of the type and size of your choosing
    """
    def __init__(self):
        # Command for opening TensorBoard: "tensorboard --logdir=logs/"
        self.batchSize = 1
        self.tb = TensorBoard(log_dir="logs/{}".format(time()), histogram_freq=1, batch_size=self.batchSize,
                              write_graph=True, write_grads=True, write_images=True, embeddings_freq=0)
        self.model = None        # compiled Keras model, built by createModel()
        self.scoreList = []      # last [loss, accuracy] from evaluateScore()
        self.trainTime = None    # wall-clock seconds of the last training run
        self.padding = 0
        self.neurons = None      # list of neuron counts, one entry per layer
        self.outs = 0
        self.fft = False
        self.wave = False
        self.filterSize = 0
        self.epochNumber = 0
        self.typeNet = None
        self.numberHiddenLayers = 0
        self.showNetSetup = False
        self.lossFunction = None
        # GPU availability is cached once; it selects CuDNN layers and
        # explicit /gpu:0 placement later on.
        if tf.test.is_gpu_available():
            self.gpu = True
        else:
            self.gpu = False
    # ------------------------------------------------- Creators -------------------------------------------------
    # Checks if gpu is available before running createModel()
    # @input typeNet: type of net to be created, string
    # @input data: data to train and test new model on
    # @input numberHiddenLayers: number of hidden layers, int
    # @input neurons: list of neuron values per layer, list of int
    # @input outs: number of outputs, int
    # @input fft: value to determine use of fft, boolean
    # @input wave: value to determine use of wavelet, boolean
    # @input epochNumber: number of epochs to be used for training, int default to 10
    # @input filterSize: size of filter to be used, int default to four
    # @input padding: value used to decide padding type from list, int default to zero
    # @input showNetSetup: value used to decide if setup should be logged, boolean default to False
    # @input lossFunction: loss function to use, string default to 'sparse_categorical_crossentropy'
    def create(self, typeNet, data, numberHiddenLayers, neurons, outs, fft, wave, epochNumber=10, filterSize=4,
               padding=0, showNetSetup=False, lossFunction="sparse_categorical_crossentropy"):
        self.padding = padding
        self.neurons = neurons
        self.outs = outs
        self.fft = fft
        self.wave = wave
        self.filterSize = filterSize
        self.epochNumber = epochNumber
        self.typeNet = typeNet
        self.numberHiddenLayers = numberHiddenLayers
        self.showNetSetup = showNetSetup
        self.lossFunction = lossFunction
        if self.gpu:
            with tf.device('/gpu:0'):
                self.createModel(data)
        else:
            self.createModel(data)
    # Constructor
    # Different types of NN supported (CuDNNLSTM, LSTM, Conv1D, Dense)
    # @input data: data to use for training and testing
    def createModel(self, data):
        self.model = Sequential()
        if self.showNetSetup:
            logging.error('\n TYPE: {} \n HIDDEN: {} \n NEURONS: {} '
                          '\n EPOCHS: {}'.format(self.typeNet, self.numberHiddenLayers, self.neurons, self.epochNumber))
        self.batchSize = self.batch(data)
        self.tb.batch_size = self.batchSize
        self.tb.update_freq = 'epoch'
        # NOTE(review): the `wave` branch and the default branch produce the
        # same input shape -- confirm whether wavelet input needs its own shape.
        if self.fft:
            inputShape = data[0].shape[1:]
        elif self.wave:
            inputShape = (len(data[0]), 1)
        else:
            inputShape = (len(data[0]), 1)
        paddingType = ["same", "valid"]
        # First (input) layer; the layer kind depends on self.typeNet.
        if self.typeNet == 'LSTM':
            if self.gpu:
                self.model.add(CuDNNLSTM(self.neurons[0], input_shape=inputShape, return_sequences=True,
                                         batch_size=self.batchSize, stateful=True))
            else:
                self.model.add(LSTM(self.neurons[0], input_shape=inputShape, return_sequences=True,
                                    batch_size=self.batchSize, stateful=True))
            self.model.add(Dropout(0.2))
        elif self.typeNet == "Conv1D":
            self.model.add(Conv1D(self.neurons[0], self.filterSize, padding=paddingType[self.padding],
                                  input_shape=inputShape, activation="relu", batch_size=self.batchSize))
            self.model.add(Dropout(0.2))
        elif self.typeNet == "Conv2D":
            self.model.add(Conv2D(self.neurons[0], (self.filterSize, 2), input_shape=inputShape))
            self.model.add(Activation('relu'))
            # self.model.add(MaxPooling2D(pool_size=(2, 2)))
        elif self.typeNet == "Dense":
            self.model.add(Dense(self.neurons[0], input_shape=inputShape, activation='relu', batch_size=self.batchSize))
            self.model.add(BatchNormalization())
            self.model.add(Dropout(0.2))
        else:
            logging.error("No valid NN type selected")
        # Hidden layers; the last LSTM layer must not return sequences.
        for x in range(0, self.numberHiddenLayers):
            if x == (self.numberHiddenLayers - 1) and self.typeNet == "LSTM":
                self.dynamicLayerCreator(lastLstm=True)
            else:
                self.dynamicLayerCreator()
        self.model.add(Flatten())
        # Output head + compile; the fft variant uses sigmoid/Adadelta.
        if self.fft:
            self.model.add(Dense(self.neurons[len(self.neurons)-1], activation='sigmoid'))
            self.model.add(Dense(self.outs, activation="sigmoid"))
            self.model.compile(loss="sparse_categorical_crossentropy", optimizer='Adadelta', metrics=['accuracy'])
        else:
            self.model.add(Dense(self.neurons[len(self.neurons)-1], activation='relu'))
            self.model.add(BatchNormalization())
            self.model.add(Dropout(0.2))
            self.model.add(Dense(self.outs, activation="softmax"))
            opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
            # Loss Functions:
            # kullback_leibler_divergence
            # sparse_categorical_crossentropy
            self.model.compile(loss=self.lossFunction, optimizer=opt, metrics=['accuracy'])
        self.train(data)
    # Creates a new layer for the model
    # @input lastLstm: set true if it is the last lstm layer, boolean default to False
    def dynamicLayerCreator(self, lastLstm=False):
        # NOTE(review): self.neurons is the per-layer *list* of unit counts,
        # yet it is passed directly as the unit argument below (unlike
        # createModel, which indexes self.neurons[0]).  This looks like a bug
        # (an indexed element was probably intended) -- confirm.
        if self.typeNet == "LSTM":
            if not lastLstm:
                if self.gpu:
                    self.model.add(CuDNNLSTM(self.neurons, return_sequences=True, batch_size=self.batchSize,
                                             stateful=True))
                else:
                    self.model.add(LSTM(self.neurons, return_sequences=True, batch_size=self.batchSize, stateful=True))
                self.model.add(BatchNormalization())
            else:
                if self.gpu:
                    self.model.add(CuDNNLSTM(self.neurons, batch_size=self.batchSize, stateful=True))
                else:
                    self.model.add(LSTM(self.neurons, batch_size=self.batchSize, stateful=True))
                self.model.add(BatchNormalization())
                self.model.add(Dropout(0.2))
        elif self.typeNet == "Conv1D":
            paddingType = ["same", "valid"]
            self.model.add(Conv1D(self.neurons, self.filterSize, activation="relu", padding=paddingType[self.padding],
                                  batch_size=self.batchSize))
            self.model.add(BatchNormalization())
            self.model.add(Dropout(0.2))
        elif self.typeNet == "Dense":
            self.model.add(Dense(self.neurons, activation='relu', batch_size=self.batchSize))
            self.model.add(BatchNormalization())
            self.model.add(Dropout(0.2))
        else:
            logging.error("No valid NN type selected")
    # ------------------------------------------------- Training functions -------------------------------------------------
    # Runs train function on model with timer
    # @input data: Data to use for training
    def train(self, data):
        # data = self.normalize([data[0], data[2]])
        if self.fft or self.wave:
            self.trainTime = timeit.Timer(lambda: self.fitModel(trainData=data[0], trainAnswer=data[1],
                                                                testData=data[2], testAnswer=data[3])).timeit(number=1)
        else:
            trainDataNP = np.array(data[0])
            trainAnswerNP = np.array(data[1])
            testDataNP = np.array(data[2])
            testAnswerNP = np.array(data[3])
            logging.debug(trainDataNP.shape)
            # Reshape to (samples, timesteps, 1) by swapping the first two axes.
            trainDataNP = trainDataNP.reshape((trainDataNP.shape[1], trainDataNP.shape[0], 1))
            testDataNP = testDataNP.reshape((testDataNP.shape[1], testDataNP.shape[0], 1))
            # NOTE(review): normalize() returns a *new* array, and rebinding the
            # loop variable discards it -- these two loops have no effect on
            # trainDataNP/testDataNP.  Confirm whether in-place normalization
            # was intended.
            for sample in trainDataNP:
                sample = self.normalize(sample)
            for sample in testDataNP:
                sample = self.normalize(sample)
            self.trainTime = timeit.Timer(lambda: self.fitModel(trainData=trainDataNP, trainAnswer=trainAnswerNP,
                                                                testData=testDataNP,
                                                                testAnswer=testAnswerNP)).timeit(number=1)
    # Starts training of model
    # @input trainData: training data, list
    # @input trainAnswer: training answers, list
    # @input testData: test data, list
    # @input testAnswer: test answers, list
    def fitModel(self, trainData, trainAnswer, testData, testAnswer):
        self.model.fit(trainData, trainAnswer, batch_size=self.batchSize, epochs=self.epochNumber,
                       validation_data=(testData, testAnswer), verbose=2)
        # validation_split=0.2, callbacks=[self.tb])
        # validation_data=(testDataNP, testAnswerNP), callbacks=[self.tb])
    # Evaluate model
    # @input evalData: evaluation data, list with list of data and list of answers
    def evaluateScore(self, evalData):
        self.scoreList = self.model.evaluate(evalData[0], evalData[1], verbose=0, batch_size=self.batchSize)
    # Returns scoring parameters for model
    # @input evalData: evaluation data, list with list of data and list of answers
    # @return: score values
    def modelScore(self, evalData):
        evalTime = timeit.Timer(lambda: self.evaluateScore(evalData)).timeit(number=1)
        # score [loss, accuracy, train time(in sec), evaluation time]
        score = [self.scoreList[0], self.scoreList[1], self.trainTime, evalTime]
        return score
    # ------------------------------------------------- Prediction functions ----------------------------------------------
    # Predicts answer from new data
    # @input data: data to predict on
    # @return: prediction
    def predictLive(self, data):
        if self.fft:
            npData = np.array(data)
            npData = npData.reshape((1, npData.shape[0], npData.shape[1], 1))
            return self.model.predict(npData, batch_size=self.batchSize)
        else:
            npData = np.array(data)
            npData = npData.reshape((npData.shape[1], npData.shape[0], 1))
            # Only the first sample is normalized here, matching what the
            # single-sample live path feeds to the model.
            npData[0] = self.normalize(npData[0])
            return self.model.predict(npData, batch_size=self.batchSize)
    # ------------------------------------------------- Data processing -------------------------------------------------
    # Normalizes training and testing data
    # @input data: data to normalize
    # @return: normalized data
    def normalize(self, data):
        # Min-max scaling to [0, 1] with a small epsilon guarding a flat signal.
        divideBy = max(data)-min(data)
        if divideBy == 0:
            divideBy = 0.0001
        data = (data - min(data)) / divideBy
        return data
    # Find highest batch size
    # @input data: data to find batch size from
    # @input maxBatchSize: the highest possible batch size, needs to be set to keep GPU to run out of memory,
    # int default set to 1000
    # return: highest possible batch size
    def batch(self, data, maxBatchSize=1000):
        bList = []
        # Collect divisors (below maxBatchSize) of the train (u == 0) and
        # test (u == 2) dataset dimensions ...
        for d in range(0, 2):
            if d == 0:
                u = 0
            else:
                u = 2
            if self.fft or self.wave:
                for x in data[u].shape[:1]:
                    for y in range(1, x):
                        if x % y == 0:
                            if y < maxBatchSize:
                                bList.append(y)
            else:
                for x in data[u].shape:
                    for y in range(1, x):
                        if x % y == 0:
                            if y < maxBatchSize:
                                bList.append(y)
        logging.debug('batch list: {}'.format(bList))
        # ... and keep the largest divisor seen at least twice (i.e. shared
        # by both datasets' dimension lists).
        retSize = max([item for item, count in Counter(bList).items() if count >= 2])
        return retSize
    # Saves model
    # @input fileName: where to store model, string default set do 'dNN_model.h5'
    def saveModel(self, fileName='dNN_model.h5'):
        self.model.save(fileName)
    # Loads model
    # @input fileName: where to load model from, string default set do 'dNN_model.h5'
    def loadModel(self, fileName='dNN_model.h5'):
        self.model = load_model(fileName)
| true |
e57aabed31c60d665c330afc442daedc2eb5c9fe | Python | ankitrana1256/ScreenRecorder | /ScreenRecorder.py | UTF-8 | 725 | 2.84375 | 3 | [] | no_license | import cv2
import numpy as np
import pyautogui
import time
import time

# Capture area (width, height); must match the frames passed to the writer.
screen_size = (1920, 1080)

# XVID-encoded AVI container, played back at 10 frames per second.
output = cv2.VideoWriter("filename.avi", cv2.VideoWriter_fourcc(*'XVID'), 10.0, screen_size)

fps = 120   # upper bound on the capture rate
prev = 0.0  # timestamp of the last captured frame

# Record until interrupted with Ctrl+C.  In the original, the cleanup calls
# sat after an infinite `while True` and could never execute; the pointless
# cv2.waitKey (no window is ever shown) is dropped as well.
try:
    while True:
        if time.time() - prev > 1.0 / fps:
            prev = time.time()
            # Only grab the screen when a frame is actually due; the original
            # took a screenshot on every iteration and discarded most of them.
            img = pyautogui.screenshot()
            frame = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2RGB)
            cv2.putText(frame, "Recorder", (960, 70), cv2.FONT_HERSHEY_TRIPLEX, 1, (106, 90, 205), 2)
            output.write(frame)
except KeyboardInterrupt:
    pass
finally:
    cv2.destroyAllWindows()
output.release() | true |
e18dbd36fbe5b9c5bf0b21db20fdd59f9d401de3 | Python | ngaumont/course-material | /exercices/109/solution.py | UTF-8 | 118 | 2.640625 | 3 | [] | no_license | def sets_common(l):
if len(l) == 0:
return set()
r = l[0]
for s in l:
r &= s
return r
| true |
76ed090194960cd0914d139da10b2be2eb354306 | Python | ObaidAshraf/Feature-Request-App | /db_controls.py | UTF-8 | 1,333 | 2.546875 | 3 | [
"MIT"
] | permissive | import json
import psycopg2 as pg
from urllib.parse import urlparse
# NOTE(review): database credentials are hard-coded in source control --
# move them to an environment variable or a secrets store.
DATABASE_URL = "postgres://cjjlbjdi:Gp4loacJ0QcwAhiDYpwMF8ADFy_7Kmex@baasu.db.elephantsql.com:5432/cjjlbjdi"
url = urlparse(DATABASE_URL)
# Maps the short client key used by callers to its column in the `reqs` table.
clients = {
    "a": "clienta",
    "b": "clientb",
    "c": "clientc"
}
def insert_feature(data, clientName):
    """Insert or update the feature-request payload for one client.

    data        payload stored as text in the client's column (stringified).
    clientName  one of the keys of `clients`; any other value raises KeyError.
    """
    # Whitelist the column name up front: only values from `clients` can ever
    # reach the SQL text, and the payload itself is passed as a bound
    # parameter instead of being concatenated into the statement (the
    # original built the whole query by string concatenation, which is an
    # SQL-injection vector and breaks on quotes in `data`).
    column = clients[clientName]
    conn = pg.connect(database=url.path[1:],
                      user=url.username,
                      password=url.password,
                      host=url.hostname,
                      port=url.port
                      )
    try:
        cur = conn.cursor()
        cur.execute("SELECT {} FROM reqs".format(column))
        cur.fetchall()
        if cur.rowcount == 0:
            # First write ever: create the single row.
            sql = "INSERT INTO reqs ({}) VALUES (%s)".format(column)
        else:
            sql = "UPDATE reqs SET {} = %s".format(column)
        cur.execute(sql, (str(data),))
        conn.commit()
    finally:
        # The original leaked the connection whenever the query raised.
        conn.close()
def get_all_features():
    """Return every row of the `reqs` table as a list of tuples."""
    conn = pg.connect(database=url.path[1:],
                      user=url.username,
                      password=url.password,
                      host=url.hostname,
                      port=url.port
                      )
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM reqs")
        rows = cur.fetchall()
        cur.close()
        return rows
    finally:
        # Close the connection even if the query fails (the original leaked
        # it on any exception between connect() and close()).
        conn.close()
d6128ea545989af75724e94fbc5994d88a56bb90 | Python | Browco/sampleContributions | /M2-Internship-Scripts/extract_geneFusions.py | UTF-8 | 5,012 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
__description__ = \
"""
This script was created to extract the fusion genes \
names from each prediction tool file according to each file caracteristics .
"""
__author__ = "Coralie Capron"
__date__ = "05.2020"
from argparse import ArgumentParser
#import json # read FG Pizzly prediction file
from os.path import join
import os
class ParseFusionFile():
    """Extract fusion-gene names from the output files of several fusion
    prediction tools (STAR-Fusion, Arriba, Squid, plus merged output)."""
    def __init__(self, fusion_file, file_truth=None):
        self.fusion_file = fusion_file
        # NOTE(review): the fallback is an empty *list*, but get_truth_set()
        # passes this value to open() -- confirm the intended default.
        self.file_truth = file_truth if file_truth is not None else []
    def get_truth_set(self):
        '''
        Get gene fusions from the truth set
        args : file_truth <file.dat>
        return : FG_truth <list>
        '''
        # Truth-set lines are '|'-separated; the fusion name is field 2.
        with open(self.file_truth, "r") as file:
            FG_truth = [line.strip().split("|")[1] for line in file]
        print("truth")
        print(FG_truth)
        return FG_truth
    def recognize_FGDetection_tool(self):
        '''
        Recognize by which tool the FG file was created
        args : fusion_file <file.tsv>
        return : toolName <str>
        '''
        # Identification relies solely on the header (first) line; returns
        # None when no known header matches.
        toolName = None
        with open(self.fusion_file) as ffile:
            first_line = ffile.readline()
            if first_line.startswith("#FusionName") and (len(first_line.strip().split("\t")) > 14):
                toolName = "STARFUSION"
            elif first_line.startswith("#gene1"):
                toolName = "ARRIBA"
            elif first_line.startswith("# chrom1"):
                toolName = "SQUID"
            # elif first_line.startswith("#FusionName") && (len(first_line.strip().split("\t")) <14:
            #     toolName = "TRINITYFUSION"
            # elif ffile.name.endswith(".json"):
            #     toolName = "PIZZLY"
        return toolName
    def get_predicted_FG_from_merged(self):
        ''' Retrieve the Gene fusions and tool used according to their specific
        columns in the different FG detection tools output. Gene Fusion will be named
        like the STAR-FUSION output : GENE1--GENE2
        args : fusion_file <file.tsv>
        dict_tool_FG <dict>
        '''
        gene_fusion = []
        dict_tool_FG = {}
        with open(self.fusion_file) as ffile:
            if os.path.getsize(self.fusion_file) != 0:
                # NOTE(review): `line not in gene_fusion` compares against the
                # *old* (empty) list bound before the comprehension runs, so
                # this duplicate check has no effect -- confirm.
                gene_fusion = [line.strip() for line in ffile if not line.startswith(".") and line not in gene_fusion]
                print(gene_fusion)
            else:
                gene_fusion = [] #if file is empty, no fusion genes are predicted
        dict_tool_FG["combinedPred"] = gene_fusion
        print("predicted")
        print(dict_tool_FG)
        return dict_tool_FG
    def get_predicted_FG(self):
        ''' Retrieve the Gene fusions and tool used according to their specific
        columns in the different FG detection tools output. Gene Fusion will be named
        like the STAR-FUSION output : GENE1--GENE2
        args : fusion_file <file.tsv>
        dict_tool_FG <dict>
        '''
        gene_fusion = []
        toolName = self.recognize_FGDetection_tool()
        dict_tool_FG = {}
        with open(self.fusion_file) as ffile:
            # Skip the header line identified by recognize_FGDetection_tool().
            next(ffile)
            if toolName == "STARFUSION":
                gene_fusion = [line.strip().split("\t")[0].upper()
                               for line in ffile]
            elif toolName == "ARRIBA":
                # Arriba stores the two partner genes in columns 1 and 2.
                gene_fusion = [line.strip().split("\t")[0].upper(
                ) + "--" + line.strip().split("\t")[1].upper() for line in ffile]
            elif toolName == "SQUID":
                # Squid stores "GENE1:GENE2" in column 12.
                gene_fusion = [line.strip().split("\t")[11].upper().replace(":","--") for line in ffile]
            # If Pizzly was used :
            # elif ffile.name.endswith(".json"):
            #     gene_fusion=[]
            #     jsonFile = json.load(ffile)
            #     i = 0
            #     FG_elements = jsonFile['genes']
            #     for i in FG_elements:
            #         i += 1
            #         gene_fusion.append(FG_elements["geneA"]["name"]+
            #         "--"+FG_elements["geneB"]["name"])
        dict_tool_FG[toolName] = gene_fusion
        return dict_tool_FG
    def write_fusionGenes(self,output):
        ''' Write a file with only fusion genes
        args : predicted_FG <dict>
        return : FG_file <list_FG.txt>
        '''
        # NOTE(review): `prefix` is assigned but never used.
        prefix=self.fusion_file
        fg_dict = self.get_predicted_FG()
        print(fg_dict)
        for tool, FGs in fg_dict.items():
            fusionGenes = {}
            basefile="list_FG_"+str(tool)+".txt"
            namefile= join(output,basefile)
            with open(namefile,"a+") as FG_file:
                for FG in FGs:
                    # Skip placeholders ('.'), self-fusions and multi-gene
                    # entries (containing a comma).
                    # NOTE(review): fusionGenes holds a single key per tool,
                    # so `FG not in fusionGenes.values()` only compares
                    # against the most recently accepted entry -- the
                    # de-duplication is almost certainly incomplete; confirm.
                    if FG not in fusionGenes.values() and (FG != "." and FG.split("--")[0]!= FG.split("--")[1]) and ("," not in FG):
                        fusionGenes[tool]=FG
                        FG_file.write(FG+"\n")
def main():
    # NOTE(review): `fusion_file` and `file_truth` are not defined anywhere at
    # module scope, so calling main() raises NameError.  The imported (but
    # unused) ArgumentParser was presumably meant to supply these values.
    parse = ParseFusionFile(fusion_file,file_truth)
if __name__ == "__main__":
    main()
| true |
3fe542561679bb1f1acd250f60ef384ae050fc8c | Python | TheoLong/NetAPP_P3 | /mongodb_setup.py | UTF-8 | 1,130 | 2.828125 | 3 | [] | no_license | """
Write code below to setup a MongoDB server to store usernames and passwords for HTTP Basic Authentication.
This MongoDB server should be accessed via localhost on default port with default credentials.
This script will be run before validating you system separately from your server code. It will not actually be used by your system.
This script is important for validation. It will ensure usernames and passwords are stored in the MongoDB server
in a way that your server code expects.
Make sure there are at least 3 usernames and passwords.
Make sure an additional username and password is stored where...
username = admin
password = pass
"""
from pymongo import MongoClient
# Connect to the local MongoDB instance with default credentials and store
# the credential fixtures in canvas.posts.
client = MongoClient('localhost', 27017)
db = client.canvas
post = db.posts

# NOTE(review): passwords are stored in plain text.  Acceptable for this
# validation fixture, but never do this with real credentials.
users = [
    {'username': 'yunfei', 'password': 'guoyunfei'},
    {'username': 'theo', 'password': 'theo'},
    {'username': 'none', 'password': 'none'},
    # The validator requires this exact admin account to exist.
    {'username': 'admin', 'password': 'pass'},
]

# One bulk insert instead of four separate round-trips to the server.
post.insert_many(users)
| true |
f89cb3707de0daf6c345443810b0a1ed68da357b | Python | EMBEDDIA/cross-lingual-summarization | /text-preprocessing/split-characters-dataset.py | UTF-8 | 333 | 2.71875 | 3 | [
"MIT"
] | permissive | from tqdm import tqdm
from contextlib import ExitStack

# Destination pattern for the three dataset splits.
_SPLIT_PATH = "output/split_characters/language-model-characters-{}.txt"

# Open each split file once; the original re-opened the output file in
# append mode for every single input line.
with ExitStack() as stack:
    outs = {mode: stack.enter_context(open(_SPLIT_PATH.format(mode), 'a'))
            for mode in ('train', 'valid', 'test')}
    # Line ind goes to: test when ind % 20 == 0, valid when ind % 10 == 0,
    # train otherwise (append mode preserves the original behavior on rerun).
    for ind, line in enumerate(tqdm(open("output/language-model-characters.txt"))):
        mode = 'train'
        if ind % 20 == 0:
            mode = 'test'
        elif ind % 10 == 0:
            mode = 'valid'
        outs[mode].write(line)
| true |
3ae42760ab4b379c406f403498a2f5402d5f7ae1 | Python | GunpreetAhuja/macc | /signup/utils.py | UTF-8 | 4,737 | 2.609375 | 3 | [] | no_license | from signup.models import Pcuser
from django.contrib.auth.models import User
from uuid import uuid4
import random, decimal
from random import randint
# Fixture pools used to build randomized test accounts.
# NOTE(review): several "providers" lack the leading '@' (e.g. "gmail.com"),
# so many generated e-mail addresses are not well-formed -- confirm intent.
MAIL_PROVIDERS = ("@yahoo.com", "gmail.com", "@outlook.com", "riseup.net", "rediffmail.com", "anything.com")
MAIL_IDS = ("name1", "name2", "name3", "name4", "name5", "name6", "name7", "name8", "name9", "name10")
NAMES_LIST = ("name1", "name2", "name3", "name4", "name5", "name6", "name7")
LOCATION_LIST = ("Location place 1", "Location place 2", "Location place 3", "Location place 4", "Location place 5")
GENDER_LIST = ("Male", "Female")
def create_random_admin():
    """Create and persist a Django ``User`` with randomized name and e-mail."""
    admin = User.objects.create_user(
        username=random.choice(NAMES_LIST).lower().strip() + uuid4().hex[:9],
        email=random.choice(MAIL_IDS).lower().strip() + random.choice(MAIL_PROVIDERS),
        password='correct_password',
    )
    admin.save()
    return admin
def create_random_pcuser():
    """Create a Django ``User`` plus its ``Pcuser`` profile with random data.

    The admin cannot login directly to the site: an account must be
    registered as a Pcuser (from the admin page) to do so.  This helper can
    be called any number of times for testing purposes.
    """
    account = User.objects.create_user(
        username=random.choice(NAMES_LIST).lower().strip() + uuid4().hex[:9],
        email=random.choice(MAIL_IDS).lower().strip() + random.choice(MAIL_PROVIDERS),
        password='correct_password',
    )
    profile = Pcuser.objects.create(
        user=account,
        location=random.choice(LOCATION_LIST),
        phone=randint(100000000, 9999999999),
        gender=random.choice(GENDER_LIST),
        reset_pass='1',
        verified='1',
    )
    profile.save()
    return profile
#functions with hard coded data to personally look up
def create_known_admin():
    """Create an admin with fixed, already-known credentials.

    Usable only once: a second call violates the username uniqueness
    constraint and raises an error.
    """
    admin = User.objects.create_user(
        username='tester',
        email='testeremail@gmail.com',
        password='correct_password',
    )
    admin.save()
    return admin
def create_known_pcuser():
    """Create a ``Pcuser`` with fixed, already-known credentials (single use)."""
    account = User.objects.create_user(
        username='onetimename',
        email='onetimeemail@gmail.com',
        password='correct_password',
    )
    profile = Pcuser.objects.create(
        user=account,
        location='Known location',
        phone='1234567890',
        gender='Female',
        reset_pass='1',
        verified='1',
    )
    profile.save()
    return profile
def get_admins_ordered_alphabetically():
    """Return every ``User`` ordered alphabetically by username."""
    return User.objects.all().order_by('username')
#Note : All pcusers are users, but every user is not a pcuser
def get_pcusers_ordered_alphabetically():
    """Return all Pcusers ordered alphabetically by their backing User's username."""
    return Pcuser.objects.all().order_by('user__username')
def search_admins(username, email):
    """Search admins by optional username/email substrings.

    A falsy argument skips that filter, so search_admins(None, None)
    returns every admin and search_admins('yo', None) returns the admins
    whose username contains 'yo'.
    """
    matches = User.objects.all()
    criteria = (
        ('username__contains', username),
        ('email__contains', email),
    )
    for lookup, needle in criteria:
        if needle:
            matches = matches.filter(**{lookup: needle})
    return matches
def search_pcusers(username, email, location, phone, gender):
    """Search Pcusers by optional substrings of each field.

    A falsy argument skips that filter; with all arguments None every
    Pcuser is returned.  Example: search_pcusers(None, None, None, None,
    'M') returns all male Pcusers (substring match on the gender field).
    """
    matches = Pcuser.objects.all()
    criteria = (
        ('user__username__contains', username),
        ('user__email__contains', email),
        ('location__contains', location),
        ('phone__contains', phone),
        ('gender__contains', gender),
    )
    for lookup, needle in criteria:
        if needle:
            matches = matches.filter(**{lookup: needle})
    return matches
def delete_random_admins():
    """Delete every randomly generated admin (username starting with 'name').

    Call after tests finish to avoid leaving fixture users behind.
    Returns the queryset of Users that remain.
    """
    User.objects.all().filter(username__startswith='name').delete()
    return User.objects.all()
def delete_random_pcusers():
    """Delete every randomly generated Pcuser (backing username starts with 'name').

    Call after tests finish to avoid leaving fixture users behind.
    Returns the queryset of Pcusers that remain.
    """
    Pcuser.objects.all().filter(user__username__startswith='name').delete()
    return Pcuser.objects.all()
| true |
f89119f36c4b035e73cc6b7d7b02c6bb25af43d3 | Python | adrianoff/big_data_yandex_course | /curs1/week6/td_idf/tf_idf_testing.py | UTF-8 | 1,755 | 2.90625 | 3 | [] | no_license | import math
# Hand-computed reference TF-IDF value for "ibm": tf = 2/6 (it appears twice
# among the six words of the first corpus document below) times idf =
# 1/log(2), the idf variant used by tfidf() further down.  (Python 2 print.)
ibm = (2.0/6.0) * (1.0/math.log(2.0))
print ibm
from sklearn.feature_extraction.text import TfidfVectorizer
# Toy corpus of four single-line "documents".
corpus = [
    "ibm vipusk first computer computer ibm",
    "computer system linux kernel kernel linux",
    "windows microsoft vipusk system okna",
    "windows mac grant klava linux dos"
]
# Reference TF-IDF computation with scikit-learn, to compare against the
# hand-rolled tfidf() implementation below.
vectorizer = TfidfVectorizer(min_df=1)
X = vectorizer.fit_transform(corpus)
idf = vectorizer.idf_
#print dict(zip(vectorizer.get_feature_names(), idf))
feature_names = vectorizer.get_feature_names()
# Print every non-zero TF-IDF score of document index 1
# ("computer system linux kernel kernel linux").  (Python 2 print syntax.)
doc = 1
feature_index = X[doc,:].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
for w, s in [(feature_names[i], s) for (i, s) in tfidf_scores]:
    print w, s
print "#########"
from textblob import TextBlob as tb
def tf(word, blob):
    """Term frequency: fraction of the words in *blob* equal to *word*."""
    occurrences = blob.words.count(word) * 1.0
    total_words = len(blob.words) * 1.0
    return occurrences / total_words
def n_containing(word, bloblist):
    """Number of documents in *bloblist* whose word list contains *word*."""
    hits = 0
    for document in bloblist:
        if word in document.words:
            hits += 1
    return hits
def tfidf(word, blob, bloblist):
    """TF-IDF of *word* in *blob*, with idf = 1 / log(1 + document count)."""
    def idf(word, bloblist):
        return 1.0 / math.log(1.0 + n_containing(word, bloblist))
    return tf(word, blob) * idf(word, bloblist)
# The same corpus as above, wrapped in TextBlob objects so tfidf() can use
# the .words attribute.
document1 = tb("ibm vipusk first computer computer ibm")
document2 = tb("computer system linux kernel kernel linux")
document3 = tb("windows microsoft vipusk system okna")
document4 = tb("windows mac grant klava linux dos")
bloblist = [document1, document2, document3, document4]
# Print the three highest-scoring words of each document.
for i, blob in enumerate(bloblist):
    print("Top words in document {}".format(i + 1))
    scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
    sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    for word, score in sorted_words[:3]:
        # NOTE(review): the trailing argument 5 is unused by this format
        # string -- presumably a leftover from round(score, 5); confirm.
        print("\tWord: {}, TF-IDF: {}".format(word, score, 5))
17759ee45598c82fda01ee19e8ea45d40b287d85 | Python | thearn/Rimworld_roof_editor | /rim_map_roof.py | UTF-8 | 3,687 | 2.671875 | 3 | [] | no_license | import numpy as np
import base64, shutil
from PIL import Image
# Two-character roof-cell codes from the RimWorld save's roofGrid payload,
# mapped to small integer roof types (3 = open sky ... 0 = overhead mountain).
bytes2sky_type = {
    '\x00\x00': 3,  # empty sky
    '+\x1a': 2,     # thin rock
    '\r\x14': 1,    # constructed
    'D*': 0,        # overhead mountain
}
# Inverse lookup: roof type integer -> two-character cell code.
type2bytes = {sky_type: cell_code for cell_code, sky_type in bytes2sky_type.items()}
class RimMapRoof(object):
    """Parse and edit the <roofGrid> section of a RimWorld save file (.rws).

    The roof grid is stored as base64 text; every decoded cell is a
    two-byte code (see bytes2sky_type).  The grid can be exported to and
    re-imported from a greyscale image for editing in an image editor.
    NOTE(review): written for Python 2 -- load() opens the file in 'rb'
    mode yet mixes its lines with str literals, and array2code() joins
    str codes before base64-encoding; under Python 3 these raise TypeError.
    """
    # Class-level defaults, populated by load()/array2code().
    mapsize = None      # (m, n) map dimensions parsed from initialMapSize
    pre_roof = None     # file text preceding the roof payload
    roof_code = None    # full re-encoded file text (built by array2code)
    post_roof = None    # file text following the roof payload
    fname = None        # path of the save file being edited
    roof_array = None   # 2-D numpy array of roof-type integers
    def __init__(self, fn):
        """Back up the save file at *fn*, then parse it."""
        self.fname = fn
        self.name = self.fname.split(".")[0]
        # Keep a "<fn>_backup" copy before any editing touches the file.
        backup_name = fn + "_backup"
        shutil.copy(fn, backup_name)
        self.load()
    def load(self):
        """Parse the save: map size, surrounding text, and the roof grid."""
        code = []        # raw base64 lines of the roofGrid payload
        save = False     # True while inside the <roofGrid> element
        before = True    # True until the first roofGrid tag is seen
        pre, post = '', '\r\n</roofs>\r\n'
        with open(self.fname, 'rb') as f:
            for line in f:
                # Map dimensions come from a "(m, _, n)" style triple on the
                # initialMapSize line; only the 1st and 3rd numbers are kept.
                if 'initialMapSize' in line:
                    step = line.split('(')
                    step = step[1].split(")")[0]
                    m,_,n = step.split(",")
                    self.mapsize = (int(m), int(n))
                # The roofGrid tag appears twice (open/close): toggle capture.
                if 'roofGrid' in line:
                    save = not save
                    before = False
                if save:
                    #print line.split(" ")
                    code.append(line.strip())
                else:
                    if before:
                        pre += line
                    else:
                        post += line
        # Re-open the tags that will wrap the regenerated payload on save.
        pre += '\t\t\t\t<roofGrid>\r\n\t\t\t\t\t<roofs>\r\n'
        self.roof_array = np.zeros(self.mapsize).flatten()
        self.pre_roof = pre
        self.post_roof = post
        # Drop the surrounding tag lines, then decode in 8-char base64 chunks.
        code = code[2:-1]
        top = ''.join(code)
        chunks, chunk_size = len(top), 8
        codes = [ top[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
        self.map_code_counts = {}   # histogram of raw two-byte cell codes
        self.numcells = 0           # total number of roof cells decoded
        idx = 0
        for c in codes:
            c = base64.b64decode(c)
            # Each decoded roof cell is two bytes wide.
            cells = [c[k:k+2] for k in range(0,len(c),2)]
            for cell in cells:
                if cell in self.map_code_counts:
                    self.map_code_counts[cell] += 1
                else:
                    self.map_code_counts[cell] = 1
                self.roof_array[idx] = bytes2sky_type[cell]
                idx += 1
                self.numcells += 1
        self.roof_array = self.roof_array.reshape(self.mapsize)
    def array2code(self):
        """Re-encode roof_array into full save-file text in self.roof_code."""
        roof_array = self.roof_array.flatten()
        hexcodes = [type2bytes[i] for i in roof_array]
        # Base64-encode three 2-byte cells (6 bytes) per chunk.
        codes = [hexcodes[i:i+3] for i in range(0, len(hexcodes), 3)]
        s = ''
        for code in codes:
            s += base64.b64encode(''.join(code))
        # Wrap the payload at 100 characters per line with CRLF endings.
        ss = s[1:]
        self.roof_code = self.pre_roof + s[0] + '\r\n' + '\r\n'.join([ss[i:i+100] for i in range(0, len(ss), 100)]) + self.post_roof
    def save(self, fn):
        """Write the re-encoded save file to *fn*."""
        self.array2code()
        with open(fn, 'wb') as f:
            f.write(self.roof_code)
    def write_image(self, fn=None):
        """Export the roof grid as a greyscale image (default <name>.bmp)."""
        if not fn:
            fn = self.name + ".bmp"
        self.array2code()
        # Scale roof types 0..3 to 0..255; [::-1] flips rows for image coords.
        data = self.roof_array[::-1] / 3.0 * 255
        data = data.astype(np.uint8)
        im = Image.fromarray(data, mode='L')
        im.save(fn)
    def read_image(self, fn):
        """Import a greyscale image, mapping 0..255 back to roof types 0..3."""
        im = Image.open(fn)
        im = im.convert('L')
        data = np.fromiter(iter(im.getdata()), np.float32)
        data.resize(self.mapsize[0], self.mapsize[1])
        # Undo the vertical flip and the 0..255 scaling done by write_image().
        data = data[::-1]/255. * 3
        data = data.astype(np.uint8)
        self.roof_array = data
if __name__ == '__main__':
    # Manual smoke test: parse a save file (a *_backup copy is made on load).
    save_file = 'Pottstown.rws'
    roof_editor = RimMapRoof(save_file)
    # roof_editor.read_image('Untitled.bmp')
| true |
7faef446a1c9d6ef6af9be5f1a483e9ec2d711e7 | Python | Vaishnavi6520/Python-Program | /DAY11/menu2.py | UTF-8 | 1,254 | 3.53125 | 4 | [] | no_license | import collections
import re
# Interactive console menu: collect two employee records with light regex
# validation of salary/phone/pincode, then optionally view them merged.
print("Select an option from menu :")
print("\n")
print("1. Add_Employee")
print("2. View_Employee")
choice=int(input("Enter the choice"))
li=[]  # collected employee records, one dict per employee
if(choice==1):
    # Collect exactly two employee records.
    for i in range(2):
        # NOTE(review): `dict` shadows the builtin of the same name.
        dict={}
        print("Add_Employee details")
        dict["name"]=input("Enter the Employee Name :")
        dict["id"]=input("Enter the ID :")
        dict["designation"]=input("Enter the Designation :")
        salary=input("Enter the salary :")
        # NOTE(review): this pattern only checks that the FIRST character is
        # a digit ("12ab" passes); invalid salaries are silently dropped.
        amount=re.search("^[0-9]",salary)
        if amount:
            dict['salary']=salary
        dict["address"]=input("Enter the address :")
        phone=input("Enter the phone no. :")
        # Accepts a 10-digit mobile number starting with 6-9.
        validation_number=re.search("^[6-9]\d{9}$",phone)
        if validation_number:
            dict["phone"]=phone
        pincode=input("Enter the pincode :")
        # Accepts a 6-digit PIN (first digit non-zero), with an optional
        # single whitespace after the first 3 digits.
        validation_pincode=re.search("^[1-9]{1}[0-9]{2}\\s{0,1}[0-9]{3}$",pincode)
        if validation_pincode:
            dict["pincode"]=pincode
        li.append(dict)
    c=int(input("2. View Employee"))
    if(c==2):
        print("View employee is selected :")
        # Print adjacent records merged; ChainMap keeps the first dict's
        # value for any duplicate key.
        for i in range(len(li)-1):
            combi_dict=collections.ChainMap(li[i],li[i+1])
            print(combi_dict)
else:
    print("Wrong choice")
| true |
3b7a50c10ffb2bbc8beaa6b4e093db0442291b54 | Python | shiljatbl/rankingTool | /keywordScraper.py | UTF-8 | 3,140 | 2.6875 | 3 | [] | no_license | from selenium import webdriver
from bs4 import BeautifulSoup
import csv
from selenium.webdriver.chrome.options import Options
import time
import geolocation
from RankingTool.models import Product
import django
import os
def scrape_keyword(keyword):
    """Scrape up to 10 Amazon.de search-result pages for *keyword*.

    Builds a Product for every result found (ASIN, position, page, title,
    rating, price, image URL) and returns them as a list.  If Amazon serves
    its captcha page ("Zeichen eingeben"), scraping stops early and the
    results collected so far are returned.  (The original version never
    returned productList, discarding all scraped data.)
    """
    productList = []
    # URLs of the search-result pages to visit.
    pages = []
    # Chromedriver setup.
    options = Options()
    options.add_argument('--start-maximised')
    #options.add_argument('--headless')
    options.add_argument('--window-size=1920,1080')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(chrome_options=options)
    trigger = ""
    try:
        # Bug fix: the module is imported as `geolocation`, so the previous
        # `GeoLocation.set_location()` raised NameError at runtime.
        # NOTE(review): confirm `set_location` is the correct attribute name.
        geolocation.set_location()
        print("Retrieving data for " + keyword + "...")
        print("Retrieving data...")
        # amazon.de for Germany; switch the domain to amazon.com for the US.
        for x in range(1, 11):
            newUrl = "https://www.amazon.de/s?k=" + keyword.replace(" ", "+") + "&page=" + str(x)
            pages.append(newUrl)
        pageCounter = 1
        for p in pages:
            driver.get(p)
            soup = BeautifulSoup(driver.page_source, 'lxml')
            # Amazon's captcha interstitial carries a "captchacharacters" label.
            try:
                trigger = soup.find("label", {"for": "captchacharacters"}).get_text()
            except Exception:
                trigger = "OK"
            if trigger == "Zeichen eingeben":
                print("Captcha triggered. Scrape unsuccessful.")
                break
            result = soup.find_all("div", {"data-component-type": "s-search-result"})
            for r in result:
                newProduct = Product()
                try:
                    newProduct.asin = r.get("data-asin")
                except Exception:
                    newProduct.asin = "NoData"
                try:
                    newProduct.position = str(r.get("data-index"))
                except Exception:
                    # Bug fix: this branch previously overwrote .asin
                    # instead of setting .position.
                    newProduct.position = "NoData"
                try:
                    newProduct.page = str(pageCounter)
                except Exception:
                    newProduct.page = "NoData"
                try:
                    newProduct.title = r.find("span", {"class": "a-size-base-plus a-color-base a-text-normal"}).get_text()
                except Exception:
                    newProduct.title = "NoData"
                try:
                    newProduct.rating = r.find("span", {"class": "a-icon-alt"}).get_text()
                except Exception:
                    newProduct.rating = "NoData"
                try:
                    # Strip the span markup around the price text:
                    # slice [26:-9] for amazon.de ([27:-7] for amazon.com).
                    newProduct.price = str(r.find("span", {"class": "a-offscreen"}))[26:-9]
                except Exception:
                    newProduct.price = "NoData"
                try:
                    newProduct.image_url = r.find("img").get("src")
                except Exception:
                    newProduct.image_url = "NoData"
                productList.append(newProduct)
            pageCounter += 1
    finally:
        # quit() also terminates the chromedriver process; the old close()
        # only closed the window and leaked the driver on exceptions.
        driver.quit()
    if not trigger == "Zeichen eingeben":
        print("Scraping done!")
    return productList
aaf5856cd813927751d790689344d715f903f857 | Python | yongrl/LeetCode | /jianzhi_Offer/24. 数组中次数超过一半的数组.py | UTF-8 | 2,059 | 4.0625 | 4 | [] | no_license | '''
数组中有一个数字出现的次数超过数组长度的一半,请找出这个数字。
例如输入一个长度为9的数组{1,2,3,2,2,2,5,4,2}。
由于数字2在数组中出现了5次,超过数组长度的一半,因此输出2。如果不存在则输出0。
@Author: yongrl
Solution:
1. Loop over the number list and build a map from value to occurrence count,
then check whether any count exceeds half the length; time complexity is O(n).
2. Use the partition function (the core idea of quicksort) to find the element
that would sit at index length//2, then check how many times this value occurs.
'''
class Solution:
    """Find the element occurring more than half the time (Offer problem 24)."""
    def MoreThanHalfNum_Solution(self, numbers):
        """Return the majority element of *numbers* (count > len//2), else 0.

        Uses quickselect: partition until the pivot lands at the middle
        index, then verify the candidate is a true majority.
        NOTE: *numbers* is partially reordered in place.
        """
        # Guard: an empty list has no majority element.  Without this check
        # the original code raised IndexError inside partition().
        if not numbers:
            return 0
        return self.findmid(numbers, 0, len(numbers) - 1, len(numbers))
    def partition(self, arr, low, high):
        """Partition arr[low..high] around the pivot arr[low].

        Returns the pivot's final index; elements <= pivot end up to its
        left, elements >= pivot to its right.  Bug fix: the original scan
        never swapped out-of-place pairs, so inputs such as [2, 3, 1]
        (arr[i] > pivot while arr[j] < pivot) spun forever.
        """
        i = low + 1
        j = high
        pivot = low
        while True:
            # Advance i past elements that belong left of the pivot.
            while i <= j and arr[i] <= arr[pivot]:
                i = i + 1
            # Retreat j past elements that belong right of the pivot.
            while i <= j and arr[j] >= arr[pivot]:
                j = j - 1
            if i > j:
                break
            # arr[i] > pivot and arr[j] < pivot: swap both to the right side.
            self.swap(arr, i, j)
        # Move the pivot into its final sorted position.
        self.swap(arr, pivot, j)
        return j
    def swap(self, arr, i, j):
        """Exchange arr[i] and arr[j] in place."""
        tmp = arr[i]
        arr[i] = arr[j]
        arr[j] = tmp
    def findmid(self, arr, low, high, length):
        """Quickselect the element whose sorted position is length // 2."""
        pivot = self.partition(arr, low, high)
        while pivot != length // 2:
            if pivot < length // 2:
                return self.findmid(arr, pivot + 1, high, length)
            else:
                return self.findmid(arr, low, pivot - 1, length)
        # The pivot sits at the middle index: verify it is a real majority.
        if self.check(arr, pivot):
            return arr[pivot]
        else:
            return 0
    def check(self, arr, pivot):
        """Return True iff arr[pivot]'s value occurs more than len(arr)//2 times."""
        length = len(arr)
        count = 0
        for i in arr:
            if i == arr[pivot]:
                count += 1
        if count > length // 2:
            return True
        else:
            return False
print(Solution().MoreThanHalfNum_Solution([1,2,3,2,2,2,5,4,2])) | true |
33313e295a0430fd8ec1d72a0dfb42fe366d5802 | Python | Mong-Gu/PS | /baekjoon/1427.py | UTF-8 | 59 | 2.75 | 3 | [] | no_license | n = sorted(list(input()), reverse = True)
print(''.join(n)) | true |
48473ea78417cb64fcec2100a6ccbf6aebaf504b | Python | Sumbrella/aiqiyi_comments_contents | /_7Days/Day_5/GenerateCloud.py | UTF-8 | 784 | 2.765625 | 3 | [] | no_license | """
"""
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from _7Days.Day_5.CutWords import cut_words
from _7Days.Day_5.CountWords import countWords
fp = 'comments.txt'
# Read the raw comments.  Bug fix: the bare open(fp).read() leaked the file
# handle; a with-statement closes it promptly.
# NOTE(review): `text` is never used below -- the cloud is built from
# cut_words() instead; confirm whether the read is still needed.
with open(fp) as f:
    text = f.read()
# Mask image that shapes the rendered word cloud.
cloud_mask = np.array(Image.open("song.png"))
wordcloud = WordCloud(
    font_path='simhei.ttf',
    mask=cloud_mask,
    max_words=100,
    max_font_size=200,
    #font_step=1,
    background_color="white",
    #random_state=1,
    #margin=2,
    colormap='rainbow'
)
# Tokenise the comments and count word frequencies.
words = cut_words()
words_dict = countWords(words)
# Keep only words of length >= 2 (drops single-character tokens).
words = {word: num for word, num in words_dict.items() if len(word) >= 2}
wordcloud.fit_words(words)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
| true |
dd608cff8043413332c272d03dd6d4d75ff1aa65 | Python | CallumT45/Discord-Bot | /cogs/extraClasses/TicTacToe.py | UTF-8 | 8,073 | 3.390625 | 3 | [] | no_license | import discord
from discord.ext import commands
import requests
import random
import asyncio
class TicTacToe():
    """Discord tic-tac-toe rendered as an embed and played via reactions.

    board[1..9] holds keycap-digit emoji for free cells and X/O emoji for
    taken ones, so victory() can detect three-in-a-row by plain equality.
    mainGame() runs player-vs-computer; mainGamePvP() runs two players.
    """
    def __init__(self, player_letter, ctx, client, PvP):
        # board[0] is unused filler; board[1..9] start as keycap digits 1-9.
        self.board = ["___", '\u0031\u20E3', '\u0032\u20E3', '\u0033\u20E3', '\u0034\u20E3',
                      '\u0035\u20E3', '\u0036\u20E3', '\u0037\u20E3', '\u0038\u20E3', '\u0039\u20E3']
        self.turns = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # board indices still free
        self.player_letter = player_letter
        self.ctx = ctx          # command context used to send messages
        self.client = client    # bot client used to await reaction events
        self.rounds = 0         # 0 => first drawBoard() creates the message
        self.letter_dict = {'X': '\u274C', 'O': '\u2B55'}  # letter -> emoji
        if PvP:
            self.player2_letter = self.other_letter(player_letter)
        else:
            self.comp_letter = self.other_letter(player_letter)
    async def drawBoard(self):
        """Prints the board in the correct format (creates the embed on the
        first call, edits it in place afterwards)."""
        def string_format(L, M, R):
            text = f"{L}{M}{R}"
            return text
        tic_embed = discord.Embed(title='TicTacToe', color=0x00ff00)
        # Rows are rendered top (7-8-9) to bottom (1-2-3), numpad style.
        tic_embed.add_field(name=".", value=string_format(
            self.board[7], self.board[8], self.board[9]), inline=False)
        tic_embed.add_field(name=".", value=string_format(
            self.board[4], self.board[5], self.board[6]), inline=False)
        tic_embed.add_field(name=".", value=string_format(
            self.board[1], self.board[2], self.board[3]), inline=False)
        if self.rounds < 1:
            # First draw: send the embed and attach the nine digit reactions.
            self.game_board = await self.ctx.send(embed=tic_embed)
            for i in range(9):
                await self.game_board.add_reaction(emoji=self.board[1:][i])
        else:
            await self.game_board.edit(embed=tic_embed)
    def player_move(self, move, letter):
        """Updates the board with the players turn, removes that option from turns"""
        self.turns.remove(move)
        self.board[move] = self.letter_dict[letter]
    def other_letter(self, letter):
        """Return the opposing letter: 'O' for 'X' and 'X' for 'O'."""
        if letter == "X":
            return "O"
        else:
            return "X"
    def victory(self, board):
        """Returns True if any victory conditions are met"""
        # across the top
        return (((board[7] == board[8]) and (board[8] == board[9])) or
                # across the middle
                ((board[4] == board[5]) and (board[5] == board[6])) or
                # across the bottom
                ((board[1] == board[2]) and (board[2] == board[3])) or
                # down the left side
                ((board[1] == board[4]) and (board[4] == board[7])) or
                # down the middle
                ((board[2] == board[5]) and (board[5] == board[8])) or
                # down the right side
                ((board[3] == board[6]) and (board[6] == board[9])) or
                ((board[1] == board[5]) and (board[5] == board[9])) or # diagonal
                ((board[7] == board[5]) and (board[5] == board[3]))) # diagonal
    async def comp_move_ai(self):
        """Makes a copy of the board, then iterates through all the remaining turns,
        firstly to see if there is any move that will result in victory for the computer.
        Then to see if there are any moves which will see the player win, if so blocks that move. If no move will lead
        to victory then the computer randomly chooses its move"""
        # checking for victory move for computer, must come before block loop
        for i in range(len(self.turns)):
            test_board = self.board[:]
            test_board[self.turns[i]] = self.letter_dict[self.comp_letter]
            if self.victory(test_board):
                # winning move found: play it and remove the digit reaction
                await self.game_board.clear_reaction(emoji=self.board[self.turns[i]])
                self.board[self.turns[i]] = self.letter_dict[self.comp_letter]
                self.turns.remove(self.turns[i])
                # await self.drawBoard()
                return
        # checking for blocking move for comp
        for i in range(len(self.turns)):
            test_board = self.board[:]
            test_board[self.turns[i]] = self.letter_dict[self.player_letter]
            if self.victory(test_board):
                # the player could win here: occupy the cell to block them
                await self.game_board.clear_reaction(emoji=self.board[self.turns[i]])
                self.board[self.turns[i]] = self.letter_dict[self.comp_letter]
                self.turns.remove(self.turns[i])
                # await self.drawBoard()
                return
        # turns keeps track of options we have left, this line randomly chooses
        comp = random.choice(self.turns)
        await self.game_board.clear_reaction(emoji=self.board[comp])
        self.board[comp] = self.letter_dict[self.comp_letter]
        self.turns.remove(comp)
        # await self.drawBoard()
    async def mainGame(self):
        """Run the player-vs-computer game loop until win, draw, or timeout."""
        # NOTE(review): move_check is never used in this method -- it looks
        # like a leftover from a text-input version of the game; confirm.
        def move_check(m):
            return (m.content in list(map(lambda x: str(x), self.turns)) or m.content.lower() == "stop")
        def react_check(msg):
            # Accept only reactions from other users, on our board message,
            # using a digit emoji whose cell is still free.
            def check(reaction, reacting_user):
                return reacting_user != self.client.user and str(reaction.emoji) in self.board and reaction.message.id == msg.id and self.board.index(str(reaction.emoji)) in self.turns
            return check
        flag = True
        while flag: # while no victory is determined or while there are turns left to make
            self.rounds += 1
            try:
                reaction, user = await self.client.wait_for('reaction_add', timeout=45.0, check=react_check(self.game_board))
                await self.game_board.clear_reaction(emoji=str(reaction.emoji))
                move = self.board.index(str(reaction.emoji))
            # NOTE(review): bare except also swallows non-timeout errors and
            # reports them all as 'Timed Out!'.
            except:
                await self.ctx.send('Timed Out!')
                break
            else:
                self.player_move(int(move), self.player_letter)
                await self.drawBoard()
                if self.turns == [] or self.victory(self.board):
                    # if no moves left or victory reached, otherwise computers turn
                    flag = False
                    await self.drawBoard()
                else:
                    await asyncio.sleep(0.5)
                    await self.comp_move_ai()
                    await self.drawBoard()
                    if self.turns == [] or self.victory(self.board):
                        flag = False
        await self.ctx.send("Game Over!")
    async def player_move_code(self, player):
        """Wait for a reaction on a free cell and return its board index.

        NOTE(review): on timeout/error this sends 'Timed Out!' and falls
        through returning None (the `flag = False` below assigns a local
        that is never read), after which mainGamePvP() crashes on
        int(None) -- needs a real fix.
        """
        def react_check(msg):
            def check(reaction, reacting_user):
                return reacting_user != self.client.user and str(reaction.emoji) in self.board and reaction.message.id == msg.id and self.board.index(str(reaction.emoji)) in self.turns
            return check
        try:
            reaction, user = await self.client.wait_for('reaction_add', timeout=30.0, check=react_check(self.game_board))
            move = self.board.index(str(reaction.emoji))
            await self.game_board.clear_reaction(emoji=str(reaction.emoji))
            return move
        except Exception as e:
            print(e)
            await self.ctx.send('Timed Out!')
            flag = False
    async def mainGamePvP(self):
        """Run the two-player game loop until win or draw."""
        flag = True
        # NOTE(review): this initial value of `move` is never read -- it is
        # always overwritten by player_move_code() below.
        move = ""
        while flag: # while no victory is determined or while there are turns left to make
            self.rounds += 1
            move = await self.player_move_code("Player 1")
            self.player_move(int(move), self.player_letter)
            await self.drawBoard()
            if self.turns == [] or self.victory(self.board):
                # if no moves left or victory reached, otherwise player 2's turn
                flag = False
            else:
                move = await self.player_move_code("Player 2")
                self.player_move(int(move), self.player2_letter)
                await self.drawBoard()
                if self.turns == [] or self.victory(self.board):
                    flag = False
        await self.ctx.send("Game Over!")
| true |