blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1ff3675894d0f255f299c2b428d99c4435cc2a9b | Python | miiip/Encrypted-Image-Processing | /test_comparasion.py | UTF-8 | 437 | 3.59375 | 4 | [] | no_license | import math
def heaviside(x, r):
    """Smooth approximation of the Heaviside step: (1 + tanh(2**r * x)) / 2.

    Larger r sharpens the transition around x = 0.
    """
    scaled = math.tanh((2**r) * x)
    return 1/2 * (1 + scaled)
def sgn(x, r):
    """Smooth sign approximation derived from the smooth Heaviside: 2*H(x) - 1."""
    step = heaviside(x, r)
    return 2 * step - 1
def aprox_heaviside(x, r):
    """Iterative polynomial approximation of the Heaviside step.

    Applies x <- x * (1.9142 - x**2) for r rounds and returns the result.
    Fix: for r == 0 the original raised NameError because tmp was never
    assigned; now the input is returned unchanged (zero iterations).
    """
    tmp = x
    for i in range(r):
        tmp = x * (1.9142 - (x**2))
        x = tmp
    return tmp
def aprox_sgn(x, r):
    """Sign approximation built from the iterative Heaviside approximation."""
    return 2 * aprox_heaviside(x, r) - 1
# Quick sanity check: compare the iterative approximation against the
# tanh-based reference for a small negative input.
x = -0.01968
r = 6
print(aprox_heaviside(x,r))
print(sgn(x, r))
| true |
# Demonstrates multiplication, true division (/) and floor division (//).
a = 2
b = 5
# fix: "Demical" was a typo for "Decimal" in the printed labels
print('Result Of Multiply (a*b) Is:', a*b)
print('Result Of Decimal Division (b/a) Is:', b/a)
print('Result Of Correct Division (b/a) Is:', b//a)
print('Result Of Decimal Division (a/b) Is:', a/b)
print('Result Of Correct Division (a/b) Is:', a//b)
14400e20328a3403b3d860cadea7f9f822a01fa8 | Python | lancaster-university/rest-radio | /hub/auto_detector.py | UTF-8 | 1,248 | 2.734375 | 3 | [] | no_license | MICROBIT_VID = 3368
MICROBIT_PID = 516
from serial import Serial
import serial.tools.list_ports
from pathlib import Path
default_ports = ["/dev/ttyACM0", "/dev/ttyACM1", "/dev/ttyACM2", "/dev/ttyACM3"]
"""
We can autodetect the presence of a bridge micro:bit by resetting (sending a break) and waiting to see a SLIP_END character.
"""
def auto_detect_microbit():
    """Locate a bridge micro:bit among the attached serial ports.

    Candidates are found by USB VID/PID (or taken from legacy tuple-style
    port listings); each candidate is reset with a serial break and probed
    for a SLIP_END (0xC0) byte.  Falls back to the first existing path in
    default_ports when probing fails.
    """
    ports = list(serial.tools.list_ports.comports())
    micro_bits = []
    for p in ports:
        if isinstance(p, tuple):
            # older pyserial versions return (device, description, hwid) tuples
            micro_bits += [p[0]]
        elif p.vid == MICROBIT_VID and p.pid == MICROBIT_PID:
            micro_bits += [p.device]
    selected = None
    for mb in micro_bits:
        try:
            s = Serial(port=mb, baudrate=115200, timeout=5)
        except Exception:
            # port busy or vanished -- try the next candidate
            continue
        s.send_break()
        c = s.read(1)
        # check for slip end
        # fix: Serial.read() returns bytes on Python 3, so the original
        # comparison against chr(0xC0) (a str) could never be True
        if c == b'\xc0':
            selected = s
        if selected is not None:
            break
        # fix: close ports that are not the bridge so handles don't leak
        s.close()
    # if we get here, all hope is lost, pick from a pre determined set of ports.
    if selected is None:
        for port in default_ports:
            io_port = Path(port)
            if io_port.exists():
                selected = port
                break
    # NOTE(review): returns an open Serial object when auto-detected but a
    # plain path string from the fallback -- confirm callers handle both.
    return selected
4bed7cf79f6ae364e4ce8bb4cbc3e59e3512093c | Python | dileepchebolu/python | /Hackerrank/1.py | UTF-8 | 704 | 3.96875 | 4 | [] | no_license | ##If the python interpreter is running that module (the source file) as the main program, it sets the special __name__ variable to have a value “__main__”. If this file is being imported from another module, __name__ will be set to the module’s name. Module’s name is available as value to __name__ global variable.
# A module is a file containing Python definitions and statements. The file name is the module name with the suffix .py appended.
if __name__ == '__main__':
    n = int(input())
    # Odd numbers are always "Weird"
    if n % 2 != 0:
        print("Weird")
    elif n % 2 == 0:
        if n >= 2 and n <= 5:
            print("Not Weird")
        elif n >= 6 and n <= 20:
            print("Weird")
        elif n >= 20:
            # only reachable for n > 20 (n == 20 is caught by the branch above)
            print("Not Weird")
f0caea4e012985ee90e9ead33b2af4f0cf8f3fc5 | Python | caramck/q-complementary | /process_tuning_scripts.py | UTF-8 | 7,834 | 2.765625 | 3 | [] | no_license | #%%
#generate_tuning_scripts.py
#Purpose: In a directory, generate some given number of scripts optimizing a range of gamma from a given seed value with given coordinates
import os,sys
import subprocess
import shutil
import wheel
import numpy as np
import xlrd
import openpyxl
import matplotlib as plt
import seaborn as sns
import numpy as np
import pandas as pd
##########################################################
# Define which functionality this code will use #
##########################################################
# Mode flag from the first CLI argument: "a" = parse .out files, "b" = copy them
flag=sys.argv[1]
##########################################################
# generate directory of output files #
##########################################################
def generate_directory(cwd):
    """
    generate_directory: copy every ".out" file found under cwd into a new
    directory named "<last path component>_out" (created relative to the
    process working directory, as before).
    :param cwd: current working directory
    :return none
    """
    # make a new directory named after the current one
    names = cwd.split("/")
    new_dir = str(names[-1]) + "_out"
    os.mkdir(new_dir)
    # copy all files with ".out" into this directory
    file_string = ".out"
    for roots, dirs, files in os.walk(cwd):
        for file_name in files:
            if file_string in file_name:
                # fix: use shutil.copy with the file's real path instead of
                # os.system('cp ' + file_name + ...) -- the shell call was
                # unsafe for names with spaces and wrong for files found in
                # subdirectories (it only resolved names relative to cwd)
                shutil.copy(os.path.join(roots, file_name), new_dir)
##########################################################
# Define file_parser object #
##########################################################
#constructor
class file_parser:
    """
    One instance of file_parser parses all Q-Chem ".out" files in a directory
    and exports the collected tuning data (gamma, SCF and HOMO energies) to
    an Excel spreadsheet.
    """
    #Constructor
    def __init__(self,dir_name):
        """
        file_parser constructor
        :param dir_name: string. directory whose ".out" files will be parsed
        :return none
        """
        #static attributes
        self.dir_name=dir_name
    def search_all_files(self):
        """
        search_all_files: parses every ".out" file in self.dir_name, collects
        the results into a DataFrame and writes them to tuning.xlsx
        :return none
        """
        #TODO edit this and make it into a clean function
        def parse_file(file_name):
            """
            parse_file: searches one output file for the values of interest and
            returns them as a list:
            [file_name, gamma, neutral_scf, anion_scf, cation_scf,
             neutral_homo, anion_homo]
            """
            #Load output file
            p=open(file_name,"r")
            all_lines=p.readlines()
            p.close()
            #SCF value array (NOTE(review): never appended to -- apparently vestigial)
            SCFe=[]
            #HOMO value array (NOTE(review): never appended to -- apparently vestigial)
            HOMOe=[]
            s=open(file_name,"r")
            # This iterating process could be more efficient - put more thought into this.
            # Create anchors for each section
            # Parse output file by job section: job 2 is neutral, job 3 is cation, job 4 is anion
            sec_anchors=[]
            cur_line=1
            #running job tally
            jobs=0
            for line in s:
                #keep counter
                cur_line+=1
                #parse output file for job sections for all three jobs of interest
                if line.find("Running Job") !=-1:
                    jobs+=1
                    sec_anchors.append(cur_line)
            s.close()
            #Total lines in output file act as the final anchor
            sec_anchors.append(len(all_lines))
            cur_line=1
            #Pull out SCF and HOMO energies for each calculation of interest
            for i in range(jobs):
                #Look selectively in the range in file corresponding to that calculation
                #initiate homoArray outside of loop
                homoArray=[]
                for j in range(sec_anchors[i],sec_anchors[i+1]):
                    cur_line+=1
                    # the line after "$molecule" holds the "<charge> <multiplicity>" pair
                    if all_lines[j].find("$molecule") != -1:
                        charge=str(all_lines[j+1])
                    #search for SCF energy for that calculation
                    if all_lines[j].find("Total energy in the final basis set") != -1:
                        scf_line=str(all_lines[j])
                        split=scf_line.split()
                        SCF_energy=(float(split[-1]))
                    # search for HOMO energy: the last occupied orbital is printed
                    # on the line just before the "-- Virtual --" marker
                    if all_lines[j].find("-- Virtual --") != -1:
                        HOMOline=str(all_lines[j-1])
                        splitB=HOMOline.split()
                        homoArray.append(float(splitB[-1]))
                    #pull the corresponding gamma (range-separation parameter omega)
                    if all_lines[j].find("omega") != -1:
                        gamma_line=str(all_lines[j])
                        split=gamma_line.split()
                        gamma=(float(split[-1]))
                # Compare the saved HOMO energies, pick the highest if there are 2
                # (presumably the two spin channels -- TODO confirm)
                if len(homoArray)==2:
                    if homoArray[0]>homoArray[1]:
                        HOMO_energy=(homoArray[0])
                    else:
                        HOMO_energy=(homoArray[1])
                else:
                    HOMO_energy=(homoArray[0])
                # Classify the job by its "<charge> <multiplicity>" line
                if "0 1" in str(charge):
                    job_type=0
                    neutral_scf=SCF_energy
                    neutral_homo=HOMO_energy
                elif "-1 2" in str(charge):
                    job_type=1
                    anion_scf=SCF_energy
                    anion_homo=HOMO_energy
                elif "1 2" in str(charge):
                    job_type=2
                    cation_scf=SCF_energy
            s.close()
            #add results to array
            results_arr=[file_name,gamma,neutral_scf,anion_scf,cation_scf,neutral_homo,anion_homo]
            return results_arr
        def calc_homo_ip(homo,cation_scf,neutral_scf):
            """
            calc_homo_ip: absolute difference |HOMO + IP| where IP is the
            cation/neutral SCF gap.  Currently unused (see commented code below).
            """
            ip=cation_scf-neutral_scf
            homo_ip=np.absolute(homo+ip)
            return homo_ip
        def calc_lumo_ea(lumo,neutral_scf,anion_scf):
            """
            calc_lumo_ea: absolute difference |LUMO + EA| where EA is the
            neutral/anion SCF gap.  Currently unused.
            """
            ea=neutral_scf-anion_scf
            lumo_ea=np.absolute(lumo+ea)
            return lumo_ea
        #create dataframe headers
        df_array=[['file','gamma','scf_neutral_ha','scf_anion_ha','scf_cation_ha','homo_neutral_ha','homo_anion_ha']]
        #set file extension
        file_string=".out"
        #search all files in folder for this string; if found, parse and collect
        for roots, dirs, files in os.walk(self.dir_name):
            for file_name in files:
                if file_string in file_name:
                    print("Parsing "+str(file_name))
                    results=parse_file(self.dir_name+"/"+file_name)
                    df_array.append(results)
        #create dataframe
        df=pd.DataFrame(data=df_array)
        df.rename(columns=df.iloc[0])
        df.columns=df.iloc[0]
        #add homo +ip and lumo+ea columns to df
        #df["homo_ip"] = ((df["homo_neutral_ha"])+(df["scf_cation_ha"]-df["scf_neutral_ha"]))
        #df["homo_ip"] = (calc_homo_ip(df["homo_neutral_ha"],df["scf_cation_ha"],df["scf_neutral_ha"]))
        #print dataframe
        print(df)
        # NOTE(review): the two lines below use the module-level global "cwd"
        # rather than self.dir_name -- confirm this is intentional
        print(cwd+"/tuning.xlsx")
        #create excel document
        df.to_excel(cwd+"/tuning.xlsx")
        #create plot and save it
        #sns.lineplot(data=df, x="gamma", y="passengers")
##########################################################
#              Parse files in directory                  #
##########################################################
#find current working directory
cwd=os.getcwd()
# Mode "a": parse all .out files here and export tuning.xlsx
if flag=="a":
    #create file parser object
    parser=file_parser(cwd)
    #parse files in directory
    parser.search_all_files()
# Mode "b": copy all .out files into a sibling "<dir>_out" directory
if flag=="b":
    generate_directory(cwd)
# %%
| true |
c331170b4c2dfcdbc7de0f0c3493980bc5150c9a | Python | anna-is-bored/g_player | /player/models.py | UTF-8 | 1,491 | 2.65625 | 3 | [] | no_license | from django.db import models
class Artist(models.Model):
    """ Model representing an artist.
    I could add more fields in here like for example a country (but then I would have to create model country)
    """
    name = models.CharField(max_length=255)
    description = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True, blank=True)

    def __str__(self):
        return self.name

    def get_artists_total_songs_recent_played_truck(self):
        # Raw SQL: for each artist, return their name, the name of the most
        # recently played track, and the total number of their tracks.
        # the next step is to make it in more django ORMish way
        return Artist.objects.raw('SELECT player_artist.id, player_artist.name AS artist_name, '
                                  '(SELECT name FROM player_track WHERE artist_id = player_artist.id ORDER BY last_play DESC LIMIT 1) AS recently_played_track, '
                                  '(SELECT count(*) FROM player_track WHERE artist_id = player_artist.id GROUP BY artist_id) AS total_number_of_tracks '
                                  'FROM player_artist')
class Track(models.Model):
    """ Model representing a track."""
    name = models.CharField(max_length=255)
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
    duration = models.SmallIntegerField("Track Duration In Seconds")
    # null until the track is played for the first time
    last_play = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)

    class Meta:
        # index to speed up lookups of tracks by name
        indexes = [
            models.Index(fields=['name', ]),
        ]
| true |
186762164eb1aa7fa9bb4a745599991dbc2c6e29 | Python | SokolnikSergey/InstaBot | /codes/SettingsManager.py | UTF-8 | 1,533 | 2.828125 | 3 | [] | no_license | import configparser
class SettingsManager:
    """Loads bot settings from an INI file and exposes them by section."""

    def __init__(self, path_to_settings_file='../infrastructure/settings.ini'):
        """Read the INI file and cache the general ([DEFAULT]) and mail
        ([MAIL]) settings dictionaries."""
        self.__config_file = configparser.ConfigParser()
        self.__config_file.read(path_to_settings_file)
        self.__general_settings = {}
        self.__mail_settings = {}
        self.get_general_settings()
        self.get_mail_settings()

    def get_general_settings(self):
        """Populate the cached general-settings dict from the [DEFAULT] section."""
        default = self.__config_file['DEFAULT']
        self.__general_settings.update({"timer_between_operations": int(default['timer_between_operations'])})
        # note: the INI key is (mis)spelled 'timer_beetwen_refreshing'
        self.__general_settings.update({"timer_between_sub_unsub": int(default['timer_beetwen_refreshing'])})
        self.__general_settings.update({"login": default['login']})
        self.__general_settings.update({"password": default['password']})
        self.__general_settings.update({"amount_of_repeats": int(default['amount_repeats'])})

    def get_mail_settings(self):
        """Populate the cached mail-settings dict from the [MAIL] section."""
        self.__mail_settings.update({"login": self.__config_file['MAIL']['login']})
        self.__mail_settings.update({"password": self.__config_file['MAIL']['password']})

    # return a settings value by type ('general' or 'mail')
    def get_setting_value(self, type, name_of_parameter):
        """
        Look up a cached setting.

        :param type: 'general' or 'mail'
        :param name_of_parameter: key within the chosen settings dict
        :raises ValueError: for an unknown type (fix: previously this crashed
                            with an opaque TypeError on a None dict)
        """
        if type == 'general':
            settings = self.__general_settings
        elif type == 'mail':
            settings = self.__mail_settings
        else:
            raise ValueError("unknown settings type: %r (expected 'general' or 'mail')" % (type,))
        return settings[name_of_parameter]
| true |
def has_match(t1, t2):
    """Return True if any position (up to the shorter length) holds equal
    elements in t1 and t2."""
    return any(a == b for a, b in zip(t1, t2))

print(has_match('abc', ['a', 'b', 'c']))
# Connecting the webcam - a generic template for capturing video from your computer's webcam
# Run one of the two options:
# With a video file as argument: python cap05-04-connect_webcam_template.py --video videos/video1.mp4
# Without a video file argument (capture comes from your computer's webcam): python cap05-04-connect_webcam_template.py

# Imports
import argparse
import imutils
import cv2

# Command-line argument
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="Caminho (opcional) para o vídeo")
args = vars(ap.parse_args())

# If a video path was not supplied, use the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0) # If you have more than one webcam, change the index that selects it: 1, 2, etc...
# Otherwise, use the video file
else:
    camera = cv2.VideoCapture(args["video"])

# Loop over the video frames until the "q" key is pressed
while True:
    # Grab the current frame
    (grabbed, frame) = camera.read()
    # If we are reading a video and no frame was grabbed, we reached the end of the video
    if args.get("video") and not grabbed:
        break
    # Show the frame on screen
    cv2.imshow("Frame", imutils.resize(frame, width=600))
    key = cv2.waitKey(1) & 0xFF
    # If the 'q' key is pressed, stop the loop and end the script
    if key == ord("q"):
        break

# Release the camera and close the window
camera.release()
cv2.destroyAllWindows()
| true |
3f473a84e006127e596b5392f15477e7bc7259ee | Python | qingfengliu/statics_use | /python/数科项目/总结用/1.对sklearn的一些总结.py | UTF-8 | 4,583 | 2.84375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix,precision_recall_curve,roc_curve,mean_squared_error
from sklearn.metrics import precision_score, recall_score,accuracy_score,f1_score,roc_auc_score
from sklearn.datasets import fetch_openml
#绘制精度召回率 阈值图
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision/recall arrays have one more entry than thresholds: drop the last point
    plt.plot(thresholds, precisions[:-1], "b--", linewidth=2, label="Precision")
    plt.plot(thresholds, recalls[:-1], "g-", linewidth=2, label="Recall")
    plt.legend(loc="center right", fontsize=16)
    plt.xlabel("Threshold", fontsize=16)
    plt.grid(True)
    # plt.axis([-50000, 50000, 0, 1])  # uncomment to pin the axis limits
# Plot an ROC curve
def plot_roc_curve(fpr, tpr, label=None):
    """Plot an ROC curve plus the diagonal of a random classifier."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    # dashed diagonal = performance of a random classifier
    plt.plot([0, 1], [0, 1], 'k--')
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate (Fall-Out)', fontsize=16)
    plt.ylabel('True Positive Rate (Recall)', fontsize=16)
    plt.grid(True)
def plot_learning_curves(model, X, y):
    """Plot train/validation RMSE as a function of training-set size."""
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
    train_errors, val_errors = [], []
    # refit the model on ever-larger prefixes of the training data
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    # plot RMSE (sqrt of MSE) for both curves
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    plt.legend(loc="upper right", fontsize=14)
    plt.xlabel("Training set size", fontsize=14)
    plt.ylabel("RMSE", fontsize=14)
# Download MNIST; as_frame=False means X and y are numpy arrays, not DataFrames
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
# print(mnist.keys())
X, y = mnist["data"], mnist["target"]
# print(type(X))
# print(X.shape)
# print(y.shape)
y = y.astype(np.uint8)
some_digit = X[0]
# X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=42)
# MNIST's conventional split: first 60000 samples for training
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# Binary target: "is this digit a 5?"
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, random_state=42)
rnd_clf.fit(X_train, y_train_5)  # fit a random forest on the binary task
y_pred_train = rnd_clf.predict(X_train)  # predictions on the training set
y_pred_test = rnd_clf.predict(X_test)  # predictions on the test set
y_score_test = rnd_clf.predict_proba(X_test)  # per-sample scores, used for AUC/curves
# fix: with as_frame=False, X_train is a numpy array with no .columns
# attribute (the original line raised AttributeError); use pixel indices
feat_labels = np.arange(X_train.shape[1])
# feature importances from the random forest (of limited use for raw pixels)
importance = rnd_clf.feature_importances_
imp_result = np.argsort(importance)[::-1]
for j, i in enumerate(imp_result):
    print("%2d. %-*s %f" % (j+1, 30, feat_labels[i], importance[i]))
print('训练集准确率:', accuracy_score(y_train_5, y_pred_train))
print('训练集精度:', precision_score(y_train_5, y_pred_train))
print('训练集召回率:', recall_score(y_train_5, y_pred_train))
print('测试集准确率:', accuracy_score(y_test_5, y_pred_test))
print('测试集精度:', precision_score(y_test_5, y_pred_test))
print('测试集召回率:', recall_score(y_test_5, y_pred_test))
print('F1:', f1_score(y_test_5, y_pred_test))
print('auc:', roc_auc_score(y_test_5, y_score_test[:, 1]))
# Confusion-matrix layout:
# TN (true negatives)  FP (false positives)
# FN (false negatives) TP (true positives)
print('训练集混淆矩阵:', confusion_matrix(y_train_5, y_pred_train))
print('测试集混淆矩阵:', confusion_matrix(y_test_5, y_pred_test))
precisions, recalls, thresholds = precision_recall_curve(y_test_5, y_score_test[:, 1])
# precision/recall versus decision threshold
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
fpr, tpr, thresholds = roc_curve(y_test_5, y_score_test[:, 1])
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
plt.show()
plot_learning_curves(rnd_clf, X, y)
| true |
3ecbc51f88629f45db99ef2846dcb5fef4eb0667 | Python | kamyu104/LeetCode-Solutions | /Python/reverse-prefix-of-word.py | UTF-8 | 251 | 3.1875 | 3 | [
"MIT"
] | permissive | # Time: O(n)
# Space: O(1)
class Solution(object):
    def reversePrefix(self, word, ch):
        """
        Reverse the prefix of word ending at the first occurrence of ch;
        if ch does not occur, word is returned unchanged.
        :type word: str
        :type ch: str
        :rtype: str
        """
        cut = word.find(ch) + 1  # 0 when ch is absent -> empty prefix
        return word[:cut][::-1] + word[cut:]
| true |
4fac297642285a6d7cf3ce68894dc2de94ad40a9 | Python | rkruegs123/qcircuit-optimization | /utilities.py | UTF-8 | 6,529 | 2.75 | 3 | [
"MIT"
] | permissive | import networkx as nx
import matplotlib.pyplot as plt
from tqdm import tqdm
import itertools
import pdb
import sys
sys.path.append('../pyzx')
import pyzx as zx
from pyzx.utils import VertexType, EdgeType
def pyzx2nx(zx_graph):
    """Convert a pyzx graph to an undirected networkx Graph (vertices and edges only)."""
    nx_graph = nx.Graph()
    nx_graph.add_nodes_from(list(zx_graph.vertices()))
    nx_graph.add_edges_from(zx_graph.edges())
    return nx_graph
def is_graph_like(g):
    """Return True iff g satisfies the "graph-like" conditions used by pyzx:
    only Z/boundary vertices, Hadamard edges between Z-spiders, no self-loops,
    and a 1:1 pairing between each I/O boundary and a Z-spider."""
    # checks that all spiders are Z-spiders
    for v in g.vertices():
        if g.type(v) not in [VertexType.Z, VertexType.BOUNDARY]:
            return False
    for v1, v2 in itertools.combinations(g.vertices(), 2):
        if not g.connected(v1, v2):
            continue
        # Z-spiders are only connected via Hadamard edges
        if g.type(v1) == VertexType.Z and g.type(v2) == VertexType.Z \
           and g.edge_type(g.edge(v1, v2)) != EdgeType.HADAMARD:
            return False
    # FIXME: no parallel edges
    # no self-loops
    for v in g.vertices():
        if g.connected(v, v):
            return False
    # every I/O is connected to a Z-spider
    bs = [v for v in g.vertices() if g.type(v) == VertexType.BOUNDARY]
    for b in bs:
        if g.vertex_degree(b) != 1 or g.type(list(g.neighbors(b))[0]) != VertexType.Z:
            return False
    # every Z-spider is connected to at most one I/O
    zs = [v for v in g.vertices() if g.type(v) == VertexType.Z]
    for z in zs:
        b_neighbors = [n for n in g.neighbors(z) if g.type(n) == VertexType.BOUNDARY]
        if len(b_neighbors) > 1:
            return False
    return True
# enforces graph being graph-like
def to_graph_like(g):
    """Mutate g in place until it satisfies is_graph_like()."""
    # turn all red spiders into green spiders
    zx.to_gh(g)
    # simplify: remove excess HAD's, fuse along non-HAD edges, remove parallel edges and self-loops
    # FIXME: check that spider_simp does the above
    zx.spider_simp(g, quiet=True)
    # ensure all I/O are connected to a Z-spider
    bs = [v for v in g.vertices() if g.type(v) == VertexType.BOUNDARY]
    for v in bs:
        # if it's already connected to a Z-spider, continue on
        if any([g.type(n) == VertexType.Z for n in g.neighbors(v)]):
            continue
        # have to connect the (boundary) vertex to a Z-spider
        ns = list(g.neighbors(v))
        for n in ns:
            # every neighbor is another boundary or an H-Box
            assert(g.type(n) in [VertexType.BOUNDARY, VertexType.H_BOX])
            if g.type(n) == VertexType.BOUNDARY:
                # boundary-boundary edge: insert three Z-spiders
                # (NOTE(review): presumably the two HAD edges cancel so the
                # overall map is unchanged -- confirm)
                z1 = g.add_vertex(ty=zx.VertexType.Z)
                z2 = g.add_vertex(ty=zx.VertexType.Z)
                z3 = g.add_vertex(ty=zx.VertexType.Z)
                g.remove_edge(g.edge(v, n))
                g.add_edge(g.edge(v, z1), edgetype=EdgeType.SIMPLE)
                g.add_edge(g.edge(z1, z2), edgetype=EdgeType.HADAMARD)
                g.add_edge(g.edge(z2, z3), edgetype=EdgeType.HADAMARD)
                g.add_edge(g.edge(z3, n), edgetype=EdgeType.SIMPLE)
            else: # g.type(n) == VertexType.H_BOX
                # boundary-H-Box edge: one interposed Z-spider suffices
                z = g.add_vertex(ty=zx.VertexType.Z)
                g.remove_edge(g.edge(v, n))
                g.add_edge(g.edge(v, z), edgetype=EdgeType.SIMPLE)
                g.add_edge(g.edge(z, n), edgetype=EdgeType.SIMPLE)
    # each Z-spider can only be connected to at most 1 I/O
    vs = list(g.vertices())
    for v in vs:
        if not g.type(v) == VertexType.Z:
            continue
        boundary_ns = [n for n in g.neighbors(v) if g.type(n) == VertexType.BOUNDARY]
        if len(boundary_ns) <= 1:
            continue
        # add dummy spiders for all but one
        for b in boundary_ns[:-1]:
            z1 = g.add_vertex(ty=zx.VertexType.Z)
            z2 = g.add_vertex(ty=zx.VertexType.Z)
            g.remove_edge(g.edge(v, b))
            g.add_edge(g.edge(z1, z2), edgetype=EdgeType.HADAMARD)
            g.add_edge(g.edge(b, z1), edgetype=EdgeType.SIMPLE)
            g.add_edge(g.edge(z2, v), edgetype=EdgeType.HADAMARD)
    assert(is_graph_like(g))
def uniform_weights(g, elts):
    """Equal weight 1/len(elts) for every element; g is unused but kept for API symmetry."""
    weight = 1 / len(elts)
    return [weight] * len(elts)
def c_score(c):
    """Weighted gate-count cost: 10 per two-qubit gate plus 1 per single-qubit gate."""
    two_qubit = c.twoqubitcount()
    single_qubit = len(c.gates) - two_qubit
    # alternative previously considered: 4 * two_qubit + c.tcount()
    return 10 * two_qubit + single_qubit
def g_score(g):
    """Score a pyzx graph by fully reducing a copy, extracting a circuit,
    optimizing it, and applying c_score to the result."""
    g_tmp = g.copy()
    # FIXME: VERY EXPENSIVE. only to enable circuit extraction.
    # A better strategy would be to probailistically full_reduce
    zx.full_reduce(g_tmp)
    c = zx.extract_circuit(g_tmp.copy()).to_basic_gates()
    c = zx.basic_optimization(c)
    return c_score(c)
"""
total = len(c.gates)
single_qubit_count = total - c.twoqubitcount()
# return 4 * c.twoqubitcount() + c.tcount()
return 10 * c.twoqubitcount() + single_qubit_count
"""
if __name__ == "__main__":
N_QUBITS = 10
DEPTH = 300
c = zx.generate.CNOT_HAD_PHASE_circuit(qubits=N_QUBITS, depth=DEPTH, clifford=False)
g = c.to_graph()
# The below tests some of the networkx functionality
"""
nx_g = pyzx2nx(g)
nx.draw(nx_g)
plt.show()
"""
"""
Potentially useful networkx functions:
- nx.edge_load_centrality : maybe a good ranking function for pivoting
- nx.load_centrality : maybe good for local complementation
- See Algorithms -> Centrality in networkx manual
"""
# The below shows that it is basically never worth trying to directly extract a circuit from an arbitrary graph
"""
try:
c_orig = zx.Circuit.from_graph(g)
print("Can convert from original graph back to circuit")
except:
print("Can NOT convert from original graph back to circuit")
successes = 0
for _ in tqdm(range(1000)):
c = zx.generate.CNOT_HAD_PHASE_circuit(qubits=N_QUBITS, depth=DEPTH, clifford=False)
g = c.to_graph()
zx.full_reduce(g)
try:
c_opt = zx.Circuit.from_graph(g)
successes += 1
except:
continue
print(f"Number of successes: {successes}")
"""
# The below tests the graph-likeness utilities
for _ in tqdm(range(100), desc="Verifying equality with [to_graph_like]..."):
c = zx.generate.CNOT_HAD_PHASE_circuit(qubits=N_QUBITS, depth=DEPTH, clifford=False)
g = c.to_graph()
g1 = g.copy()
to_graph_like(g1)
zx.full_reduce(g1)
c1 = zx.extract_circuit(g1.copy())
assert(c.verify_equality(c1))
print("[to_graph_like] appears to maintain equality!")
| true |
ae6a02c1497eb945922a31163ce9c72431cda6b8 | Python | alanpaiva/python4linux | /HandsOn/Aula08/mongodb.py | UTF-8 | 1,542 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
from pymongo import MongoClient
client = MongoClient("127.0.0.1")
db = client["curso_python"]
# PARTE 3 - insert
#db.aulas.insert({"curso":"mongoDB","assunto":"replicacao","aula":5})
#aulas = db.aulas.find()
# PARTE 1 -
#aulas = db.aulas.find()
#for a in aulas:
# print a
# PARTE 2 - filtrar
#aulas = db.aulas.find()
#for a in aulas:
# print a.get("curso"),a.get("assunto")
# PARTE 4 - update
#db.aulas.insert({"curso":"cassandra","assunto":"replicacao","aula":5})
#db.aulas.update({"curso":"mongoDB"},{"$set":{"assunto":"backup"}})
#aulas = db.aulas.find()
#for a in aulas:
# print a.get("curso"),a.get("assunto")
# PARTE 5 - remover
#db.aulas.insert({"curso":"cassandra","assunto":"replicacao","aula":5})
#db.aulas.update({"curso":"mongoDB"},{"$set":{"assunto":"backup"}})
#db.aulas.remove({"cassandra":""}) ???????
#aulas = db.aulas.find()
#for a in aulas:
# print a.get("curso"),a.get("assunto")
# PARTE 6 -
#cursos = {"nome":"python","aulas":[
# {"aula":"basico"},
# {"aula":"estruturas de decisao"},
# {"aula":"funcoes"}
# ]
#
# }
#db.aulas.insert(cursos)
# PARTE 7 - add nova aula no dicionario acima
#aula = {"aula":"sqlalchemy"}
#db.aulas.update({"nome":"python"},
# {"$addToSet":{"aulas":aula}})
# PARTE 8 - remove an item from the embedded "aulas" list
# fix: the update operator must be "$pull" (an "@pull" key is treated as a
# literal field name and matches nothing), and the list items use the key
# "aula" (see PARTE 6/7), not "aulas"
db.aulas.update({"nome":"python"},
                {"$pull":{"aulas":{"aula":"basico"}}})
| true |
3cf0eff3ef6664dbcdb3ec71863814084d258c59 | Python | lukichevanton/pyneng | /15_module_re/task_15_5.py | UTF-8 | 2,450 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Задание 15.5
Создать функцию generate_description_from_cdp, которая ожидает как аргумент
имя файла, в котором находится вывод команды show cdp neighbors.
Функция должна обрабатывать вывод команды show cdp neighbors и генерировать на основании вывода команды описание для интерфейсов.
Например, если у R1 такой вывод команды:
R1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Eth 0/0 140 S I WS-C3750- Eth 0/1
Для интерфейса Eth 0/0 надо сгенерировать такое описание
description Connected to SW1 port Eth 0/1
Функция должна возвращать словарь, в котором ключи - имена интерфейсов, а значения - команда задающая описание интерфейса:
'Eth 0/0': 'description Connected to SW1 port Eth 0/1'
Проверить работу функции на файле sh_cdp_n_sw1.txt.
"""
#!/usr/bin/env python3
from pprint import pprint
import re
def generate_description_from_cdp(filename):
template = ['description Connected to {} port {}']
dic = {}
final = []
regex = re.compile(r'(?P<remdev>^\w+) +'
r'(?P<locintf>\w+ \S+).+ +'
r'(?P<remintf>\w+ \S+)')
with open(filename) as f:
for line in f:
result = regex.search(line)
if result:
remdev = result.group('remdev')
locintf = result.group('locintf')
remintf = result.group('remintf')
dic[locintf] = {}
final = ('\n'.join(template).format(remdev, remintf))
dic[locintf] = final
return(dic)
result = generate_description_from_cdp('sh_cdp_n_sw1.txt')
pprint(result)
'''
{'Eth 0/1': 'description Connected to R1 port Eth 0/0',
'Eth 0/2': 'description Connected to R2 port Eth 0/0',
'Eth 0/3': 'description Connected to R3 port Eth 0/0',
'Eth 0/5': 'description Connected to R6 port Eth 0/1'}
''' | true |
875d37a947c02229f6a1b2e58f4866d39d46753f | Python | gwen23/Projet5 | /README.md | UTF-8 | 2,091 | 3.125 | 3 | [] | no_license | #! /usr/bin/env python3
# coding: utf-8
# Projet5
## La startup Pur Beurre et l'utilisation de la base de données
## "Open Food Facts"
Dans le cadre du parcours développeur Python sur OpenClassrooms,
il nous est demandé de créer un programme qui interagirait avec
la base Open Food Facts pour en récupérer les aliments, les comparer
et proposer à l'utilisateur un substitut plus sain à l'aliment
qui lui fait envie.
## Base de données utilisée:
### _ phpMyAdmin
### http://localhost/phpmyadmin/index.php?route=/database/structure&server=1&db=projet5
## Langage de programmation utilisé:
### _ Python 3.8
### https://www.python.org/downloads/
##Tableau Trello:
### https://trello.com/b/gk49js1Q
## Lien du repository sur Github pour le téléchargement:
### https://github.com/gwen23/Projet5
## Packages nécéssaires:
###
- requests
- mysql-connector-python
## Description du programme:
### Les différentes classes :
#### _ Connection (contient les paramètres de connection à la base de données )
#### _ Menu ( contient le menu principal )
#### _ Program ( contient les sous menus
_ Choisir une catégorie, voir les choix enregistrés ou quitter.
_ Choix d'un produit.
_ Proposition des substituts et de l'enregistrement du choix ou non.
#### _ DbCreate (création et remplissage de la database)
#### _ Datacollect ( collecte les données des catégories et produits proposés)
#### _ Category ( pour l'identifiant, le nom et l'URL pour chaque catégorie )
#### _ Product ( pour toutes les caractéristiques des produits ))
## Lancement du programme:
### Depuis le fichier config, sélectionner puis exécuter :
#### " DbCreation.py " ( Création de la base de données ).
#### " APIreq.py " ( Collecte des données via l'API ).
#### " program.py " ( Exécution du programme ).
## Utilisation du programme:
### Différents menus.
#### _ Menu principal.
#### _ Choix parmi 5 catégories.
#### _ Choix du produit.
### Choix d'un subsitut.
#### _ Enregistrment ou non du substitut.
#### _ Consulter les choix enregistrés.
| true |
4a2a90fa4cf1214cb711be7ba1c59b831bebaaf2 | Python | Python3pkg/Norm | /norm/connection.py | UTF-8 | 1,784 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive |
from .rows import RowsProxy
class CursorProxy(object):
    """Thin wrapper around a DB-API cursor that yields dict-style rows."""

    def __init__(self, cursor):
        self.cursor = cursor

    def __getattr__(self, name):
        # Anything not overridden here is delegated to the raw cursor.
        return getattr(self.cursor, name)

    @property
    def column_names(self):
        """Column names of the current result set, or None when there is none."""
        description = self.description
        if description is None:
            return
        return [column[0] for column in description]

    def execute(self, query, params=None):
        # Accept either a plain SQL string or a query object carrying
        # .query / .binds attributes.
        if isinstance(query, str):
            return self.cursor.execute(query, params)
        return self.cursor.execute(query.query, query.binds)

    def fetchall(self):
        return RowsProxy(self.cursor.fetchall(), self.column_names)

    def fetchone(self):
        row = self.cursor.fetchone()
        if row is not None:
            return dict(zip(self.column_names, row))
        return row
class ConnectionProxy(object):
    """Wraps a DB-API connection; cursors it hands out are wrapped in cursor_proxy."""
    cursor_proxy = CursorProxy

    def __init__(self, conn):
        self.conn = conn

    def __getattr__(self, name):
        # Delegate everything else (commit, rollback, close, ...) to the raw connection.
        return getattr(self.conn, name)

    def cursor(self, *args, **kw):
        return self.cursor_proxy(self.conn.cursor(*args, **kw))

    def run_query(self, q):
        """Execute a query object (with .query / .binds) and fetch all rows.

        Fix: the original had a redundant `except: raise` clause that added
        nothing but noise; a plain try/finally keeps the cursor cleanup.
        """
        cur = self.cursor()
        try:
            cur.execute(q.query, q.binds)
            return cur.fetchall()
        finally:
            cur.close()

    def run_queryone(self, q):
        """Execute a query object and fetch a single row (or None)."""
        cur = self.cursor()
        try:
            cur.execute(q.query, q.binds)
            return cur.fetchone()
        finally:
            cur.close()
class ConnectionFactory(object):
    """Callable factory: each call opens a connection and wraps it in connection_proxy."""
    connection_proxy = ConnectionProxy

    def __init__(self, connection_maker):
        self.connection_maker = connection_maker

    def __call__(self):
        raw_connection = self.connection_maker()
        return self.connection_proxy(raw_connection)
| true |
8f8b700d6746adc8c83d507caeb57b495f8b510d | Python | callumfrance/brailler | /symbol_data/braille_mapping.py | UTF-8 | 1,455 | 3.34375 | 3 | [
"MIT"
] | permissive | """
This python script creates JSON and CSV files that specify information about
6-dot braille characters in unicode.
This is a helper script that is not directly used or imported in the
main brailler program.
"""
import unicodedata
import json
import csv
def csvify(in_list):
    """Join *in_list* into a single CSV row.

    Items after the first are wrapped in double quotes when they contain a
    space; the first item is emitted as-is (matching the original format).
    """
    cells = [str(in_list[0])]
    for item in in_list[1:]:
        text = str(item)
        if ' ' in text:
            text = '"' + text + '"'
        cells.append(text)
    return ','.join(cells)
if __name__ == '__main__':
    # Build one CSV row per 6-dot braille codepoint (U+2800 .. U+283F).
    row_export = ['SHIFTED_INDEX,UNICODE,HEX,UNICODE_NAME,INTEGER,SHIFTED_BINARY,UNICODE_CATEGORY',]
    for i in range(int('2800', 16), int('2840', 16)):
        row = [(i - 10240),
               chr(i),
               hex(i),
               unicodedata.name(chr(i)),
               i,
               format((i - 10240), "06b"),
               unicodedata.category(chr(i)),]
        row_export.append(csvify(row))
    with open("braille_unicode.csv", 'w') as f:
        f.write("\r\n".join(row_export))

    # Re-read the CSV and export it as ONE valid JSON array.
    # (The previous version dumped each row dict back-to-back followed by
    # ',\n', which produced a file that was not parseable JSON.)
    with open("braille_unicode.csv", 'r') as g:
        braille_dict_items = list(csv.DictReader(g))
    with open("braille_unicode.json", "w") as h:
        json.dump(braille_dict_items, h, indent=2)
| true |
1b727ab1f7977e9fc8bf7aeb98013c32a00cfe5b | Python | akramsey/python-excell | /count-capital-letters.py | UTF-8 | 221 | 3.3125 | 3 | [] | no_license | def countCapitalLetters(str):
count = 0;
for c in str:
if c.isupper():
count += 1
return count
# smoke check (Python 2 print statement); expected output: 14
print countCapitalLetters("asddfasdFASDFASFDAASDF")
print countCapitalLetters("THISisAtest") | true |
f765a7ac9c84aba51e6affbd1e291131d962fcfe | Python | klesnkri/sudoku-solver-aco | /sudoku/src/GUI/GUI.py | UTF-8 | 5,242 | 2.90625 | 3 | [
"MIT"
] | permissive | from GUI.GUIgrid import GUIgrid
import pygame
import time
def format_time(secs):
    """Format a duration in seconds as ``MM:SS:CC`` (CC = hundredths).

    Uses integer centisecond arithmetic so no field ever rounds past its
    range: the previous ``{:02.0f}`` formatting rounded e.g. 59.6 s to
    "00:60:60" instead of "00:59:60".
    """
    centis = int(round(secs * 100))
    minutes, rem = divmod(centis, 6000)
    seconds, hundredths = divmod(rem, 100)
    return "{:02d}:{:02d}:{:02d}".format(minutes, seconds, hundredths)
class GUI:
    """Pygame front end for the ant-colony sudoku solver.

    Owns the window, the GUIgrid renderer, the Start button and the
    elapsed-time bookkeeping, and drives the redraw / message / event loops.
    """
    def __init__(self, win_width, win_height, grid_size):
        # geometry only; pygame objects are created later in start()
        self.win_width = win_width
        self.win_height = win_height
        self.grid_size = grid_size
        self.win = None
        self.GUIgrid = None
        self.start_time = None
        self.start_button = None
    def start(self):
        """Initialise pygame, open the window and build the grid renderer."""
        pygame.init()
        pygame.font.init()
        self.win = pygame.display.set_mode((self.win_width, self.win_height))
        self.GUIgrid = GUIgrid(self.win_width, self.grid_size, self.win)
        pygame.display.set_caption("Ant colony optimization sudoku solver")
    def print_message(self, msg):
        """Render *msg* in the status area below the board."""
        # Message
        font = pygame.font.SysFont("dejavusansmono", 25)
        text_box = font.render(msg, True, (0, 0, 0))
        text_rect = text_box.get_rect()
        # centre of the right two thirds of the strip under the board
        x = 2 * self.win_width / 3 - text_rect.width / 2
        y = (self.win_height + self.win_width) / 2 - text_rect.height / 2
        # Delete old message (white-fill the message strip first)
        thick = 4
        pygame.draw.rect(self.win, (255, 255, 255), (
            self.win_width / 3, self.win_width + thick, 2 * self.win_width / 3,
            self.win_height - self.win_width - thick))
        # Print new message
        self.win.blit(text_box, (x, y))
        pygame.display.update()
    def final_screen(self, msg):
        """Show *msg* and block until quit (False) or Start clicked (True)."""
        self.print_message(msg)
        # Wait till window is closed or start pressed again
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return False
                elif event.type == pygame.MOUSEBUTTONDOWN and self.start_button.collidepoint(event.pos):
                    return True
    def screen_with_message(self, grid, msg="", wait=True, cycle=0, pher_matrix=None):
        """Redraw the board (and optional *msg*); optionally pause for input.

        Returns False when the window is closed, True when any click/key
        resumes; the paused interval is added back onto start_time so the
        displayed clock excludes the pause.
        """
        # Redraw
        self.redraw(grid, cycle, False, pher_matrix)
        # Print message
        if msg != "":
            self.print_message(msg)
        # If wait is True, wait till window is closed or start pressed again
        if wait:
            saved_time = self.start_time
            while True:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        return False
                    elif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:
                        self.start_time += (time.time() - saved_time)
                        return True
    def initial_screen(self, grid, msg):
        """Draw the initial board and wait for the Start button.

        Returns False if the window is closed first; otherwise starts the
        clock and returns True.
        """
        # Redraw screen
        self.redraw(grid, 0, True)
        self.print_message(msg)
        # Wait till start button is clicked
        start_button_clicked = False
        while not start_button_clicked:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return False
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    if self.start_button.collidepoint(event.pos):
                        start_button_clicked = True
                        break
        # Start counting time
        self.start_time = time.time()
        return True
    def redraw(self, grid, cycle=0, start=False, pher_matrix=None):
        """Repaint the whole window: stats strip, Start button and board.

        NOTE(review): the Start button rect is only (re)created when
        *start* is True, so initial_screen() must run before any redraw
        with start=False -- confirm callers respect that ordering.
        """
        self.win.fill((255, 255, 255))
        font = pygame.font.SysFont("dejavusansmono", 25)
        # Time (frozen at 0 on the initial screen)
        if start:
            play_time = 0
        else:
            play_time = time.time() - self.start_time
        text = "Time: " + format_time(play_time)
        text_box = font.render(text, True, (0, 0, 0))
        text_height = text_box.get_height()
        # vertical padding that spreads three text rows over the strip
        padding = (self.win_height - self.win_width - 3 * text_height) / 8
        x = 10
        y = self.win_width + padding
        self.win.blit(text_box, (x, y))
        # Fixed cells
        text = "Fixed cells: " + str(grid.fixed_cell_cnt)
        text_box = font.render(text, True, (0, 0, 0))
        y = y + text_height + padding
        self.win.blit(text_box, (x, y))
        # Cycle
        text = "Cycle: " + str(cycle)
        text_box = font.render(text, True, (0, 0, 0))
        y = y + text_height + padding
        self.win.blit(text_box, (x, y))
        # Start button (black border, white fill, centred label)
        text = "Start"
        text_box = font.render(text, True, (0, 0, 0))
        x = self.win_width / 6 - (text_box.get_width() + padding) / 2
        y = y + text_height + 2 * padding
        if start:
            self.start_button = pygame.Rect(x, y, text_box.get_width() + padding, text_box.get_height() + padding)
        pygame.draw.rect(self.win, (0, 0, 0), self.start_button)
        pygame.draw.rect(self.win, (255, 255, 255), (
            self.start_button.left + 3, self.start_button.top + 3, self.start_button.width - 6,
            self.start_button.height - 6))
        self.win.blit(text_box, (x + padding / 2, y + padding / 2))
        # Draw grid and board
        self.GUIgrid.draw(grid, pher_matrix, start)
        pygame.display.update()
    def end(self):
        """Shut pygame down."""
        pygame.font.quit()
        pygame.quit()
| true |
2183b4694dbdd042b29de975c62dd7165eabe3ca | Python | haiwenzhu/leetcode | /search_in_rotated_sorted_array.py | UTF-8 | 1,153 | 3.546875 | 4 | [] | no_license | class Solution:
"""
@see https://leetcode.com/problems/search-in-rotated-sorted-array/
"""
# @param A, a list of integers
# @param target, an integer to be searched
# @return an integer
    def search(self, A, target):
        """Return the index of *target* in rotated sorted list *A*, or -1.

        Phase 1 locates the pivot (index of the maximum element); phase 2
        picks the sorted half that could contain *target*; phase 3 is a
        plain binary search inside that half.
        """
        n = len(A)
        if n == 0:
            return -1
        i = 0
        j = n-1
        # phase 1: narrow [i, j] until mid sits on the rotation pivot
        while True:
            mid = (i+j) // 2
            if A[mid] < A[n-1]:
                # mid is in the right (smaller) segment -> pivot is left
                j = mid -1
            elif mid+1 < n and A[mid] < A[mid+1]:
                # still ascending -> pivot is to the right
                i = mid + 1
            else:
                break
        # phase 2: choose the half that can contain target
        if target > A[mid]:
            # larger than the maximum element -> cannot be present
            return -1
        elif target <= A[n-1]:
            # target lies in the right (post-pivot) segment
            i = mid + 1 if mid != n-1 else n-1
            j = n - 1
        else:
            # target lies in the left (pre-pivot) segment
            i = 0
            j = mid
        # phase 3: binary search within the chosen sorted half
        while i <= j:
            mid = (i+j) // 2
            if A[mid] == target:
                return mid
            elif A[mid] < target:
                i = mid + 1
            else:
                j = mid - 1
        return -1
if __name__ == "__main__":
    # empty input exercises the guard clause and prints -1
    print(Solution().search([], 1))
| true |
e37bd594cf80442962c1e23dbb0d060c6cb672c0 | Python | DenisMauricio/Escuela-DataScience | /programacion-dinamica-y-estocastica/camino_de_borrachos/camino_aleatorio.py | UTF-8 | 3,801 | 3.640625 | 4 | [] | no_license |
from borracho import BorrachoTradicional, BorrachoBailarin
from campo import Campo
from coordenada import Coordenada
from bokeh.plotting import figure, show
def caminata(campo, borracho, pasos):
    """Walk *borracho* around *campo* for *pasos* steps.

    Returns the distance between the drunkard's starting coordinate and
    the coordinate reached after taking every step.
    """
    punto_partida = campo.obtener_coordenada(borracho)
    for _ in range(pasos):
        campo.mover_borracho(borracho)
    return punto_partida.distancia(campo.obtener_coordenada(borracho))
def simular_caminata(pasos, numero_de_intentos, tipo_de_borracho):
    """Run *numero_de_intentos* independent walks of *pasos* steps each.

    For every attempt a fresh Campo is created, a drunkard of class
    *tipo_de_borracho* is placed at the origin, and the walk's final
    distance (rounded to one decimal) is recorded.

    Returns the list of final distances.
    """
    borracho = tipo_de_borracho(nombre='Franco')
    origen = Coordenada(0, 0)
    distancias = []
    for _ in range(numero_de_intentos):
        campo = Campo()
        campo.añadir_borracho(borracho, origen)
        distancias.append(round(caminata(campo, borracho, pasos), 1))
    return distancias
def graficar(x, y):
    """Render a steps-vs-distance line plot and open it as an HTML page."""
    figura = figure(title='Camino aleatorio', x_axis_label='pasos', y_axis_label='distancia')
    figura.line(x, y, legend='distancia media')
    show(figura)
def main(distancias_de_caminata, numero_de_intentos, tipo_de_borracho):
    """Run the full simulation and report statistics per walk length.

    Every walk length in *distancias_de_caminata* is simulated
    *numero_de_intentos* times; the mean, maximum and minimum final
    distances are printed and the means are plotted against the lengths.
    """
    medias = []
    for paso in distancias_de_caminata:
        distancias = simular_caminata(paso, numero_de_intentos, tipo_de_borracho)
        distancia_media = round(sum(distancias) / len(distancias), 4)
        distancia_maxima = max(distancias)
        distancia_minima = min(distancias)
        medias.append(distancia_media)
        print(f'{tipo_de_borracho.__name__} caminata aleatorio de {paso} pasos')
        print(f'Media = {distancia_media}')
        print(f'Maxima = {distancia_maxima}')
        print(f'Minima = {distancia_minima}')
    graficar(distancias_de_caminata, medias)
if __name__ == '__main__':
    # walk lengths to simulate and the number of trials per length,
    # consumed by the main(...) call on the following line
    distancias_de_caminata = [10, 100, 1000, 10000]
    numero_de_intentos = 100
main(distancias_de_caminata, numero_de_intentos, BorrachoTradicional) | true |
7109cb1dd4aba0a0494e9c080231bf4bc0ba0eb0 | Python | niterain/digsby | /digsby/src/tests/testgui/test_autodc.py | UTF-8 | 819 | 2.765625 | 3 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | '''
test AutoBufferedPaintDC
'''
import wx
def main():
    """Minimal wx repro: paint with AutoBufferedPaintDC, then create a
    GraphicsContext on top of it (the combination under test)."""
    a = wx.PySimpleApp()
    f = wx.Frame(None, -1, 'AutoBufferedPaintDC test')
    # custom background style is required for AutoBufferedPaintDC
    f.BackgroundStyle = wx.BG_STYLE_CUSTOM
    def paint(e):
        # the commented lines are the alternative DCs tried while narrowing
        # down the failure; only case 3 reproduced it
        #dc = wx.PaintDC(f) # 1) this one works
        #dc = wx.AutoBufferedPaintDC(f) # 2) this one works also
        dc = wx.AutoBufferedPaintDC(f) # 3) this one results in a traceback
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetBrush(wx.RED_BRUSH)
        dc.DrawRectangle(20, 20, 30, 30)
        gc = wx.GraphicsContext.Create(dc) # XXX the traceback occurs here
        gc.SetPen(wx.TRANSPARENT_PEN)
        gc.SetBrush(wx.BLUE_BRUSH)
        gc.DrawRectangle(40, 40, 30, 30)
    f.Bind(wx.EVT_PAINT, paint)
    f.Show()
    a.MainLoop()
if __name__ == '__main__':
main() | true |
222511ee0b5fc8f1e1fb99e2e1a2fc7d66f79d00 | Python | sabbir-ul-alam/pythonHackerRank | /nested Lists.py | UTF-8 | 1,052 | 3.125 | 3 | [] | no_license | if __name__ == '__main__':
student_list=[]
for _ in range(int(input())):
name = input()
score = float(input())
student=[name,score]
student_list.append(student)
for x in range(len(student_list)):
for y in range(len(student_list)):
if student_list[x][1]>=student_list[y][1]:
#print(student_list[x][1],student_list[y][1])
tmp=student_list[y]
student_list[y]=student_list[x]
student_list[x]=tmp
#print(student_list)
min=student_list[len(student_list)-1]
min2=-1
lowest=[]
for x in range(len(student_list)-1,-1,-1):
#print(student_list[x][1],min[1])
if student_list[x][1]>min[1]:
min2=student_list[x][1]
break
#print(min2)
if min2==-1:
for x in sorted(student_list):
print(x[0])
else:
lowest=[x for x in student_list if x[1]==min2]
#print(lowest)
res=sorted(lowest)
for x in res:
print(x[0]) | true |
bed9050ba32e215278c307f7e9fa58a52e346911 | Python | hossainarif726/DS_Algo_Python_Implementation | /DS & Algo/Graph/Binary Tree with built in module.py | UTF-8 | 149 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Build a three-node binary tree (root 5, left child 3, right child 7)
# with the third-party `binarytree` package and print its ASCII rendering.
from binarytree import Node
root = Node(5)
root.left = Node(3)
root.right = Node(7)
print(root)
| true |
428249259516db0997cec080fd98d1821eb09a7b | Python | MAPLE-Robot-Subgoaling/IPT | /data/HW3/hw3_121.py | UTF-8 | 677 | 4.15625 | 4 | [] | no_license | def main():
Temperature = float(input("Please enter the temperature: "))
kind = input("Enter 'K' for kelvin, or 'C' for celcius.")
if kind == "C":
if Temperature > 100:
print("Water is gas")
elif Temperature < 0:
print("Water is solid")
elif (Temperature > 0) and (Temperature < 100):
print("Water is liquid.")
if kind == "K":
Temperature = Temperature - 273.15
if Temperature > 100:
print("Water is gas")
elif Temperature < 0:
print("Water is solid")
elif (Temperature > 0) and (Temperature< 100):
print("Water is liquid.")
main()  # script entry point (no __main__ guard in the original)
| true |
6c36dbe64e4500c8401e5991df7dc9b2affc139a | Python | opsee/buildar | /buildar/pipeline/pipeline.py | UTF-8 | 3,540 | 2.859375 | 3 | [] | no_license | """
A Pipeline is a collection of Steps that are executed in the order
they are added to the Pipeline. Upon execution of a Step, the Pipeline
marks the step for rollback, s.t. if a step fails halfway through its
duties, it has the opportunity to attempt to clean everything up.
A Pipeline is also a Step and can be embedded in other pipelines.
"""
import traceback
#pylint: disable=unused-import
from buildar.pipeline.step import Step
from buildar.pipeline.builder import Builder
from buildar.pipeline.provision import Provisioner
from buildar.pipeline.ami import Imager
from buildar.pipeline.launch import Launcher
from buildar.pipeline.test import Tester
from buildar.pipeline.publish import Publisher
from buildar.pipeline.tag import Tagger
class Pipeline(Step):
    """Pipeline can be used to encapsulate a set of steps or used as a step
    itself. Steps' build phases are run in the order that they are added to a
    Pipeline with add_step, but the cleanup phases of those steps are executed
    in reverse order, s.t. the output that a step depends on from the previous
    step still exists in an unaltered state."""
    def __init__(self, **kwargs):
        super(Pipeline, self).__init__(**kwargs)
        # steps registered through add_step, in execution order
        self._steps = []
        # steps whose build() was attempted; cleaned up in reverse order
        self._executed = []
        self._failed = False
        # NOTE(review): StandardError exists only in Python 2; this line
        # would raise NameError under Python 3.
        self._exception_cause = StandardError()
        self.build_context = {}
    def add_step(self, step):
        """Add a step to the pipeline. Must implement the missing methods of
        the Step base class."""
        self._steps.append(step)
    def build(self, build_context):
        """Build iterates over the steps in the pipeline and executes them
        in order"""
        self.build_context = build_context
        current_step = ''
        try:
            for step in self._steps:
                current_step = type(step).__name__
                # record the step *before* building it so a partially built
                # step is still rolled back by cleanup()
                self._executed.append(step)
                self.build_context = step.build(self.build_context)
        except Exception as ex:
            print 'Build failed at step %s: %s' % (current_step, ex)
            self._failed = True
            traceback.print_exc()
            raise ex
        return build_context
    def cleanup(self, build_context):
        """Cleanup iterates over the steps that the pipeline attempted to
        execute in the reverse order that they were executed. It raises
        the exception of the failed build step if a step failed during
        build().

        Pipelines adhere to cleanup of all steps included in the pipeline. You
        can either set cleanup=True/False on individual steps or on the
        pipeline. This can also be done in combination. If you want to cleanup
        all but one step in a pipeline, you can set the Pipeline cleanup to
        True, and then set that individual step's cleanup to False."""
        # do_cleanup is presumably supplied by the Step base class -- TODO confirm
        if self.do_cleanup:
            self._executed.reverse()
            for step in self._executed:
                try:
                    # a failing cleanup is logged but never stops the rest
                    step.cleanup(self.build_context)
                except Exception as ex:
                    print 'Cleanup step %s failed: %s' % (type(step).__name__, ex)
                    traceback.print_exc()
    def execute(self, build_context):
        """Execute is a convenience function that ties build and cleanup together."""
        self.build_context = build_context
        try:
            self.build(build_context)
        except Exception as ex:
            # Cleanups must handle their own exceptions.
            # NOTE(review): the build exception is swallowed here after the
            # cleanup pass -- callers only ever see the printed message.
            self.cleanup(build_context)
| true |
4e22dd8b5dc2b0824b6c1592a1d55a218d7f5138 | Python | JunhuiJiang/MyCode | /python/深度学习/逻辑回归/deeplearning/classifiers/LogisticRegres.py | UTF-8 | 3,227 | 3.078125 | 3 | [] | no_license | import numpy as np
import math
import matplotlib.pyplot as plt
class LogisticRegres(object):
    """Logistic-regression classifier trained by (stochastic) gradient
    descent/ascent.  The train/predict methods assume numpy *matrix*
    inputs (``*`` is matrix multiplication) -- TODO confirm with callers.
    """
    def __init__(self):
        pass
    def sigmoid(self,inX):
        # logistic function; inX may be a scalar or a numpy matrix/array
        return 1.0 / (1+np.exp(-inX))
    def gradDescent(self,x_train,y_train):
        """Batch gradient descent; returns the learned (n, 1) weight vector.

        Assumes x_train is (m, n) and y_train is an (m, 1) label column.
        """
        m,n = x_train.shape
        alpha = 0.001
        maxEpoch = 5000
        weights = np.ones((n,1))
        for i in range(maxEpoch):
            value = self.sigmoid(x_train*weights)
            # log-loss gradient averaged over the m samples
            error =value-y_train
            grad=np.dot(x_train.transpose(),error)/m
            weights=weights -alpha*grad
        '''
        TODO: You should use grad descent method to complete the code.
        '''
        return weights
    def gradAscent(self,x_train,y_train):
        """Batch gradient ascent on the log-likelihood; returns (n, 1) weights.

        NOTE(review): uses 500 epochs and an un-normalised gradient, unlike
        gradDescent's 5000 epochs / mean gradient -- confirm intentional.
        """
        # y_train=-y_train
        #print y_train
        m,n=y_train.shape
        """
        for i in range(m):
            if y_train[i]==0:
                y_train[i]=1
            else:
                y_train[i]=0
        #print y_train
        """
        m,n = x_train.shape
        alpha = 0.001
        maxEpoch = 500
        weights = np.ones((n,1))
        for i in range(maxEpoch):
            value=self.sigmoid(x_train*weights)
            error=y_train-value
            grad=x_train.transpose()*error
            #print y_train.shape
            # print error.shape
            weights=weights+alpha*grad
        '''
        TODO: You should use grad ascent method to complete the code.
        '''
        return weights
    def stocGradDescent(self,x_train,y_train,numEpochs):
        """Stochastic gradient descent with a decaying learning rate.

        NOTE(review): `value` is computed from sum(x_train*weights), i.e.
        the whole training matrix, while the update uses the sampled row
        x_train[randIndex]; this looks like it should use the sampled row
        for `value` as well -- verify against the reference algorithm.
        NOTE(review): `index = range(m)` followed by `del(index[...])`
        only works on Python 2, where range() returns a list.
        """
        m,n = x_train.shape
        weights = np.ones((n,1))
        for j in range (numEpochs):
            index = range(m)
            for i in range(m):
                alpha=4/(1.0+j+i)+0.0001 #apha decreases with iteration,does not
                randIndex=int(np.random.uniform(0,len(index)))
                value = self.sigmoid(sum(x_train*weights))
                error = y_train[randIndex]-value
                #print alpha.shape
                weights=weights+alpha*error[0,0]*x_train[randIndex].transpose()
                del(index[randIndex])
        '''
        TODO: You should use stocGradDescent method to complete the code.
        '''
        return weights
    def predict(self,weights,x_test,y_test):
        """Return classification accuracy of *weights* on (x_test, y_test).

        A sample is predicted positive when sigmoid(x.w) > 0.5; y_test is
        read as a (1, m) row of 0/1 labels.
        """
        m,n = np.shape(x_test)
        rightCount = 0
        for i in range(m):
            if self.sigmoid(x_test[i,:]*weights) > 0.5:
                predict = 1
            else:
                predict = 0
            if predict == y_test[0,i]:
                rightCount += 1
        accuracy = float(rightCount)/ m
        return accuracy
    def plotBestFit(self,weights,x_train,y_train):
        """Scatter the two classes (red/blue) and draw the decision line
        w0 + w1*x1 + w2*x2 = 0 over the x1 range of the data."""
        m,n = np.shape(x_train)
        for i in range(m):
            if int(y_train[0,i]) == 0:
                plt.plot(x_train[i,1],x_train[i,2],'or')
            else:
                plt.plot(x_train[i,1],x_train[i,2],'ob')
        min_x = min(x_train[:, 1])[0, 0]
        max_x = max(x_train[:, 1])[0, 0]
        weights = weights.getA()  # convert mat to array
        y_min_x = float(-weights[0] - weights[1] * min_x) / weights[2]
        y_max_x = float(-weights[0] - weights[1] * max_x) / weights[2]
        plt.plot([min_x, max_x], [y_min_x, y_max_x], '-g')
        plt.xlabel('X1'); plt.ylabel('X2')
        plt.show()
| true |
457eaedfb449c349946759697aa69762af732d07 | Python | ooksang/PythonMaze | /Core/Search.py | UTF-8 | 11,548 | 3.21875 | 3 | [] | no_license | import heapq
from Core.Node import Node
import math
class Search(object):
    """Step-by-step maze search (DFS/BFS/UCS/A*/greedy) with visualisation.

    `mode` selects the algorithm: 1=DFS, 2=BFS, 3=UCS, 4/5=A* (Manhattan /
    Euclidean), 6/7=greedy (Manhattan / Euclidean).  Tile colours written
    into maze.tile_color: 1=frontier, 2=current, 3=visited, 4=final path.
    `Search.found` is a class attribute; it is presumably set elsewhere
    (e.g. by Node when the end node is expanded) -- TODO confirm.
    """
    found = None
    def __init__(self, start_node, end_node, maze): #Search constructor
        self.__maze = maze
        self.__start_node = start_node
        self.__end_node = end_node
        # __ds is the frontier container (stack / queue / heap / plain list
        # depending on the selected mode)
        self.__ds = []
        self.__prev_node = None
        self.mode = 0
        # 3-D visited flags indexed [i][j][k] over the maze volume
        self.__visited = [None] * maze.height
        for i in range(0,maze.height):
            self.__visited[i] = [None] * maze.length
            for j in range(0, maze.length):
                self.__visited[i][j] = [False] * maze.width
    def __reset(self): #method to reset the search
        """Clear all search state and re-initialise for the current mode."""
        Search.found = None
        Node.path = []
        self.__ds = []
        self.__start_node.cost = 0
        self.__prev_node = None
        self.__visited = [None] * self.__maze.height
        for i in range(0,self.__maze.height):
            self.__visited[i] = [None] * self.__maze.length
            for j in range(0, self.__maze.length):
                self.__visited[i][j] = [False] * self.__maze.width
        if self.mode == 1:
            self.__init_DFS()
        elif self.mode == 2:
            self.__init_BFS()
        elif self.mode == 3:
            self.__init_UCS()
        elif self.mode == 4 or self.mode == 5:
            self.__init_A_star()
        elif self.mode == 6 or self.mode == 7:
            self.__init_greedy()
        self.__maze.reset_colors()
    def __init_DFS(self):
        """Seed the DFS stack with the start node."""
        self.__ds.append(self.__start_node)
        self.__maze.print("DFS mode")
    def __next_DFS_step(self):
        """Pop one node from the stack and expand its unvisited children."""
        if self.__ds and Search.found == None:
            s = self.__ds.pop()
            self.__maze.print("(DFS mode) Visiting " + " " + str(s))
            self.__maze.tile_color[s.i][s.j][s.k] = 2
            if self.__prev_node != None:
                self.__maze.tile_color[self.__prev_node.i][self.__prev_node.j][self.__prev_node.k] = 3
            self.__prev_node = s
            if self.__visited[s.i][s.j][s.k] == False:
                self.__visited[s.i][s.j][s.k] = True
                children = s.get_children_nodes()
                if not children:
                    return 0
                for child in children:
                    if self.__visited[child.i][child.j][child.k] == False:
                        self.__ds.append(child)
                        self.__maze.tile_color[child.i][child.j][child.k] = 1
        elif Search.found != None:
            # goal reached: report the cost and paint the final path
            self.__maze.solved = True
            self.__maze.print("DFS cost is " + str(Search.found.get_path_cost()))
            print("DFS cost is " + str(Search.found.get_path_cost()))
            print("Path is:")
            for n in Node.path:
                print(n)
                self.__maze.tile_color[n.i][n.j][n.k] = 4
            print(" ")
        else:
            # frontier exhausted without reaching the goal
            self.__maze.print("No Solution")
            print("No Solution")
    def __init_BFS(self):
        """Seed the BFS queue; BFS marks nodes visited on insertion."""
        self.__ds.append(self.__start_node)
        self.__visited[self.__start_node.i][self.__start_node.j][self.__start_node.k] = True
        self.__maze.print("BFS mode")
    def __next_BFS_step(self):
        """Dequeue one node and enqueue its unvisited children."""
        if self.__ds and Search.found == None:
            s = self.__ds.pop(0)
            self.__maze.print("(BFS mode) Visiting " + " " + str(s))
            self.__maze.tile_color[s.i][s.j][s.k] = 2
            if self.__prev_node != None:
                self.__maze.tile_color[self.__prev_node.i][self.__prev_node.j][self.__prev_node.k] = 3
            self.__prev_node = s
            children = s.get_children_nodes()
            if not children:
                return 0
            for child in children:
                if self.__visited[child.i][child.j][child.k] == False:
                    self.__ds.append(child)
                    self.__maze.tile_color[child.i][child.j][child.k] = 1
                    self.__visited[child.i][child.j][child.k] = True
        elif Search.found != None:
            self.__maze.solved = True
            self.__maze.print("BFS cost is " + str(Search.found.get_path_cost()))
            print("BFS cost is " + str(Search.found.get_path_cost()))
            print("Path is:")
            for n in Node.path:
                print(n)
                self.__maze.tile_color[n.i][n.j][n.k] = 4
            print(" ")
        else:
            self.__maze.print("No Solution")
            print("No Solution")
    def __init_UCS(self):
        """Seed the UCS heap with (cost 0, start node)."""
        self.__ds.append((0,self.__start_node))
        self.__visited[self.__start_node.i][self.__start_node.j][self.__start_node.k] = True
        self.__maze.print("UCS mode")
    def __next_UCS_step(self):
        """Pop the cheapest (cost, node) pair and push its children."""
        if self.__ds and Search.found == None:
            s = heapq.heappop(self.__ds)
            self.__maze.print("(UCS mode) Visiting " + " " + str(s[1]))
            self.__maze.tile_color[s[1].i][s[1].j][s[1].k] = 2
            if self.__prev_node != None:
                self.__maze.tile_color[self.__prev_node.i][self.__prev_node.j][self.__prev_node.k] = 3
            self.__prev_node = s[1]
            if s[1].n != 'E':
                children = s[1].get_children_nodes()
                if not children:
                    return 0
                for child in children:
                    if self.__visited[child.i][child.j][child.k] == False:
                        self.__maze.tile_color[child.i][child.j][child.k] = 1
                        self.__visited[child.i][child.j][child.k] = True
                        # non-numeric tiles ('A'/'S'/'E') cost 1; otherwise
                        # node.n itself is the step cost
                        if s[1].n == "A" or s[1].n == 'S' or s[1].n == 'E':
                            heapq.heappush(self.__ds,(s[0] + 1, child))
                        else:
                            heapq.heappush(self.__ds,(s[0] + s[1].n, child))
        elif Search.found != None:
            self.__maze.solved = True
            self.__maze.print("UCS cost is " + str(Search.found.get_path_cost()))
            print("UCS cost is " + str(Search.found.get_path_cost()))
            print("Path is:")
            for n in Node.path:
                print(n)
                self.__maze.tile_color[n.i][n.j][n.k] = 4
            print(" ")
        else:
            self.__maze.print("No Solution")
            print("No Solution")
    def __init_A_star(self):
        """Seed the A* frontier with (cost 0, start node)."""
        self.__ds.append((0,self.__start_node))
        self.__visited[self.__start_node.i][self.__start_node.j][self.__start_node.k] = True
        self.__maze.print("A* mode with " + ("Manhattan distance" if self.mode == 4 else "Euclidean distance"))
    def __next_A_star_step(self):
        """Expand the frontier entry minimising cost-so-far + heuristic."""
        if self.__ds and Search.found == None:
            s = min(self.__ds, key=lambda o:o[0] + self.__heuristic(o[1]))
            self.__ds.remove(s)
            self.__maze.print("(A* mode with " + ("Manhattan distance" if self.mode == 4 else "Euclidean distance") + ") Visiting " + " " + str(s[1]))
            self.__maze.tile_color[s[1].i][s[1].j][s[1].k] = 2
            if self.__prev_node != None:
                self.__maze.tile_color[self.__prev_node.i][self.__prev_node.j][self.__prev_node.k] = 3
            self.__prev_node = s[1]
            if s[1].n != 'E':
                children = s[1].get_children_nodes()
                if not children:
                    return 0
                for child in children:
                    if self.__visited[child.i][child.j][child.k] == False:
                        self.__maze.tile_color[child.i][child.j][child.k] = 1
                        self.__visited[child.i][child.j][child.k] = True
                        if s[1].n == "A" or s[1].n == 'S' or s[1].n == 'E':
                            heapq.heappush(self.__ds,(s[0] + 1, child))
                        else:
                            heapq.heappush(self.__ds,(s[0] + s[1].n, child))
        elif Search.found != None:
            self.__maze.solved = True
            self.__maze.print("A* with " + ("Manhattan distance" if self.mode == 4 else "Euclidean distance") + " cost is " + str(Search.found.get_path_cost()))
            print("A* with " + ("Manhattan distance" if self.mode == 4 else "Euclidean distance") + " cost is " + str(Search.found.get_path_cost()))
            print("Path is:")
            for n in Node.path:
                print(n)
                self.__maze.tile_color[n.i][n.j][n.k] = 4
            print(" ")
        else:
            self.__maze.print("No Solution")
            print("No Solution")
    def __init_greedy(self):
        """Seed the greedy frontier with the start node (no cost tuples)."""
        self.__ds.append(self.__start_node)
        self.__visited[self.__start_node.i][self.__start_node.j][self.__start_node.k] = True
        self.__maze.print("Greedy mode with " + ("Manhattan distance" if self.mode == 6 else "Euclidean distance"))
    def __next_greedy_step(self):
        """Expand the frontier node with the smallest heuristic value."""
        if self.__ds and Search.found == None:
            s = min(self.__ds, key = lambda o : self.__heuristic(o))
            self.__ds.remove(s)
            self.__maze.print("(Greedy mode with " + ("Manhattan distance" if self.mode == 6 else "Euclidean distance") + ") Visiting " + " " + str(s))
            self.__maze.tile_color[s.i][s.j][s.k] = 2
            if self.__prev_node != None:
                self.__maze.tile_color[self.__prev_node.i][self.__prev_node.j][self.__prev_node.k] = 3
            self.__prev_node = s
            if s.n != 'E':
                children = s.get_children_nodes()
                if not children:
                    return 0
                for child in children:
                    if self.__visited[child.i][child.j][child.k] == False:
                        self.__maze.tile_color[child.i][child.j][child.k] = 1
                        self.__visited[child.i][child.j][child.k] = True
                        if s.n == "A" or s.n == 'S' or s.n == 'E':
                            heapq.heappush(self.__ds,child)
                        else:
                            heapq.heappush(self.__ds,child)
        elif Search.found != None:
            self.__maze.solved = True
            self.__maze.print("Greedy with " + ("Manhattan distance" if self.mode == 6 else "Euclidean distance") + " cost is " + str(Search.found.get_path_cost()))
            print("Greedy with " + ("Manhattan distance" if self.mode == 6 else "Euclidean distance") + " cost is " + str(Search.found.get_path_cost()))
            print("Path is:")
            for n in Node.path:
                print(n)
                self.__maze.tile_color[n.i][n.j][n.k] = 4
            print(" ")
        else:
            self.__maze.print("No Solution")
            print("No Solution")
    def __manhattan(self, n):
        # L1 distance from node n to the end node
        return abs(n.i - self.__end_node.i) + abs(n.j - self.__end_node.j) + abs(n.k - self.__end_node.k)
    def __Euc(self, n):
        # L2 distance from node n to the end node
        return math.sqrt(math.pow((n.i - self.__end_node.i),2) + math.pow((n.j - self.__end_node.j),2) + math.pow((n.k - self.__end_node.k),2))
    def __heuristic(self, n):
        # even modes (4, 6) use Manhattan, odd modes (5, 7) use Euclidean
        return self.__manhattan(n) if self.mode % 2 == 0 else self.__Euc(n)
    def __get_path(self): #method to return the path of the search
        Search.found.get_path_cost()
        return Search.found.get_path()
    def set_mode(self, mode):
        """Select the search algorithm and reset all state for it."""
        self.mode = mode
        self.__reset()
    def next_step(self):
        """Advance the currently selected algorithm by one expansion."""
        if self.mode == 1:
            self.__next_DFS_step()
        elif self.mode == 2:
            self.__next_BFS_step()
        elif self.mode == 3:
            self.__next_UCS_step()
        elif self.mode == 4 or self.mode == 5:
            self.__next_A_star_step()
        elif self.mode == 6 or self.mode == 7:
            self.__next_greedy_step()
| true |
ea709d42e00d72f2470f7c86dafc07cb56fcedd5 | Python | ntbrewer/pixie_ldf_MTAS | /MTAS_beam_monitor/calc_rate.py | UTF-8 | 12,403 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
"""This program takes an input file with a number of parameters required to
calculate the beam rate on the collection point"""
import sys
import math
import types
# first we check the command line and grab the module name
# (Python 2 script: note the print statements below)
if len(sys.argv) != 2:
    print "\nUsage:\n\t./mda.py configuration_file\n\t or"
    print "\tpython mda.py configuration_file\n"
    sys.exit()
# strip off the .py if it exists, since __import__ wants a module name
CF_FILE_NAME = None
if sys.argv[1][-3:] == ".py":
    CF_FILE_NAME = sys.argv[1][0:-3]
else:
    CF_FILE_NAME = sys.argv[1]
# prevent bytecode generation for the config file
ORIGINAL_SYS_DONT_WRITE_BYTECODE = sys.dont_write_bytecode
sys.dont_write_bytecode = True
# import the config file; the module must define a dict named INPUT
INPUT = __import__(CF_FILE_NAME).INPUT
# restore the dont_write_bytecode variable to its original value
sys.dont_write_bytecode = ORIGINAL_SYS_DONT_WRITE_BYTECODE
def main():
    """check the input file for sane values then do work"""
    # fatal sanity checks (test_list_types / test_scalar_types are defined
    # further down in this file, past this excerpt)
    test_list_types()
    test_scalar_types()
    test_list_lengths()
    # add the input entry of effective collection time: the part of each
    # collection window that actually accumulates activity
    INPUT["Effective Collection Time"] = INPUT["Collection Time"] -\
        INPUT["Voltage Rampdown Time"]
    # non-fatal sanity checks:
    test_value_sanity()
    # now call the function that does stuff
    calculate_beam_rates()
def calculate_beam_rates():
    """This function takes the information in the input file and calculates
    the resulting beam rates from that information"""
    # construct the efficiency object
    eff_obj = HPGeEfficiency(INPUT["HPGe Efficiency Function"])
    # calculate the effeciencies (fractional) for every gamma line
    eff_set = [eff_obj.calculate_frac_eff(energy) for energy in
               INPUT["Gamma Energies"]]
    # calculate the total number of decays: area / (branching * efficiency)
    total_decays = [area/(inten*eff) for (area, inten, eff) in
                    zip(INPUT["Gamma Areas"], INPUT["Gamma Branchings"],
                        eff_set)]
    # now calculate the number of collection periods
    num_collections = calc_collections()
    # now correct for the fact that 1 in every x cycles has no collection
    # if no correction is present that is fine as well
    factor = 1.0
    try:
        factor = (INPUT["Data to Bg Ratio"]/(INPUT["Data to Bg Ratio"] + 1.0))
    except KeyError:
        # "Data to Bg Ratio" is an optional input entry
        print "\n\tMissing background ratio, skipping correction\n"
        factor = 1.0
    num_collections *= factor
    # calculate the number of decays per cycle with collection
    decays_per_cycle = [decays / num_collections for decays in total_decays]
    # calculate the decay constant from the half life
    dec_const = math.log(2)/INPUT["Half Life"]
    # calculate the activity integral over the effective collection window
    act_int = act_integral(dec_const, INPUT["Effective Collection Time"])
    # calculate the beam rates
    beam_rates = [decays / act_int for decays in decays_per_cycle]
    # output everything for the users (HEADER_STRING / ROW_STRING /
    # FOOTER_STRING are module constants defined elsewhere in this file)
    print HEADER_STRING.format(INPUT["Title Name"])
    for info in zip(INPUT["Gamma Energies"], INPUT["Gamma Branchings"], eff_set,
                    total_decays, decays_per_cycle, beam_rates):
        print ROW_STRING.format(*info)
    print FOOTER_STRING
def act_integral(dec_const, time):
    """Return the integral of (1 - exp(-dec_const * t)) dt from 0 to *time*.

    This is the time-dependent part of the activity buildup during a
    collection of length *time* for decay constant *dec_const*.
    """
    return time + (math.exp(-dec_const * time) - 1.0) / dec_const
def calc_collections():
    """Return the (possibly fractional) number of collection periods that
    fit inside the HPGe integration time.

    Every whole cycle contributes one full collection.  Leftover time
    counts as one extra full collection if it exceeds a collection period,
    otherwise as the fraction of a collection it covers once the voltage
    rampdown is subtracted.
    """
    integration = INPUT["HPGe Integration Time"]
    cycle_len = INPUT["Cycle Length"]
    full_cycles = math.floor(integration / cycle_len)
    leftover = integration - full_cycles * cycle_len
    if leftover > INPUT["Collection Time"]:
        return full_cycles + 1
    return full_cycles + ((leftover - INPUT["Voltage Rampdown Time"]) /
                          INPUT["Collection Time"])
class HPGeEfficiency(object):
    """Hold HPGe efficiency-curve parameters and evaluate gamma efficiencies.

    The curve is eff(E) = sum_i a_i * ln(E)^i / E, where the six supplied
    fit parameters map onto polynomial slots 0, 1, 2, 3, 5 and 7 (slots 4
    and 6 are fixed at zero).
    """

    def __init__(self, param_list):
        """Store *param_list* (6 fit parameters) in their polynomial slots."""
        self.par = [0.0] * 8
        # explicit slot mapping replaces the previous hand-written
        # assignments; slots 4 and 6 stay zero
        for slot, value in zip((0, 1, 2, 3, 5, 7), param_list):
            self.par[slot] = value

    def calculate_frac_eff(self, energy):
        """Return the efficiency at *energy* as a fraction."""
        logx = math.log(energy)
        return sum(p * logx ** i for i, p in enumerate(self.par)) / energy

    def calculate_perc_eff(self, energy):
        """Return the efficiency at *energy* as a percentage.

        Delegates to calculate_frac_eff so the two evaluations can never
        drift apart (previously the polynomial was duplicated here).
        """
        return 100.0 * self.calculate_frac_eff(energy)
def test_value_sanity():
    """Warn (without exiting) about input values that look like mistakes"""
    # efficiency-fit parameters should all be small in magnitude
    for param in INPUT["HPGe Efficiency Function"]:
        if abs(param) > 0.1:
            print("Warning, efficiency parameters might be too large\n")
    # a very short HPGe count gives statistically weak results
    if INPUT["HPGe Integration Time"] < 20.0:
        print("Warning, you might not have counted in the HPGe spectrum long")
        print("enough for a statistically significant result\n")
    # gamma energies far outside the usual calibration range are suspicious
    for energy in INPUT["Gamma Energies"]:
        if energy < 0.02 or energy > 5.0:
            print("Warning energy {0:8.4f} is well outside the"
                  " usual efficiency".format(energy))
            print("calibration range\n")
    # peak areas with >= 50% statistical error are not trustworthy
    for area in INPUT["Gamma Areas"]:
        if (math.sqrt(float(area)) / float(area)) >= 0.50:
            print("Warning, gamma ray area statistical errors are >= 50%\n")
def test_list_lengths():
    """Exit with a message unless the input lists have usable lengths"""
    num_gammas = len(INPUT["Gamma Energies"])
    # the three per-gamma lists must be parallel (equal length)
    if (num_gammas != len(INPUT["Gamma Branchings"]) or
            num_gammas != len(INPUT["Gamma Areas"])):
        print('The "Gamma Energies", "Gamma Branchings", and "Gamma Areas"')
        print('entries must all have equal length')
        sys.exit()
    # at least one gamma line is required for any output
    if num_gammas == 0:
        print('The "Gamma Energies", "Gamma Branchings", and "Gamma Areas"')
        print('entries must all have length > 0')
        sys.exit()
    # the efficiency fit uses exactly six parameters a0..a5
    if len(INPUT["HPGe Efficiency Function"]) != 6:
        print('The "HPGe Efficiency Function" entry must contain exactly 6')
        print('parameters, a0 through a5')
        sys.exit()
def test_scalar_types():
    """Validate scalar inputs' types and basic consistency.

    Prints a message and exits on the first invalid entry.  The float
    checks deliberately reject integers so users supply unambiguous
    values (e.g. 5.0 instead of 5).
    """
    # every one of these entries must be a floating point number;
    # fixed: the messages for "Collection Time" and "Voltage Rampdown
    # Time" previously named "Effective Collection Time" (copy-paste)
    for key in ("Half Life", "Cycle Length", "Collection Time",
                "Voltage Rampdown Time", "HPGe Integration Time"):
        if not isinstance(INPUT[key], float):
            print('The "{0}" entry must be a floating point'.format(key))
            print('number. Consider adding ".0" to the end if it is an integer')
            sys.exit()
    # if rampdown exceeds the collection window, nothing is collected
    if INPUT["Voltage Rampdown Time"] > INPUT["Collection Time"]:
        print('The "Voltage Rampdown Time" entry exceeds the "Collection Time"')
        print('entry, therefore collection is not happening')
        sys.exit()
    if not isinstance(INPUT["Title Name"], str):
        print('The "Title Name" entry must be a string')
        sys.exit()
    # the report header reserves exactly 63 characters for the title
    if len(INPUT["Title Name"]) > 63:
        print('The "Title Name" entry is too long')
        print('"Title Name" must not exceed 63 characters')
        sys.exit()
def test_list_types():
    """Validate that the list inputs are lists of floats.

    Exits on the first invalid entry; also warns (without exiting) about
    values that look like unit mix-ups (keV vs MeV, percent vs fraction).
    """
    # each of these entries must be a list, even with a single element;
    # fixed: the "Gamma Branchings" message previously named
    # "Gamma Intensities", which is not an input key
    for entry in ("Gamma Energies", "Gamma Branchings", "Gamma Areas",
                  "HPGe Efficiency Function"):
        if not isinstance(INPUT[entry], list):
            print('The "{0}" entry must be a list'.format(entry))
            sys.exit()
    for element in INPUT["Gamma Energies"]:
        if not isinstance(element, float):
            print('All elements of the "Gamma Energies" entry must be')
            print('floating point numbers. Consider adding ".0" to the end if')
            print('it is an integer')
            sys.exit()
        if element > 5.0:
            # warn only: energies above 5 suggest keV was entered, not MeV
            print('Check your gamma energies. Did you enter an energy in keV')
            print(' instead of entering the energy in MeV?')
    for element in INPUT["Gamma Branchings"]:
        if not isinstance(element, float):
            print('All elements of the "Gamma Branchings" entry must be')
            print('floating point numbers. Consider adding ".0" to the end if')
            print('it is an integer')
            sys.exit()
        if element > 1.0:
            # warn only: branchings are fractions, not percentages
            print('Check your branchings. Did you enter the percent branching')
            print(' instead of the fractional branching?')
    for element in INPUT["Gamma Areas"]:
        if not isinstance(element, float):
            print('All elements of the "Gamma Areas" entry must be')
            print('floating point numbers. Consider adding ".0" to the end if')
            print('it is an integer')
            sys.exit()
    for element in INPUT["HPGe Efficiency Function"]:
        if not isinstance(element, float):
            print('All elements of the "HPGe Efficiency Function" entry must')
            print('be floating point numbers. Consider adding ".0" to the end')
            print('if it is an integer')
            sys.exit()
# Templates for the fixed-width results table printed by main().  The
# title field is padded to 63 characters, which is why the input
# validation rejects longer "Title Name" entries.
HEADER_STRING = """
|-----------------------------------------------------------------------------|
| RESULTS FOR: {0:63s}|
|-----------------------------------------------------------------------------|
|    GAMMA    |           |            |    DECAYS AT HPGE     |   BEAM RATE  |
| ENERGY(MeV) | BRANCHING | EFFICIENCY |   TOTAL   | PER CYCLE |     (HZ)     |"""
# One table row: energy, branching, efficiency, total decays, decays per
# cycle, and the deduced beam rate.
ROW_STRING = """|-------------|-----------|------------|-----------|-----------|--------------|
| {0: ^11.4f} | {1:4.3e} | {2:5.4e} | {3:4.3e} | {4:4.3e} | {5:7.6e} |"""
# Closing border of the table.
FOOTER_STRING = """|-------------|-----------|------------|-----------|-----------|--------------|
"""
if __name__ == "__main__":
    main()
| true |
0e631880741e59bee52e472872509e2dafde89aa | Python | dariush-bahrami/Dimensional-Analysis | /dimensional_analysis.py | UTF-8 | 13,323 | 3.625 | 4 | [] | no_license | def raise_unit_2_power(unit_string, power):
    """Raise every unit token in a unit string to the given power.

    e.g. ('kg m s^-2', 2) -> 'kg^2 m^2 s^-4'.  Integer-valued exponents
    are written without a trailing '.0'.

    Parameters
    ----------
    unit_string : str
        whitespace-separated unit tokens, optionally carrying '^<power>'
    power : float

    Returns
    -------
    str
    """
    unit_list = unit_string.split()
    new_unit_list = []
    if power == 1:
        # raising to 1 is the identity; return the input untouched
        return unit_string
    for unit in unit_list:
        if '^' in unit:
            # token already has an exponent: multiply the exponents
            unit_part = unit.split('^')[0]
            power_part = float(unit.split('^')[1])
            power_part *= power
            # drop the '.0' when the exponent is a whole number
            if power_part - int(power_part) == 0:
                power_part = int(power_part)
            new_unit_list.append('^'.join([unit_part, str(power_part)]))
        else:
            # bare token: its implicit exponent 1 simply becomes `power`
            unit_part = unit
            power_part = power
            if power_part - int(power_part) == 0:
                power_part = int(power_part)
            new_unit_list.append('^'.join([unit_part, str(power_part)]))
    return ' '.join(new_unit_list)
def si_derived_unit_equivalent(unit_string: str) -> str:
    """Expand one SI derived unit token into SI base units

    e.g. 'N' (newton) becomes 'kg m s^-2'; tokens that are not known
    derived units are returned unchanged.

    Parameters
    ----------
    unit_string : str
        a single unit token (no spaces), optionally with '^<power>'

    Returns
    -------
    str
    """
    assert ' ' not in unit_string, 'Invalid space character'
    si_derived_units = {
        'N': 'kg m s^-2',
        'J': 'kg m^2 s^-2',
        'C': 'A s',
        'T': 'kg s^-2 A^-1',
        'Pa': 'kg m^-1 s^-2',
        'W': 'kg m^2 s^-3',
        'Hz': 's^-1',
        'V': 'kg m^2 s^-3 A^-1',
        'F': 'kg^-1 m^-2 s^4 A^2',
        'Wb': 'kg m^2 s^-2 A^-1',
        'H': 'kg m^2 s^-2 A^-2',
        'ohm': 'kg m^2 s^-3 A^-2',
        'rad': '',
        'sr': ''
    }
    # split 'X^p' into its base symbol and numeric power (default 1)
    base, caret, exponent = unit_string.partition('^')
    power = float(exponent) if caret else 1
    if base not in si_derived_units:
        return unit_string
    return raise_unit_2_power(si_derived_units[base], power)
def simplify_derived_units(unit_string: str) -> str:
    """Expand every derived-unit token in a compound unit string

    Parameters
    ----------
    unit_string : str
        space-separated unit tokens

    Returns
    -------
    str
    """
    expanded = [si_derived_unit_equivalent(token)
                for token in unit_string.split(' ')]
    return ' '.join(expanded)
def si_parser(unit_string: str) -> dict:
    """Translate an SI unit string into fundamental sympy dimensions

    e.g. 'm' maps to sympy.physics.units.length; a compound string is
    reduced to the product of its base dimensions raised to integer
    powers.  (The `dict` annotation is historical — the value returned
    is that sympy product.)

    Parameters
    ----------
    unit_string : str
    """
    from sympy.physics import units
    si_system = {
        'm': units.length,
        's': units.time,
        'k': units.temperature,
        'kg': units.mass,
        'mol': units.amount_of_substance,
        'A': units.current,
        'cd': units.luminous_intensity
    }
    product = 1
    # expand derived units first, then fold each base token into the product
    for token in simplify_derived_units(unit_string).split(' '):
        base, caret, exponent = token.partition('^')
        power = int(exponent) if caret else 1
        product *= si_system[base] ** power
    return product
def parameter(name: str, unit: str, latex_repr: str):
    """Build a sympy Quantity with its SI dimension set

    Parameters
    ----------
    name : str
        name of the physical quantity
    unit : str
        unit string of the quantity, e.g. 'm s^-1' for velocity
    latex_repr : str
        latex representation used when rendering the quantity

    Returns
    -------
    sympy Quantity object
    """
    from sympy.physics.units import Quantity
    from sympy.physics.units.systems import SI
    # local name chosen so it does not shadow this function
    quantity = Quantity(name, latex_repr=latex_repr)
    SI.set_quantity_dimension(quantity, si_parser(unit))
    return quantity
class DimensionalAnalysis:
    """Python class for Buckingham Theorem

    Builds the dimension matrix of the supplied quantities and derives
    the dimensionless (Pi) groups from its nullspace.
    """
    def __init__(self, parameters: list):
        """
        Parameters
        ----------
        parameters : list
            list of sympy Quantity objects
        """
        self.parameters = parameters
    @property
    def fundamental_dimensions(self):
        """Set of base dimensions (length, mass, time, ...) that appear
        in at least one of the parameters."""
        from sympy.physics.units.systems.si import dimsys_SI
        dimensions = set()
        for parameter in self.parameters:
            dimension_dict = dimsys_SI.get_dimensional_dependencies(
                parameter.dimension)
            for dimension in dimension_dict:
                dimensions.add(dimension)
        return dimensions
    @property
    def dimension_matrix(self):
        """Matrix with one row per fundamental dimension and one column
        per parameter; entry (i, j) is the exponent of dimension i in
        parameter j.  Recomputed on every access."""
        import sympy
        from sympy.physics.units.systems.si import dimsys_SI
        matrix = sympy.zeros(len(self.fundamental_dimensions),
                             len(self.parameters))
        for i, dimension in enumerate(self.fundamental_dimensions):
            for j, parameter in enumerate(self.parameters):
                dimension_dict = dimsys_SI.get_dimensional_dependencies(
                    parameter.dimension)
                if dimension in dimension_dict:
                    matrix[i, j] = dimension_dict[dimension]
                else:
                    matrix[i, j] = 0
        return matrix
    @property
    def dimensionless_parameters(self):
        """Dimensionless (Buckingham Pi) groups keyed Pi_0, Pi_1, ...

        Each nullspace vector of the dimension matrix supplies the
        exponents of one dimensionless product of the parameters."""
        import sympy
        dimesionless_dict = dict()
        nullspace = self.dimension_matrix.nullspace()
        for i, vector in enumerate(nullspace):
            d = 1
            for j, power in enumerate(vector):
                if power - int(power) == 0:
                    power = int(
                        power)  # Prefer using integer powers if possible
                d *= (self.parameters[j]**power)
            dimesionless_dict[sympy.symbols(f'Pi_{i}')] = d
        return dimesionless_dict
    def solve_for(self, parameter) -> dict:
        """Solve result from dimensional analysis for selected parameter
        Parameters
        ----------
        parameter : sympy Quantity object
            selected parameter for solving result from dimensional analysis
        Returns
        -------
        dict
            dictionary of solutions for each dimensionless parameter
        """
        import sympy
        solution_dict = dict()
        dimensionless_dict = self.dimensionless_parameters
        # solve Pi_i == <its product> for the requested parameter
        for d in dimensionless_dict:
            solution = sympy.solve(sympy.Eq(dimensionless_dict[d], d),
                                   parameter)
            solution_dict[d] = solution
        return solution_dict
def dimensional_analysis(*args) -> dict:
    """Run a Buckingham-Pi analysis on the given Quantity objects

    Returns
    -------
    dict
        dictionary of dimensionless parameters
    """
    analysis = DimensionalAnalysis(list(args))
    return analysis.dimensionless_parameters
def solve_from_dimensional_analysis(*args, target_parameter) -> dict:
    """Run a dimensional analysis and solve each Pi group for one parameter

    Parameters
    ----------
    target_parameter : sympy Quantity object
        parameter to solve the dimensionless groups for

    Returns
    -------
    dict
        dictionary of solutions for each dimensionless parameter
    """
    analysis = DimensionalAnalysis(list(args))
    return analysis.solve_for(target_parameter)
def standard_parameters(parameters_string: str) -> list:
    """Collection of standard physical parameters

    Parameters
    ----------
    parameters_string : str
        space-separated names of parameters to return as sympy Quantity
        objects; for example: 'density viscosity length'

    Returns
    -------
    list
        list of sympy Quantity objects (KeyError for unknown names)
    """
    parameter_list = parameters_string.split(' ')
    # 1D Length related parameters
    length = parameter('Length', 'm', 'L')
    # NOTE(review): width reuses the latex symbol 'L' (possibly intended
    # to be 'w') -- left unchanged to preserve existing rendering
    width = parameter('Width', 'm', 'L')
    height = parameter('Height', 'm', 'h')
    diameter = parameter('Diameter', 'm', 'D')
    radius = parameter('Radius', 'm', 'r')
    amplitude = parameter('Amplitude', 'm', 'A')
    thickness = parameter('Thickness', 'm', 't')
    # 2D Length related parameters
    area = parameter('Area', 'm^2', 'A')
    # 3D Length related parameters
    volume = parameter('Volume', 'm^3', 'V')  # fixed: was 'm^2' (wrong dimension)
    # Mechanic related parameters
    mass = parameter('Mass', 'kg', 'm')
    velocity = parameter('Velocity', 'm s^-1', 'v')
    speed = parameter('Speed', 'm s^-1', 'v')
    angular_velocity = parameter('Angular Velocity', 's^-1', '\omega')
    acceleration = parameter('Acceleration', 'm s^-2', 'a')
    g = parameter('g', 'm s^-2', 'g')
    force = parameter('Force', 'N', 'F')
    momentum = parameter('Momentum', 'kg m s^-1', 'p')
    period = parameter('Period', 's', 'T')
    frequency = parameter('Frequency', 's^-1', 'f')
    energy = parameter('Energy', 'J', 'E')
    work = parameter('Work', 'J', 'W')
    potential_energy = parameter('Potential Energy', 'J', 'PE')
    kinetic_energy = parameter('Kinetic Energy', 'J', 'KE')
    tension = parameter('Tension', 'N', 's')
    linear_density = parameter('Linear Density', 'kg m^-1', '\\rho')
    stress = parameter('Stress', 'N m^-2', '\sigma')
    power = parameter('Power', 'J s^-1', 'P')
    # Fluid mechanic parameters
    density = parameter('Density', 'kg m^-3', '\\rho')
    viscosity = parameter('Viscosity', 'kg m^-1 s^-1', '\mu')
    pressure = parameter('Pressure', 'Pa', 'P')
    temperature = parameter('Temperature', 'k', 'T')
    heat = parameter('Heat', 'J', 'Q')
    flow_rate = parameter('Flow Rate', 'm^3 s^-1', 'Q')
    specific_heat_capacity = parameter('Specific Heat Capacity', 'J kg^-1 k^-1', 'C_p')
    # Wave related parameters
    wave_length = parameter('Wave Length', 'm', '\lambda')
    # Electric related parameters
    electric_current = parameter('Electric Current', 'A', 'I')
    electric_voltage = parameter('Electric Voltage', 'V', 'V')
    electric_resistance = parameter('Electric Resistance', 'ohm', 'R')
    electric_charge = parameter('Electric Charge', 'A s', 'q')
    # Magnetic related parameters
    magnetic_field = parameter('Magnetic Field', 'T', 'B')
    # name -> Quantity lookup used to resolve the requested parameters
    standard_quantities = {
        'length': length,
        'width': width,
        'height': height,
        'diameter': diameter,
        'radius': radius,
        'amplitude': amplitude,
        'thickness': thickness,
        'area': area,
        'volume': volume,
        'mass': mass,
        'velocity': velocity,
        'speed': speed,
        'angular_velocity': angular_velocity,
        'acceleration': acceleration,
        'g': g,
        'force': force,
        'momentum': momentum,
        'period': period,
        'frequency': frequency,
        'energy': energy,
        'work': work,
        'potential_energy': potential_energy,
        'kinetic_energy': kinetic_energy,
        'tension': tension,
        'linear_density': linear_density,
        'stress': stress,
        'power': power,
        'density': density,
        'viscosity': viscosity,
        'pressure': pressure,
        'temperature': temperature,
        'heat': heat,
        'flow_rate': flow_rate,
        'specific_heat_capacity': specific_heat_capacity,
        'wave_length': wave_length,
        'electric_current': electric_current,
        'electric_voltage': electric_voltage,
        'electric_resistance': electric_resistance,
        'electric_charge': electric_charge,
        'magnetic_field': magnetic_field
    }
    parameters = [standard_quantities[p] for p in parameter_list]
    return parameters
def standard_dimensional_analysis(parameters_string: str) -> dict:
    """Dimensional analysis over named standard physical parameters

    Parameters
    ----------
    parameters_string : str
        space-separated standard parameter names,
        e.g. 'density viscosity length'

    Returns
    -------
    dict
        dictionary of dimensionless parameters
    """
    return dimensional_analysis(*standard_parameters(parameters_string))
def solve_from_standard_dimensional_analysis(parameters_string: str,
                                             target_parameter_string: str
                                             ) -> dict:
    """Dimensional analysis over standard parameters, solved for one of them

    Parameters
    ----------
    parameters_string : str
        space-separated standard parameter names
    target_parameter_string : str
        single standard parameter name to solve the Pi groups for

    Returns
    -------
    dict
        dictionary of solutions for each dimensionless parameter
    """
    quantities = standard_parameters(parameters_string)
    target = standard_parameters(target_parameter_string)[0]
    return solve_from_dimensional_analysis(*quantities,
                                           target_parameter=target)
| true |
625bb7396cfee27af0ceac032f34efd970a93473 | Python | javyxu/pytorch | /torch/ao/quantization/fx/_model_report/model_report.py | UTF-8 | 10,151 | 2.875 | 3 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | from typing import Any, Dict, Set, Tuple
import torch
from torch.ao.quantization.fx._model_report.detector import DetectorBase
from torch.ao.quantization.fx.graph_module import GraphModule
from torch.ao.quantization.observer import ObserverBase
class ModelReport:
    r"""
    Generates report and collects statistics
    Used to provide users suggestions on possible model configuration improvements
    Currently supports generating reports on:
    - Suggestions for dynamic vs static quantization for linear layers (Graph Modules)
    * :attr:`desired_report_detectors` The set of Detectors representing desired reports from the ModelReport class
    Make sure that these are all unique types of detectors [do not have more than 1 of the same class]
    Proper Use:
    1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects
    2.) Prepare your model with prepare_fx
    3.) Call model_report.prepare_detailed_calibration on your model to add relevant observers
    4.) Calibrate your model with data
    5.) Call model_report.generate_report on your model to generate report and optionally remove added observers
    """
    def __init__(self, desired_report_detectors: Set[DetectorBase]):
        # at least one detector is required for this class to do anything
        if len(desired_report_detectors) == 0:
            raise ValueError("Should include at least 1 desired report")
        # keep the reports private so they can't be modified
        self._desired_report_detectors = desired_report_detectors
        self._desired_detector_names = set([detector.get_detector_name() for detector in desired_report_detectors])
        # keep a mapping of desired reports to observers of interest
        # this is to get the readings, and to remove them, can create a large set
        # this set can then be used to traverse the graph and remove added observers
        self._detector_name_to_observer_fqns: Dict[str, Set[str]] = {}
        # initialize each report to have empty set of observers of interest
        for desired_report in self._desired_detector_names:
            self._detector_name_to_observer_fqns[desired_report] = set([])
        # flags to ensure that we can only prepare and generate report once
        self._prepared_flag = False
        self._removed_observers = False
    def get_desired_reports_names(self) -> Set[str]:
        """ Returns a copy of the desired reports for viewing """
        return self._desired_detector_names.copy()
    def get_observers_of_interest(self) -> Dict[str, Set[str]]:
        """ Returns a copy of the observers of interest for viewing """
        return self._detector_name_to_observer_fqns.copy()
    def prepare_detailed_calibration(self, prepared_fx_model: GraphModule) -> GraphModule:
        r"""
        Takes in a prepared fx graph model and inserts the following observers:
        - ModelReportObserver
        Each observer is inserted based on the desired_reports into the relevant locations
        Right now, each report in self._desired_detector_names has independent insertions
        However, if a module already has a Observer of the same type, the insertion will not occur
        This is because all of the same type of Observer collect same information, so redundant
        Args:
            prepared_fx_model (GraphModule):  The prepared Fx GraphModule
        Returns the same GraphModule with the observers inserted
        """
        # if already prepared once, cannot prepare again
        if self._prepared_flag:
            raise ValueError("Already ran preparing detailed callibration. Run the report generation next after callibration.")
        # loop through each detector, find where placements should be, and keep track
        insert_observers_fqns: Dict[str, Any] = {}
        for detector in self._desired_report_detectors:
            # determine observer points for each detector
            obs_fqn_to_info = detector.determine_observer_insert_points(prepared_fx_model)
            # map each insert point to the observer to use
            insert_observers_fqns.update(obs_fqn_to_info)
            # update the set of observers this report cares about
            self._detector_name_to_observer_fqns[detector.get_detector_name()] = set(obs_fqn_to_info.keys())
        # now insert all the observers at their desired locations
        for observer_fqn in insert_observers_fqns:
            target_node = insert_observers_fqns[observer_fqn]["target_node"]
            insert_obs = insert_observers_fqns[observer_fqn]["insert_observer"]
            insert_post = insert_observers_fqns[observer_fqn]["insert_post"]
            observer_args = insert_observers_fqns[observer_fqn]["observer_args"]
            self._insert_observer_around_module(
                prepared_fx_model, observer_fqn, target_node, insert_obs, observer_args, insert_post
            )
        self._prepared_flag = True
        return prepared_fx_model
    def _insert_observer_around_module(
        self,
        prepared_fx_model: GraphModule,
        obs_fqn: str,
        target_node: torch.fx.node.Node,
        obs_to_insert: ObserverBase,
        observer_args: Tuple,
        insert_post: bool
    ):
        r"""
        Helper function that inserts the observer into both the graph structure and the module of the model
        Args
            prepared_fx_model (GraphModule):  The prepared Fx GraphModule
            node_fqn (str): The fully qualified name of the observer we want to insert
            target_node (torch.fx.node.Node): The node in prepared_fx_module we are inserting observers around
            obs_to_insert (ObserverBase): The observer we are inserting around target_node
            observer_args (Tuple): The arguments we want to pass into the observer
            insert_post (bool): whether this is meant to be a post observer for this node
        """
        # if we are inserting post, then our target node is the next node
        if insert_post:
            target_node = target_node.next
        with prepared_fx_model.graph.inserting_before(target_node):
            prepared_fx_model.add_submodule(obs_fqn, obs_to_insert)
            prepared_fx_model.graph.create_node(op="call_module", target=obs_fqn, args=observer_args)
        # recompile model after inserts are made
        prepared_fx_model.recompile()
    def _get_node_from_fqn(self, fx_model: GraphModule, node_fqn: str) -> torch.fx.node.Node:
        r"""
        Takes in a graph model and returns the node based on the fqn
        Args
            fx_model (GraphModule): The Fx GraphModule that already contains the node with fqn node_fqn
            node_fqn (str): The fully qualified name of the node we want to find in fx_model
        Returns the Node object for node_fqn; raises ValueError if no such node exists
        """
        node_to_return = None
        for node in fx_model.graph.nodes:
            # if the target matches the fqn, it's the node we are looking for
            if node.target == node_fqn:
                node_to_return = node
                break
        if node_to_return is None:
            raise ValueError("The node_fqn is was not found within the module.")
        # assert for MyPy
        assert isinstance(node_to_return, torch.fx.node.Node)
        return node_to_return
    def generate_model_report(
        self, calibrated_fx_model: GraphModule, remove_inserted_observers: bool
    ) -> Dict[str, Tuple[str, Dict]]:
        r"""
        Takes in a calibrated fx graph model and generates all the requested reports.
        The reports generated are specified by the desired_reports specified in desired_reports
        Can optionally remove all the observers inserted by the ModelReport instance
        Args:
            calibrated_fx_model (GraphModule): The Fx GraphModule that has already been calibrated by the user
            remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance
        Returns a mapping of each desired report name to a tuple with:
            The textual summary of that report information
            A dictionary containing relevant statistics or information for that report
        """
        # if we already removed the observers, we cannot generate report
        if self._removed_observers:
            raise Exception("Cannot generate report on model you already removed observers from")
        # keep track of all the reports of interest and their outputs
        reports_of_interest = {}
        for detector in self._desired_report_detectors:
            # generate the individual report for the detector
            report_output = detector.generate_detector_report(calibrated_fx_model)
            reports_of_interest[detector.get_detector_name()] = report_output
        # if user wishes to remove inserted observers, go ahead and remove
        if remove_inserted_observers:
            self._removed_observers = True
            # get the set of all Observers inserted by this instance of ModelReport
            all_observers_of_interest: Set[str] = set([])
            for desired_report in self._detector_name_to_observer_fqns:
                observers_of_interest = self._detector_name_to_observer_fqns[desired_report]
                all_observers_of_interest.update(observers_of_interest)
            # go through all_observers_of_interest and remove them from the graph and model
            for observer_fqn in all_observers_of_interest:
                # remove the observer from the model
                calibrated_fx_model.delete_submodule(observer_fqn)
                # remove the observer from the graph structure
                node_obj = self._get_node_from_fqn(calibrated_fx_model, observer_fqn)
                if node_obj:
                    calibrated_fx_model.graph.erase_node(node_obj)
                else:
                    raise ValueError("Node no longer exists in GraphModule structure")
            # remember to recompile the model
            calibrated_fx_model.recompile()
        # return the reports of interest
        return reports_of_interest
| true |
5ab17de0fd2744525bd9ba3419523e06c1061f8f | Python | annkeenan/zoo-art | /webservice/tests/test_region.py | UTF-8 | 1,886 | 2.84375 | 3 | [] | no_license | import json
import requests
import unittest
class TestRegion(unittest.TestCase):
    # NOTE(review): these are integration tests -- they require the REST
    # service to be reachable at SITE_URL and do not run offline.
    PORT_NUM = '51042'
    print("Testing /region/")
    SITE_URL = 'http://student04.cse.nd.edu:' + PORT_NUM
    REGION_URL = SITE_URL + '/region/'
    RESET_URL = SITE_URL + '/reset/'
    def reset_data(self):
        """Restore the server's dataset to a known state before a test"""
        m = {}
        r = requests.put(self.RESET_URL, data=json.dumps(m))
        resp = json.loads(r.content.decode())
        self.assertEqual(resp['result'], 'success')
    def is_json(self, resp):
        """Return True if `resp` (a str) parses as JSON"""
        try:
            json.loads(resp)
            return True
        except ValueError:
            return False
    def test_get_regions(self):
        """GET /region/ returns success and a list of regions"""
        self.reset_data()
        # Get the server response
        r = requests.get(self.REGION_URL)
        self.assertTrue(self.is_json(r.content.decode()))
        resp = json.loads(r.content.decode())
        # Check the response
        self.assertEqual(resp['result'], 'success')
        self.assertIsInstance(resp['regions'], list)
    def test_get_region(self):
        """GET /region/<name> returns that region's description"""
        self.reset_data()
        # Get the server response
        r = requests.get(self.REGION_URL + 'nearctic')
        self.assertTrue(self.is_json(r.content.decode()))
        resp = json.loads(r.content.decode())
        # Check the response
        self.assertEqual(resp['result'], 'success')
        self.assertEqual(resp['desc'], 'north america')
    def test_post_region(self):
        """POST /region/ creates a new region entry"""
        self.reset_data()
        # Build the input dictionary
        m = {}
        m['region'] = 'region'
        m['desc'] = 'desc'
        # Post the request to the server
        r = requests.post(self.REGION_URL, data=json.dumps(m))
        self.assertTrue(self.is_json(r.content.decode()))
        resp = json.loads(r.content.decode())
        # Check the response
        self.assertEqual(resp['result'], 'success')
# Allow running this test module directly: `python test_region.py`
if __name__ == "__main__":
    unittest.main()
| true |
c7354db4a52fac49f7c2cf09e577c5f9c6b4c24a | Python | lucasadsouza/Python-Exercises | /Python_Exercises-Sequential_Structure/09_fahrenheitToCelsius.py | UTF-8 | 294 | 4.65625 | 5 | [] | no_license | # Make a Program that asks for the temperature in degrees Farenheit,
# transform and show the temperature in degrees Celsius.
farenheit = float(input('Insert the temperature in Farenheit: '))
# conversion formula: C = (F - 32) * 5/9
celsius = (farenheit - 32) * 5 / 9
print(f'\n{farenheit:.2f}°F in Celcius is {celsius:.2f}°C')
27c70c08da6b196727df533936570e0ae157df8d | Python | dacastanogo/holbertonschool-machine_learning | /supervised_learning/0x11-attention/4-positional_encoding.py | UTF-8 | 1,419 | 3.5 | 4 | [] | no_license | #!/usr/bin/env python3
"""
4-positional_encoding.py
"""
import numpy as np
def positional_encoding(max_seq_len, dm):
    """Compute the sinusoidal positional-encoding matrix.

    Returns a (max_seq_len, dm) array that is added to token embeddings
    so the model can exploit token position: after the addition, tokens
    end up close in the d-dimensional space based on both similarity of
    meaning and proximity of position in the sentence.
    """
    positions = np.arange(max_seq_len)[:, np.newaxis]  # (max_seq_len, 1)
    depths = np.arange(dm)[np.newaxis, :]              # (1, dm)
    encoding = get_angles(positions, depths, dm)
    # even feature indices (2i) take the sine of the angle ...
    encoding[:, 0::2] = np.sin(encoding[:, 0::2])
    # ... and odd feature indices (2i+1) take the cosine
    encoding[:, 1::2] = np.cos(encoding[:, 1::2])
    return encoding
def get_angles(position, i, dm):
    """Angle(s) for the positional encoding at the given position/index.

    Pairs of feature indices (2k, 2k+1) share the same frequency, hence
    the floor(i / 2) below.
    """
    half_index = np.floor(i / 2)
    angle_rates = 1 / np.power(10000, (2 * half_index) / np.float32(dm))
    return position * angle_rates
| true |
a0fc9aee604d866748ffc8e6ad4688de282cebc9 | Python | nirmalnishant645/LeetCode | /0350-Intersection-of-Two-Arrays-II.py | UTF-8 | 1,534 | 4.40625 | 4 | [] | no_license | '''
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [4,9]
Note:
Each element in the result should appear as many times as it shows in both arrays.
The result can be in any order.
Follow up:
What if the given array is already sorted? How would you optimize your algorithm?
What if nums1's size is small compared to nums2's size? Which algorithm is better?
What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?
'''
class Solution:
    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the multiset intersection of nums1 and nums2.

        Each value appears as many times as it occurs in BOTH arrays, in
        the order encountered while scanning nums2.  O(n + m) time,
        O(n) extra space for the counter.
        """
        # local import: this snippet has no module-level import section
        from collections import Counter
        remaining = Counter(nums1)  # value -> matches still available
        result = []
        for num in nums2:
            if remaining[num] > 0:
                result.append(num)
                remaining[num] -= 1
        return result
'''
Follow up 1:
If both arrays are sorted, use two pointers to iterate.
Follow up 2:
Make a hashmap from numbers in nums1 and loop through nums2 to find the intersection.
Follow up 3:
If only nums2 cannot fit in memory, put all elements of nums1 into a HashMap, read chunks of array that fit into the memory, and record the intersections.
If both nums1 and nums2 are so huge that neither fit into the memory, sort them individually (external sort), then read 2 elements from each array at a time in memory, record intersections.
'''
| true |
2da3f2ce33c9b118f796139d12d82a2e139a086d | Python | JaeDukSeo/Implementation_of_CPS616_Advanced_algo | /2_brute_forece/4.1_4.4.py | UTF-8 | 3,558 | 3.5 | 4 | [] | no_license | # Jae Duk Seo 500 633 241
def Jae_Selection_sort(array=None):
    """Sort `array` in place with selection sort and return it.

    Repeatedly finds the minimum of the unsorted tail and swaps it into
    position i.  O(n^2) comparisons, O(1) extra space.
    """
    for i in range(len(array) - 1):
        # locate the smallest element in array[i:]
        smallest = i
        for j in range(i + 1, len(array)):
            if array[j] < array[smallest]:
                smallest = j
        # idiomatic tuple swap (replaces the temp-variable dance);
        # the dead pre-loop `current_minval = 0` init is also dropped
        array[i], array[smallest] = array[smallest], array[i]
    return array
print("1. Selection Sort ")
array = [29, 3, 4, 2, -3, 2, 0, 0.002]
print(Jae_Selection_sort(array))
# ---------------------------------
def Jae_Bruteforcestring(text = None, pattern = None):
    """Brute-force substring search.

    Returns True when `pattern` occurs in `text`, else False.
    Runs in O(n*m) time for n = len(text), m = len(pattern).
    """
    if len(pattern) > len(text):
        return False
    n = len(text)
    m = len(pattern)
    # Bug fix: the last candidate start index is n - m (inclusive), so the
    # range must run to n - m + 1.  The old `range(0, n - m)` missed matches
    # ending at the last character and every case where n == m.
    for i in range(0, n - m + 1):
        j = 0
        while j < m and pattern[j] == text[i + j]:
            j = j + 1
        # j == m means every pattern character matched at offset i.
        if j == m:
            return True
    return False
# Demo: look for the substring "is" in a sample sentence (Python 2 script).
print "\n2. Brute Force String patter"
text = 'This is my text'
pattern = "is"
print Jae_Bruteforcestring(text,pattern)
# ---------------------------------
import time
from random import randint
def Jae_poly(point =None, n = None ):
    # Evaluate a degree-(n-1) polynomial with random digit coefficients at
    # `point`, using the naive O(n^2) scheme: point**i is rebuilt from
    # scratch for every term.
    # NOTE(review): coefficients are drawn fresh on every call, so results
    # are not reproducible across calls.
    random_const = [randint(0,9) for p in range(0,n)]
    print 'Constants are : ', random_const[::-1]
    p = 0.0
    for i in range(n-1,-1,-1):
        power = 1
        # Recompute point**i with an inner loop -- deliberately unoptimised,
        # this is the baseline the later variants improve on.
        for j in range(0,i):
            power = power * point
        p = p + random_const[i] * power
    return p
# Demo + wall-clock timing of the naive O(n^2) evaluator (Python 2 script).
print "\n3. Poly no improvement"
start_time = time.time()
print Jae_poly(3,500), " answer with 500 Constants"
print("--- %s seconds ---" % (time.time() - start_time))
# ---------------------------------
def Jae_poly_improve_one(point= None, n = None):
    # Same polynomial evaluation, improved to O(n): the running `power`
    # is carried between terms instead of being rebuilt for each one.
    random_const = [randint(0,9) for p in range(0,n)]
    print 'Constants are : ', random_const[::-1]
    p = random_const[0]
    power = 1
    for i in range(1,n):
        power = power * point
        p = p + random_const[i] * power
    return p
# Demo + timing of the single-improvement O(n) evaluator (Python 2 script).
print "\n3.1 Poly with one improvement"
start_time = time.time()
print Jae_poly_improve_one(3,500), " answer with 500 Constants"
print("--- %s seconds ---" % (time.time() - start_time))
# ---------------------------------
def Jae_Poly_final(points= None, n = None ):
    # Horner's rule: O(n) with exactly one multiply and one add per
    # coefficient, processed from the highest-degree term downwards.
    random_const = [randint(0,9) for p in range(0,n)]
    print 'Constants are : ', random_const[::-1]
    p = random_const[n-1]
    for i in range(n-2,-1,-1 ):
        p = p * points + random_const[i]
    return p
# Demo + timing of the Horner's-rule evaluator (Python 2 script).
print "\n3.2 Poly with final improvement"
start_time = time.time()
print Jae_Poly_final(3,500), " answer with 500 Constants"
print("--- %s seconds ---" % (time.time() - start_time))
# ---------------------------------
import math
def Jae_Bruteforce_point(points = None):
    """Return the indices (i, j) of the closest pair among 2-D `points`.

    Brute force over all O(n^2) pairs.  Returns (None, None) when fewer
    than two points are supplied (the original raised NameError there
    because index1/index2 were never assigned).
    """
    dmin = float("inf")
    index1, index2 = None, None
    for i in range(0, len(points) - 1):
        for j in range(i + 1, len(points)):
            # Compare squared distances: sqrt is monotonic, so the argmin
            # is unchanged and we skip a needless math.sqrt per pair.
            d = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
            if d < dmin:
                dmin = d
                index1 = i
                index2 = j
    return index1, index2
# Demo: the first and last points are nearest, so (0, 3) is expected.
print "\n4.4 Brute force closetest length"
points = [ (1,2),( 3000 ,4),(455,399),(5,6) ]
print "I made points that look like ", points, " so the expected output is first point and last point - which is 0 and 3"
print Jae_Bruteforce_point(points), ' : Are the closest points'
# ---------------------------------
def Jae_Max_element(array = None):
    """Exhaustively scan `array` and return its largest element."""
    # Seed the running maximum with the first element, then challenge it
    # with each remaining one.
    best = array[0]
    for candidate in array[1:]:
        if candidate > best:
            best = candidate
    return best
# Demo: exhaustive max search (Python 2 script).
print "\n4.5 Max Element - Exhaustive Search"
array = [2,3,4,9,2,90,44]
print "The array that I have : ", array
print "The max element that I found : ",Jae_Max_element(array)
# ---------------------------------
# ---- END OF THE CODE ------------------
dc91efe985e244aed448c3b1dde4dd21532b7188 | Python | Jensmoking/dkpj | /game/pija.py | UTF-8 | 10,337 | 2.8125 | 3 | [] | no_license | from random import seed
from random import randint
# coding=utf-8
# Display names for the 50 cards, keyed by card id (0 = empty slot).
cardnames = {
    0:'Vacío',
    1:'Toba Ah.',
    2:'Juanxa ~Pistolas~',
    3:'Flavia ~No está, faltó~',
    4:'Nico ~Ver. anime~',
    5:'VTZ army',
    6:'VTZ Mokin',
    7:'Toba Ah ~Topoide~',
    8:'Niko ~Máscar berde ahre~',
    9:'Gonza ~Carta que va a ser censurada por ser racista ndeah~',
    10:'English tichah',
    11:'Juanxa religioso',
    12:'Mokin biolador',
    13:'Nico ~Angra Manyú~',
    14:'Langostina, but menos fea',
    15:'Toba Crux',
    16:'Mokin Dios de las Pajas',
    17:'Juanxa ~Nerox~',
    18:'Alejo ~Ryonim~',
    19:'Mokin ~DM~',
    20:'Juada VTZ',
    21:'TEAM PIJA',
    22:'Juanxa Pija',
    23:'Toba Pija',
    24:'Mokin Pija',
    25:'Nico Pija',
    26:'Alejo Pija',
    27:'Santi Jei',
    28:'Casa de Nico',
    29:'Comiste del Toba',
    30:'Maca Despeinada',
    31:'Toba Rusiano',
    32:'Toba cagando',
    33:'TOBA AH CAGADO',
    34:'La pulsera gay del Mokin',
    35:'Mikeas',
    36:'La sube del Nico',
    37:'Nico arañando',
    38:'Juanxa haciendo algo',
    39:'Navaja reveladora del gonza',
    40:'Iglesia cristianista',
    41:'Nico satanista',
    42:'Aleho satanista',
    43:'Juanxa satanista',
    44:'Mokin satanista',
    45:'La sofi',
    46:'Fran Horozko',
    47:'Khal Lannister Tiguesito Suricato Cascote',
    48:'Pacha :v',
    49:'Felisia and The black',
    50:'Jumanji HOUDINI!'}
# Full rule text shown by the 'text' command, keyed by the same card ids.
cardtext = {
    0:'Vacío/ Nada',
    1:'Toba Ah./Efecto / Es gay. | Sube 1 punto de homosexualidad al ser invocado.',
    2:'Juanxa ~Pistolas~/ Dispara S.W.A.G.',
    3:'Flavia ~No está, faltó~/Efecto | Como no está no recibe daño.',
    4:'Nico ~Ver. anime~/ Alto virgo, pero tira flow en japonés y los menores c ofenden en chino.',
    5:'VTZ army/Efecto / Si se combinan con un mokin, él hace -10 de daño y le pasan pack. | Reduce el ataque de un mokin en 10',
    6:'VTZ Mokin/ Te tira hate en chino y como es sad se viste de negro ahre emo.',
    7:'Toba Ah ~Topoide~/Efecto / Es re pete. Tiró un rayo y como estaban en una cueva casi se mueren todos. | Todos quedan con 1HP.',
    8:'Niko ~Máscara berde ahre~/Efecto / Ndeah re turbio. | +10DEF porque si.',
    9:'Gonza ~Carta que va a ser censurada por ser racista ndeah~/Efecto / Es invocado y automáticamente se van todos del miedo | Todas las cartas menos él se van al descarte.',
    10:'English tichah/ In inglish plis.',
    11:'Juanxa religioso/Efecto/ Activa el poder del comiste y te envía un waskaso con +10 de daño | Al ser invocado puede hacer un ataque extra con +10ATK',
    12:'Mokin biolador/ Re govir',
    13:'Nico ~Angra Manyú~/Efecto / Se tira dos kill con arco re picante el pibe | Elimina dos cartas al entrar en juego.',
    14:'Langostina, but menos fea/Efecto / Es alta rancia, si se combina con un toba le roba la campera y el toba aumenta su homosexualidad | Sube un punto de homosexualidad a un Toba',
    15:'Toba Crux ah/Efecto / En un equipo conformado por tobitos, todos le ofrendan su DEF y este sube +10 a sus estadísticas | Si todos los del campo local son Tobas, reducir su DEF a 0 para ganar +10ATK, +10DEF, +10HP para Toba Crux.',
    16:'Mokin Dios de las Pajas/Efecto / Con el caudal de waska de los pajeros del mundo, aciega a quienes tiene cerca, y los pegotea con semen, evitando que lo ataquen otros personajes que no sean de religión | Sólo recibe ataques de personajes Religión',
    17:'Juanxa ~Nerox~/Efecto / Como la tiene corta, se esconde y no muere, pero si se queda solo se va. | No puede ser destruido en combate',
    18:'Alejo ~Ryonim~/Efecto / No es muy gay, pero si un toba lo ve se enamora y le sube la homosexualité al toba | Los Tobas activos al momento de ser invocada esta carta, suben 1 punto de homosexualidad',
    19:'Mokin ~DM~ Si comparte equipo con otros personajes D&D, les sube +10ATK. Como el no juega está ausente. Si no tiene otros personajes D&D, se tira un pedo y se va | No puede ser destruido en combate, suma 10ATK a personajes D&D, es descartada si no hay otros D&D en el campo local.',
    20:'Juada VTZ/Efecto / Si ataca un miqueas le hace +20 de daño xqsi. Si se combina con cualquier mokin, lo friendzonea al toke y lo saca de la partida | +20ATK al combatir contra Mikeas, puede eliminar un mokin',
    21:'TEAM PIJA/Fusión / El SQUAD completo, Sólo invocación especial/ Juan Pija, Toba pija, Mokn Pija, Nico Pija, Ale Pija(Combinar 3 de estos para la invocación)',
    22:'Juanxa Pija/ Para invocar al team pija, debe mantenerse al menos dos turnos',
    23:'Toba pija/ Para invocar al team pija, debe mantenerse al menos dos turnos',
    24:'Mokin Pija/ Para invocar al team pija, debe mantenerse al menos dos turnos',
    25:'Nico Pija/ Para invocar al team pija, debe mantenerse al menos dos turnos',
    26:'Alejo Pija/ Para invocar al team pija, debe mantenerse al menos dos turnos',
    27:'Santi Jei/ No juega al piedra papel o tijera con Nico.',
    28:'Casa de Nico/Consumible / Los del team pija se van a comer empanadas, así que quedan fuera del juego | Envía todos los Nico, Ale, Mokin, Toba y Juan al descarte',
    29:'Comiste del Toba/Consumible / Elimina una carta del otro porque comió | Envía una carta del campo rival al descarte',
    30:'Maca Despeinada/Efecto / Si ve un toba, le mete el dedo y lo domina | Toma el control de un Toba',
    31:'Toba Ah ~Rusiano~/Efecto / Sólo es posible quitarlo del juego si lo atacan entre tres del team pija | Para eliminarlo debe ser atacado por 3 personajes Ale, Juan, Toba, Nico o Mokin al mismo tiempo',
    32:'Toba cagando/Efecto / Como está cagando, no se encuentra en la batalla | No recibe daño de combate',
    33:'TOBA AH CAGADO/Fusión / Ataca con su caquita uwu/ Toba cagando',
    34:'La pulsera gay del Mokin/Consumible / Descarta a todos los tobas',
    35:'Mikeas/Efecto / Las profes le hacen el doble de daño porque es re molesto eu | Recibe x2ATK de personajes Profe',
    36:'La sube del Nico/Consumible / Nico se toma el palo y el micro atropella una carta del rival | Descarta un Nico de tu lado para descartar una carta del campo rival',
    37:'Nico arañando/Efecto / Si Juan responde haciendo ese movimiento rancio, ambos juegan como una carta única con sus estadísticas sumadas | Puede mezclarse con [Juan haciendo algo], para poner ambos en un sólo espacio, atacar una sola vez, y sumar sus ATK y DEF.',
    38:'Juanxa haciendo algo/ La sociedad secreta del Japish, Japish.',
    39:'Navaja reveladora del gonza/Efecto / El gonzalo te amenaza y del cagaso que te agarra por su negrura le mostras las cartas en tu mano al rival | Al activar esta carta, tu rival te muestra sus cartas',
    40:'Iglesia cristianista/Efecto /Efecto pasivo / Aumenta al toba crux en +10 e inhibe otros personajes religiosos | Las estadísticas de un Toba Crux en tu campo local son aumentadas en 10 | [Estructura] Es incapaz de realizar ataques',
    41:'Nico satanista/Efecto Pasivo / [Nico satánico] Activa las habilidades de los personajes satanistas',
    42:'Aleho satanista/Efecto Inactivo/ Como está a favor del Ndeahismo, no le hace daño a los personajes no religiosos, pero estos no lo atacan | No puede combatir con personajes no Religión',
    43:'Juanxa satanista/Efecto Inactivo / Como no profesa el credo , los personajes de otras religionses no le hacen daño | Los personajes Religión que no tengan Satanista en su nombre, no pueden atacarlo',
    44:'Mokin satanista/Efecto Inactivo / Como no practica el Tobaísmo, los Tobas no le hacen daño | No puede ser atacado por Tobas',
    45:'La sofi/Efecto / Si se combina con un Mokin, los dos se homosexualizan por estar hablando de chinos | Aumenta en 1 la homosexualidad de un Mokin, y se aumenta en 1 su propia homosexualidad',
    46:'Fran Horozko/Efecto / Al primero que golpea le hace el doble de daño',
    47:'Khal Lannister Tiguesito Suricato Cascote/Efecto / Como todo gato sabe arañar, y como la tiene re grande te deja una cortadura que quita 1 de salud por turno | Una vez por turno, puede poner Cortadura sobre una carta del rival, esta pierde 1 HP por turno',
    48:'Pacha :\'v/Consumible continua / Con sus poderes de angelito,| le da +10DEF a los gatitos <3',
    49:'Felisia and The black/Efecto / Como son dos gatas, atacan dos veces',
    50:'Jumanji HOUDINI!/Efecto / Gatiza un personaje no Gatuno, haciendo que los efectos para Gatitos apliquen sobre este'}
# Cards currently on the board, by zone number (0 = empty slot).
inplay = {
    1:0,
    2:0,
    3:0}
# Cards currently in hand, by slot number (0 = empty slot).
card = {
    1:0,
    2:0,
    3:0
}
# PRNG seed; it is mutated after every command so successive draws differ.
sss = 10
# Command aliases.
cm1 = 'draw'
cm2 = 'exit'
print('Usa [draw] para levantar cartas!')
print('Usa [exit] para salir')
print('Usa [inplay] para ver las cartas en juego')
# Interactive command loop.  Every command nudges the PRNG seed `sss` so
# the next draw produces a different hand.
while True:
    seed(sss)
    cmd = input('>')
    if cmd == cm2:
        exit()
    elif cmd == cm1:
        # Draw a fresh hand of three random cards.
        card[1] = randint(1, 50)
        card[2] = randint(1, 50)
        card[3] = randint(1, 50)
        print(cardnames[card[1]], ', ', cardnames[card[2]], ', ', cardnames[card[3]], '.')
        sss += card[3] + card[2] + card[1]
    elif cmd == 'inplay':
        print(cardnames[inplay[1]], ' | ', cardnames[inplay[2]], ' | ', cardnames[inplay[3]])
        sss += 1
    elif cmd == 'play':
        # NOTE(review): slot/zone numbers are not validated; values outside
        # 1-3 raise KeyError (original behavior, kept as-is).
        print('Qué carta quieres jugar?(1, 2, 3)')
        buffer = int(input(' >'))
        print('En qué zona la quieres jugar?(1, 2, 3)')
        buffera = int(input(' >'))
        inplay[buffera] = card[buffer]
        card[buffer] = 0
        sss += buffer + buffera
    elif cmd == 'hand':
        print(cardnames[card[1]], ' | ', cardnames[card[2]], ' | ', cardnames[card[3]])
        sss += 5
    elif cmd == 'text':
        print('1:', cardnames[card[1]])
        print('2:', cardnames[card[2]])
        print('3:', cardnames[card[3]])
        print('4:', cardnames[inplay[1]])
        print('5:', cardnames[inplay[2]])
        print('6:', cardnames[inplay[3]])
        print('Qué carta quieres leer?')
        tosee = int(input(' >'))
        # Bug fix: the original `if tosee < 4 / elif tosee > 3` covered every
        # integer, so the 'Número inválido' branch was unreachable and
        # out-of-range input crashed with KeyError.  Validate the ranges.
        if 1 <= tosee <= 3:
            print(cardtext[card[tosee]])
        elif 4 <= tosee <= 6:
            tosee -= 3
            print(cardtext[inplay[tosee]])
        else:
            print('Número inválido')
        sss += tosee
    elif cmd == 'clear':
        # Scroll the board off screen (was 29 copy-pasted print statements).
        for _ in range(29):
            print(' ')
        sss += randint(0, 100)
    elif cmd == 'toba':
        print('El toba es re puto dea')
        sss += 19843
    else:
        print('Nope')
        # Bug fix: `sss += cmd` added a str to an int and crashed with
        # TypeError on any unknown command; advance the seed by the
        # command's length instead.
        sss += len(cmd)
| true |
ef5fca7be74401ee5db33dba38eac9d0bfd010db | Python | wuyb518/python-learn | /基础教程/009时间.py | UTF-8 | 674 | 3.359375 | 3 | [] | no_license | #!/usr/bin/python
# _*_ coding:UTF-8 _*_
import time
import calendar
#time
ticks=time.time()
print '当前时间戳为:',ticks
localtime=time.localtime(time.time())
print '本地时间为:',localtime
print time.asctime(time.localtime(time.time()))
print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
print time.mktime(time.strptime('2018-01-01 12:00:20','%Y-%m-%d %H:%M:%S'))
# calendar
cal=calendar.month(2018,3)
print cal
# time内置函数
# time.altzone
# time.clock() 用以浮点数计算的描述返回当前cpu时间,用来衡量不同程序的耗时,比time.time()更有用
# time.sleep(secs) 退出调用线程的运行,secs指秒数
| true |
cbfbb0b8ee9cd3778b588af7f8009749a810188a | Python | 666sempron999/Abramyan-tasks- | /Series(40)/2.py | UTF-8 | 413 | 3.703125 | 4 | [] | no_license | """
Series2. Даны десять вещественных чисел. Найти их произведение.
"""
import random
import math
resultList = list()
for x in range(1,11):
resultList.append(round(random.random() * 10, 3))
mul = 1
for i in range(0,len(resultList)):
print(resultList[i])
if i != len(resultList)-1:
print("*")
mul *= resultList[i]
print("____________________")
print(mul)
| true |
1e410d23d0075b3fed5545680d05efd25d39d7ba | Python | ktn-andrea/Scripts | /10/naprendszer.py | UTF-8 | 527 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
import re
# A word matches when it contains 'j', 's', 'u', 'n' in that order.
PATTERN = '.*j.*s.*u.*n.*'
def search_words(text) -> list:
    """Return the first comma-separated field of every line in `text`
    whose field matches PATTERN.

    Bug fix: the return annotation was `str()` -- an empty-string
    *instance* -- although the function returns a list of words.
    """
    words = (line.split(',')[0] for line in text)
    return [word for word in words if re.search(PATTERN, word)]
def main():
    """Read corpus.txt and print the words matching PATTERN."""
    # Context manager guarantees the file handle is closed even if reading
    # fails (the original open()/close() pair leaked it on error).
    with open("corpus.txt", "r") as f:
        lines = f.read().splitlines()
    print(search_words(lines))
###################################################################
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true |
32b2c833c625f6905d271924804e6f08edd78860 | Python | soorya1991/Academics | /Interview_Prep/Strings/permutation_strings.py | UTF-8 | 319 | 3.375 | 3 | [] | no_license | #!/usr/bin/python
# Running total of permutations printed (mutated inside permute()).
count = 0
def permute(a,k, n):
    # Print every permutation of a[k:n] (with a[:k] fixed) by swapping each
    # candidate element into position k and recursing; the second swap
    # restores the list before trying the next candidate (backtracking).
    # Python 2 script: uses the print statement and a `global` counter.
    if k==n:
        global count
        count+=1
        print a
    else:
        i = k
        while i < n:
            a[k],a[i] = a[i],a[k]
            permute(a, k+1, n)
            a[k],a[i] = a[i],a[k]
            i+=1
# Emit all 4! = 24 permutations of [1, 2, 3, 4], then the count.
a=[1,2,3,4]
permute(a,0,4)
print count
| true |
8790523a80924c345c3e74adbc79703802288e06 | Python | kopok2/CodeforcesSolutionsPython | /src/863B/cdf_863B.py | UTF-8 | 993 | 3.25 | 3 | [
"MIT"
def total_insta(www, exl1, exl2):
    """Drop one occurrence each of exl1 and exl2 from www, then sum the
    absolute differences of the remaining consecutive pairs."""
    remaining = list(www)
    remaining.remove(exl1)
    remaining.remove(exl2)
    total = 0
    # Walk the survivors two at a time: (0, 1), (2, 3), ...
    for left in range(0, len(remaining) - 1, 2):
        total += abs(remaining[left] - remaining[left + 1])
    return total
class CodeforcesTask863BSolution:
    """Codeforces 863B: minimise total instability by choosing which two
    of the 2n people ride alone and pairing the rest greedily."""

    def __init__(self):
        self.result = ''
        self.n = 0
        self.weights = []

    def read_input(self):
        """Read n and the 2n weights from stdin."""
        self.n = int(input())
        self.weights = [int(x) for x in input().split(" ")]

    def process_task(self):
        """Try every unordered pair as the two excluded riders and keep
        the cheapest pairing of the remaining sorted weights."""
        self.weights.sort()
        best = sum(self.weights)
        people = self.n * 2
        # total_insta() removes the two excluded *values*, so it is
        # symmetric -- scanning unordered pairs (first < second) covers
        # everything the original x != y double loop did.
        for first in range(people):
            for second in range(first + 1, people):
                candidate = total_insta(self.weights, self.weights[first], self.weights[second])
                if candidate < best:
                    best = candidate
        self.result = str(best)

    def get_result(self):
        """Return the answer computed by process_task() as a string."""
        return self.result
if __name__ == "__main__":
    # Wire everything together: read stdin, solve, print the answer.
    # (Note: `Solution` is an instance despite the class-like name.)
    Solution = CodeforcesTask863BSolution()
    Solution.read_input()
    Solution.process_task()
    print(Solution.get_result())
| true |
e4e286f0225a32aa2eae875771af9c5adfa3de53 | Python | Saradippity26/Beginning-Python | /Bytes.py | UTF-8 | 867 | 4.09375 | 4 | [] | no_license | """
Bytes, not characters: learn about the bytes type.
bytes is similar to str, but it is a sequence of raw byte values rather than
Unicode code points.  Use it for raw binary data and fixed-width single-byte
encodings such as ASCII.  This script demonstrates the b'' literal, bytes
methods, and the encode/decode round trip.
"""
d = b'data' # the b prefix makes this a bytes literal, not a str
print(d, type(d))
info = b'some bytes here'
# bytes supports split() just like str (splits on whitespace by default)
print(info.split())
# Encoding: str -> bytes
message = "Vamos al zoológico" # Spanish for "we are going to the zoo"
print(message)
# Encode the string into bytes
data = message.encode("utf-8") # UTF-8 is Python's default text encoding
print(data)
# Decoding: bytes -> str, reversing the encode above
new_message = data.decode("utf-8")
print(new_message)
# Bytes are transmitted encoded; we must decode them to get text back.
bfe3ff27f0fb26bf76c0d25a468e13f018aa2e11 | Python | ayanamizuta/cpro | /tenka1/tenka1-2022/main.py | UTF-8 | 9,810 | 2.828125 | 3 | [] | no_license | import os
import sys
import random
import json
import time
import urllib.request
import urllib.error
from log import logger
from copy import deepcopy
from multiprocessing import Pool
# Game server address / API token (overridable via environment variables).
GAME_SERVER = os.getenv('GAME_SERVER', 'https://2022contest.gbc.tenka1.klab.jp')
TOKEN = os.getenv('TOKEN', 'YOUR_TOKEN')
# Each cube face is an N x N grid; Dj/Dk are the (j, k) deltas for the
# four facing directions d = 0..3.
N = 5
Dj = [+1, 0, -1, 0]
Dk = [0, +1, 0, -1]
def call_api(x: str) -> dict:
    """GET the game-server endpoint `x` and return the parsed JSON body.

    Retries up to 5 times with a 100 ms pause on 5xx responses and on
    connection resets; any other HTTP error propagates immediately.
    Raises Exception('Api Error') once the retries are exhausted.
    """
    url = f'{GAME_SERVER}{x}'
    for _ in range(5):
        logger.info(url)
        try:
            with urllib.request.urlopen(url) as res:
                return json.loads(res.read())
        except urllib.error.HTTPError as err:
            # Only server-side (5xx) failures are worth retrying.
            if 500 <= err.code < 600:
                logger.info(err.code)
                time.sleep(0.1)
                continue
            raise
        except ConnectionResetError as err:
            logger.info(err)
            time.sleep(0.1)
            continue
    raise Exception('Api Error')
def get_game_ids() -> list:
    # Poll /api/join until a contest match is available, then return its
    # game ids.  (Annotation fixed: this returns a list, not an int.)
    while True:
        join = call_api(f'/api/join/{TOKEN}')
        if join['status'] == 'ok' and join['game_ids']:
            logger.info(f"join game id: {join['game_ids']}")
            return join['game_ids']
        time.sleep(1)
    # NOTE(review): everything below is unreachable -- the while-loop above
    # only exits via return.  It looks like leftover practice-match code.
    # Call the start API to obtain a practice-match game_id.
    mode = 0
    delay = 0
    start = call_api(f'/api/start/{TOKEN}/{mode}/{delay}')
    if start['status'] == 'ok' or start['status'] == 'started':
        return [start['game_id']]
    raise Exception(f'Start Api Error : {start}')
def call_move(game_id: int, d: int) -> dict:
    """Call the move API so our agent moves in direction d (0-3)."""
    return call_api(f'/api/move/{TOKEN}/{game_id}/{d}')
# Game state: the painted field (6 faces x N x N cells) plus the 6 agents'
# positions [face, row, col, direction].
class State:
    def __init__(self, field, agent):
        # Deep-copy so simulated moves never mutate the caller's data.
        self.field = deepcopy(field)
        self.agent = deepcopy(agent)
    # Update the field at the cell where agent idx stands.
    # Return value:
    # 0: cell was already fully ours
    # 1: another agent's cell becomes half-painted
    # 2: our half-painted cell becomes fully ours
    # 3: collapse (another agent's half-painted cell reverts to unowned)
    # 4: an unowned cell becomes ours
    def paint(self, idx: int) -> int:
        i, j, k, _ = self.agent[idx]
        if self.field[i][j][k][0] == -1:
            # Unpainted by anyone: paint it for agent idx.
            self.field[i][j][k][0] = idx
            self.field[i][j][k][1] = 2
            return 4
        elif self.field[i][j][k][0] == idx:
            # Already painted by idx: overwrite to the fully-painted state.
            ret = 2
            if self.field[i][j][k][1] == 2:
                ret = 0
            self.field[i][j][k][1] = 2
            return ret
        elif self.field[i][j][k][1] == 1:
            # Half-painted by another agent: revert to unpainted.
            self.field[i][j][k][0] = -1
            self.field[i][j][k][1] = 0
            return 3
        else:
            # Fully painted by another agent: downgrade to half-painted.
            self.field[i][j][k][1] -= 1
            return 1
    # Rotate agent idx by d (direction semantics are in the problem statement).
    def rotate_agent(self, idx: int, d: int):
        self.agent[idx][3] += d
        self.agent[idx][3] %= 4
    # Advance agent idx one cell; cells (i, j, k) are described in the
    # problem statement.  Crossing an edge of face i wraps onto the adjacent
    # cube face -- the bracketed tables show the resulting face per i.
    def move_forward(self, idx: int):
        i, j, k, d = self.agent[idx]
        jj = j + Dj[d]
        kk = k + Dk[d]
        if jj >= N:
            self.agent[idx][0] = i // 3 * 3 + (i % 3 + 1) % 3 # [1, 2, 0, 4, 5, 3][i]
            self.agent[idx][1] = k
            self.agent[idx][2] = N - 1
            self.agent[idx][3] = 3
        elif jj < 0:
            self.agent[idx][0] = (1 - i // 3) * 3 + (4 - i % 3) % 3 # [4, 3, 5, 1, 0, 2][i]
            self.agent[idx][1] = 0
            self.agent[idx][2] = N - 1 - k
            self.agent[idx][3] = 0
        elif kk >= N:
            self.agent[idx][0] = i // 3 * 3 + (i % 3 + 2) % 3 # [2, 0, 1, 5, 3, 4][i]
            self.agent[idx][1] = N - 1
            self.agent[idx][2] = j
            self.agent[idx][3] = 2
        elif kk < 0:
            self.agent[idx][0] = (1 - i // 3) * 3 + (3 - i % 3) % 3 # [3, 5, 4, 0, 2, 1][i]
            self.agent[idx][1] = N - 1 - j
            self.agent[idx][2] = 0
            self.agent[idx][3] = 1
        else:
            self.agent[idx][1] = jj
            self.agent[idx][2] = kk
    # Do agents a and b stand on the same cell (face, row, col)?
    def is_same_pos(self, a: list[int], b: list[int]) -> bool:
        return a[0] == b[0] and a[1] == b[1] and a[2] == b[2]
    # Is the cell agent idx stands on painted by idx itself?
    def is_owned_cell(self, idx: int) -> bool:
        i = self.agent[idx][0]
        j = self.agent[idx][1]
        k = self.agent[idx][2]
        return self.field[i][j][k][0] == idx
    # Take an array of directions (one per agent), move them, then update
    # the field.  -1 leaves that agent in place (0-3 match the move API).
    # Returns agent 0's paint() result, 0 if agent 0 was blocked by sharing
    # a cell, or None if agent 0 did not move.
    def move(self, move: list[int]):
        # Movement phase.
        for idx in range(6):
            if move[idx] == -1:
                continue
            self.rotate_agent(idx, move[idx])
            self.move_forward(idx)
        # Field-update phase.
        ret = None
        for idx in range(6):
            if move[idx] == -1:
                continue
            ok = True
            for j in range(6):
                if idx == j or move[j] == -1 or not self.is_same_pos(self.agent[idx], self.agent[j]) or self.is_owned_cell(idx):
                    continue
                # Another moved agent shares the destination cell and the
                # cell is not ours, so only repair is possible: skip paint.
                ok = False
                break
            if not ok:
                if idx==0:
                    ret = 0
                continue
            ret_ = self.paint(idx)
            if idx==0:
                ret=ret_
        return ret
    # Does any other agent share agent 0's cell?
    def is_another_near(self) -> bool:
        for j in range(1,6):
            if self.is_same_pos(self.agent[0], self.agent[j]):
                return True
        return False
def solve_game(game_id):
    """Play one game: each turn, simulate all four directions for our agent
    (index 0), score each by its immediate paint() result, and pick a good
    direction while avoiding cells near other agents."""
    next_d = random.randint(0, 3)
    two_continue = 0
    while True:
        # Call the move API.
        move = call_move(game_id, next_d)
        logger.info('status = {}'.format(move['status']))
        if move['status'] == "already_moved":
            continue
        elif move['status'] != 'ok':
            break
        logger.info('turn = {}'.format(move['turn']))
        logger.info('score = {} {} {} {} {} {}'.format(move['score'][0], move['score'][1], move['score'][2], move['score'][3], move['score'][4], move['score'][5]))
        # Simulate moving in each of the four directions.
        best_c = -1
        best_d = []
        flip_ok = False
        another_near = [False]*4
        for d in range(4):
            m = State(move['field'], move['agent'])
            local_score = m.move([d, -1, -1, -1, -1, -1])
            if m.is_another_near():
                another_near[d]=True
            # One-step lookahead: if any follow-up move from here could put
            # us on another agent's cell, mark direction d as risky too.
            for d_ in range(4):
                if another_near[d]:
                    break
                m_ = State(m.field,m.agent)
                m_.move([d_, -1, -1, -1, -1, -1])
                if m_.is_another_near():
                    another_near[d]=True
            # Score by the paint() result code of our simulated move.
            # (The string-literal block below is disabled code that scored by
            # counting all cells owned by agent 0 instead.)
            c = local_score
            """for i in range(6):
                for j in range(N):
                    for k in range(N):
                        if m.field[i][j][k][0] == 0:
                            c += 1"""
            if d==2 and c>0:
                flip_ok=True
            # Keep the list of best-scoring directions.
            if c > best_c:
                best_c = c
                best_d = [d]
            elif c == best_c:
                best_d.append(d)
        # Prefer a best-scoring direction that is also away from other
        # agents; otherwise any safe direction; otherwise anything.
        another_vacances = [i for i in range(4) if not another_near[i]]
        if set(best_d).intersection(set(another_vacances)):
            next_d = random.choice(list(set(best_d).intersection(set(another_vacances))))
        else:
            if another_vacances:
                next_d = random.choice(another_vacances)
            else:
                next_d = random.choice([0,1,2,3])
        if flip_ok and not another_near[2]:
            next_d=2
        # Avoid choosing direction 2 more than 5 turns in a row.
        if next_d==2:
            two_continue+=1
        else:
            two_continue=0
        if two_continue>5:
            next_d = random.choice([0,1,3])
            two_continue=0
if __name__ == "__main__":
    # Entry point: keep joining matches and solve each game in parallel,
    # one worker process per game id.
    while True:
        game_ids = get_game_ids()
        with Pool(len(game_ids)) as p:
            logger.info(f"games {game_ids} start")
            print("games start")
            p.map(solve_game, game_ids)
            print("games finish")
            logger.info(f"games {game_ids} finish")
| true |
4fb0d72882e5f4ef88125da49e0983d3cf0501a8 | Python | ttsakai/danalyzer | /app/chart.py | UTF-8 | 3,527 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
from math import pi
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Japanese-capable label/legend font (Meiryo); Windows-only absolute path.
fp = FontProperties(fname=r'C:\WINDOWS\Fonts\meiryo.ttc', size=10)
class Chart(object):
    """Base class for charts: holds series names, values and category
    labels, and defines the create_chart() template method."""
    def __init__(self, name_list, values_list, categories):
        """
        Constructor for chart.
        Args:
            name_list: list of strings, one label per plotted series.
            values_list: 2-D list of values.  Outer length should match
                name_list; each inner list's length should match categories.
            categories: category names for the chart's x axis.
        """
        super().__init__()
        self.name_list = name_list
        self.values_list = values_list
        self.categories = categories
    def create_chart(self):
        """
        Template method: create the chart and show it.
        Subclasses implement the three steps called below.
        """
        self._create_angles()
        self._initialize_plt()
        self._produce_chart()
    def _create_angles(self):
        # Compute axis angles; must be overridden by subclasses.
        raise NotImplementedError
    def _initialize_plt(self):
        # Set up matplotlib state; must be overridden by subclasses.
        raise NotImplementedError
    def _produce_chart(self):
        # Draw the data; must be overridden by subclasses.
        raise NotImplementedError
class RadarChart(Chart):
    """Radar (spider) chart drawn on matplotlib's polar axes."""
    def _create_angles(self):
        """
        Create one angle per category, evenly spaced around the circle,
        and close the polygon by repeating the first angle.
        Returns:
            bool: True for success.
        """
        N = len(self.categories)
        self.angles = [n / float(N) * 2 * pi for n in range(N)]
        self.angles += self.angles[:1]
        return True
    def _initialize_plt(self):
        """
        Initialize matplotlib.pyplot: polar axes, xticks, yticks, ylim.
        Should be called after _create_angles().
        Returns:
            bool: True for success, False if self.angles is empty.
        """
        if not self.angles:
            return False
        # Close any previously open figure/session.
        plt.close()
        self.ax = plt.subplot(111, polar=True)
        # Category labels around the rim.
        plt.xticks(
            self.angles[:-1],
            self.categories,
            color='grey',
            size=8,
            fontproperties=fp)
        # Radial ticks at 0.1 .. 0.9, labelled 10 .. 90.
        ytick_angle = [ float("0." + str(x) ) for x in range(1,10) ]
        ytick_label = [ str(x) for x in range(10,100,10) ]
        plt.yticks(ytick_angle, ytick_label , color="grey", size=7)
        plt.ylim(0,1)
        return True
    def _produce_chart(self):
        """
        Plot each series as a closed, lightly filled polygon, add a legend,
        then save the figure (when self.save is set) or show it.
        Returns:
            bool: True for success; exceptions are re-raised.
        """
        try:
            for name,values in zip(self.name_list,self.values_list):
                # Close the polygon by repeating the first value.
                # NOTE(review): += mutates the caller's inner lists, so
                # calling create_chart() twice appends again -- confirm.
                values += values[:1]
                self.ax.set_rlabel_position(0)
                self.ax.plot(self.angles, values, linewidth=1, linestyle='solid')
                # Fill the enclosed area.
                self.ax.fill(self.angles, values, 'b', alpha=0.1)
            plt.legend(self.name_list,prop=fp,bbox_to_anchor=(-0.02, 1.0))
            # [TODO] looks ugly, needs refactoring: the save/show decision
            # relies on attributes (save, dirpath, filename) set externally.
            if hasattr(self,"save") and self.save:
                filepath = os.sep.join([self.dirpath,self.filename])
                plt.savefig(filepath)
            else:
                plt.show()
        except:
            # NOTE(review): bare except + immediate re-raise is a no-op, and
            # the `return False` below is unreachable after `raise`.
            raise
            return False
        else:
            return True
003e2b652ac9e2da0603b990dadcb44210bc0b86 | Python | Aravindh-SNR/CS50-problem-sets | /finance/application.py | UTF-8 | 13,597 | 2.671875 | 3 | [] | no_license | import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded (useful during development)
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client-side caching on every response."""
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    response.headers["Expires"] = 0
    response.headers["Pragma"] = "no-cache"
    return response
# Custom Jinja filter: usd() formats numbers as US dollars in templates
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set (required by lookup() for stock quotes)
if not os.environ.get("API_KEY"):
    raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
    """Show portfolio of stocks: per-stock holdings with current prices,
    plus the user's cash and grand total."""
    # Query database for shares owned by user (only stocks still held)
    data = db.execute(
        "SELECT stock, SUM(shares) AS shares FROM shares WHERE id = :id GROUP BY stock HAVING SUM(shares) > 0", id=session["user_id"])
    # Represents sum of share values and cash
    total = 0
    # Look up stock details and add more key-value pairs to the dict objects in data
    for item in data:
        # NOTE(review): lookup() may return None on API failure -- confirm
        # how that case should be handled here.
        stock = lookup(item["stock"])
        item["name"] = stock["name"]
        item["price"] = stock["price"]
        item["total"] = item["shares"] * item["price"]
        total += item["total"]
    # Query database for cash currently available with user
    cash = db.execute("SELECT cash FROM users WHERE id = :id", id=session["user_id"])[0]["cash"]
    total += cash
    return render_template("index.html", data=data, cash=cash, total=total)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
    """Buy shares of stock.

    POST: validate symbol and share count, check available cash, record
    the purchase and debit the user's cash.  GET: render the buy form.
    """
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure stock symbol was submitted
        if not request.form.get("symbol"):
            return apology("must provide stock symbol")
        # Ensure number of shares was submitted
        elif not request.form.get("shares"):
            return apology("must provide number of shares")
        # Ensure numbers of shares is a positive integer
        elif not request.form.get("shares").isdecimal():
            return apology("number of shares must be a positive integer")
        # Get stock details
        stock = lookup(request.form.get("symbol"))
        # Ensure valid stock symbol was submitted
        if not stock:
            return apology("invalid stock symbol")
        # Number of shares user wants to buy.
        # NOTE(review): isdecimal() above already rejects inputs like "1.0",
        # so the float() step is redundant (but harmless).
        shares = int(float(request.form.get("shares")))
        # Query database for cash currently available with user
        cash = db.execute("SELECT cash FROM users WHERE id = :id", id=session["user_id"])[0]["cash"]
        # Ensure user has enough cash to buy requested number of shares
        if cash < shares * stock["price"]:
            return apology("insufficient cash")
        # Insert purchase details into database
        db.execute("INSERT INTO shares (id, stock, price, shares, transaction_date) VALUES (:id, :stock, :price, :shares, datetime('now'))",
                   id=session["user_id"], stock=stock["symbol"], price=stock["price"], shares=shares)
        # Update cash available with user in database
        db.execute("UPDATE users SET cash = :cash WHERE id = :id", cash=cash - (shares * stock["price"]), id=session["user_id"])
        # Display an alert message after shares are bought
        flash(f"{shares} {stock['symbol']} {'share' if shares == 1 else 'shares'} bought!")
        # Redirect user to home page
        return redirect("/")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("buy.html")
@app.route("/check", methods=["GET"])
def check():
    """Return true if username available, else false, in JSON format."""
    username = request.args.get("username")
    # No username supplied -> treat as unavailable
    if not username:
        return jsonify(False)
    # Query database for a user with the requested username
    rows = db.execute("SELECT id FROM users WHERE username = :username", username=username)
    # Available exactly when no existing row matched.  Idiomatic truthiness
    # (`not rows`) replaces the old `not len(user)` plus a conditional
    # expression that built two separate jsonify() calls.
    return jsonify(not rows)
@app.route("/history")
@login_required
def history():
    """Show history of transactions, oldest first."""
    # Query database for the user's share transactions
    data = db.execute("SELECT * FROM shares WHERE id = :id ORDER BY transaction_date", id=session["user_id"])
    for item in data:
        # Positive share counts were purchases; negative ones were sales
        item["type"] = "Bought" if item["shares"] > 0 else "Sold"
        # Display the magnitude only; abs() replaces the original
        # `x + (-2 * x)` trick, which was just a convoluted negation
        item["shares"] = abs(item["shares"])
    return render_template("history.html", data=data)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in.

    GET renders the login form; POST validates the credentials against
    the users table and stores the user's id in the session.
    """
    # Forget any user_id
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username", 403)
        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 403)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
            return apology("invalid username and/or password", 403)
        # Remember which user has logged in
        session["user_id"] = rows[0]["id"]
        # Redirect user to home page
        return redirect("/")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure stock symbol was submitted
if not request.form.get("symbol"):
return apology("must provide stock symbol")
# Get stock details
stock = lookup(request.form.get("symbol"))
# Ensure valid stock symbol was submitted
if not stock:
return apology("invalid stock symbol")
return render_template("quoted.html", stock=stock)
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# Ensure password confirmation was submitted
elif not request.form.get("confirmation"):
return apology("must confirm password")
# Ensure passwords match
elif not request.form.get("password") == request.form.get("confirmation"):
return apology("passwords do not match")
# Insert new user into database
user = db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash)",
username=request.form.get("username"), hash=generate_password_hash(request.form.get("password")))
# Ensure username is unique
if not user:
return apology("username already exists")
# Remember which user has registered
session["user_id"] = user
# Display an alert message after user is registered
flash("Registered!")
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure stock symbol was submitted
if not request.form.get("symbol"):
return apology("must provide stock symbol")
# Ensure number of shares was submitted
elif not request.form.get("shares"):
return apology("must provide number of shares")
# Ensure numbers of shares is a positive integer
elif not request.form.get("shares").isdecimal():
return apology("number of shares must be a positive integer")
# Get stock details
stock = lookup(request.form.get("symbol"))
# Query database for number of stocks currently available with user
count = db.execute("SELECT SUM(shares) AS shares FROM shares WHERE id = :id AND stock = :stock",
id=session["user_id"], stock=stock["symbol"])[0]["shares"]
# Number of shares user wants to sell. Converting from float to int in case user enters a value like 1.0
shares = int(float(request.form.get("shares")))
# Ensure user has enough shares to sell
if count < shares:
return apology("insufficient shares")
# Insert sale details into database
db.execute("INSERT INTO shares (id, stock, price, shares, transaction_date) VALUES (:id, :stock, :price, :shares, datetime('now'))",
id=session["user_id"], stock=stock["symbol"], price=stock["price"], shares=-shares)
# Query database for cash currently available with user
cash = db.execute("SELECT cash FROM users WHERE id = :id", id=session["user_id"])[0]["cash"]
# Update cash available with user in database
db.execute("UPDATE users SET cash = :cash WHERE id = :id", cash=cash + (shares * stock["price"]), id=session["user_id"])
# Display an alert message after shares are sold
flash(f"{shares} {stock['symbol']} {'share' if shares == 1 else 'shares'} sold!")
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
stocks = db.execute("SELECT stock FROM shares WHERE id = :id GROUP BY id, stock HAVING SUM(shares) > 0",
id=session["user_id"])
return render_template("sell.html", stocks=stocks)
@app.route("/change", methods=["GET", "POST"])
@login_required
def change_password():
"""Change user's password"""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure old password was submitted
if not request.form.get("old-password"):
return apology("must provide old password")
# Ensure new password was submitted
elif not request.form.get("new-password"):
return apology("must provide new password")
# Ensure new password was confirmed
elif not request.form.get("confirmation"):
return apology("must confirm new password")
# Ensure passwords match
elif not request.form.get("new-password") == request.form.get("confirmation"):
return apology("passwords do not match")
# Query database for user's old hash
hash = db.execute("SELECT hash FROM users WHERE id = :id",
id=session["user_id"])[0]["hash"]
# Ensure old password is correct
if not check_password_hash(hash, request.form.get("old-password")):
return apology("incorrect old password", 403)
# Update password in database
db.execute("UPDATE users SET hash = :hash WHERE id = :id",
hash=generate_password_hash(request.form.get("new-password")), id=session["user_id"])
# Display an alert message after password is changed
flash("Password changed!")
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("change.html")
def errorhandler(e):
    """Handle error"""
    # Anything that is not an HTTPException is reported as a generic 500
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return apology(e.name, e.code)
# Listen for errors
# Register errorhandler() for every standard HTTP error code Werkzeug knows
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
75bc8ac6bae8d6fe0654aaff7267000f9d00cf5d | Python | threathive/mwdblib | /src/core.py | UTF-8 | 22,745 | 2.53125 | 3 | [
"MIT"
] | permissive | import getpass
import json
import itertools
import time
import warnings
from .api import MalwarecageAPI
from .exc import ObjectNotFoundError, ValidationError
from .object import MalwarecageObject
from .file import MalwarecageFile
from .config import MalwarecageConfig
from .blob import MalwarecageBlob
# Python 2/3 compatibility: expose a single `user_input` callable that maps
# to raw_input() on Py2 (where __builtin__ exists) and input() on Py3.
try:
    import __builtin__
    user_input = getattr(__builtin__, "raw_input")
except ImportError:
    user_input = input
class Malwarecage(object):
    """
    Main object used for communication with Malwarecage

    :param api: Custom :class:`MalwarecageAPI` used to communicate with Malwarecage
    :type api: :class:`MalwarecageAPI`, optional
    :param api_key: API key used for authentication (omit if password-based authentication is used)
    :type api_key: str, optional

    .. versionadded:: 2.6.0
       API request will sleep for a dozen of seconds when rate limit has been exceeded.

    .. versionadded:: 3.2.0
       You can enable :attr:`retry_on_downtime` to automatically retry
       requests in case of HTTP 502/504 or ConnectionError.

    Usage example:

    .. code-block:: python

       from mwdblib import Malwarecage

       mwdb = Malwarecage()
       mwdb.login("example", "<password>")

       file = mwdb.query_file("3629344675705286607dd0f680c66c19f7e310a1")
    """
    def __init__(self, api=None, **api_options):
        # Use a caller-provided API client, or build a default one from options
        self.api = api or MalwarecageAPI(**api_options)

    def login(self, username=None, password=None, warn=True):
        """
        Performs user authentication using provided username and password.

        .. warning::
           Keep in mind that password-authenticated sessions are short lived, so password needs to be stored
           in :class:`MalwarecageAPI` object. Ask Malwarecage instance administrator for an API key (or send e-mail to
           info@cert.pl if you use mwdb.cert.pl)

        .. versionadded:: 2.4.0
           Malwarecage tries to reauthenticate on first Unauthorized exception

        .. versionadded:: 2.5.0
           username and password arguments are optional. If one of the credentials is not provided via arguments,
           user will be asked for it.

        .. versionadded:: 2.6.0
           :py:meth:`Malwarecage.login` will warn if login is called after setting up API key

        :param username: User name
        :type username: str
        :param password: Password
        :type password: str
        :param warn: Show warning about password-authenticated sessions
        :type warn: bool (default: True)
        :raises: requests.exceptions.HTTPError
        """
        if self.api.api_key is not None:
            warnings.warn("login() will reset the previously set API key. If you really want to reauthenticate, "
                          "call logout() before to suppress this warning.")
        if username is None:
            # Py2 compatibility
            username = user_input("Username: ")
        if password is None:
            password = getpass.getpass("Password: ")
        self.api.login(username, password, warn=warn)

    def logout(self):
        """
        Performs session logout and removes previously set API key.
        """
        self.api.logout()

    def _recent(self, object_type, query=None):
        # Generator that pages through the listing endpoint for object_type,
        # newest first, using `older_than` cursors until an empty page arrives.
        try:
            last_object = None
            while True:
                params = {"older_than": last_object.id} if last_object else {}
                if query is not None:
                    params["query"] = query
                # 'object', 'file', 'config' or 'blob'?
                endpoint = object_type.URL_PATTERN.split("/")[0]
                result = self.api.get(endpoint, params=params)
                key = endpoint+"s"
                if key not in result or len(result[key]) == 0:
                    return
                for obj in result[key]:
                    last_object = object_type.create(self.api, obj)
                    yield last_object
        except ObjectNotFoundError:
            return

    def recent_objects(self):
        """
        Retrieves recently uploaded objects
        If you already know type of object you are looking for, use specialized variants:

        - :py:meth:`recent_files`
        - :py:meth:`recent_configs`
        - :py:meth:`recent_blobs`

        Usage example:

        .. code-block:: python

           from mwdblib import Malwarecage
           from itertools import islice

           mwdb = Malwarecage()
           mwdb.login("admin", "password123")

           # recent_files is generator, do not execute list(recent_files)!
           files = islice(mwdb.recent_files(), 25)
           print([(f.name, f.tags) for f in files])

        :rtype: Iterator[:class:`MalwarecageObject`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageObject)

    def recent_files(self):
        """
        Retrieves recently uploaded files

        :rtype: Iterator[:class:`MalwarecageFile`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageFile)

    def recent_configs(self):
        """
        Retrieves recently uploaded configuration objects

        :rtype: Iterator[:class:`MalwarecageConfig`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageConfig)

    def recent_blobs(self):
        """
        Retrieves recently uploaded blob objects

        :rtype: Iterator[:class:`MalwarecageBlob`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageBlob)

    def _listen(self, last_object, object_type, blocking=True, interval=15):
        if last_object is None:
            last_object = next(self._recent(object_type))
        # If there are no elements (even first element): just get new samples from now on
        # NOTE(review): the `elif isinstance(...)` branch below can never run --
        # the preceding `if last_object is not None` matches every non-None
        # value, so the type-check / repository-existence logic described in
        # the listen_for_* docstrings appears dead. This looks like lost
        # nesting (the second `if` was probably inside the first) -- confirm
        # against upstream mwdblib before relying on the documented validation.
        if last_object is not None:
            last_id = last_object.id
        elif isinstance(last_object, MalwarecageObject):
            # If we are requesting for typed objects, we should additionally check the object type
            if object_type is not MalwarecageObject and not isinstance(last_object, object_type):
                raise TypeError("latest_object type must be 'str' or '{}'".format(object_type.__name__))
            # If object instance provided: get ID from instance
            last_id = last_object.id
        else:
            # If not: first check whether object exists in repository
            last_id = self._query(object_type, last_object, raise_not_found=True).id
        while True:
            objects = list(itertools.takewhile(lambda el: el.id != last_id,
                                               self._recent(object_type)))
            # Return fetched objects in reversed order (from oldest to latest)
            for obj in objects[::-1]:
                last_id = obj.id
                yield obj
            if blocking:
                time.sleep(interval)
            else:
                break

    def listen_for_objects(self, last_object=None, **kwargs):
        """
        Listens for recent objects and yields newly added.

        In blocking mode (default) if last_object is provided: the method fetches the latest objects until
        the provided object is reached and yields new objects from the oldest one. Otherwise, the method periodically
        asks for recent objects until a new object appears. The default request interval is 15 seconds.

        In a non-blocking mode: a generator stops if there are no more objects to fetch.

        last_object argument accepts both identifier and MalwarecageObject instance. If the object identifier is
        provided: method firstly checks whether the object exists in repository and has the correct type.

        If you already know type of object you are looking for, use specialized variants:

        - :py:meth:`listen_for_files`
        - :py:meth:`listen_for_configs`
        - :py:meth:`listen_for_blobs`

        Using this method you need to

        .. warning::
           Make sure that last_object is valid in Malwarecage instance. If you provide MalwarecageObject that doesn't
           exist, mwdblib will iterate over all objects and you can quickly hit your rate limit. Library is trying to
           protect you from that as much as possible by checking type and object existence, but it's still possible to
           do something unusual.

        .. versionadded:: 3.2.0
           Added listen_for_* methods

        :param last_object: MalwarecageObject instance or object hash
        :type last_object: MalwarecageObject or str
        :param blocking: Enable blocking mode (default)
        :type blocking: bool, optional
        :param interval: Interval for periodic queries in blocking mode (default is 15 seconds)
        :type interval: int, optional
        :rtype: Iterator[:class:`MalwarecageObject`]
        """
        return self._listen(last_object,
                            object_type=MalwarecageObject,
                            **kwargs)

    def listen_for_files(self, last_object=None, **kwargs):
        """
        Listens for recent files and yields newly added.

        .. seealso::
           More details can be found here: :meth:`listen_for_objects`

        .. versionadded:: 3.2.0
           Added listen_for_* methods

        :param last_object: MalwarecageFile instance or object hash
        :type last_object: MalwarecageFile or str
        :param blocking: Enable blocking mode (default)
        :type blocking: bool, optional
        :param interval: Interval for periodic queries in blocking mode (default is 15 seconds)
        :type interval: int, optional
        :rtype: Iterator[:class:`MalwarecageFile`]
        """
        return self._listen(last_object,
                            object_type=MalwarecageFile,
                            **kwargs)

    def listen_for_configs(self, last_object=None, **kwargs):
        """
        Listens for recent configs and yields newly added.

        .. seealso::
           More details can be found here: :meth:`listen_for_objects`

        .. versionadded:: 3.2.0
           Added listen_for_* methods

        :param last_object: MalwarecageConfig instance or object hash
        :type last_object: MalwarecageConfig or str
        :param blocking: Enable blocking mode (default)
        :type blocking: bool, optional
        :param interval: Interval for periodic queries in blocking mode (default is 15 seconds)
        :type interval: int, optional
        :rtype: Iterator[:class:`MalwarecageConfig`]
        """
        return self._listen(last_object,
                            object_type=MalwarecageConfig,
                            **kwargs)

    def listen_for_blobs(self, last_object=None, **kwargs):
        """
        Listens for recent blobs and yields newly added.

        .. seealso::
           More details can be found here: :meth:`listen_for_objects`

        .. versionadded:: 3.2.0
           Added listen_for_* methods

        :param last_object: MalwarecageBlob instance or object hash
        :type last_object: MalwarecageBlob or str
        :param blocking: Enable blocking mode (default)
        :type blocking: bool, optional
        :param interval: Interval for periodic queries in blocking mode (default is 15 seconds)
        :type interval: int, optional
        :rtype: Iterator[:class:`MalwarecageBlob`]
        """
        return self._listen(last_object,
                            object_type=MalwarecageBlob,
                            **kwargs)

    def _query(self, object_type, hash, raise_not_found):
        # Fetch a single object by identifier; optionally map "not found"
        # to None instead of raising.
        try:
            result = self.api.get(object_type.URL_PATTERN.format(id=hash))
            return object_type.create(self.api, result)
        except ObjectNotFoundError:
            if not raise_not_found:
                return None
            else:
                raise

    def query(self, hash, raise_not_found=True):
        """
        Queries for object using provided hash.
        If you already know type of object you are looking for, use specialized variants:

        - :py:meth:`query_file`
        - :py:meth:`query_config`
        - :py:meth:`query_blob`

        .. versionadded:: 2.4.0
           Added raise_not_found optional argument

        .. versionchanged:: 3.0.0
           Fallback to :py:meth:`query_file` if other hash than SHA256 was provided

        :param hash: Object hash (identifier, MD5, SHA-1, SHA-2)
        :type hash: str
        :param raise_not_found: If True (default), method raises HTTPError when object is not found
        :type raise_not_found: bool, optional
        :rtype: :class:`MalwarecageObject` or None (if raise_not_found=False)
        :raises: requests.exceptions.HTTPError
        """
        if len(hash) != 64:
            # If different hash than SHA256 was provided
            return self.query_file(hash, raise_not_found=raise_not_found)
        return self._query(MalwarecageObject, hash, raise_not_found)

    def query_file(self, hash, raise_not_found=True):
        """
        Queries for file using provided hash

        :param hash: Object hash (identifier, MD5, SHA-1, SHA-2)
        :type hash: str
        :param raise_not_found: If True (default), method raises HTTPError when object is not found
        :type raise_not_found: bool
        :rtype: :class:`MalwarecageFile` or None (if raise_not_found=False)
        :raises: requests.exceptions.HTTPError
        """
        return self._query(MalwarecageFile, hash, raise_not_found)

    def query_config(self, hash, raise_not_found=True):
        """
        Queries for configuration object using provided hash

        :param hash: Object hash (SHA-256 identifier)
        :type hash: str
        :param raise_not_found: If True (default), method raises HTTPError when object is not found
        :type raise_not_found: bool
        :rtype: :class:`MalwarecageConfig` or None (if raise_not_found=False)
        :raises: requests.exceptions.HTTPError
        """
        return self._query(MalwarecageConfig, hash, raise_not_found)

    def query_blob(self, hash, raise_not_found=True):
        """
        Queries for blob object using provided hash

        :param hash: Object hash (SHA-256 identifier)
        :type hash: str
        :param raise_not_found: If True (default), method raises HTTPError when object is not found
        :type raise_not_found: bool
        :rtype: :class:`MalwarecageBlob` or None (if raise_not_found=False)
        :raises: requests.exceptions.HTTPError
        """
        return self._query(MalwarecageBlob, hash, raise_not_found)

    def search(self, query):
        """
        Advanced search for objects using Lucene syntax.
        If you already know type of object you are looking for, use specialized variants:

        - :py:meth:`search_files`
        - :py:meth:`search_configs`
        - :py:meth:`search_blobs`

        Usage example:

        .. code-block:: python

           from mwdblib import Malwarecage

           # Search for samples tagged as evil and with size less than 100kB
           results = mwdb.search_files("tag:evil AND file.size:[0 TO 100000]")

        :param query: Search query
        :type query: str
        :rtype: Iterator[:class:`MalwarecageObject`]
        :raises: requests.exceptions.HTTPError
        """
        result = self.api.post("search", json={"query": query})
        for file in result:
            yield MalwarecageObject.create(self.api, file)

    def search_files(self, query):
        """
        Advanced search for files using Lucene syntax.

        :param query: Search query
        :type query: str
        :rtype: Iterator[:class:`MalwarecageFile`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageFile, query)

    def search_configs(self, query):
        """
        Advanced search for configuration objects using Lucene syntax.

        :param query: Search query
        :type query: str
        :rtype: Iterator[:class:`MalwarecageConfig`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageConfig, query)

    def search_blobs(self, query):
        """
        Advanced search for blob objects using Lucene syntax.

        :param query: Search query
        :type query: str
        :rtype: Iterator[:class:`MalwarecageBlob`]
        :raises: requests.exceptions.HTTPError
        """
        return self._recent(MalwarecageBlob, query)

    @staticmethod
    def _convert_bytes(data):
        # Recursively decode bytes to str (utf-8, replacing bad sequences)
        # so request payloads are JSON-serializable.
        if isinstance(data, dict):
            return dict(map(Malwarecage._convert_bytes, data.items()))
        if isinstance(data, bytes):
            return data.decode('utf-8', 'replace')
        if isinstance(data, (tuple, list)):
            return list(map(Malwarecage._convert_bytes, data))
        return data

    def _upload(self, type, parent=None, metakeys=None,
                share_with=None, private=False, public=False,
                req_files=None, req_json=None):
        # Common PUT helper for upload_file/upload_config/upload_blob.
        # "root" means: no parent object.
        parent = parent or "root"
        if isinstance(parent, MalwarecageObject):
            parent = parent.id

        metakeys = metakeys or []
        req_files = req_files or {}
        req_json = req_json or {}

        # Normalize a {key: value-or-list} dict into a list of
        # {"key": ..., "value": ...} entries expected by the API.
        if isinstance(metakeys, dict):
            metakeys = [{"key": key, "value": value}
                        for key, value_list in metakeys.items()
                        for value in (value_list if isinstance(value_list, list) else [value_list])]

        if private and public:
            raise ValidationError("Sample can't be both private and public")
        if public:
            share_with = "public"
        if private:
            share_with = self.api.logged_user

        result = self.api.put("{}/{}".format(type, parent), data={
            'metakeys': json.dumps({'metakeys': metakeys}),
            'upload_as': share_with or "*"
        }, files=req_files, json=self._convert_bytes(req_json))
        return result

    def upload_file(self, name, content, **kwargs):
        """
        Upload file object

        :param name: Original file name (see also :py:attr:`MalwarecageFile.file_name`)
        :type name: str
        :param content: File contents
        :type content: bytes
        :param parent: Parent object or parent identifier
        :type parent: :class:`MalwarecageObject` or str, optional
        :param metakeys: Dictionary with metakeys.
            If you want to set many values with the same key: use list as value
        :type metakeys: dict, optional
        :param share_with: Group name you want to share object with
        :type share_with: str, optional
        :param private: True if sample should be uploaded as private
        :type private: bool, optional
        :param public: True if sample should be visible for everyone
        :type public: bool, optional
        :rtype: :class:`MalwarecageFile`
        :raises: :class:`requests.exceptions.HTTPError`, :class:`ValueError`

        Usage example:

        .. code-block:: python

           mwdb.upload_file(
               "malware.exe",
               open("malware.exe", "rb").read(),
               parent="3629344675705286607dd0f680c66c19f7e310a1",
               public=True)
        """
        result = self._upload("file", req_files={'file': (name, content)}, **kwargs)
        return MalwarecageFile(self.api, result)

    def upload_config(self, family, cfg, config_type="static", **kwargs):
        """
        Upload configuration object

        :param family: Malware family name (see also :py:attr:`MalwarecageConfig.family`)
        :type family: str
        :param cfg: Dict object with configuration (see also :py:attr:`MalwarecageConfig.cfg`)
        :type cfg: dict
        :param config_type: Configuration type (default: static, see also :py:attr:`MalwarecageConfig.type`)
        :type config_type: str, optional
        :param parent: Parent object or parent identifier
        :type parent: :class:`MalwarecageObject` or str, optional
        :param metakeys: Dictionary with metakeys.
            If you want to set many values with the same key: use list as value
        :type metakeys: dict, optional
        :param share_with: Group name you want to share object with
        :type share_with: str, optional
        :param private: True if sample should be uploaded as private
        :type private: bool, optional
        :param public: True if sample should be visible for everyone
        :type public: bool, optional
        :rtype: :class:`MalwarecageConfig`
        :raises: :class:`requests.exceptions.HTTPError`, :class:`ValueError`

        .. code-block:: python

           mwdb.upload_config(
               "evil",
               {
                   "botnet": "mal0123",
                   "version": 2019,
                   "urls": [
                       "http://example.com",
                       "http://example.com/2"
                   ]
               }
               parent="3629344675705286607dd0f680c66c19f7e310a1",
               public=True)
        """
        result = self._upload("config", req_json={
            "family": family,
            "cfg": cfg,
            "config_type": config_type
        }, **kwargs)
        return MalwarecageConfig(self.api, result)

    def upload_blob(self, name, type, content, **kwargs):
        """
        Upload blob object

        :param name: Blob name (see also :py:attr:`MalwarecageBlob.blob_name`)
        :type name: str
        :param type: Blob type (see also :py:attr:`MalwarecageBlob.blob_type`)
        :type type: str
        :param content: Blob content (see also :py:attr:`MalwarecageBlob.content`)
        :type content: str
        :param parent: Parent object or parent identifier
        :type parent: :class:`MalwarecageObject` or str, optional
        :param metakeys: Dictionary with metakeys.
            If you want to set many values with the same key: use list as value
        :type metakeys: dict, optional
        :param share_with: Group name you want to share object with
        :type share_with: str, optional
        :param private: True if sample should be uploaded as private
        :type private: bool, optional
        :param public: True if sample should be visible for everyone
        :type public: bool, optional
        :rtype: :class:`MalwarecageBlob`
        :raises: :class:`requests.exceptions.HTTPError`, :class:`ValueError`
        """
        result = self._upload("blob", req_json={
            "blob_name": name,
            "blob_type": type,
            "content": content
        }, **kwargs)
        return MalwarecageBlob(self.api, result)
| true |
9d14c453733a948ee38bd2e349ff2e8c28fe2619 | Python | zm-git-dev/Bioinfo-pipelines | /bioinfo_training/exercises_lecture1-7/day03/test002/test003.py | UTF-8 | 941 | 3.140625 | 3 | [] | no_license | # each word's frequency
import os
files = os.listdir()
word_count = {}
def read_file(file):
    """Read a text file and return its tokens as a list.

    Commas are removed and each stripped line is split on single spaces
    (so runs of spaces yield empty-string tokens, as before).
    """
    word = []
    # `with` guarantees the handle is closed (the original leaked it)
    with open(file) as f:
        for line in f:
            line = line.replace(',', '')
            line = line.strip()
            word.extend(line.split(' '))
    return word
def clear_account(lists):
    """Return a dict mapping each word to its occurrence count.

    Keys appear in first-occurrence order (like the original
    dict.fromkeys() approach), but counting is a single O(n) pass
    instead of one O(n) list.count() scan per distinct word.
    """
    wordkey = {}
    for word in lists:
        wordkey[word] = wordkey.get(word, 0) + 1
    return wordkey
def wordsort(wordkey):
    """Return the word counts as a dict ordered by count, descending.

    The empty-string token (an artifact of splitting on single spaces)
    is dropped if present; the original `del [wordkey['']]` raised
    KeyError whenever no empty token existed.
    """
    wordkey.pop('', None)
    # sorted() is stable, so equal counts keep their original key order
    wordkey_sorted = dict(sorted(wordkey.items(), key=lambda d: d[1], reverse=True))
    return wordkey_sorted
def main(wordkey_sorted):
    """Print the sorted word-frequency mapping."""
    print(wordkey_sorted)
# Build a per-file frequency table for every .txt file in the working
# directory (`files` is os.listdir() captured at import time).
for file in files:
    if os.path.splitext(file)[1] == ".txt":
        word_count[file] = wordsort(clear_account(read_file(file)))
print(word_count)
| true |
3cd7a98f7606d9d7446812de621897f7d6ff0587 | Python | missbelinda/Modul-2-Purwadhika | /Day03/nov18b.py | UTF-8 | 766 | 2.578125 | 3 | [] | no_license | import requests
host = "https://developers.zomato.com/api/v2.1"
kategori = "/categories"
inputCity = input("Masukkan kota : ")
city = f"/cities?q={inputCity}"
apikey = "6a3792f3d2ab39afa6fbe5b442af12e9"
headInfo = {
"user-key":apikey
}
urlCity = host + city
kota = requests.get(urlCity, headers=headInfo)
kodeKota = (kota.json()["location_suggestions"][0]["id"])
inputMenu = input("Makanan apa yang ingin anda cari : ")
urlMenu = host + f"/search?entity_id={kodeKota}&entity_type=city&q={inputMenu}"
data = requests.get(urlMenu, headers=headInfo)
data = (data.json()["restaurants"])
for i in range (len(data)):
ambil = data[i]["restaurant"]
print (f"+ {ambil['name']}, {ambil['location']['address']}, rating : {ambil['user_rating']['aggregate_rating']}") | true |
e51d9a0bff0ab1801b071f7b3f9737c3254245be | Python | josh1985m/Sqlite3 | /RowColDb.py | UTF-8 | 248 | 2.796875 | 3 | [] | no_license | import sqlite3
con = sqlite3.connect('mydatabase.db')
cursorObj = con.cursor()
print(cursorObj.execute('SELECT * FROM employees').rowcount)
rows = cursorObj.fetchall()
print(len(rows))
print(cursorObj.execute('DELETE FROM employees').rowcount)
| true |
0958c8fe13f2a2d7ddb03179a826938f54382726 | Python | chithien0909/Competitive-Programming | /Leetcode/Leetcode - Premium/Google/Valid Parenthesis String.py | UTF-8 | 1,681 | 4.1875 | 4 | [] | no_license | """
Given a string containing only three types of characters: '(', ')' and '*', write a function to check whether this string is valid. We define the validity of a string by these rules:
Any left parenthesis '(' must have a corresponding right parenthesis ')'.
Any right parenthesis ')' must have a corresponding left parenthesis '('.
Left parenthesis '(' must go before the corresponding right parenthesis ')'.
'*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string.
An empty string is also valid.
Example 1:
Input: "()"
Output: True
Example 2:
Input: "(*)"
Output: True
Example 3:
Input: "(*))"
Output: True
Note:
The string size will be in the range [1, 100].
"""
class Solution:
    def checkValidString(self, instr: str) -> bool:
        """Return True if `instr` can be read as balanced parentheses,
        treating each '*' as '(', ')' or the empty string."""
        opens, stars, closes = [], [], []
        # First pass: match ')' greedily against earlier '(', recording the
        # positions of everything left unmatched.
        for idx, ch in enumerate(instr):
            if ch == '*':
                stars.append(idx)
            elif ch == '(':
                opens.append(idx)
            else:
                if opens and opens[-1] < idx:
                    opens.pop()
                else:
                    closes.append(idx)
        # Each leftover ')' (left to right) needs a '*' that occurs before it.
        used = 0
        for close_idx in closes:
            if used >= len(stars) or stars[used] >= close_idx:
                return False
            used += 1
        remaining_stars = stars[used:]
        # Each leftover '(' (right to left) needs a '*' that occurs after it.
        while opens and remaining_stars:
            if remaining_stars[-1] > opens[-1]:
                remaining_stars.pop()
                opens.pop()
            else:
                break
        return not opens
# Quick manual check: "((*)*" is valid (first '*' as empty, last '*' as ')').
s = Solution()
print(s.checkValidString("((*)*"))
| true |
676106b3549f5ce80a2a7301092442342d6ffdf4 | Python | bavardage/ACM-Practice | /csubsub/genfile.py | UTF-8 | 207 | 3.203125 | 3 | [] | no_license |
import random
getchar = lambda : chr(97 + int(random.random()*25))
outmessage = [getchar() for i in range(100000)]
text = [getchar() for i in range(100)]
print "".join(outmessage)
print "".join(text)
| true |
1cd6e3c62365447ae9a2df90184a77056c84bac7 | Python | krishna-tx/hack_high_school | /OOP/final_proj/card.py | UTF-8 | 86 | 2.59375 | 3 | [] | no_license | class Card:
def __init__(self, power):
cost = 5
self.power = power | true |
542543f6e98f711d62b3f72b29f62ebc17167845 | Python | Aasthaengg/IBMdataset | /Python_codes/p02410/s841422361.py | UTF-8 | 258 | 2.765625 | 3 | [] | no_license | n, m = map(int, input().split())
a = [list(map(int, input().split())) for _ in range(n)]
b = [int(input()) for _ in range(m)]
ans = [0 for _ in range(n)]
for i in range(n):
for j in range(m):
ans[i] += a[i][j]*b[j]
print("\n".join(map(str, ans))) | true |
1b736f4acb72c2bcd73c89a8213bc5460151cbc7 | Python | wisesky/LeetCode-Practice | /src/29.divide-integers.py | UTF-8 | 929 | 3.421875 | 3 | [
"MIT"
] | permissive | class Solution:
# 不用 内置 除法 实现 //
def divide(self, dividend: int, divisor: int) -> int:
if (dividend < 0 ) ^ (divisor < 0):
flag = -1
else:
flag = 1
dividend, divisor = abs(dividend), abs(divisor)
res = 0
while dividend >= divisor:
# 通过多次减法来实现,每减去一个 divisor , res += add
temp, add = divisor, 1
while dividend >= temp:
# 先减去一个 divisor * add, 同时 res 补上 add个数
# 随后 divisor * add 加倍, add 同步加倍
dividend -= temp
res += add
temp <<= 1
add <<= 1
res = flag * res
return min(max(res, -2**31), 2**31 - 1)
if __name__ == "__main__":
so = Solution()
print(so.divide(10, 3))
print(so.divide(-7, 3)) | true |
43a201a5aa16d8701cb15c723151ba146e3bc226 | Python | tberhanu/all_trainings | /10_training/18_4sum.py | UTF-8 | 1,208 | 3.765625 | 4 | [] | no_license | """Given an array nums of n integers and an integer target, are there elements a, b, c, and d in nums
such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
def four_sum(nums, target):
    """Return all unique quadruplets [a, b, c, d] from nums with
    a + b + c + d == target.

    Sorts nums in place; duplicates are removed via a set of tuples.
    """
    nums.sort()
    n = len(nums)
    found = set()
    for a in range(n - 3):
        for b in range(a + 1, n - 2):
            pair_sum = nums[a] + nums[b]
            # Two-pointer scan over the tail for the remaining pair.
            lo, hi = b + 1, n - 1
            while lo < hi:
                total = pair_sum + nums[lo] + nums[hi]
                if total == target:
                    found.add((nums[a], nums[b], nums[lo], nums[hi]))
                    lo += 1
                    hi -= 1
                elif total < target:
                    lo += 1
                else:
                    hi -= 1
    return [list(quad) for quad in found]
# Example from the problem statement: quadruplets summing to 0.
nums = [1, 0, -1, 0, -2, 2]
target = 0
print(four_sum(nums, target))
| true |
e3637d56274cc1423cd34db71e63dd003e79343e | Python | jmilde/centroid-text-summarization | /final_code/util.py | UTF-8 | 7,148 | 2.609375 | 3 | [] | no_license | from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from scipy.spatial.distance import cosine
import pickle
import numpy as np
from nltk import word_tokenize, sent_tokenize
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import re
from tqdm import tqdm
def similarity(v1, v2):
    """Cosine similarity of v1 and v2 rescaled from [-1, 1] to [0, 1].

    Returns 0.0 when either vector is all zeros (cosine is undefined there).
    """
    if np.count_nonzero(v1) == 0 or np.count_nonzero(v2) == 0:
        return 0.0
    # scipy's cosine() is a distance: 1 - cos_sim
    return ((1 - cosine(v1, v2)) + 1) / 2
def save_txt(filename, lines, split=""):
"""writes lines to text file."""
with open(filename, 'w') as file:
for line in lines:
print(line+split, file= file)
def load_embedding(language, topic):
    """Load a pretrained word2vec-compatible embedding model.

    Only the english/news and english/general combinations are wired up;
    the german branches print a hint and return None. (Previously `model`
    was left unbound on those paths, so `return model` raised
    UnboundLocalError.)

    :param language: "english" or "german"
    :param topic: "general" or "news"
    :return: a gensim KeyedVectors model, or None for the german branches
    """
    assert language=="english" or language=="german", "language needs to be 'german' or 'english'"
    assert topic=="general" or topic=="news", "topic needs to be 'general' or 'news'"
    model = None  # fix: stays None for the branches that only print a hint
    if language == "english":
        if topic == "news":
            # google news
            model = KeyedVectors.load_word2vec_format('../data/embed_files/GoogleNews-vectors-negative300.bin', binary=True, limit=100000)
        elif topic == "general":
            # glove
            #glove2word2vec("../data/embed_files/glove.6B.300d.txt", "../data/embed_files/glove") #preprocess glove file to fit gensims word2vec format
            model = KeyedVectors.load_word2vec_format('../data/embed_files/glove', limit=100000)
    elif language=="german":
        if topic == "general":
            print("download pretrained general ones from here https://deepset.ai/german-word-embeddings")
        elif topic == "news":
            print("need to be trained on data of our partners")
    return model
def clean_txts(txt, remove):
    """Split *txt* into sentences and return (cleaned text, raw sentence list).

    The cleaned text is the whole document lower-cased with every token that
    appears in *remove* (stop-words / punctuation) dropped.
    """
    raw_sents = sent_tokenize(txt)
    cleaned = []
    for sent in raw_sents:
        kept = [tok for tok in word_tokenize(sent.lower()) if tok not in remove]
        cleaned.append(" ".join(kept))
    return " ".join(cleaned), raw_sents
def topic_words(sents, model_path, topic_threshold, load=True, save=False, refdoc_path=None):
    """Extract topic ("centroid") words from *sents* via TF-IDF.

    Either loads a previously fitted CountVectorizer / document-frequency matrix
    from *model_path* (load=True) or fits them on the reference corpus at
    *refdoc_path*, optionally pickling them back (save=True).

    Returns a 3-tuple:
      - {word: tf-idf score} for words scoring above *topic_threshold*,
      - the full (feature index, score) list sorted by score descending,
      - the vectorizer's feature-name list.
    """
    if load:
        count_vect = pickle.load(open(model_path + "/count_vect.sav", 'rb'))
        doc_freq = pickle.load(open(model_path + "/df_vect.sav", 'rb'))
    else:
        assert refdoc_path is not None, "need to give the path of the cleaned reference corpus"
        # Fit document frequencies on the big reference corpus plus the input tokens.
        count_vect = CountVectorizer()
        ref_docs = open(refdoc_path).read().split("\n")
        doc_freq = count_vect.fit_transform(ref_docs + sents.split())
        if save:  # persist the freshly fitted models for later load=True runs
            pickle.dump(count_vect, open(model_path + "/count_vect.sav", 'wb'))
            pickle.dump(doc_freq, open(model_path + "/df_vect.sav", 'wb'))
    feature_names = count_vect.get_feature_names()
    # Attach the reference document frequencies, then score the input text.
    tfidf = TfidfTransformer(use_idf=True, smooth_idf=True).fit(doc_freq)
    tfidf_vector = tfidf.transform(count_vect.transform([sents]))
    coo = tfidf_vector.tocoo()
    sorted_items = sorted(zip(coo.col, coo.data), key=lambda x: (x[1], x[0]), reverse=True)
    centroid_words_weights = {feature_names[idx]: score
                              for idx, score in sorted_items if score > topic_threshold}
    return centroid_words_weights, sorted_items, feature_names
def weight_sentences(txt, centroid_words_weights, tfidf_scores, feature_names, remove):
    """Score every sentence of every document by its average word weight.

    Each in-vocabulary word contributes its tf-idf weight, except the FIRST
    occurrence of a centroid word in a document, which contributes triple its
    centroid weight. Returns, per document, [avg_score, sentence_index] pairs
    sorted best-first.
    """
    # Perf fix (free): the {word: score} map built from tfidf_scores does not
    # depend on the document, so build it once instead of once per document.
    tfidf_weights = {feature_names[idx]: score for idx, score in tfidf_scores}
    d_score = []
    for doc in txt:
        s_score = []
        # Fresh copy so "first occurrence" bonuses reset for each document.
        centroid_weights = centroid_words_weights.copy()
        for i, sent in enumerate(sent_tokenize(doc)):
            score = []
            for wrd in word_tokenize(sent.lower()):
                if wrd in remove or wrd not in tfidf_weights:
                    continue
                if wrd in centroid_weights:
                    # First sighting of a centroid word: triple weight, then
                    # drop it so later occurrences fall back to plain tf-idf.
                    score.append(centroid_weights.pop(wrd) * 3)
                else:
                    score.append(tfidf_weights[wrd])
            if score:
                s_score.append([sum(score) / len(score), i])  # average the sentence score
        d_score.append(sorted(s_score, reverse=True))
    return d_score
# GET CENTROID VECTOR
def get_centroid(centroid_words, model):
    """Mean embedding vector of all *centroid_words* present in *model*.

    Words missing from the model's vocabulary are skipped; if none are found
    the zero vector (of model.vector_size) is returned.
    """
    centroid = np.zeros(model.vector_size)
    hits = 0
    for word in centroid_words:
        if word in model:
            centroid = centroid + model[word]
            hits += 1
    if hits > 0:
        centroid = np.divide(centroid, hits)
    return centroid
def select_ntop(txt, scores, n_top, remove):
    """Keep the *n_top* best-scored sentences of each document.

    *scores* holds, per document, [score, sentence_index] pairs sorted
    best-first (as produced by weight_sentences). Returns parallel lists of
    cleaned (lower-cased, *remove* tokens stripped) and raw sentences.
    """
    clean_sents, raw_sents = [], []
    for doc, doc_scores in zip(txt, scores):
        keep = {pair[1] for pair in doc_scores[:max(n_top, 0)]}
        for idx, sent in enumerate(sent_tokenize(doc)):
            if idx in keep:
                raw_sents.append(sent)
                tokens = [w for w in word_tokenize(sent.lower()) if w not in remove]
                clean_sents.append(" ".join(tokens))
    return clean_sents, raw_sents
def score_sentences(sents, raw_sents, model, centroid_vector):
    """Score each cleaned sentence by similarity of its mean embedding to the centroid.

    Returns (index, raw sentence, score, sentence vector) tuples sorted by
    score, best first. A sentence with no in-vocabulary words keeps the zero
    vector, so similarity() gives it a score of 0.
    """
    dim = model.vector_size
    scored = []
    for i, sent in enumerate(sents):
        vec = np.zeros(dim)
        hits = 0
        for w in sent.split():
            if w in model:
                vec = vec + model[w]
                hits += 1
        if hits > 0:
            vec = np.divide(vec, hits)
        scored.append((i, raw_sents[i], similarity(vec, centroid_vector), vec))
    # Rank sentences by score, descending.
    return sorted(scored, key=lambda el: el[2], reverse=True)
def select_sentences(sentence_scores, sim_threshold, limit_type, limit, reorder):
    """Greedily build a summary from best-first scored sentences.

    A candidate is skipped when it is more than *sim_threshold* similar to any
    already-chosen sentence (redundancy filter). Selection stops once the
    summary reaches *limit*, counted in words or bytes per *limit_type*.
    With reorder=True the chosen sentences are emitted in document order.
    """
    assert limit_type == "words" or limit_type == "bytes", "limit_type has to be 'words' or 'bytes'"
    summary = []
    count = 0
    for cand in sentence_scores:
        if count >= limit:
            break
        redundant = any(similarity(cand[3], prev[3]) > sim_threshold for prev in summary)
        if redundant:
            continue
        summary.append(cand)
        if limit_type == 'words':
            count += len(cand[1].split())
        elif limit_type == "bytes":
            count += len(cand[1])
    if reorder:
        summary = sorted(summary, key=lambda x: x[0])
    return [s[1] for s in summary]
| true |
8090ea9f7460852ba1c60a6309a58acdb23aa641 | Python | Mengqiao2020/Challenge-of-Leetcode2020 | /39xdgy/q21.py | UTF-8 | 769 | 3.375 | 3 | [] | no_license | '''
merge two sorted lists
28ms, 98.20%, 92.18%
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 21 — merge two sorted linked lists (28ms, 98.20%, 92.18%)."""

    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Splice *l1* and *l2* (each sorted ascending) into one sorted list."""
        head = tail = ListNode()  # dummy node; the merged list starts at head.next
        while l1 or l2:
            if not l1:  # l1 exhausted: the remainder of l2 is already sorted
                tail.next = l2
                break
            if not l2:  # l2 exhausted: append the remainder of l1
                tail.next = l1
                break
            if l1.val <= l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        return head.next
| true |
398da655f5e860c933d1475db3d1f9e514f6bb4e | Python | bnajafi/EETBS-Python-Assignments-Polimi-2019-2020 | /Assignment6/assignment6_gmoret.py | UTF-8 | 2,110 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pandas as pd
import os
def resistance_reader(input_element):
    """Look up the standard R-value of *input_element* in LibraryMaterials.csv."""
    table_dir = "/Users/giuliamoret/Documents/Documenti università/EETBS/ASSIGNMENT"
    library_path = os.path.join(table_dir, "LibraryMaterials.csv")
    library = pd.read_csv(library_path, sep=";", index_col=0, header=0)
    return library.loc[input_element, "Std_R_value"]
def std_thickness_reader(input_element):
    """Look up the standard thickness of *input_element* in LibraryMaterials.csv.

    Bug fix: the original computed the value but never returned it, so the
    caller's ``.apply(std_thickness_reader)`` produced a column of None.
    """
    where_table_is = "/Users/giuliamoret/Documents/Documenti università/EETBS/ASSIGNMENT"
    filename_LibraryMaterials = "LibraryMaterials.csv"
    path_LibraryMaterials = os.path.join(where_table_is, filename_LibraryMaterials)
    LibraryMaterials = pd.read_csv(path_LibraryMaterials, sep=";", index_col=0, header=0)
    Material_Std_Thickness = LibraryMaterials.loc[input_element, "Std_Thickness"]
    return Material_Std_Thickness
# Load the component table, attach standard R-values/thicknesses, then scale
# the conductive resistances by actual vs. standard thickness.
where_table_is = "/Users/giuliamoret/Documents/Documenti università/EETBS/ASSIGNMENT"
filename_resistance = "resistance.csv"
path_resistance = os.path.join(where_table_is, filename_resistance)
resistance = pd.read_csv(path_resistance, sep=";" , index_col=0 , header=0)
# Look up per-material standard values from LibraryMaterials.csv.
resistance["Std_R_value"] = resistance.loc[:,"Materials"].apply(resistance_reader)
resistance["Std_Thickness"] = resistance.loc[:,"Materials"].apply(std_thickness_reader)
# Conduction rows only: R = R_std * thickness / standard thickness.
resistance.loc[resistance["Type"]=="cond","R_value"] = (resistance.loc[resistance["Type"]=="cond","Std_R_value"]).astype(float) * ((resistance.loc[resistance["Type"]=="cond","Thickness"]).astype(float))/(resistance.loc[resistance["Type"]=="cond","Std_Thickness"]).astype(float)
# Export the completed table.
resistance.to_excel("resistancetable_gmoret.xlsx")
results_folder = "/Users/giuliamoret/Documents/Documenti università/EETBS/ASSIGNMENT"
path_file = os.path.join(results_folder,"resistancetable_gmoret.xlsx")
| true |
c28c959128677fa72b78cd16b5f33c9eb1bf5181 | Python | prs-prabodh/minorProject | /gui/gui.py | UTF-8 | 679 | 2.953125 | 3 | [] | no_license | import tkinter as tk
import random
from tkinter import *
from tkinter import ttk
def createDialogBox(threatName='Unrecognized'):
    """Pop up a modal alert window describing the detected threat.

    The "confidence" shown is a random number in [95, 98] — cosmetic only,
    not a real detection score.
    """
    window = tk.Tk()
    window.minsize(500, 200)
    window.title("Alert!")
    confidence = str(round(random.uniform(95.0, 98.0), 2))
    label = ttk.Label(
        window,
        text="Threat detected!\n\n" + threatName + " attack. Detection Confidence: " + confidence + "%",
        font='Courier 11 bold')
    label.place(relx=0.5, rely=0.4, anchor=CENTER)
    button = ttk.Button(window, text="OK", command=window.destroy)
    button.place(relx=0.8, rely=0.8, anchor=CENTER)
    window.mainloop()  # blocks until the OK button destroys the window
if __name__ == '__main__':
    # Manual demo: show the alert for a simulated port-scan detection.
    createDialogBox("Port Scan")
| true |
dce49996175b4d12fecc6cc07931e807db35e9ae | Python | daeken/space_game | /spaceship.py | UTF-8 | 2,420 | 2.71875 | 3 | [] | no_license | import input, util, sound
import pygame, random
class Spaceship(pygame.sprite.Sprite):
    """The player-controlled ship sprite.

    Touching an enemy or an enemy projectile respawns the ship at a random
    position; update() also resolves hits of the player's own projectiles
    against other sprites.
    """

    def __init__(self, dispatch, wsize, pos, image):
        pygame.sprite.Sprite.__init__(self)
        self.type = 'player'
        self.dispatch = dispatch
        self.wsize = wsize          # window size, used for random respawns
        self.life = 100
        self.pos = [float(pos[0]), float(pos[1])]
        self.move = 10              # pixels per Move() step
        self.image = image
        self.size = list(self.image.get_size())

    def Move(self, dir=0, loc=None):
        # Step movement in the requested direction (one axis per call).
        if dir & input.MOVE_UP:
            if self.pos[1] != self.size[1]:
                self.pos[1] -= self.move
        elif dir & input.MOVE_DOWN:
            if self.pos[1] != self.size[1]:
                self.pos[1] += self.move
        elif dir & input.MOVE_LEFT:
            if self.pos[0] != self.size[0]:
                self.pos[0] -= self.move
        elif dir & input.MOVE_RIGHT:
            if self.pos[0] != self.size[0]:
                self.pos[0] += self.move
        if loc:
            # Absolute reposition (used when respawning).
            self.pos = loc
        # Keep the OS cursor in sync with the ship.
        # NOTE(review): reconstructed from whitespace-mangled source — this call
        # may originally have lived inside the `if loc:` branch; confirm.
        pygame.mouse.set_pos(self.pos)

    def Fire(self):
        # Spawn a friendly projectile from the ship's nose, travelling upward.
        self.dispatch.Fire('blue', [self.pos[0] + (self.size[0] / 2), self.pos[1]], (0, -10), False)

    def update(self):
        self.rect = pygame.Rect(self.pos + self.size)
        # Respawn if the ship overlaps an enemy or an enemy projectile.
        for sprite in pygame.sprite.spritecollide(self, self.dispatch.sprites, False):
            if sprite == self:
                continue
            if sprite.type == 'enemy':
                self.dispatch.sprites.remove(sprite)
                self.Move(0, [random.randrange(0, self.wsize[0]), random.randrange(0, self.wsize[1])])
                self.update()
            elif sprite.type == 'projectile':
                if sprite.enemy:
                    self.dispatch.sprites.remove(sprite)
                    self.Move(0, [random.randrange(0, self.wsize[0]), random.randrange(0, self.wsize[1])])
                    self.update()
        # Resolve the player's own projectiles against every other sprite.
        for sprite in self.dispatch.sprites.sprites():
            if sprite.type == 'projectile' and not sprite.enemy:
                for spr in pygame.sprite.spritecollide(sprite, self.dispatch.sprites, False):
                    if (spr.type == 'enemy' or spr.type == 'player' or spr.enemy) and spr != sprite and spr != self:
                        self.dispatch.sprites.remove(sprite)
                        self.dispatch.sprites.remove(spr)
| true |
70a3399ac51f98785cc6a0e9e6b84b14b48ad114 | Python | alexmikhalevich/osm_print | /png_from_osm.py | UTF-8 | 3,526 | 2.875 | 3 | [] | no_license | #!/usr/bin/python3
import io, urllib.request, time, re, random
import argparse
import os
import math
from PIL import Image, ImageDraw
# Command-line interface: tile style, zoom level, output name and the two
# lat/lon corners of the area to render.
parser = argparse.ArgumentParser(description="Create a PNG file from OSM data.")
parser.add_argument("--tiles", type=str, default="outdoors", choices=["cycle", "transport",
"landscape", "outdoors", "transport-dark", "spinal-map", "pioneer",
"mobile-atlas", "neighbourhood"],
metavar="tiles_source", help="tiles source (default: outdoors)")
parser.add_argument("--zoom", type=int, default=14, metavar="zoom_level",
help="zoom level (default: 14)")
parser.add_argument("--out", type=str, default="output.png", metavar="output_name",
help="output file name (default: output.png)")
# The four corner coordinates are all required.
parser.add_argument("--laleft", type=float, required=True, metavar="latitude_left",
help="Latitude of the top left corner of the selected area")
parser.add_argument("--loleft", type=float, required=True, metavar="longitude_left",
help="Longitude of the top left corner of the selected area")
parser.add_argument("--laright", type=float, required=True, metavar="latitude_right",
help="Latitude of the bottom right corner of the selected area")
parser.add_argument("--loright", type=float, required=True, metavar="longitude_right",
help="Longitude of the bottom right corner of the selected area")
args = parser.parse_args()
# this method was stolen from here: https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Derivation_of_tile_names
def deg2num(lat_deg, lon_deg, zoom):
    """Convert WGS84 degrees to slippy-map tile numbers at *zoom*.

    Standard OSM formula (see the "Slippy map tilenames" wiki page).
    """
    n = 2.0 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * n)
    lat_rad = math.radians(lat_deg)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return (xtile, ytile)
# --- Download every tile in the selected area and paste it into one image ---
APIkey = os.getenv("THUNDERFOREST_API_KEY")
layers = [f"https://a.tile.thunderforest.com/{args.tiles}/!z/!x/!y.png?apikey={APIkey}",
          f"https://b.tile.thunderforest.com/{args.tiles}/!z/!x/!y.png?apikey={APIkey}",
          f"https://c.tile.thunderforest.com/{args.tiles}/!z/!x/!y.png?apikey={APIkey}"]
# Corner coordinates converted to tile numbers.
xleft, yleft = deg2num(args.laleft, args.loleft, args.zoom)
xright, yright = deg2num(args.laright, args.loright, args.zoom)
ymax = max(yleft, yright)
ymin = min(yleft, yright)
xsize = xright - xleft + 1
ysize = ymax - ymin + 1
resultImage = Image.new("RGBA", (xsize * 256, ysize * 256), (0, 0, 0, 0))
counter = 0  # throttle: pause after every 10 downloads
for x in range(xleft, xright + 1):
    for y in range(ymin, ymax + 1):
        for layer in layers:
            print(f"{x}, {y}, {layer}")
            url = layer.replace("!x", str(x)).replace("!y", str(y)).replace("!z", str(args.zoom))
            # Expand a {abc} mirror placeholder by picking one character at random.
            match = re.search("{([a-z0-9]+)}", url)
            if match:
                url = url.replace(match.group(0), random.choice(match.group(1)))
            print(f"{url} ...")
            try:
                req = urllib.request.Request(url)
                tile = urllib.request.urlopen(req).read()
            except Exception as e:
                print(f"Error: {e}")
                continue
            image = Image.open(io.BytesIO(tile))
            # Paste at the tile's pixel offset; the alpha channel is the mask.
            resultImage.paste(image, ((x - xleft) * 256, (y - ymin) * 256), image.convert("RGBA"))
            counter += 1
            if counter == 10:
                time.sleep(2)
                counter = 0
draw = ImageDraw.Draw(resultImage)
del draw
resultImage.save(args.out)
| true |
c4dab10c5e3faa9ac859374f681d18bd7f19a0de | Python | antonxy/mpv_scripts | /video_control_ma.py | UTF-8 | 2,219 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import print_function
import sys
import telnetlib
import re
'''
This script connects to the grandMA2 via telnet and waits for an error message starting with MpvDo
Add "MpvDo Play file.avi" as CMD in a cue to play the file
and "MpvDo Show file.png" to show an image
and "MpvDo Stop" to blank the screen
The MpvDo command of course does not exist in grandMA which generates an error message which is sent via telnet.
A litte hacky but it works :)
'''
def eprint(*args, **kwargs):
    """print() that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
def ashex(my_hex):
    """Render a string as space-separated hex codes, e.g. 'AB' -> '0x41 0x42'."""
    return " ".join(hex(ord(ch)) for ch in my_hex)
def ashex2(inputtext):
    """Replace CR/LF characters in *inputtext* with literal \\xNN escape text."""
    def escape(match):
        return r'\x{0:02x}'.format(ord(match.group()))
    return re.compile(r'[\n\r]').sub(escape, inputtext)
# Connect to the grandMA2 telnet console and translate its "MpvDo ..." error
# messages into mpv slave-mode commands written to stdout.
conn = telnetlib.Telnet()
conn.open("192.168.178.63", 30000)
try:
    eprint("connected")
    while 1:
        read = conn.read_until("\n")
        # eprint(ashex2(read))
        match = re.search(r"Error : MpvDo (?P<cmd>\w+)( (?P<arg>[a-zA-Z0-9_\.\-]+))?", read)
        if not match:
            continue
        cmd = match.group('cmd')
        if cmd == "Play":
            # Load the file, rewind, and start playback.
            sys.stdout.write('loadfile {}\n'.format(match.group('arg')))
            sys.stdout.flush()
            sys.stdout.write('seek 0.0 absolute\n')
            sys.stdout.flush()
            sys.stdout.write('set pause no\n')
            sys.stdout.flush()
        elif cmd == "Show":
            # Load a still image: pause first, then rewind.
            sys.stdout.write('loadfile {}\n'.format(match.group('arg')))
            sys.stdout.flush()
            sys.stdout.write('set pause yes\n')
            sys.stdout.flush()
            sys.stdout.write('seek 0.0 absolute\n')
            sys.stdout.flush()
        elif cmd == "Stop":
            # Blank the screen with a black image.
            sys.stdout.write('loadfile black.png\n')
            sys.stdout.flush()
            sys.stdout.write('set pause yes\n')
            sys.stdout.flush()
            sys.stdout.write('seek 0.0 absolute\n')
            sys.stdout.flush()
        else:
            eprint("Unknown command")
except Exception as ex:
    eprint(ex)
conn.close()
| true |
30c463533180510ed0dc64c7a1f13c2aa78e6f77 | Python | shahbaz181990/iNeuron_Assignments | /Assignment1/Vol_Of_Sphere.py | UTF-8 | 209 | 3.5 | 4 | [] | no_license | # import pi from math package to use it in the calculation of the the volume of sphere
from math import pi
def vol_of_sphere(d):
    """Volume of a sphere of diameter *d*: (4/3)*pi*r^3 with r = d/2."""
    radius = d / 2
    return 4 / 3 * pi * radius ** 3
# Demo: volume of a sphere with diameter 12 (expected ~904.7786842338603).
d = 12
print(vol_of_sphere(d))
| true |
401b1829edff0358a2f2ed6573c36935753e6feb | Python | Una-zh/algorithms | /LeetCode/547_findCircleNum.py | UTF-8 | 1,053 | 3 | 3 | [] | no_license | # -- coding: utf-8 --
# author: una
# datetime: 2019-08-15 17:29
from typing import List
class Solution:
    def findCircleNum(self, M: List[List[int]]) -> int:
        """Count friend circles in the adjacency matrix *M* (LeetCode 547).

        Union-find with path compression: every student starts as its own
        root, each M[i][j] == 1 merges the two sets, and the answer is the
        number of distinct roots remaining.
        """
        if not M:
            return 0
        N = len(M)
        father = [i for i in range(N)]

        def find(x):
            # Path compression: hang every node on the lookup path directly
            # under the root, flattening the tree for later queries.
            if father[x] != x:
                father[x] = find(father[x])
            return father[x]

        for i in range(N):
            for j in range(i + 1, N):
                if M[i][j] == 1:
                    i_father = find(i)
                    j_father = find(j)
                    if i_father != j_father:
                        father[j_father] = i_father
        # Fully compress so every entry holds its set's root.
        for i in range(N):
            father[i] = find(i)
        # Bug fix: removed the stray debug `print(father)` that polluted stdout.
        return len(set(father))
if __name__ == '__main__':
    # Demo matrix: everyone is transitively connected, so one circle.
    a = [[1, 0, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1], [1, 0, 1, 1]]
    print(Solution().findCircleNum(a))
| true |
d6d3bcf5c2af59efd9530bb1120bca225c9cc3d5 | Python | DomfeLacre/zyBooksPython_CS200 | /module4/zyBooksChpt5/halfArrow.py | UTF-8 | 639 | 4.03125 | 4 | [] | no_license | # zyBooks Chpt.5 Exercise 5.13 Drawing a half arrow (Python 3)
# Read the three arrow dimensions from stdin.
print ('Enter arrow base height: ')
arrow_base_height = int(input())
print ('Enter arrow base width: ')
arrow_base_width = int(input())
print ('Enter arrow head width: ')
arrow_head_width = int(input())
# The head must be wider than the base; keep prompting until it is.
while arrow_head_width <= arrow_base_width:
    print ('Please enter an arrow head width that is larger than the arrow base: ')
    arrow_head_width = int(input())
# Rectangular shaft: one row of stars per unit of height.
for _ in range(arrow_base_height):
    print('*' * arrow_base_width)
# Triangular head: rows shrink from the full head width down to one star.
for width in range(arrow_head_width, 0, -1):
    print('*' * width)
| true |
0afe5738c4a2342681c3a3f700fabb647ee22b23 | Python | ThriledLokki983/dev | /web-dev/practices/pract-web/project2/app.py | UTF-8 | 1,723 | 2.625 | 3 | [] | no_license | from flask import Flask, render_template, redirect, request, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Email, Length, EqualTo
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['SECRET_KEY'] = '12345'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class RegisterForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired(), Length(min=(4), max=(35))])
username = StringField('Username', validators=[DataRequired(), Length(min=3, max=30)])
email = StringField('Email', validators=[DataRequired(), Length(max=120), Email()])
password = PasswordField('Password', validators=[DataRequired(), Length(min=8, max=120)])
confirm = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password', message='Password must match!')])
submit = SubmitField('Sign Up')
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=3, max=30)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=8, max=120)])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
@app.route('/')
def home():
return render_template('index.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
return redirect(url_for('index'))
return render_template('register.html', form=form, pageTitle='Register Here')
if __name__ == "__main__":
app.run(debug=True) | true |
ef2c1f3eeae77d76c306eeb70b39dea3be80ec0f | Python | YevhenMix/courses | /Python Start/Лекция 4. Условные операторы/Lection_4_tsk_3.py | UTF-8 | 559 | 3.859375 | 4 | [] | no_license | print('Программа определяет какой год високосный или нет.')
year = int(input('Введите интересующий Вас год: '))
# первый вариант
if year % 4 == 0:
print('366 Дней')
elif year % 100 == 0:
print('365 Дней')
elif year % 400 == 0:
print('366 Дней')
else:
print('365 Дней')
# второй вариант - сокращенный
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
print('366 Дней')
else:
print('365 Дней')
| true |
91a77f0fdc1e546fac50bdbc4b7e2ca8b0a95444 | Python | Dekares/LoL-Valorant-Offline-Mode | /lol-valorant-game-offline-tk.py | UTF-8 | 2,597 | 2.625 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk
from tkinter.messagebox import showinfo
import os,ctypes
# Root window setup.
# NOTE: `tk` here shadows the `import tkinter as tk` module alias above —
# after this line the name refers to the root window, not the module.
tk = Tk()
tk.geometry("600x200")
tk.title("LoL - Valorant Game Offline Mode")
tk['background'] = "#313131"
tk.resizable(False, False)
# LoL chat servers: region code -> chat-server IP (used to build firewall rules).
dict_servers = {
"TR": "172.65.202.166", # TR
"EUNE": "172.65.223.136", # EUNE
"EUW": "172.65.252.238", # EUW
"JP": "172.65.217.212", # JP
"KR": "172.65.226.99", # KR
"LAN": "172.65.250.49", # LAN
"LAS": "172.65.194.233", # LAS
"NA": "172.65.244.155", # NA
"OCE": "172.65.208.61", # OCE
"PBE": "172.65.223.16", # PBE
"RU": "172.65.192.156", # RU
"BR": "172.65.212.1" # BR
}
def isAdmin():
    """Return True when the process has administrator rights.

    POSIX: uid 0. Windows (no os.getuid): ask the shell32 WinAPI.
    """
    try:
        is_admin = (os.getuid() == 0)
    except AttributeError:
        # Windows has no getuid(); fall back to the WinAPI check.
        is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
    return is_admin
def online():
    """Delete the firewall block rule for the selected server, restoring chat."""
    server = servers.get()
    if server == "":
        showinfo(title="Feedback", message=f"You must choose one server!")
        return
    os.system(f'netsh advfirewall firewall delete rule name="lolchat{server}"')
    showinfo(title="Feedback", message=f"You are online on {server} server")
def offline():
    """Block outgoing TCP to the selected server's chat IP via a firewall rule."""
    server = servers.get()
    if server == "":
        showinfo(title="Feedback", message=f"You must choose one server!")
        return
    os.system(
        f'netsh advfirewall firewall add rule name="lolchat{server}" dir=out remoteip={dict_servers[server]} protocol=TCP action=block')
    showinfo(title="Feedback", message=f"You are offline on {server} server")
# Widget construction and absolute placement.
myInfoText = Label(tk, text="Press 'Offline!' for offline, press 'Online!' for online.",
font="Arial 10", bg="#313131", fg="white")
button_online = Button(tk, text="Online!", font="Arial",
command=online, bg="red", fg="white", height=1, width=13)
button_offline = Button(tk, text="Offline!", font="Arial",
command=offline, bg="red", fg="white", height=1, width=13)
exitButton = Button(tk, text="Exit", font="Arial",
command=tk.destroy, bg="red", fg="white", height=1, width=13)
servers = ttk.Combobox(tk, font="Arial 15", values=list(dict_servers.keys()))
myInfoText.place(x=30, y=110) # informational label
button_online.place(x=300, y=55) # Online button
button_offline.place(x=450, y=55) # Offline button
exitButton.place(x=400, y=100) # Exit button
servers.place(x=30, y=55) # combobox holding the server list
if isAdmin():
    # netsh firewall rules need elevation; only start the UI when we have it.
    tk.mainloop()
else:
    showinfo(title="Feedback", message=f"You should start as administrator!")
| true |
5f75f5c3f151306d29e92b48429ab0db6e996b29 | Python | rukipgus/Algorithm | /Baek_10844.py | UTF-8 | 226 | 3 | 3 | [] | no_license | import copy
# BOJ 10844: count "stair numbers" of length n (adjacent digits differ by 1),
# modulo 1_000_000_000. a[d] = count of valid numbers currently ending in digit d.
n = int(input())
a = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]  # length-1 numbers: leading digit must be 1..9
for i in range(1, n):
    # Snapshot the previous row. Perf fix (free): copy.deepcopy was overkill
    # for a flat list of ints — a plain slice copy is equivalent and faster.
    s = a[:]
    a[0] = s[1]
    for j in range(1, 9):
        a[j] = s[j - 1] + s[j + 1]
    a[9] = s[8]
print(sum(a) % 1000000000)
736a220c66162dcf5b64af5c4377457a0e59fc74 | Python | zhu00121/covid19-sounds-neurips | /Respiratory_prediction/Opensmile/2_classifcation.py | UTF-8 | 6,162 | 2.6875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 22 11:55:32 2021
@author: xiatong
load csv and test the performance
"""
import numpy as np
import pandas as pd
from sklearn import decomposition, metrics, preprocessing
from sklearn.svm import SVC
def get_metrics(probs, label):
    """Return (AUC, sensitivity, specificity) at a 0.5 decision threshold."""
    predicted = [1 if p > 0.5 else 0 for p in probs]
    auc = metrics.roc_auc_score(label, probs)
    TN, FP, FN, TP = metrics.confusion_matrix(label, predicted).ravel()
    TPR = TP * 1.0 / (TP + FN)  # sensitivity / hit rate / recall
    TNR = TN * 1.0 / (TN + FP)  # specificity / true negative rate
    return auc, TPR, TNR
def get_CI(data, AUC, Sen, Spe):
    """Print AUC / sensitivity / specificity with bootstrapped 95% CIs.

    *data* is a list of [probability, label] pairs; 1000 resamples with
    replacement are drawn, each seeded for reproducibility.
    NOTE(review): reconstructed from whitespace-mangled source — on a
    ValueError from get_metrics (e.g. a single-class resample) the sample is
    redrawn with seed 1001 and that iteration records no metrics; confirm
    against the original repository.
    """
    AUCs = []
    TPRs = []
    TNRs = []
    for s in range(1000):
        np.random.seed(s)  # Para2
        sample = np.random.choice(range(len(data)), len(data), replace=True)
        samples = [data[i] for i in sample]
        sample_pro = [x[0] for x in samples]
        sample_label = [x[1] for x in samples]
        try:
            get_metrics(sample_pro, sample_label)
        except ValueError:
            np.random.seed(1001)  # Para2
            sample = np.random.choice(range(len(data)), len(data), replace=True)
            samples = [data[i] for i in sample]
            sample_pro = [x[0] for x in samples]
            sample_label = [x[1] for x in samples]
        else:
            auc, TPR, TNR = get_metrics(sample_pro, sample_label)
            AUCs.append(auc)
            TPRs.append(TPR)
            TNRs.append(TNR)
    # 2.5% / 97.5% percentiles of each bootstrap distribution.
    q_0 = pd.DataFrame(np.array(AUCs)).quantile(0.025)[0]
    q_1 = pd.DataFrame(np.array(AUCs)).quantile(0.975)[0]
    q_2 = pd.DataFrame(np.array(TPRs)).quantile(0.025)[0]
    q_3 = pd.DataFrame(np.array(TPRs)).quantile(0.975)[0]
    q_4 = pd.DataFrame(np.array(TNRs)).quantile(0.025)[0]
    q_5 = pd.DataFrame(np.array(TNRs)).quantile(0.975)[0]
    # LaTeX-table style line: AUC(lo-hi)&Sen(lo-hi)&Spe(lo-hi).
    print(
        str(AUC.round(2)) + "(" + str(q_0.round(2)) + "-" + str(q_1.round(2)) + ")"
        + "&" + str(Sen.round(2)) + "(" + str(q_2.round(2)) + "-" + str(q_3.round(2)) + ")"
        + "&" + str(Spe.round(2)) + "(" + str(q_4.round(2)) + "-" + str(q_5.round(2)) + ")"
    )
# --- Load openSMILE features (384 dims per modality, ';'-separated strings) ---
inputFile = "features_384.csv"
df_features = pd.read_csv(inputFile)
df_cough = df_features["cough_feature"].map(lambda x: [float(v) for v in x.split(";")])
cough = np.array([x for x in df_cough])
df_breath = df_features["breath_feature"].map(lambda x: [float(v) for v in x.split(";")])
breath = np.array([x for x in df_breath])
df_voice = df_features["voice_feature"].map(lambda x: [float(v) for v in x.split(";")])
voice = np.array([x for x in df_voice])
# x_data = np.concatenate([cough,breath,voice],axis=1)
x_data = voice  # this run evaluates the voice modality only
y_label = np.array(df_features["label"])
y_set = np.array(df_features["fold"])  # assumed: 0 = train, 1 = validation, 2 = test
x_data_train = x_data[y_set == 0]
y_label_train = y_label[y_set == 0]
x_data_vad = x_data[y_set == 1]
y_label_vad = y_label[y_set == 1]
x_data_test = x_data[y_set == 2]
y_label_test = y_label[y_set == 2]
# Standardise using training-set statistics only.
scaler = preprocessing.StandardScaler().fit(x_data_train)
x_train_n = scaler.transform(x_data_train)
x_test_n = scaler.transform(x_data_test)
x_vad_n = scaler.transform(x_data_vad)
# Reduce dimensionality, keeping 99% of the variance.
pca = decomposition.PCA(0.99)
pca.fit(x_train_n)
x_train_n_pca = pca.fit_transform(x_train_n)
x_test_n_pca = pca.transform(x_test_n)
x_vad_n_pca = pca.transform(x_vad_n)
for c in [0.0001]:
    print(c)
    clf = SVC(C=c, kernel="linear", gamma="auto", probability=True)
    clf = clf.fit(x_train_n_pca, y_label_train)
    # Validation-set metrics.
    predicted = clf.predict(x_vad_n_pca)
    probs = clf.predict_proba(x_vad_n_pca)
    auc = metrics.roc_auc_score(y_label_vad, probs[:, 1])
    precision, recall, _ = metrics.precision_recall_curve(y_label_vad, probs[:, 1])
    se = metrics.recall_score(y_label_vad, predicted, labels=[1], average=None)[0]
    sp = metrics.recall_score(y_label_vad, predicted, labels=[0], average=None)[0]
    print("auc", auc, "SE", se, "SP", sp)
    # Test-set metrics plus bootstrapped confidence intervals.
    predicted = clf.predict(x_test_n_pca)
    probs = clf.predict_proba(x_test_n_pca)
    auc = metrics.roc_auc_score(y_label_test, probs[:, 1])
    precision, recall, _ = metrics.precision_recall_curve(y_label_test, probs[:, 1])
    se = metrics.recall_score(y_label_test, predicted, labels=[1], average=None)[0]
    sp = metrics.recall_score(y_label_test, predicted, labels=[0], average=None)[0]
    print("auc", auc, "SE", se, "SP", sp)
    data = [[probs[i, 1], y_label_test[i]] for i in range(len(y_label_test))]
    AUC, Sen, Spe = get_metrics(probs[:, 1], y_label_test)
    get_CI(data, AUC, Sen, Spe)
# clf = SVC(C=0.001, kernel='linear',gamma='auto', probability=True)
# auc 0.7086871921345558 SE 0.6648501362397821 SP 0.6520681265206812
# auc 0.7108886021859103 SE 0.6505944517833554 SP 0.6679636835278858
# 0.71(0.69-0.73)&0.64(0.61-0.66)&0.68(0.66-0.7)
# #384
# 0.01
# auc 0.7240216259936223 SE 0.6444141689373297 SP 0.6727493917274939
# auc 0.7083999403749184 SE 0.6373844121532365 SP 0.6627756160830091
# 0.71(0.69-0.73)&0.63(0.6-0.65)&0.67(0.65-0.7)
# 0.05
# auc 0.7214692018536566 SE 0.6376021798365122 SP 0.6727493917274939
# auc 0.70406812679582 SE 0.6360634081902246 SP 0.6640726329442282
# 0.7(0.69-0.72)&0.62(0.6-0.65)&0.67(0.65-0.7)
# 0.001
# auc 0.7297562932171814 SE 0.6362397820163488 SP 0.6909975669099757
# auc 0.7253151305498016 SE 0.6433289299867899 SP 0.6867704280155642
# 0.73(0.71-0.74)&0.64(0.62-0.67)&0.69(0.66-0.71)
# 0.0001
# auc 0.7306463268296239 SE 0.6103542234332425 SP 0.7068126520681265
# auc 0.7279068083961708 SE 0.631439894319683 SP 0.704928664072633
# 0.73(0.71-0.74)&0.65(0.62-0.67)&0.69(0.67-0.71)
| true |
4f1cd9e6b8465a8eae03b87fa7f46b5ef76a5e85 | Python | itwhat126/picview | /utils/get_bcrypt.py | UTF-8 | 388 | 3.03125 | 3 | [] | no_license | import bcrypt
def get_bcrypt(password, salt=None):
    """Hash *password* with bcrypt.

    Registration (salt is None): generate a fresh salt and return the
    (hash, salt) pair so the salt can be stored with the user record.
    Login check (salt given as str): return just the hash computed with the
    stored salt, for comparison against the stored hash.
    """
    if salt is None:
        # salt is None only when a new user registers.
        salt = bcrypt.gensalt(10)
        h_password = bcrypt.hashpw(password, salt)
        # Return the ciphertext together with the salt.
        return h_password, salt
    try:
        h_password = bcrypt.hashpw(password, salt.encode('utf-8'))
    except Exception as r:
        print(r)
        # Bug fix: the original fell through to `return h_password` with the
        # name unbound, masking the real error behind an UnboundLocalError;
        # re-raise the original exception instead.
        raise
    # Return the ciphertext.
    return h_password
0037285e8e6c217845c7e37e421d0219147524c1 | Python | xf261/python | /pyTest/demoTest/helloWorld.py | UTF-8 | 7,019 | 4.3125 | 4 | [] | no_license | import math
# Basics: variables and printing.
x = '???'
print(x)
print("hello world");
# Indexing collections forward and backward:
# 0 is the first element in forward order, 1 the second, and so on.
# -1 is the first element from the end, -2 the second from the end, etc.
# list: Python's array-like type — append, insert, pop, and item assignment.
l = ['admam','lisa','paul','bart']
l.pop(3)
l.pop(2)
print(l)
# tuple: an ordered, immutable sequence — no append/pop/insert; access by index.
# A single-element tuple needs a trailing comma to avoid ambiguity.
# A tuple containing a list is effectively mutable through that list.
t = ('admam','lisa','paul','bart')
# Indentation rules: lines with the same indent form one code block.
# Convention: four spaces, never tabs, and never a mix of tabs and spaces.
# In the interactive shell, an extra blank line ends an indented block.
# `if` takes an expression followed by a colon that opens the indented block.
grade = 75
if grade > 60:
    print('及格')
else:
    print('不及格')
# Nested branching, flattened into an if/elif/else chain.
# NOTE(review): reconstructed from whitespace-mangled source — the second
# print in the middle branch is assumed to share that branch; confirm.
if grade > 90:
    print('非常好')
elif grade > 60:
    print('及格')
    print('还得加油')
else:
    print('重修吧,孩子啊')
# Iterating collections.
for name in l:
    print(name)
print()
for t1 in t:
    print(t1)
# while loop demo (left commented out — it would loop forever):
# while True:
#     print(l[0])
#     l.pop(l[0])
#     print('溢出成功')
# Nested loops: the inner loop body runs i-1 times for each outer i.
for i in range(1, 10):
    for j in range(1, i):
        print("此处多重循环")
        # print(i+'*'+j+'='+i*j)
# dict is like Java's Map: key-value pairs; len() works on any collection.
d = {
'paul':30,
'adam':95,
'bart':86,
# l:42,
}
# Access with d[key] (like list indexing, but by key instead of position).
# A missing key raises KeyError.
# To avoid that: (1) test membership with `in`, or (2) use d.get(key),
# which returns None when the key is absent.
# dict traits: fast lookup; unordered storage; keys must be immutable
# (a mutable key raises "unhashable type").
# Updating an entry:
d['paul'] = 100
print(d)
# Iterating a dict yields its keys; index with the key to get the value.
for key in d:
    print(key)
print('-------------------------------------------------------')
# set: unordered, no duplicate elements.
s = set(['adma','lisa'])
# Membership is tested with `in`.
print('lisa' in s)
print('-------------------------------------------------------')
# Iterating a set works like any other collection.
for s1 in s:
    print(s1)
print('-------------------------------------------------------')
# set.add() silently ignores elements that are already present;
# set.remove() raises if the element is missing, so check membership first.
s.add('bob')
print(s)
print(s.remove('lisa'))
print(s)
print('-------------------------------------------------------')
# Functions.
# Built-in functions:
# abs() — absolute value;
# int() — convert another type to int;
# str() — convert another type to str;
# cmp(x, y) — compare two values (NOTE: Python 2 only, removed in Python 3);
# sum(iterable) — sum the values.
print(abs(-20))
print('-------------------------------------------------------')
# 函数的定义
# 定义函数使用def语句,依次写出函数名,括号,括号中的参数和冒号,
# 然后在缩进块中编写方法体,函数的返回值用return语句返回,
# 没有return语句,函数执行完毕后也会返回结果,只是结果为None,return None可简写为return
# def sum1(a,b):
# c = a+b
# return c
def sum1(a, b):
    """Return the sum of a and b (basic `def` demo)."""
    return a + b
print(sum1(1,3))
print('-------------------------------------------------------')
# 如何返回多值
# return a,b
# 其实返回多值的时候是返回由这几个多值组成的tupe,在语法上返回一个tupe可以省略括号
def sumAndmul(a, b):
    """Return (a + b, a * b) — "multiple return values" are really one tuple."""
    return a + b, a * b
print(sumAndmul(4,6))
print('-------------------------------------------------------')
# 函数递归 举例求n阶乘
def fac(n):
    """Return n! (factorial) for a non-negative integer n, computed recursively.

    Bug fix: the original base case was `n == 1`, so fac(0) and any negative
    input recursed forever (RecursionError). The base case now covers 0 and 1
    (0! == 1 by definition) and negative input raises ValueError explicitly.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        return 1
    return n * fac(n - 1)
print(fac(10))
print('-------------------------------------------------------')
# 定义默认参数,在定义函数的时候将其中的某(几)个设置为常量
# 由于函数的参数是从左往右匹配的,故在定义默认参数的时候默认参数必须定义在必需参数后面
# def sum(a,10):
# pass
# sum(10)
print('-------------------------------------------------------')
# 定义可变参数,在可变参数前加个*,表示我们可以传入0个,1个或多个参数给可变参数
# python解释器会将这一组参数组装成一个tuple传递给可变参数,
# 因此在函数内部,可变参数就是一个tuple
# 例如 def fn(*args):
# print args
def average(*arr):
    """Print diagnostics and return the arithmetic mean of the arguments.

    The original computed the mean but only printed it (implicitly returning
    None) and crashed with ZeroDivisionError when called with no arguments.
    All original debug prints are kept, in the same order; additionally the
    mean is returned, and an empty call returns None instead of raising.
    """
    print(arr)
    total = 0.0
    for value in arr:
        print(value)
        total += value
    print(len(arr))
    print(total)
    if not arr:
        # No values: nothing sensible to average.
        return None
    mean = total / len(arr)
    print(mean)
    return mean
average(1,2,3,4,5,6,7,8,10)
print('-------------------------------------------------------')
# 对list进行切片(支持倒序)
# l[1:3]表示从索引1开始取,直到索引3为止但不包括索引3即索引1,2,成为一个新的list
# 如果第一个索引为0,还可以省略
# l[:]表示从头取到尾
# l[::n]表示每n个取一个,也就是每隔n-1个取一个
# 把list换成tuple,切片操作完全相同,只是切片的结果也成了tuple
l.append('bob')
l.append('julia')
print(l[1:3]) # 表示从索引1开始取,直到索引3为止但不包括索引3即索引1,2,如果第一个索引为0,还可以省略
print('-------------------------------------------------------')
# 切片操作支持对字符串操作
# .upper() 将字符串转换成大写的
def firstCharUpper(s):
    """Return s with its first character upper-cased and the rest unchanged.

    Bug fix: the original assigned `s1 = s[1:].upper()` but never used it
    (dead code); removed. Slicing with s[:1] makes the empty string safe.
    """
    return s[:1].upper() + s[1:]
print(firstCharUpper('hello'))
print(firstCharUpper('sunday'))
print(firstCharUpper('september'))
print('-------------------------------------------------------')
# python的for循环抽象程度高于java
# 集合是指包含一组元素的数据结构
# 有序集合:list,tuple,str和Unicode
# 无序集合:set
# 无序集合并具有key-value键值对的:dict
# 内置函数range(a,b[,c])
print('-------------------------------------------------------')
# 异常捕捉
# try:
# 代码块
# except 异常名1:
# 处理1
# except 异常名2:
# 处理2
# except Exception:
# 处理
# finally:
# 最终执行语句
print('-------------------------------------------------------')
# 自定义异常
# NOTE(review): despite the "custom exception" heading above in the file,
# no exception is defined or handled here: math.log(0) raises ValueError
# ("math domain error"), so the fun(0) call below crashes the script.
# Assumes `math` was imported near the top of the file -- TODO confirm.
def fun(a):
    # Seed the result, then overwrite it with ln(a); requires a > 0.
    c = 0;
    c = math.log(a)
    print(c)
# Demonstration call: the logarithm of 0 is undefined, so this raises ValueError.
fun(0)
8d2d2e69d8cbaaaa78e56a0bc5b601b0e0dc2324 | Python | AmitAps/python | /headfirstpy/ch5/hello_flask.py | UTF-8 | 1,168 | 4 | 4 | [] | no_license | #This is the module's name "flask" with a lowercase "f".
#This is the class name "Flask" with an uppercase "F".
#The second line of code creates an object of type Flask, assigning it to the app variable.
from flask import Flask
#Create an instance of a Flask object and assign it to "app".
#The __name__ value is maintained by the Python interpreter and, when used anywhere within your program’s code, is set to the name of the currently
#active module. It turns out that the Flask class needs to know the current value of __name__ when creating a new Flask object, so it must be
#passed as an argument, which is why we’ve used it here (even though its usage does look strange).
app = Flask(__name__)
#A function decorator adusts the behavior of an existing function (without changing the function's code).
#Here's the function decorator, hich -like all decorators-is prefixed with @ symbol.
#this is the URL.
@app.route('/')
#This is just a regular Python function which, when invoked, returns a string to its caller (note the '->str' annotation).
def hello() ->str:
    """Handle HTTP GET on '/' by returning a fixed greeting string."""
    return 'Hello world from Flask!'
#Asks the webapp to start running.
app.run() | true |
1a1d38c7cae9e1b601a5cf5ce0c02d7269cf1770 | Python | k-alkiek/numerical-methods | /controllers/gauss_jordan_controller.py | UTF-8 | 1,250 | 2.75 | 3 | [] | no_license | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog, QTableWidget, QVBoxLayout, QTableWidgetItem
class DataTable(QDialog):
    """Modal dialog showing (variable, value) result pairs in a two-column table.

    Expects kwargs["results"][0] to be a sequence of indexable pairs --
    presumably the solution of a linear system; confirm with the caller.
    """

    def __init__(self, parent, **kwargs):
        super(DataTable, self).__init__(parent)
        self.values = kwargs["results"][0]
        # Create a table with one row per result pair and two columns.
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(len(self.values))
        self.tableWidget.setColumnCount(2)
        # Lay the table out to fill the dialog.
        layout = QVBoxLayout()
        layout.addWidget(self.tableWidget)
        self.tableWidget.move(0, 0)
        self.setLayout(layout)
        # Bug fix: removed a leftover debug `print()` that emitted a blank
        # line to stdout every time the dialog was constructed.
        self.fill()

    def fill(self):
        """Populate the headers and one row per (variable, value) pair."""
        self.tableWidget.setHorizontalHeaderItem(0, QTableWidgetItem('Variable'))
        self.tableWidget.setHorizontalHeaderItem(1, QTableWidgetItem('Value'))
        header = self.tableWidget.horizontalHeader()
        header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
        header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
        for row, pair in enumerate(self.values):
            self.tableWidget.setItem(row, 0, QTableWidgetItem(str(pair[0])))
            self.tableWidget.setItem(row, 1, QTableWidgetItem(str(pair[1])))
| true |
13da8f5675fbed83a7a0d10cf857cea2c2cefb11 | Python | vnikolayev1/cs50-python-part | /pset6/vigenere/vigenere.py | UTF-8 | 805 | 3.640625 | 4 | [] | no_license | import sys
from cs50 import get_string
# Require exactly one command-line argument: the cipher key.
if not len(sys.argv) == 2:
    print("Too many or not enough arguments")
    exit(1)
# The key must consist only of letters.
if sys.argv[1].isalpha():
    key = sys.argv[1].lower()
    word = get_string("Enter your phrase: ")
    keylen_counter = 0  # index into the key; advances only on letters
    print("ciphertext: ", end="")
    for character in word:
        # Wrap around to the start of the key when it is exhausted.
        if keylen_counter == len(key):
            keylen_counter = 0
        # Vigenere shift: offset each letter by the key letter's alphabet
        # position; 97/65 are ord('a')/ord('A'). Non-letters pass through
        # unchanged and do NOT consume a key letter.
        if character.islower():
            character = chr(((ord(character) - 97 + ord(key[keylen_counter]) - 97) % 26) + 97)
            keylen_counter += 1
        elif character.isupper():
            character = chr(((ord(character) - 65 + ord(key[keylen_counter]) - 97) % 26) + 65)
            keylen_counter += 1
        print(character, end="")
    print()
else:
    print("Input has to be alphabetical.")
    exit(1)
ed37ddd1428aebea80dd556d7c35bf2fbd1852b6 | Python | ChenSun-YP/2048_NEAT | /game/core_2048.py | UTF-8 | 5,449 | 2.9375 | 3 | [] | no_license | from game import utils
from game.utils import Direction
# from game.ub4c106207f7d7ae4d7fb268df44519d4e1e
# from game.utils import Key
from game.utils import State
from game.utils import char_to_direction
from random import randint
EMPTY_TILE = 0
ID = 0
class GameCore:
    """Headless 2048 game engine: board state, score and move bookkeeping.

    Every move direction is reduced to a downward move by rotating the
    board with utils.rotate_clockwise and rotating it back afterwards.
    """

    def __init__(self, game_size=4):
        self.game_size = game_size
        self.board = fresh_board(game_size)
        self.score = 0
        self.state = State.MENU
        self.move = 0
        self.max_tile = 0

    # Capitalised accessor methods are kept as-is for backward compatibility.
    def Score(self):
        return self.score

    def Max_tile(self):
        return self.max_tile

    def Move(self):
        return self.move

    def Board(self):
        return self.board

    def Game_size(self):
        return self.game_size

    def State(self):
        return self.state

    def restart_game(self, game_size=None):
        """Reset score/board/state and spawn the two starting tiles."""
        self.game_size = game_size if game_size is not None else self.game_size
        self.score = 0
        self.move = 0
        self.board = fresh_board(self.game_size)
        self.state = State.IDLE
        self.max_tile = 0
        # Spawn two tiles randomly on the board
        self._spawn_tile(self.board)
        self._spawn_tile(self.board)

    def try_move(self, direction):
        """Apply a move in `direction`; return True iff the board changed.

        Bug fix: tiles are now shifted together BEFORE merging (and again
        afterwards), so equal tiles separated by empty gaps merge as in
        standard 2048 -- previously a column like [2, _, 2] never merged.
        """
        if not has_move(self.board):
            self.state = State.LOSS
            return False

        moved = False
        rotations = 0
        back_rotations = 0
        # Rotate so the requested direction becomes "down".
        # NOTE(review): self.move is incremented even when the move turns
        # out to be a no-op -- confirm that is the intended move counter.
        if direction == Direction.UP:
            rotations = 2
            back_rotations = 2
            self.move += 1
        elif direction == Direction.DOWN:
            rotations = 0
            back_rotations = 0
            self.move += 1
        elif direction == Direction.LEFT:
            rotations = 3
            back_rotations = 1
            self.move += 1
        elif direction == Direction.RIGHT:
            rotations = 1
            back_rotations = 3
            self.move += 1
        else:
            return moved

        utils.rotate_clockwise(self.board, rotations)
        # Shift first so gapped equal tiles become adjacent, merge them,
        # then shift again to close the holes that merging leaves behind.
        pre_shifted = self._shift_down(self.board)
        merged = self._merge_down(self.board)
        post_shifted = self._shift_down(self.board)
        moved = pre_shifted or merged or post_shifted
        utils.rotate_clockwise(self.board, back_rotations)

        if moved:
            self._spawn_tile(self.board)
        return moved

    # Can also be used to notify new tile to observers
    def _new_tile_appeared(self, new_tile_value):
        self.score = self.score + new_tile_value
        self.max_tile = max(self.max_tile, new_tile_value)

    def _merge_down(self, board):
        """Merge each tile into an equal tile directly below it, bottom-up.

        Returns True if any merge happened. Score and max_tile are updated
        via _new_tile_appeared. A tile produced by a merge cannot merge
        again in the same pass because its old cell becomes empty.
        """
        merged = False
        for row in range(len(board) - 1, 0, -1):
            for col in range(0, len(board[row])):
                if board[row][col] != EMPTY_TILE:
                    if board[row][col] == board[row - 1][col]:
                        merged = True
                        new_value = board[row][col] + board[row - 1][col]
                        board[row][col] = new_value
                        board[row - 1][col] = EMPTY_TILE
                        self._new_tile_appeared(new_value)
        return merged

    def _shift_down(self, board):
        """Slide every tile down over empty cells; True iff anything moved.

        Bug fix: empty cells are now skipped -- previously an empty cell
        "sliding" over another empty cell set the moved flag, so no-op
        moves were reported as real moves (and wrongly spawned a tile).
        """
        shifted = False
        for row in range(len(board) - 1, -1, -1):
            for col in range(0, len(board[row])):
                if board[row][col] == EMPTY_TILE:
                    continue
                temp_row = row
                while temp_row != len(board) - 1 and board[temp_row + 1][col] == EMPTY_TILE:
                    shifted = True
                    board[temp_row + 1][col] = board[temp_row][col]
                    board[temp_row][col] = EMPTY_TILE
                    temp_row = temp_row + 1
        return shifted

    # Randomly spawns a tile of value 2 or 4
    # P(x = 2) = 90%, P(x = 4) = 10%
    def _spawn_tile(self, board):
        """Place a 2 (90%) or a 4 (10%) on a uniformly random empty cell."""
        spawned = False
        num_empty_tiles = count_value(board, EMPTY_TILE)
        if num_empty_tiles == 0:
            return spawned

        probability = randint(1, 100)
        tile_value = 2 if probability <= 90 else 4
        # Choose the k-th empty cell uniformly among all empty cells.
        kth_selected_tile = randint(1, num_empty_tiles)

        current_empty_tile = 0
        for i, i_val in enumerate(board):
            for j, j_val in enumerate(i_val):
                if j_val == EMPTY_TILE:
                    current_empty_tile = current_empty_tile + 1
                    if current_empty_tile == kth_selected_tile:
                        board[i][j] = tile_value
                        spawned = True
                        break
            if spawned:
                self._new_tile_appeared(tile_value)
                break
        return spawned
def has_move(board):
    """Return True if the board has an empty cell or any mergeable pair.

    Checks for a vertical merge in each of the four orientations by
    rotating the board clockwise one quarter-turn at a time. The board
    always ends in its original orientation: either via the explicit
    back-rotation of 5 - i quarter-turns when a merge is found, or after
    the loop's four full quarter-turns when none is.
    """
    if count_value(board, EMPTY_TILE) > 0:
        return True
    _has_move = False
    for i in range(1, 5):
        _has_move = has_merge_down(board)
        if _has_move:
            # Rotate the board back
            utils.rotate_clockwise(board, 5 - i)
            return _has_move
        utils.rotate_clockwise(board)
    return _has_move
def has_merge_down(board):
    """Return True if any cell equals the cell directly above it."""
    for upper_row, lower_row in zip(board, board[1:]):
        for upper, lower in zip(upper_row, lower_row):
            if upper == lower:
                return True
    return False
def fresh_board(size):
    """Create a size x size board filled with empty (zero) tiles."""
    return [[0] * size for _ in range(size)]
# 2D array
def count_value(arr, value):
    """Count how many cells of a 2-D array equal `value`."""
    return sum(row.count(value) for row in arr)
| true |
57433f98e4f7d09470d4506dff26e7b8192dfa67 | Python | jadendick/AdventOfCode2020 | /day4/code.py | UTF-8 | 1,929 | 2.78125 | 3 | [] | no_license | from os import truncate
import re
file = open("input","r")
def part1():
    """Count passports containing all required fields (AoC 2020 day 4 part 1).

    Reads the module-level `file` handle; passports are blank-line separated
    groups of space/newline separated key:value tokens. A passport is valid
    with all 8 fields, or with 7 fields when only the optional "cid" is
    missing. NOTE(review): a passport at EOF without a trailing blank line
    is never counted -- confirm the input file ends with a blank line.
    """
    count = 0
    values = []
    for line in file:
        if(line != "\n"):
            values += line.split(" ")
        else:
            # 8 fields, or 7 fields none of which is "cid", is valid.
            if(len(values) == 8 or (len(values) == 7 and (len(list(filter (lambda x : x.split(":")[0] == "cid", values))) == 0))):
                count += 1
            values = []
    print(count)
def part2():
    """Count passports whose required fields are present AND valid.

    AoC 2020 day 4 part 2 rules. Fixes over the original:
    * field values are stripped of whitespace, and hcl/ecl/pid/hgt are
      validated with re.fullmatch instead of re.search, so over-long
      values such as "#1234567" or a 10-digit pid no longer pass;
    * pid must be nine DIGITS, not merely nine characters long;
    * a height with no cm/in unit is rejected instead of being silently
      parsed as inches.
    Reads the module-level `file` handle and prints the count.
    NOTE(review): a passport at EOF without a trailing blank line is
    never counted -- confirm the input file ends with a blank line.
    """
    count = 0
    values = []
    for line in file:
        if line != "\n":
            values += line.split(" ")
            continue
        # All 8 fields present, or 7 with only the optional "cid" missing.
        if len(values) == 8 or (len(values) == 7 and not any(
                v.split(":")[0] == "cid" for v in values)):
            fields = {}
            for value in values:
                key, _, val = value.partition(":")
                fields[key] = val.strip()
            byr = int(fields.get("byr"))
            iyr = int(fields.get("iyr"))
            eyr = int(fields.get("eyr"))
            # Height must be "<number>cm" (150-193) or "<number>in" (59-76).
            hgtValid = False
            hgt_match = re.fullmatch(r"(\d+)(cm|in)", fields.get("hgt"))
            if hgt_match:
                hgt = int(hgt_match.group(1))
                if hgt_match.group(2) == "cm":
                    hgtValid = 150 <= hgt <= 193
                else:
                    hgtValid = 59 <= hgt <= 76
            if (1920 <= byr <= 2002
                    and 2010 <= iyr <= 2020
                    and 2020 <= eyr <= 2030
                    and hgtValid
                    and re.fullmatch(r"#[0-9a-f]{6}", fields.get("hcl"))
                    and re.fullmatch(r"amb|blu|brn|gry|grn|hzl|oth", fields.get("ecl"))
                    and re.fullmatch(r"[0-9]{9}", fields.get("pid"))):
                count += 1
        values = []
    print(count)
# part1()
# Run part 2 against the shared file handle, then release it.
part2()
file.close()
a8c76502bce70686af30e68a71d9453376abda16 | Python | anukkrit149/competitive-programming-practice | /Python/hackerrank-problem-solving/arrays/array-manipulation.py | UTF-8 | 1,437 | 2.875 | 3 | [] | no_license | """
Copyrights Reserved
Developed By- Anukkrit Shanker
"""
# !/bin/python3
import math
import os
import random
import re
import sys
# Naives approach time limit exceed
# def arrayManipulation(n, queries):
# zeroes_arr = [0]*(n+1)
# # print(zeroes_arr)
# for query in queries:
# a, b, k = query
# for i in range(a, b + 1):
# zeroes_arr[i] += k
# return max(zeroes_arr)
# Naives approach time limit exceed
def arrayManipulation(n, queries):
    """Return the maximum value after applying range-add queries to a zero array.

    Each query (a, b, k) adds k to elements a..b (1-indexed, inclusive) of an
    array of n zeros. Uses a difference array plus a single prefix-sum sweep:
    O(n + q) instead of the naive O(n * q).

    Bug fix: removed a leftover debug `print(zeroes_arr)` which dumped the
    whole (potentially huge) difference array to stdout and corrupted the
    judged output.
    """
    diff = [0] * (n + 1)
    for a, b, k in queries:
        diff[a - 1] += k
        if b < len(diff):
            diff[b] -= k
    # Prefix-sum sweep: the running total is the current array value.
    best = 0
    running = 0
    for delta in diff:
        running += delta
        if running > best:
            best = running
    return best
# numpy implementation working
# def arrayManipulation(n, queries):
# zeroes_arr = np.zeros(n)
# for query in queries:
# zeroes_arr[query[0]-1:query[1]-1] += query[2]
# return int(max(zeroes_arr))
if __name__ == '__main__':
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line: n (array size) and m (number of queries).
    nm = input().split()
    n = int(nm[0])
    m = int(nm[1])
    # Next m lines: one "a b k" query each.
    queries = []
    for _ in range(m):
        queries.append(list(map(int, input().rstrip().split())))
    result = arrayManipulation(n, queries)
    # NOTE(review): printing n looks like leftover debug output -- the judge
    # expects only the result on stdout; confirm before submitting.
    print(n)
    print(result)
    # fptr.write(str(result) + '\n')
    #
    # fptr.close()
| true |
d4af9aa24f9b1e3acc4c851a9125cf51e38add1a | Python | lonlin0731/python-script-example | /operateLocalFile.py | UTF-8 | 8,635 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python
# -*-coding: utf-8 -*-
'''
@FileName : operateLocalFile.py
@Created : 2016/10/21 21:37
@Author : Lonlin0731
@Site : https://github.com/lonlin0731
@Description : 爬取网站目录、提取url、爬取url页面,将页面制作成pdf。
@Background : 在python中文网站上(python.usyiyi.cn),有中文版库参考文档,但是没有提供下载,
只能在线查看,在手机上看就很不方便,所以想把该参考文档爬取下来制作成pdf,以便在手机上看。
@Usage :
1、找出目录所在的页面,爬取该页面
2、找出目录页面的特征,提取指定范围内的url链接
3、循环请求url链接,将返回结果进行处理并保存到本地
4、将本地保存的所有页面生成pdf文档
'''
import re
import StringIO
import pycurl
import pdfkit
from shutil import rmtree
def main():
    # ---- Stage 1: fetch the table-of-contents page and extract links ----
    print '='*30
    print '开始抓取目录......'
    # Buffer to capture the HTTP response body.
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL,'http://python.usyiyi.cn/documents/python_278/library/index.html')
    # Redirect the response into the buffer.
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    curl.perform()
    statu_code = curl.getinfo(pycurl.HTTP_CODE)
    curl.close()
    # Each entry: [section number, relative URL, full link text].
    lLinks = []
    if statu_code == 200:
        html = buf.getvalue()
        # Grab the table-of-contents <div>; the page yields a single match.
        content = re.findall('<div class="toctree-wrapper compound">(.*?)</div>', html, re.S)
        # Extract (href, link text) pairs from every anchor in the TOC.
        links = re.findall('<a.*?href="(.*?)">(.*?)</a>', content[0], re.S)
        for item in links:
            # Skip in-page anchors so each page is requested only once.
            if '#' in item[0]:
                continue
            # Strip inner HTML tags (e.g. font styling) from the link text.
            c = re.compile(r'<[^>]+>',re.S)
            s = c.sub('', item[1])
            # Normalise em-dashes before parsing the section number.
            if '—' in s: s = s.replace('—','-')
            # Pull the leading section number, e.g. "8." or "36.15." .
            label = re.findall('^(\d+\.)([\d]*[\.]*)', s)
            Serial_number = label[0][0]+label[0][1]
            l = []
            l.append(Serial_number) # section number (ends with '.')
            l.append(item[0]) # relative URL of the article page
            l.append(s) # link text, including the section number
            lLinks.append(l)
        print statu_code,'SUC!','目录抓取完毕!'
    else:
        print statu_code,'ERROR!','目录抓取出错,已退出...'
        exit()
    buf.close()
    # ---- Stage 2: download every article page and save it locally ----
    print '='*40
    print '开始抓取页面......'
    # Pages are cached on disk first: converting local files to one PDF is
    # much faster and more robust than converting straight off the network.
    basepath = '/root/pdf/'
    baseurl = 'http://python.usyiyi.cn/documents/python_278/library/'
    # Fallback source: some pages are missing on usyiyi, so retry python.org.
    baseurl_https= 'https://docs.python.org/2.7/library/'
    # Local paths of the saved pages, later fed to pdfkit in TOC order.
    filenames = []
    for l in lLinks:
        filename = basepath + l[0] + 'html' # e.g. /root/pdf/36.15.html (serial ends with '.')
        fileurl = baseurl + l[1]
        # Unused below; kept for error messages / potential PDF bookmarks
        # (pdfkit derives bookmarks from the pages' h1-h4 headings anyway).
        bookmark = l[2]
        filenames.append(filename)
        # Fetch the article page and keep only its main content.
        buf=StringIO.StringIO()
        curl = pycurl.Curl()
        curl.setopt(pycurl.URL,fileurl)
        curl.setopt(pycurl.WRITEFUNCTION, buf.write)
        curl.perform()
        statu_code = curl.getinfo(pycurl.HTTP_CODE)
        curl.close()
        # On success, save the page body locally.
        if statu_code == 200:
            html = buf.getvalue()
            # Keep only the document body (everything before the sidebar).
            content = re.findall('<div class="document">(.*?)<div class="sphinxsidebar">', html, re.S)
            f = open(filename,'wb')
            # Drop all hrefs -- links are useless inside the generated PDF.
            content_filter = re.sub('href=".*?"', '',content[0])
            f.write(content_filter)
            f.close()
            print statu_code,'SUC!',filename,fileurl
        else:
            # Page missing on usyiyi: fetch the same page from python.org.
            buf_https=StringIO.StringIO()
            curl_https = pycurl.Curl()
            curl_https.setopt(pycurl.URL,baseurl_https+l[1])
            curl_https.setopt(pycurl.WRITEFUNCTION, buf_https.write)
            curl_https.perform()
            statu_code_https = curl_https.getinfo(pycurl.HTTP_CODE)
            curl_https.close()
            if statu_code_https == 200:
                html_https = buf_https.getvalue()
                # python.org's sidebar div is written slightly differently,
                # hence the looser end marker (no closing '>').
                content_https = re.findall('<div class="document">(.*?)<div class="sphinxsidebar', html_https, re.S)
                f_https = open(filename,'wb')
                content_filter_https = re.sub('href=".*?"', '',content_https[0])
                f_https.write(content_filter_https)
                f_https.close()
                print statu_code_https,'SUC!',filename,baseurl_https+l[1]
            else:
                print statu_code,'ERROR!',filename,filename,baseurl_https+l[1]
            buf_https.close()
        buf.close()
    print '页面抓取完毕!'
    # ---- Stage 3: convert all saved pages into a single PDF ----
    print '='*40
    print '开始生成pdf文件......'
    # encoding is required to avoid mojibake; a CJK font must also be
    # installed on the system for the Chinese text to render correctly.
    options = { 'encoding':'utf-8' }
    pdfkit.from_file(filenames, '/root/python-library.pdf',options=options)
    print '文件生成成功!'
    # Clean-up of the cached pages (deliberately disabled):
    #rmtree(basepath)
if __name__ == '__main__':
main() | true |
426fb918b13ed7df839ba840b8106e3c042eb5e0 | Python | vianaclaus/Python- | /entregar/ex5.py | UTF-8 | 386 | 4.0625 | 4 | [] | no_license | height = float(input("Qual é a sua altura? "))
# Ask for gender: H = male, M = female (prompt text kept in Portuguese).
gender = input("Se você for homem digite H, se você for mulher, digite M")
ideal_weight = None
if gender in ("M", "m"):
    # Ideal-weight formula for women, using `height` read above.
    ideal_weight = 62.1*height - 44.7
elif gender in ("H", "h"):
    # Ideal-weight formula for men.
    ideal_weight = 72.7*height - 58.00
else:
    print("Esse valor não é valido")
# Bug fix: only print a result when a valid gender was given; previously an
# invalid answer crashed with NameError because ideal_weight was never bound.
if ideal_weight is not None:
    print("O seu peso ideal é: {}".format(ideal_weight))
e1dc8d4e1a077c9e375d2d802e6b80fa850678b8 | Python | zimeon/ocfl-py | /tests/test_object_utils.py | UTF-8 | 3,612 | 2.65625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""Object tests."""
import argparse
import unittest
from ocfl.object_utils import remove_first_directory, make_unused_filepath, next_version, add_object_args, add_shared_args, check_shared_args, find_path_type
class TestAll(unittest.TestCase):
    """Unit tests for the helpers in ocfl.object_utils."""

    def test_remove_first_directory(self):
        """Test remove_first_directory function."""
        self.assertEqual(remove_first_directory(''), '')
        self.assertEqual(remove_first_directory('a'), '')
        self.assertEqual(remove_first_directory('a/b'), 'b')
        self.assertEqual(remove_first_directory('a/b/'), 'b')
        self.assertEqual(remove_first_directory('a/b/c'), 'b/c')

    def test_make_unused_filepath(self):
        """Test make_unused_filepath function."""
        # Second argument is the set of used paths; third (default '__')
        # is the separator inserted before the disambiguating number.
        self.assertEqual(make_unused_filepath('x/y', []), 'x/y__2')
        self.assertEqual(make_unused_filepath('x/y', {'x/y__2': 1}), 'x/y__3')
        self.assertEqual(make_unused_filepath('x/y', {'x/y': 1}, ''), 'x/y2')
        self.assertEqual(make_unused_filepath('x/y', ['x/y', 'x/y2', 'x/y3'], ''), 'x/y4')

    def test_next_version(self):
        """Test next_version function."""
        # Malformed version strings are rejected.
        self.assertRaises(Exception, next_version, '1')
        self.assertRaises(Exception, next_version, 1)
        self.assertRaises(Exception, next_version, 'v1v')
        # good non-zero padded
        self.assertEqual(next_version('v1'), 'v2')
        self.assertEqual(next_version('v99'), 'v100')
        self.assertEqual(next_version('v1234'), 'v1235')
        # good zero-padded
        self.assertEqual(next_version('v01'), 'v02')
        self.assertEqual(next_version('v00001'), 'v00002')
        self.assertEqual(next_version('v00999'), 'v01000')
        self.assertEqual(next_version('v0998'), 'v0999')
        # overflow: zero-padded versions cannot grow an extra digit
        self.assertRaises(Exception, next_version, 'v09')
        self.assertRaises(Exception, next_version, 'v0999')

    def test_add_object_args(self):
        """Test (kinda) adding object args."""
        parser = argparse.ArgumentParser()
        add_object_args(parser)
        args = parser.parse_args(['--skip', 'aa'])
        self.assertIn('aa', args.skip)

    def test_add_shared_args(self):
        """Test (kinda) adding shared args."""
        parser = argparse.ArgumentParser()
        add_shared_args(parser)
        args = parser.parse_args(['--version', '-v'])
        self.assertTrue(args.version)
        self.assertTrue(args.verbose)

    def test_check_shared_args(self):
        """Test check of shared args."""
        parser = argparse.ArgumentParser()
        add_shared_args(parser)
        parser.parse_args(['--version', '-v'])
        check_shared_args(parser.parse_args(['-v']))
        # --version makes check_shared_args print and exit.
        self.assertRaises(SystemExit, check_shared_args, parser.parse_args(['--version']))

    def test_find_path_type(self):
        """Test find_path_type function.

        NOTE(review): relies on fixture directories relative to the
        working directory the tests run from -- confirm test runner cwd.
        """
        self.assertEqual(find_path_type("extra_fixtures/good-storage-roots/fedora-root"), "root")
        self.assertEqual(find_path_type("fixtures/1.0/good-objects/minimal_one_version_one_file"), "object")
        self.assertEqual(find_path_type("README"), "file")
        self.assertIn("does not exist", find_path_type("this_path_does_not_exist"))
        self.assertIn("nor can parent", find_path_type("still_nope/nope_doesnt_exist"))
        self.assertEqual(find_path_type("ocfl"), "no 0= declaration file")
        self.assertEqual(find_path_type("extra_fixtures/misc/multiple_declarations"), 'root')
        self.assertIn("unrecognized", find_path_type("extra_fixtures/misc/unknown_declaration"))
| true |
6e5a4389d52b635a32bcb9148e306acf87075a14 | Python | ccoughlin/SkinDepth | /tests/testcontroller.py | UTF-8 | 11,076 | 3.0625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | '''testcontroller.py- Tests the basic controller'''
__author__ = 'Chris'
import os
import os.path
import unittest
import tempfile
from platform import SkinDepthController
from material import Material
class TestSkinDepthController(unittest.TestCase):
    '''Tests the basic controller for SkinDepth.

    The Python 2.6 compatibility fallbacks (assertAlmostEqual without
    delta=, missing assertIsNone / assertItemsEqual) were copy-pasted in
    every test; they are now factored into the assertClose / assertNone /
    assertSameItems helpers. The two savecopy tests share _check_savecopy.
    '''

    def setUp(self):
        self.testctrl = SkinDepthController.SkinDepthController(":memory:")
        self.testctrl.open()

    # ---- Python 2.6-compatible assertion helpers -----------------------
    def assertClose(self, expected, actual):
        '''Assert two numbers agree to within ~1% of the expected value.'''
        try:
            self.assertAlmostEqual(expected, actual, delta=0.01 * expected)
        except TypeError:
            # Python 2.6's assertAlmostEqual has no delta= keyword
            self.assertAlmostEqual(expected, actual, places=1)

    def assertNone(self, obj):
        '''assertIsNone with a fallback for Python 2.6.'''
        try:
            self.assertIsNone(obj)
        except AttributeError:
            self.assertTrue(obj is None)

    def assertSameItems(self, a, b):
        '''Order-insensitive sequence equality with a Python 2.6 fallback.'''
        try:
            self.assertItemsEqual(a, b)
        except AttributeError:
            self.assertEqual(sorted(a), sorted(b))

    def assertMaterialMatches(self, expected_name, expected_notes,
                              expected_iacs, expected_mu_r, retrieved):
        '''Compare a fetched material dict against expected field values.'''
        self.assertEqual(expected_name, retrieved["name"])
        self.assertEqual(expected_notes, retrieved["notes"])
        self.assertClose(expected_iacs, retrieved["iacs"])
        self.assertClose(expected_mu_r, retrieved["mu_r"])

    # ---- tests ---------------------------------------------------------
    def test_addone(self):
        '''Testing addition of a material to the database'''
        testmaterial_dict = {"name": "Iron", "notes": "Pure Iron", "iacs": 18, "mu_r": 150}
        self.testctrl.add(testmaterial_dict)
        retrieved_material = self.testctrl.db.retrieve("Iron")
        # db.retrieve returns a Material object, so compare attributes.
        self.assertEqual(testmaterial_dict["name"], retrieved_material.name)
        self.assertEqual(testmaterial_dict["notes"], retrieved_material.notes)
        self.assertClose(testmaterial_dict["iacs"], retrieved_material.iacs)
        self.assertClose(testmaterial_dict["mu_r"], retrieved_material.mu_r)

    def test_fetchone(self):
        '''Testing retrieval of one material from the database'''
        testmaterial = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        self.testctrl.db.add(testmaterial)
        retrieved_material = self.testctrl.fetch("Iron")
        self.assertMaterialMatches(testmaterial.name, testmaterial.notes,
                                   testmaterial.iacs, testmaterial.mu_r,
                                   retrieved_material)

    def test_fetchlist(self):
        '''Testing retrieval of the complete list of materials from the database'''
        iron = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        copper = Material.Material(name="Copper", sigma_iacs=100, mu_rel=1, notes="Pure Annealed Copper")
        aluminum = Material.Material(name="Aluminum", sigma_iacs=61, mu_rel=1, notes="Unalloyed Pure Aluminum")
        cobalt = Material.Material(name="Cobalt", sigma_iacs=27.6, mu_rel=70,
                                   notes="Relative permeability can range between 70-250")
        water = Material.Material(name="Water", sigma_iacs=4.353e-10, mu_rel=1, notes="Tap water")
        thematerials = [iron, copper, aluminum, cobalt, water]
        thematerialnames = [material.name for material in thematerials]
        for amat in thematerials:
            self.testctrl.db.add(amat)
        materials_list = self.testctrl.fetchlist()
        self.assertEqual(len(thematerialnames), len(materials_list))
        self.assertSameItems(thematerialnames, materials_list)

    def test_deleteone(self):
        '''Testing deletion of one material from the database'''
        testmaterial = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        self.testctrl.db.add(testmaterial)
        self.testctrl.remove("Iron")
        self.assertNone(self.testctrl.fetch("Iron"))

    def test_undo(self):
        '''Testing database rollback'''
        testmaterial = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        self.testctrl.db.add(testmaterial)
        self.testctrl.update()
        self.testctrl.remove("Iron")
        self.assertNone(self.testctrl.fetch("Iron"))
        # Rolling back must restore the material removed after the commit.
        self.testctrl.undo()
        retrieved_material = self.testctrl.fetch("Iron")
        self.assertMaterialMatches(testmaterial.name, testmaterial.notes,
                                   testmaterial.iacs, testmaterial.mu_r,
                                   retrieved_material)

    def test_calcdelta(self):
        '''Verifying attenuation depth calculation'''
        testmaterial = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        self.testctrl.db.add(testmaterial)
        skindepth = self.testctrl.calcdelta(materialname="Iron", frequency=1138)
        self.assertClose(testmaterial.calc_skindepth(1138), skindepth)
        # An unknown material yields None rather than raising.
        self.assertNone(self.testctrl.calcdelta(materialname="Adamantium", frequency=1))

    def test_calcfrequency(self):
        '''Verifying excitation frequency calculation'''
        testmaterial = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        self.testctrl.db.add(testmaterial)
        freq = self.testctrl.calcfrequency(materialname="Iron", skindepth=0.75)
        self.assertClose(testmaterial.calc_frequency(attenuation=0.75), freq)

    def test_exportdb(self):
        '''Testing export of the database as a SQL script'''
        testmaterial_dict = {"name": "Iron", "notes": "Pure Iron", "iacs": 18, "mu_r": 150}
        self.testctrl.add(testmaterial_dict)
        self.testctrl.update()
        temp_sql_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            # Export to SQL, then import into a fresh in-memory database.
            self.testctrl.exportsql(temp_sql_file.name)
            file_db = SkinDepthController.SkinDepthController(":memory:")
            file_db.importsql(temp_sql_file.name)
            retrieved_material = file_db.fetch("Iron")
        finally:
            # Always remove the temp file, even if the round-trip fails.
            temp_sql_file.close()
            if os.path.exists(temp_sql_file.name):
                os.remove(temp_sql_file.name)
        self.assertMaterialMatches(testmaterial_dict["name"], testmaterial_dict["notes"],
                                   testmaterial_dict["iacs"], testmaterial_dict["mu_r"],
                                   retrieved_material)

    def _check_savecopy(self, dbpath):
        '''Populate a controller at dbpath, savecopy it, verify the copy.

        NOTE(review): the on-disk files ("test.db", "copyoftest.db") are
        left behind; consider cleaning them up in tearDown.
        '''
        dbfile_ctrl = SkinDepthController.SkinDepthController(dbpath)
        dbfile_ctrl.open()
        iron = Material.Material(name="Iron", sigma_iacs=18, mu_rel=150, notes="Pure Iron")
        iron_dict = {"name": iron.name, "notes": iron.notes, "iacs": iron.iacs, "mu_r": iron.mu_r}
        dbfile_ctrl.add(iron_dict)
        dbfile_ctrl.update()
        dbfile_ctrl.savecopy("copyoftest.db")
        dbcopy_ctrl = SkinDepthController.SkinDepthController("copyoftest.db")
        dbcopy_ctrl.open()
        ironcopy_dict = dbcopy_ctrl.fetch("Iron")
        self.assertMaterialMatches(iron.name, iron.notes, iron.iacs, iron.mu_r,
                                   ironcopy_dict)

    def test_savecopyfromfile(self):
        '''Testing database file copies from storage'''
        self._check_savecopy("test.db")

    def test_savecopyfrommemory(self):
        '''Testing database file copies from :memory:'''
        self._check_savecopy(":memory:")

    def tearDown(self):
        pass
def run():
    '''Runs the tests in the suite'''
    # Build a suite from this module's TestCase and run it verbosely.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSkinDepthController)
    unittest.TextTestRunner(verbosity=2).run(suite)
ddeb5ab6d4267744682bfcf2f6d60d828ce79ff6 | Python | shubhamishere/JustSomeRandomPythonShit | /dividedBy.py | UTF-8 | 186 | 3.296875 | 3 | [] | no_license | def spam(divideBy):
return 84/divideBy
try:
    # raw_input (Python 2) reads a string; round() the float before dividing.
    print(spam(round(float(raw_input("Enter a number: ")))))
except ZeroDivisionError:
    # spam() divides 84 by its argument, so an input of 0 lands here.
    print ("Cannot divide by zero! You asshole.")
f6c06a44fc7fc72503047aa04e416790e0052bed | Python | ashutosh321607/DS601--Digital-Image-Processing | /DIP-A2-G4/q2_test.py | UTF-8 | 1,763 | 3.21875 | 3 | [] | no_license |
import pickle
import argparse
from q2 import *
def conv_list(list, columns):  # parameter name kept for compatibility, though it shadows the builtin
    """Split a flat list into rows of `columns` items each (a 2-D matrix).

    Trailing elements that do not fill a complete row are dropped, matching
    the original behavior. Fix: the original computed the row count with
    float division (`int(len(list)/columns)`), which can round wrongly for
    very large lists; floor division is exact.
    """
    rows = len(list) // columns
    return [list[i * columns:(i + 1) * columns] for i in range(rows)]
# Command-line driver for the arithmetic-coding routine in q2.
parser = argparse.ArgumentParser()
parser.add_argument("--prob",nargs='+',type=float,help='Probability matrix with 2 columns')
parser.add_argument("--N",default=10,type=int,help='Number of symbols to be coded together')
parser.add_argument("--msg",nargs='+',default=None,type=int,help='Message to be encoded')
parser.add_argument("--image",default='q2_Image.jpg',type=str,help='Path to the image file')
args= parser.parse_args()
# --prob is given as a flat list; reshape it into a two-column matrix.
if args.prob != None:
    prob_matrix=conv_list(args.prob,2)
else:
    prob_matrix=None
# When both a probability matrix and a message are supplied, the image
# input is ignored.
if args.prob != None and args.msg!=None:
    args.image=None
print("Parameters used are as follows:\n\n","Probability matrix - ",prob_matrix,"\n","N - ",args.N,"\n","Message - ",args.msg,"\n","Image Path - ",args.image)
Encoded,prob_matrix=arithmetic(prob_matrix=prob_matrix,N=args.N,message=args.msg,image_path=args.image)
print("\n")
with open('q2_Output.pickle', 'wb') as f:
    pickle.dump((Encoded,prob_matrix), f) # Save the arithmetic encoding and probability table to q2_Output.pickle
with open('q2_Output.pickle', 'rb') as f:
    Encoded,prob_matrix=pickle.load(f) # Round-trip: reload them from q2_Output.pickle to verify the saved file
print('Encoding is as follows : \n')
for i in Encoded:
    print(i)
print('\n\n')
print('The Probability matrix used is : \n')
for i in prob_matrix:
    print('Symbol : {} , probability : {}'.format(i[0],i[1]))
class Person:
    """A simple person record with a name, an age and a height."""

    def __init__(self, name, age, height):
        """Store the person's basic attributes."""
        self.name = name
        self.age = age
        self.height = height

    def __str__(self):
        """One-line human-readable summary of the person."""
        return "name: {}, age: {}, height: {}".format(self.name, self.age, self.height)

    def get_older(self, years):
        """Advance the person's age by the given number of years."""
        self.age = self.age + years
class Worker(Person):
    """A Person who additionally has a monthly salary."""

    def __init__(self, name, age, height, salary):
        # Zero-argument super() is the modern Python 3 form (the file
        # already uses f-strings); it delegates name/age/height
        # initialisation to Person.
        super().__init__(name, age, height)
        self.salary = salary  # monthly salary

    def __str__(self):
        """Person summary extended with the salary."""
        text = super().__str__()
        text += f", Salary: {self.salary}"
        return text

    def calc_yearly_salary(self):
        """Return the yearly salary (12 monthly salaries)."""
        return self.salary * 12
# Demo: build a Worker and show its string form and yearly salary.
worker1 = Worker("Carlos", 30, 173, 3000)
print(worker1)
print(worker1.calc_yearly_salary())  # 3000 * 12 = 36000
######################### OPERATORS OVERLOAD ######################################
class Vector:
    """A 2-D vector supporting + and - through operator overloading."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other):
        # Component-wise sum, producing a new Vector.
        sum_x = self.x + other.x
        sum_y = self.y + other.y
        return Vector(sum_x, sum_y)

    def __sub__(self, other):
        # Component-wise difference, producing a new Vector.
        diff_x = self.x - other.x
        diff_y = self.y - other.y
        return Vector(diff_x, diff_y)

    def __str__(self):
        return f"X: {self.x}, Y: {self.y}"
# Demo of the overloaded operators: + and - build new Vectors, and
# print() uses __str__ for the readable form.
v1 = Vector(5, 3)
v2 = Vector(4, 2)
print(v1)
print(v2)
print(v1 + v2)
print(v1 - v2)
fa46028af387e6f938cfb741a67d3db5d17615d2 | Python | Beautyi/PythonPractice | /Chapter5-Practice/ex_5_10.py | UTF-8 | 910 | 3.125 | 3 | [] | no_license | #检查用户名
# Check proposed user names against the ones already taken
# (case-sensitive comparison).
current_users = ['Jack', 'Jobs', 'admin', 'Martin', 'Ava']
new_users = [ 'Tom', 'Martin', 'Ava', 'Zoe', 'Rose']
for new_user in new_users:
    if new_user in current_users:
        print("Sorry,you should change the user name.")
    else:
        print("The user name has not be used.")
# Expected output:
# The user name has not be used.
# Sorry,you should change the user name.
# Sorry,you should change the user name.
# The user name has not be used.
# The user name has not be used.

# Same check, but case-insensitive: compare lowercased candidates against
# a lowercased copy of the existing-user list.
current_users = ['Jack', 'Jobs', 'admin', 'Martin', 'Ava']
new_users = [ 'Tom', 'martin', 'ava', 'Zoe', 'Rose']
current_users_lower = []
for current_user in current_users:
    current_users_lower.append(current_user.lower())
for new_user in new_users:
    if new_user.lower() in current_users_lower:
        print("Sorry,you should change the user name.")
    else:
        print("The user name has not be used.")
| true |
feec5af1040be0cafa4bfdded653975f66438c19 | Python | manpreet-singh/DataCollectionPackage | /Butter.py | UTF-8 | 1,341 | 2.953125 | 3 | [
"MIT"
] | permissive | import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    `cutoff` and `fs` are in Hz; the cutoff is normalised by the Nyquist
    frequency (fs/2) as scipy expects. Returns the (b, a) transfer-function
    coefficients.
    """
    nyquist = 0.5 * fs
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Low-pass filter `data` using the design from butter_lowpass()."""
    numerator, denominator = butter_lowpass(cutoff, fs, order=order)
    return lfilter(numerator, denominator, data)
# Filter requirements.
order = 6
fs = 50       # sample rate, Hz
cutoff = 2.25 # desired cutoff frequency of the filter, Hz
# "Noisy" data. We want to recover the 1.2 Hz signal from this.
# Column 0 is the time axis; columns 10-12 are filtered below as x/y/z.
allData = np.loadtxt(open('data.csv', 'rb'), delimiter=",")
t = allData[:,0]
plt.plot(t, allData[:,12]+0.01, 'b-', label='data')
data = allData[:,12]
# Filter the data, and plot both the original and filtered signals.
y = butter_lowpass_filter(data, cutoff, fs, order)
z_accel = y
y_accel = butter_lowpass_filter(allData[:,11], cutoff, fs, order)
x_accel = butter_lowpass_filter(allData[:,10], cutoff, fs, order)
# NOTE(review): `newData = allData` aliases the array (no copy), so the
# assignments below also overwrite allData's columns in place.
newData = allData
# The small constant offsets look like per-axis calibration corrections
# -- TODO confirm their origin.
newData[:,10] = x_accel-0.1
newData[:,11] = y_accel+0.033
newData[:,12] = z_accel+0.045
np.savetxt('newData.csv', newData, delimiter=',')
plt.plot(t, y, 'g-', linewidth=2, label='filtered data')
plt.xlabel('Time [sec]')
plt.grid()
plt.legend()
plt.subplots_adjust(hspace=0.35)
plt.show()
a3adeb5a47158e7f6cf1e1c1d965b3e69b961bf1 | Python | Ali-J2/aws-jmeter-test-engine | /jmeter-icap-poc/scripts/CreateDashboard.py | UTF-8 | 2,841 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | import requests
import json
import argparse
# get command line arguments and return their parsed content
def __get_commandline_args():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Get Grafana template file, prefix to use when producing dashboards, '
                    'API key, and grafana URL')
    # All four options are required strings; declare them from a small
    # spec table so the flags, names and help texts sit side by side.
    option_specs = (
        ('-u', '--url', 'The URL to your grafana DB home'),
        ('-k', '--key', 'API key to be used for dashboard creation in grafana database'),
        ('-f', '--file', 'path to grafana template to be created'),
        ('-p', '--prefix', 'prefix used for differentiating grafana dashboards and metrics'),
    )
    for flag, long_name, help_text in option_specs:
        arg_parser.add_argument(flag, long_name, type=str, help=help_text, required=True)
    return arg_parser.parse_args()
# Appends prefix to all occurrences of "measurement" value in the Grafana JSON file
def __add_prefix_to_grafana_json(grafana_json, prefix):
    """Prepend *prefix* to every target 'measurement' in the dashboard JSON.

    Walks dashboard -> panels -> targets and rewrites each target's
    'measurement' value in place, so the dashboard queries the prefixed
    metric names.
    """
    for panel in grafana_json["dashboard"].get('panels', []):
        # Look the 'targets' key up directly. The original scanned every
        # panel key for the *substring* 'targets', which both matched
        # unrelated keys and raised KeyError when such a key existed but
        # 'targets' itself did not.
        for target in panel.get('targets', []):
            if 'measurement' in target:
                target['measurement'] = prefix + target['measurement']
# responsible for posting the dashboard to Grafana and returning the URL to it
def __post_grafana_dash(key, grafana_template, prefix, grafana_url):
    """POST the (prefixed) dashboard JSON to Grafana; return the new URL.

    Reads the template file, prefixes every measurement, posts it to the
    Grafana dashboards API with the given bearer key and, when Grafana
    reports success, returns a link pointing directly at the created
    dashboard. Returns None when the response carries no URL.
    """
    # Normalise the base URL so API / dashboard paths can be appended.
    if grafana_url[len(grafana_url) - 1] != '/':
        grafana_url += '/'
    grafana_api_url = grafana_url + 'api/dashboards/db'
    headers = {
        "Authorization": "Bearer " + key,
        "Content-Type": "application/json"}
    with open(grafana_template) as json_file:
        grafana_json = json.load(json_file)
    __add_prefix_to_grafana_json(grafana_json, prefix)
    print(grafana_json)
    resp = requests.post(grafana_api_url, json=grafana_json, headers=headers)
    print(resp.text)
    # Parse the response as JSON. The original used eval(), which would
    # execute arbitrary code returned by the server (and could not parse
    # JSON true/false/null anyway).
    d = json.loads(resp.text)
    # if the response contains a URL, use it to build a url that links directly to the newly created dashboard
    if "url" in d:
        return grafana_url + d.get('url')
# main: Gets command line arguments, creates dashboard in grafana, outputs URL in response (if any)
if __name__ == '__main__':
    arguments = __get_commandline_args()
    key = arguments.key
    grafana_template = arguments.file
    prefix = arguments.prefix
    grafana_url = arguments.url
    # Prints the new dashboard's URL, or None when Grafana returned no URL.
    print(__post_grafana_dash(key, grafana_template, prefix, grafana_url))
| true |
b62ebd92586adfeff5de1d64c5997146ce14c52a | Python | MichaelLoves/Python-based-Automatic-Processing | /python script/netlist_into_sp.py | UTF-8 | 1,552 | 2.78125 | 3 | [] | no_license | #encoding:utf-8
import os, sys, getopt, operator, re
# Read the netlist_sim and .sp file paths from the command line.
opts, args = getopt.getopt(sys.argv[1:], "hi:o:")
input_file, output_file = '', ''

def usage():
    # Short usage hint printed for the -h flag.
    print('Usage: ' + sys.argv[0] + ' -i netlist_sim -o ***.sp')

# -i: input netlist_sim file, -o: output .sp file, -h: help (then exit).
for op, value in opts:
    if op == '-i':
        input_file = value
    if op == '-o':
        output_file = value
    elif op == '-h':
        usage()
        sys.exit()
# Read the original netlist_sim file and keep every line EXCEPT those
# containing an "iNN..." instance name (e.g. i66); survivors are stored
# in new_netlist_sim_file.
netlist_sim_file = open(input_file, 'r')
new_netlist_sim_file = []
for line in netlist_sim_file.readlines()[1:]: # skip the blank first line of the netlist_sim file
    if re.findall(r"\bi\d{2}\w*\b", line):
        pass
    else:
        new_netlist_sim_file.append(line)
netlist_sim_file.close()

# Load the .sp file whose netlist_sim section will be replaced.
sp_file = open(output_file, 'r')
temp_file = sp_file.readlines()
# Find the line numbers of the two ***netlist_sim*** marker lines in the
# .sp file; this helper returns every position of an element in a list.
def find_all_index(array, item):
    # 0-based positions of all exact matches of `item` within `array`.
    return [position for position, value in enumerate(array) if value == item]
# Find a line containing 'netlist_sim' (both marker lines are identical,
# so find_all_index() then yields their two positions).
# NOTE(review): netlist_sim_line stays unbound if nothing matches, and the
# unpacking assumes exactly two marker lines -- confirm against inputs.
for line in temp_file:
    if 'netlist_sim' in line:
        netlist_sim_line = line
start_line, end_line = find_all_index(temp_file, netlist_sim_line)
# Splice the filtered netlist between the two marker lines.
temp_file[start_line+3 : end_line-2] = new_netlist_sim_file
# Write the patched contents back to the .sp file.
write_file = open(output_file, 'w')
for line in temp_file:
    write_file.write(line)
write_file.close()
sp_file.close()
| true |
741c1118ea7c2e638d862ab482e74a7a636961f2 | Python | timothyrubin/python_gclda | /examples/script_run_gclda.py | UTF-8 | 5,219 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | # This script is useful for training a full model.
# It can either create a new model or load an existing model (depending on the value of 'current_iter'), and then perform training updates on the model
# It will save a compressed model to disk every so often, in case program needs to be halted an later resumed
from python_gclda_package.gclda_dataset import gclda_dataset
from python_gclda_package.gclda_model import gclda_model
import cPickle as pickle
import os
import gzip
# NOTE: this script is Python 2 (cPickle import, print statement below)
# and depends on the project package python_gclda_package.
# ----------------------------------------------------
# --- Set up Dataset Label & Load/Save Directories ---
# ----------------------------------------------------
# Set up dataset-label and dataset-directory we use for building dataset object
datasetLabel = '2015Filtered2_TrnTst1p1'
dataDirectory = '../datasets/neurosynth/'
# Root-directory where model results are saved
results_rootdir = 'gclda_results'
# -----------------------------------------
# --- Configure the sampling parameters ---
# -----------------------------------------
current_iter = 0 # Current model iteration: if 0, start new model, otherwise load & resume sampling existing model
total_iterations = 1000 # Total iterations to run up to
save_freq = 25 # How often we save a model object and topic-distributions to file
loglikely_Freq = 5 # How often we compute log-likelihood (which slows training down a bit, but is useful for tracking model progress)
sampler_verbosity = 2 # How much information about sampler progress gets printed to console (2 is max, 0 is min)
# ------------------------------------------
# --- Configure gcLDA model Parameters ---
# ------------------------------------------
nt = 100 # Number of topics
nr = 2 # Number of subregions (any positive integer, but must equal 2 if symmetric subregion model)
alpha = .1 # Prior count on topics for each doc
beta = .01 # Prior count on word-types for each topic
gamma = .01 # Prior count added to y-counts when sampling z assignments
delta = 1.0 # Prior count on subregions for each topic
roi = 50 # Default spatial 'Region of interest' size (default value of diagonals in covariance matrix for spatial distribution, which the distributions are biased towards)
dobs = 25 # Region 'default observations' (# pseudo-observations biasing Sigma estimates in direction of default 'roi' value)
symmetric = True # Use symmetry constraint on subregions? (symmetry requires nr = 2)
seed_init = 1 # Initial value of random seed
# --- Set up model_str identifier for saving/loading results based on model params ---
model_str = '%s_%dT_%dR_alpha%.3f_beta%.3f_gamma%.3f_delta%.3f_%ddobs_%.1froi_%dsymmetric_%d' % (datasetLabel,
    nt, nr, alpha, beta, gamma, delta, dobs, roi, symmetric, seed_init)
# -----------------------------------------------------------------------------
# --- Set up directories and either Initialize model or load existing model ---
# -----------------------------------------------------------------------------
# --- Set up directories for saving/loading results ---
if not os.path.isdir(results_rootdir):
    os.mkdir(results_rootdir)
results_outputdir = '%s/%s' % (results_rootdir, model_str)
if not os.path.isdir(results_outputdir):
    os.mkdir(results_outputdir)
# --- Initialize / Load model object (depending on current_iter) ---
if current_iter == 0:
    # --- If starting a new model ---
    # Create dataset object & Import data
    dat = gclda_dataset(datasetLabel,dataDirectory) # Create dataset object
    dat.importAllData() # Import all data from txt files into object
    dat.displayDatasetSummary() # Print dataset summary to console
    # Create a model object (using the dataset object and all parameter settings) and initialize
    model = gclda_model(dat, nt, nr , alpha, beta, gamma, delta, dobs, roi, symmetric, seed_init)
    model.initialize() # Randomly initialize all z, y and r assignments. Get initial spatial estimates
else:
    # --- If resuming existing model ---
    print "Resuming model at iteration %02d" % current_iter
    # Set up loadfilename
    results_loadfile = "%s/results_iter%02d.p" % (results_outputdir, current_iter)
    # Load compressed model object
    with gzip.open(results_loadfile,'rb') as f:
        model = pickle.load(f)
# Display initialized or loaded model summary
model.displayModelSummary()
# ----------------------------------------------------------------
# --- Run the gclda model until model.iter = total_iterations ---
# ----------------------------------------------------------------
for i in range(model.iter, total_iterations):
    # Cycle through an iteration of all parameter updates (update_z, update_y, update_regions)
    model.runCompleteIteration(loglikely_Freq, sampler_verbosity)
    # Save model and topic-word distributions every 'savefreq' iterations
    if ((model.iter % save_freq )==0):
        # Set up savefilenames
        savefilenm_pickle = "%s/results_iter%02d.p" % (results_outputdir, model.iter)
        savefilenm_csv = "%s/results_iter%02d.csv" % (results_outputdir, model.iter)
        # Save a gzip compressed model object to file
        with gzip.open(savefilenm_pickle,'wb') as f:
            pickle.dump(model,f)
        # Print topic-word distributions
        model.printTopicWordProbs(savefilenm_csv)
| true |
b4309ed922ed2d7aa9e45d2d24ff442c8017a428 | Python | spatrayuni/Python | /Learning/Strings_sample_1.py | UTF-8 | 226 | 3.1875 | 3 | [] | no_license | """This example is use String variables to concatination
"""
import sys
if __name__ == "__main__":
    # Python 2 script: expects two positional command-line arguments and
    # prints their concatenation. Raises IndexError if either is missing.
    first_arg = sys.argv[1]
    second_arg = sys.argv[2]
    concatinated_string = first_arg + second_arg
    print concatinated_string
| true |
93a44fb9b14db40289785ed1981d802be210b8a9 | Python | visheshlm10/neural-lattice-language-models | /accumulator.py | UTF-8 | 2,780 | 3.078125 | 3 | [] | no_license | from collections import namedtuple
import math
import util
# Schema records: an AccumulatorItem bundles a value's name with its
# initialiser and fold function; a Display names a formatter over the
# accumulated values. (The typename "Accumulator" given to the first
# namedtuple is shadowed by the class defined below.)
AccumulatorItem = namedtuple("Accumulator", ["name", "init", "update"])
Display = namedtuple("Display", ["name", "func"])
class Accumulator(object):
    """Collects named running values and formats them for display.

    `items` are AccumulatorItem-like objects (name, init, update);
    `displays` are Display-like objects (name, func) rendering
    self.values. All folding/formatting is best-effort: items or
    displays whose inputs are missing are silently skipped.
    """

    def __init__(self, items, displays):
        self.items = {item.name: item for item in items}
        # One running value per item; None means "not initialised yet".
        self.values = {item.name: None for item in items}
        self.displays = displays

    def update(self, update_dict):
        """Fold `update_dict` into every item's running value (best effort)."""
        for item_name in self.items:
            try:
                if self.values[item_name] is None:
                    self.values[item_name] = self.items[item_name].update(self.items[item_name].init(), update_dict)
                else:
                    self.values[item_name] = self.items[item_name].update(self.values[item_name], update_dict)
            except Exception:
                # Deliberate best-effort: an item whose inputs are absent
                # from update_dict is skipped this round. (The original
                # bare `except:` also swallowed KeyboardInterrupt.)
                pass

    def pp(self, delimiter=u' | '):
        """Pretty-print all displays as 'Name: value', joined by delimiter."""
        outs = []
        for display in self.displays:
            try:
                outs.append(display.name + ": " + display.func(self.values))
            except Exception:
                pass  # skip displays whose inputs are not available yet
        return delimiter.join(outs)

    def lp(self, delimiter=u','):
        """Log-print: display values only (no names), joined by delimiter."""
        outs = []
        for display in self.displays:
            try:
                outs.append(display.func(self.values))
            except Exception:
                pass
        return delimiter.join(outs)
# Concrete accumulator schema used during training: each item folds one
# statistic out of the per-batch update dict (losses arrive as objects
# exposing .vec_value(), counts as plain sequences).
accs = [
    AccumulatorItem("loss", lambda :0, lambda v,d: v + sum(d["loss"].vec_value())),
    AccumulatorItem("klloss", lambda :0, lambda v,d: v + sum(d["klloss"].vec_value())),
    AccumulatorItem("klanneal", lambda :0, lambda v,d: d["klanneal"]),
    AccumulatorItem("discloss", lambda :0, lambda v,d: v + sum(d["discloss"].vec_value())),
    AccumulatorItem("genloss", lambda :0, lambda v,d: v + sum(d["genloss"].vec_value())),
    AccumulatorItem("reconloss", lambda :0, lambda v,d: v + sum(d["reconloss"].vec_value())),
    AccumulatorItem("convergence", lambda :0, lambda v,d: v + sum(d["convergence"].vec_value())),
    AccumulatorItem("charcount", lambda :0, lambda v,d: v + sum(d["charcount"])),
    AccumulatorItem("wordcount", lambda :0, lambda v,d: v + sum(d["wordcount"])),
    AccumulatorItem("sentcount", lambda :0, lambda v,d: v + len(d["wordcount"])),
]
# Formatters over the accumulated values: mostly per-word averages, plus
# perplexity (exp of per-word loss) and bits-per-character.
disps = [
    Display("Loss", lambda d:"%4f" % (d["loss"] / d["wordcount"])),
    Display("KLL", lambda d:"%4f" % (d["klloss"] / d["wordcount"])),
    Display("KLA", lambda d:"%4f" % (d["klanneal"])),
    Display("Gen", lambda d:"%4f" % (d["genloss"] / d["wordcount"])),
    Display("Disc", lambda d:"%4f" % (d["discloss"] / d["wordcount"])),
    Display("Recon", lambda d:"%4f" % (d["reconloss"] / d["wordcount"])),
    Display("Conv", lambda d:"%4f" % (d["convergence"] / d["wordcount"])),
    Display("Perp", lambda d:"%4f" % math.exp(d["loss"] / d["wordcount"])),
    Display("BPC", lambda d:"%4f" % (d["loss"]/math.log(2) / d["charcount"])),
]
bd851d1637bfe16d5763c519d57d781573ad20c2 | Python | flogothetis/Data-Structures-Python | /LinkedLists/CircularList/CircularList.py | UTF-8 | 1,842 | 4.15625 | 4 | [] | no_license | from Node import Node
class CircularList:
    """Singly linked circular list; `head` is None when the list is empty."""

    def __init__(self):
        self.head = None

    def insertAtFront(self, element):
        """Insert `element` at the front; the new node becomes the head."""
        # Empty list: the single node points at itself.
        if self.head is None:
            self.head = Node(element)
            self.head.next = self.head
            return
        # Create the new node pointing at the current head.
        newNode = Node(element)
        newNode.next = self.head
        # Find the last node (the one whose `next` is the head)...
        tmp = self.head
        while tmp.next != self.head:
            tmp = tmp.next
        # ...relink it to the new node and move the head.
        tmp.next = newNode
        self.head = newNode

    def search(self, element):
        """Return `element` if it is present in the list, otherwise None."""
        tmp = self.head
        while tmp:
            if tmp.data == element:
                return element
            tmp = tmp.next
            if tmp == self.head:  # completed a full lap without a match
                break
        return None

    def delete(self, element):
        """Remove node(s) holding `element` (no-op when absent)."""
        if self.head is None:
            return
        if self.head.data == element:
            # BUGFIX: a single-node list must become empty; the original
            # left `head` pointing at the removed node forever.
            if self.head.next == self.head:
                self.head = None
                return
            # Find the last node so it can skip over the old head.
            tmp = self.head
            while tmp.next != self.head:
                tmp = tmp.next
            tmp.next = self.head.next
            self.head = self.head.next
            return
        # Delete matching non-head nodes: walk one lap with a prev/node pair.
        prev = self.head
        node = self.head.next
        while node != self.head:
            if node.data == element:
                prev.next = node.next
                node = node.next
            else:
                prev = node
                node = node.next

    def printList(self):
        """Print every element with its 1-based position."""
        # BUGFIX: the original dereferenced tmp.next on an empty list and
        # crashed with AttributeError; print nothing instead.
        if self.head is None:
            return
        tmp = self.head
        count = 1
        while tmp.next != self.head:
            print("Element", count, "has value", tmp.data)
            tmp = tmp.next
            count = count + 1
        print("Element", count, "has value", tmp.data)
if __name__=="__main__":
    # Demo run. (Note: the variable name shadows the builtin `list`.)
    list = CircularList()
    # Insert 123 elements (0..122); each insert becomes the new head.
    for i in range (0 ,123):
        list.insertAtFront(i)
    # Delete the head of the list (the last value inserted, 122).
    list.delete(122)
    # Print the remaining elements in order from the head.
    list.printList()
9b71b4305e40899b8d3a78aa7a71f8396dc6efc5 | Python | ggdario/Redes | /methods/exp_tree.py | UTF-8 | 4,875 | 3.3125 | 3 | [] | no_license | import random
import itertools
import pandas as pd
def exp_tree(ed, v, pesos=None):
    '''
    Builds a spanning (expansion) tree connecting all vertices, at minimum
    cost when edge weights are given (Kruskal-style, with "buckets" acting
    as the connected components).

    Parameters
    ----------
    ed: list(str)
        graph edges, each encoded as "a-b" (vertex names without '-')
    v: list(str)
        graph vertices
    pesos: list (optional)
        weight of each edge, aligned with `ed`

    Side effects
    ------------
    Prints the per-iteration trace (pandas DataFrame) and, when weights
    are given, the total cost of the tree. Mutates `ed` in place
    (self-loops are removed). Returns None.
    '''
    blue = []     # edges accepted into the tree
    orange = []   # edges rejected (they would close a cycle)
    buckets = []  # connected components discovered so far
    results = []  # per-iteration trace for the final report

    # Self-loops ("a-a") can never be part of a tree, so drop them first.
    # BUGFIX: the original did `ed.pop(edge.index(edge))`, which always
    # removed ed[0] (edge.index(edge) on a string is 0) and mutated the
    # list while iterating over it. Filter safely instead, keeping the
    # weights aligned with the surviving edges.
    if pesos is not None:
        pesos = [w for e, w in zip(ed, pesos) if e.split('-')[0] != e.split('-')[1]]
    ed[:] = [e for e in ed if e.split('-')[0] != e.split('-')[1]]

    # First edge: the cheapest one when weights are given, random otherwise.
    if pesos is not None:
        edge_ = [e for e, _ in sorted(zip(ed, pesos), key=lambda par: par[1])]
        edge0 = edge_[0]
    else:
        edge_ = ed
        edge0 = random.choice(ed)

    # The first edge always joins the tree; its endpoints seed the first
    # bucket, and the edge is removed from the pending list.
    blue.append(edge0)
    edge_.pop(edge_.index(edge0))
    buckets.append([edge0.split('-')[0], edge0.split('-')[1]])
    bu = buckets.copy()
    results.append([0, [edge0], [], bu])

    i = 0
    end = False
    while not end:
        v1b = None
        v2b = None
        i = i + 1
        # Next candidate: cheapest remaining edge, or a random one.
        if pesos is None:
            edge_random = random.choice(edge_)
        else:
            edge_random = edge_[0]
        v1 = edge_random.split('-')[0]
        v2 = edge_random.split('-')[1]
        # Locate the bucket (component) of each endpoint, if any.
        for bucket in buckets:
            if v1 in bucket: v1b = buckets.index(bucket)
            if v2 in bucket: v2b = buckets.index(bucket)
            if (v1b is not None) and (v2b is not None):
                break
        if (v1b == v2b) and (v1b is not None):
            # Both endpoints already connected: the edge would close a cycle.
            orange.append(edge_random)
            edge_.pop(edge_.index(edge_random))
            results_ = [i, blue[:], orange[:], bu]
        elif (v1b is None) and (v2b is None):
            # Neither endpoint seen yet: accept the edge, open a new bucket.
            blue.append(edge_random)
            edge_.pop(edge_.index(edge_random))
            bu = buckets.copy()
            bu.append([v1, v2])
            results_ = [i, blue[:], orange[:], bu]
            buckets.append([v1, v2])
        elif v1b != v2b:
            if v1b is None:
                # v2's bucket absorbs the new vertex v1.
                blue.append(edge_random)
                edge_.pop(edge_.index(edge_random))
                bu = buckets.copy()
                results_ = [i, blue[:], orange[:], bu[:]]
                buckets[v2b].append(v1)
            elif v2b is None:
                # v1's bucket absorbs the new vertex v2.
                blue.append(edge_random)
                edge_.pop(edge_.index(edge_random))
                bu = buckets.copy()
                results_ = [i, blue[:], orange[:], bu[:]]
                buckets[v1b].append(v2)
            else:
                # Two distinct buckets: accept the edge and merge them.
                blue.append(edge_random)
                edge_.pop(edge_.index(edge_random))
                bu = buckets.copy()
                bu[v1b] = bu[v1b] + bu[v2b]
                bu.pop(bu.index(bu[v2b]))
                results_ = [i, blue[:], orange[:], bu[:]]
                buckets[v1b] = buckets[v1b] + buckets[v2b]
                buckets.pop(buckets.index(buckets[v2b]))
        results.append(results_.copy())
        # Done once a single bucket contains every vertex.
        if (len(buckets) == 1) and (sorted(list(itertools.chain.from_iterable(buckets))) == sorted(v)):
            end = True

    results_df = pd.DataFrame(results, columns=["Iter", "Blue", "Orange", "Buckets"])
    print(results_df.to_string(index=False))
    # Total cost = sum of the weights of the accepted (blue) edges.
    if pesos is not None:
        coste = 0
        for edge in blue:
            coste = coste + pesos[ed.index(edge)]
        print(f'\n El coste total es {coste}')
9c5677075efa00c08a6b69aea7e4728f37bb3821 | Python | lavron/neopixel-uanimate | /neopixel_animate.py | UTF-8 | 4,485 | 3.109375 | 3 | [
"MIT"
] | permissive | import utime
import math
import random
class NeopixelAnimate:
    """Base class for time-driven NeoPixel strip animations (MicroPython).

    Subclasses override frame(offset) to fill self.leds for a normalised
    time offset in [0, 1). Timing uses utime tick counters. Recognised
    **params keys (optional): 'loop' (bool, default True) and 'callback'
    (callable invoked from stop()).
    """
    def __init__(self, strip_len, duration_ms=0, **params):
        self.len = strip_len            # number of pixels on the strip
        self.params = params
        self.duration_ms = duration_ms  # length of one animation cycle
        self.start_ms = 0               # tick value captured by start()
        self.now_ms = utime.ticks_ms()
        self.status = False # passive (True while an animation is running)
        self.loop = params.get("loop") if 'loop' in params else True
        self.leds = [0] * self.len      # current frame, one colour per pixel
        self.frame_count = 0
    def start(self):
        # Mark the animation active and remember its starting tick.
        self.start_ms = utime.ticks_ms()
        self.status = True # active
    def stop(self):
        # Fire the optional completion callback, then go passive.
        if 'callback' in self.params:
            callback = self.params.get("callback")
            callback()
        self.status = False
        self.frame_count = 0
    def frame(self, offset):
        # Default frame: leave the pixel buffer untouched (subclasses override).
        return self.leds
    def fill(self, color):
        # Paint every pixel of the current frame with `color`.
        for i in range(self.len):
            self.leds[i] = color
    def get_frame(self):
        """Advance the animation clock and return the frame, or False.

        Returns False when no animation is active, or a non-looping
        animation just finished; otherwise renders the frame for the
        current position in the cycle and returns self.leds.
        """
        if self.status == False:
            # bail, no active animation
            return False
        self.frame_count += 1
        self.now_ms = utime.ticks_ms()
        passed_ms = utime.ticks_diff(self.now_ms, self.start_ms)
        if passed_ms >= self.duration_ms:
            # animation time is over; restart immediately when looping
            self.stop()
            if not self.loop:
                return False
            self.start()
        # Normalised position within the cycle, in [0, 1).
        # NOTE(review): duration_ms == 0 (the default) divides by zero here.
        offset = (passed_ms % self.duration_ms)/self.duration_ms
        self.frame(offset)
        return self.leds
class RainbowAnimation(NeopixelAnimate):
    """Spreads a full hue wheel along the strip, rotated by the time offset."""

    def frame(self, offset):
        saturation = 1.0
        brightness = 0.2
        strip_len = self.len
        for idx in range(strip_len):
            # 360 degrees of hue across the strip, shifted by `offset`.
            hue = (360 * (offset + idx / strip_len)) % 360
            self.leds[idx] = hsv2rgb(hue, saturation, brightness)
class PulseAnimation(NeopixelAnimate):
    """Pulses the whole strip by modulating the colour's value with a triangle wave."""

    def frame(self, offset):
        # Scale the configured colour's brightness by the wave, then fill.
        hue, saturation, value = rgb2hsv(self.params.get("color"))
        pulsed = hsv2rgb(hue, saturation, value * wave(offset))
        self.fill(pulsed)
class RotateAnimation(NeopixelAnimate):
    """Rotates a half-strip bar of `color` over a `color_bg` background."""

    def frame(self, offset):
        fg = self.params.get("color")
        bg = self.params.get("color_bg")
        direction = self.params.get("dir") if 'dir' in self.params else 'cw'
        n = self.len
        bar_width = n // 2
        shift = int(offset * n)
        # Pixel i shows the bar when its unshifted position (i + shift,
        # wrapped) falls inside the bar's width.
        frame_leds = [fg if (i + shift) % n < bar_width else bg for i in range(n)]
        if direction == "ccw":
            frame_leds.reverse()
        self.leds = frame_leds
def hsv2rgb(h, s, v):
    """Convert HSV (h in degrees, s and v in 0..1) to an (r, g, b) tuple of ints 0..255."""
    h, s, v = float(h), float(s), float(v)
    sector = h / 60.0
    base = math.floor(sector)
    f = sector - base
    sector_idx = int(base) % 6
    p = v * (1 - s)
    q = v * (1 - f * s)
    t = v * (1 - (1 - f) * s)
    # One (r, g, b) arrangement per 60-degree sector of the hue wheel.
    arrangements = (
        (v, t, p), (q, v, p), (p, v, t),
        (p, q, v), (t, p, v), (v, p, q),
    )
    r, g, b = arrangements[sector_idx]
    return int(r * 255), int(g * 255), int(b * 255)
def rgb2hsv(rgb):
    """Convert an (r, g, b) tuple (0..255 each) to (hue_degrees, sat, val)."""
    red, green, blue = (channel / 255.0 for channel in rgb)
    high = max(red, green, blue)
    low = min(red, green, blue)
    delta = high - low
    # Hue depends on which channel dominates; grey has no hue (0).
    if high == low:
        h = 0
    elif high == red:
        h = (60 * ((green - blue) / delta) + 360) % 360
    elif high == green:
        h = (60 * ((blue - red) / delta) + 120) % 360
    else:  # high == blue
        h = (60 * ((red - green) / delta) + 240) % 360
    s = 0 if high == 0 else delta / high
    return h, s, high
def wave(offset):
    """Triangle wave: ramps 0 -> 1 as offset goes 0 -> 0.5, then back to 0 at 1."""
    distance = offset if offset <= 0.5 else 1 - offset
    return distance * 2
def lerp(a, b, f):
    """Linear interpolation from a to b by fraction f (f=0 -> a, f=1 -> b)."""
    return a + (b - a) * f
def mix(a, b, t):
    """Blend colour `b` toward colour `a` by fraction `t` (sqrt-weighted mix).

    Note the weighting direction: t weights the FIRST colour's channels,
    so mix(a, b, 1) == a and mix(a, b, 0) == b.
    """
    blended = tuple(
        int(math.sqrt((1 - t) * cb ** 2 + t * ca ** 2))
        for ca, cb in zip(a, b)
    )
    return blended
# Rotating channel index shared across random_color() calls.
count = 0
def random_color():
    """Return a pseudo-random (r, g, b) tuple and print it.

    One channel gets a random value, another gets its complement to 255,
    and the third is zeroed; the module-level `count` rotates which
    channel plays which role between calls.
    """
    global count
    color = [0,0,0]
    color[count] = random.randrange(256)
    # NOTE(review): randrange(1) always returns 0, so a0 is deterministic
    # here -- randrange(2) may have been intended. Left unchanged.
    a0 = random.randrange(1)
    a1 = ((1-a0)+count+1) % 3
    a0 = (count+a0+1) % 3
    color[a0] = 255-color[count]
    color[a1] = 0
    count += random.randrange(15)
    count %= 3
    print(color[0], color[1], color[2])
    return ( color[0], color[1], color[2])
| true |
af5a90c037ae65d09d102b3e077b09c9eb866e3a | Python | Akshaya-CIET/Python-Passion | /fibo13.py | UTF-8 | 571 | 3.875 | 4 | [] | no_license | '''13.Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four
million, write a program to find the sum of the even-valued terms.
'''
# Project Euler #2: sum the even-valued Fibonacci terms below four million.
# `prev` and `cur` walk the sequence pairwise starting from 1, 2.
prev = 1
cur = 2
total = 0
while cur < 4000000:
    # BUGFIX: the original summed every *second* term (2, 5, 13, 34, ...)
    # instead of testing for even values; only even terms belong in the
    # sum (expected result: 4613732).
    if cur % 2 == 0:
        total += cur
    prev, cur = cur, prev + cur
# Parenthesised print works under both Python 2 (the file's dialect) and 3.
print(total)
| true |
963d2f2b9b6a79491989f74dc99c7bbae43df144 | Python | wlokhorst/proc10xG | /samConcat2Tag.py | UTF-8 | 5,900 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
'''
Copyright 2017 Matt Settles
Created June 8, 2017
Revised by Wouter Lokhorst, 5FEB2019
bwa mem -C option concatenates the fasta/fastq
CTACATTGTCAAGGGT:E00558:34:HGCJ3ALXX:1:1101:2108:1731 99 000000F 9225\
71 60 127M = 922961 517 ACTCGGGGAGGTGTTAGCTGCTGCCTCACACA\
TTGGGTTTATAGGCTGAATCTTGTTCTCTTTAGGCTTCCAGAGTTTTCTCAGTTACTATTTCTCCTGTCACATACT\
CGCTGCTTCTTCTGTCATA JJJJJJ<JJF<7A7FJJJJJJ<JJJAJAJJJFJFFFJ----AJJFJ---7---<FJ\
J<JF<7FFFJJJFJJAJF-AAFFFFF-AFJF7FF<A--FJJJAF)-7-77<<7--)7)<<--77A7-<--< NM:i\
:3 MD:Z:74T34A3T13 AS:i:112 XS:i:19 1:N:0:GOOD:CCGATTAA:CTACATTGTCAA\
GGGT:<AAFFJJFJJFJJJJJ:CCAGTGA:J<FFFJJ
This pulls it out, 9 columns and produces new 10x tags in the bam then write\
s to out
'''
import sys
import os
import argparse
from itertools import islice
#import multiprocessing as mp
#lock = mp.Lock()
def make_output_file(out_name):
    """Create (or truncate) the output file at *out_name*."""
    # Opening in write mode and immediately closing leaves an empty file.
    open(out_name, "w").close()
def write_line(out_name, line):
    """Append *line* to the named file, or send it to stdout.

    A string argument is treated as a file path and opened in append
    mode; any non-string (e.g. False) routes the line to standard output.
    """
    if not isinstance(out_name, str):
        sys.stdout.write(line)
        return
    with open(out_name, "a") as out:
        out.write(line)
def extract_tag(line, out_name):
    """Extracts the GEM tag for a single alignment line.

    Rewrites the bwa mem '-C' concatenated read comment (the final
    column) into proper 10x SAM tags, then emits the line via write_line().
    Input:
    - line: string (the alignment line)
    - out_name: either a string (full path of the output file) or stdout
    Output:
    - An alignment file without the GEM tags
    """
    line2 = line.strip().split()
    # Comment/header lines start with @
    if line[0] != "@" and len(line2) > 2:
        # Handle TAG:
        # get the final concatenated tag
        tag = line2[-1]
        if (tag[0:6] in ['1:N:0:', '2:N:0:']):
            # After the 4th ':' the comment is expected to hold five
            # '_'-separated fields, used below as ST (status), RX/QX
            # (gem barcode + quality) and TR/TQ (trim seq + quality).
            tsplit = tag.split(":", 4)
            tsplit2 = tsplit[4].split("_")
            if len(tsplit2) != 5:
                # Malformed concatenated comment: abort the whole run.
                sys.stderr.write("SAMCONCAT\tERROR\tsam file has \
concatenated info, but its the wrong size")
                sys.exit(1)
            # Drop the raw comment column and append the proper tags.
            line2 = line2[0:-1]
            line2.extend(["ST:Z:" + tsplit2[0],
                          "BX:Z:" + line2[0].split(":")[0] + "-1",
                          "BC:Z:" + tsplit[3],
                          "QT:Z:" + '!' * len(tsplit[3]),
                          "RX:Z:" + tsplit2[1],
                          "QX:Z:" + tsplit2[2],
                          "TR:Z:" + tsplit2[3],
                          "TQ:Z:" + tsplit2[4]])
            write_line(out_name, '\t'.join(line2) + '\n')
        else: # Does not contain a concatenated tag as expected by bwa mem
            write_line(out_name, line)
    else: # Its the header lines, so just put back on the stream/file
        write_line(out_name, line)
class ArgumentParserError(Exception):
    """Raised in place of argparse's default exit-on-error behaviour.

    Lets callers catch command-line parsing failures with try/except.
    """
    pass
class ThrowingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant whose error() raises instead of exiting.

    Redefines the argparse error, to be able to catch it with try/except.
    """
    def error(self, message):
        # argparse calls error() on any parse failure; raise our exception
        # (carrying argparse's message) instead of printing and exiting.
        raise ArgumentParserError(message)
def handle_args():
    """Handles arguments both in the command line and in IDLE.
    Output:
        Tuple, consisting of:
        - string (input filename or stdin)
        - string (output filename or stdout)
        - integer (number of CPUs)
    """
    version_num = "0.0.2"
    # Build the parser AND parse inside the try block: ThrowingArgumentParser
    # only raises ArgumentParserError from error(), which happens during
    # parse_args() -- the original try covered just the constructor, so bad
    # arguments produced an uncaught traceback instead of the friendly hint.
    try:
        argparser = ThrowingArgumentParser(prog=sys.argv[0],
            description='samConcat2Tag, processes bwa mem sam format where \
the read comment has been appended to the mapping line following process_10\
xReads.py', epilog='For questions or comments, please contact Matt Settles \
<settles@ucdavis.edu>\n%(prog)s version: ' + version_num, add_help=True)
        # Adds the positional arguments.
        argparser.add_argument('inputfile', metavar='inputsam', type=str,
            nargs='?', help='Sam file to process [default: %(default)s]',
            default="stdin")
        # Adds the optional arguments.
        argparser.add_argument('--version', action='version',
            version="%(prog)s version: " + version_num)
        # TODO: ADD parameter for sample ID
        argparser.add_argument('-o', '--output_base',
            help="Directory + prefix to output, [default: %(default)s]",
            action="store", type=str, dest="output_base", default="stdout")
        argparser.add_argument("-@", "--cpus",
            help="The number of CPUs to use.", type=int, default=1)
        # Parses the arguments given in the shell.
        args = argparser.parse_args()
    except ArgumentParserError:
        print("Please run this script on the command line, with the \
correct arguments. Type -h for help.\n")
        sys.exit()
    # Unpack the parsed namespace into the documented result tuple.
    inp = args.inputfile
    outb = args.output_base
    cpus = args.cpus
    return inp, outb, cpus
if __name__ == "__main__":
    inp, outb, cpus = handle_args()
    if outb == "stdout":
        # Non-string sentinel: write_line() sends output to stdout for
        # anything that is not a str. (BUGFIX: the original said FALSE,
        # an undefined name, which raised NameError here.)
        out = False
    else:
        out = outb + ".sam"
        make_output_file(out)
    if inp == 'stdin':
        # Reading from stdin.
        insam = sys.stdin
    else:
        if not os.path.exists(inp):
            sys.exit("Error, can't find input file %s" % inp)
        insam = open(inp, 'r')
    # Maximum number of concurrent processes is the given number of CPUs.
    #P = mp.Pool(cpus)
    # Read the file line by line, without loading it all into memory.
    while True:
        chunk = list(islice(insam, 1))
        if not chunk:
            break
        line = chunk[0]
        extract_tag(line, out)
        #P.apply_async(extract_tag,args=(line, out,))
    #P.close()
    #P.join()
    # BUGFIX: the original tested `isinstance(insam, str)`, which is never
    # true (insam is a stream), so the input file was never closed.
    if insam is not sys.stdin:
        insam.close()
| true |
6e1b62d4d868e67065815e2b37fce30b82d2bbb4 | Python | sandyqlin/python_projects | /HUD_project_2.py | UTF-8 | 3,959 | 4.1875 | 4 | [] | no_license | ###---Automate Repetitive Tasks in Python---###
# Automate the same processing for the 2005 and 2007 HUD datasets.
import pandas

# Load each survey year into its own DataFrame.
housing_2007 = pandas.read_csv("Hud_2007.csv")
housing_2005 = pandas.read_csv("Hud_2005.csv")

# Tag every frame with its survey year so rows stay identifiable later.
housing_2005['year'] = '2005'
housing_2007['year'] = '2007'

# Group both frames in a single list so the same logic can be applied to
# every DataFrame with one pass.
data_frames_list = [housing_2005, housing_2007]

# Sanity check: the list should now hold two DataFrame objects.
print(len(data_frames_list))

# Column filtering: keep only the columns we are interested in for 2007.
columns = ["AGE1", "FMR", "TOTSAL", "year"]
filtered_housing_2007 = housing_2007[columns]
# Reusable column filter so the same reduction applies to every DataFrame.
def filter_columns(data_frames_list):
    """Return a new list where each DataFrame keeps only the columns of interest."""
    # The four columns every downstream step relies on.
    wanted = ['AGE1', 'FMR', 'TOTSAL', 'year']
    # df[wanted] builds a fresh, column-filtered DataFrame for each input frame.
    return [df[wanted] for df in data_frames_list]
# Apply the column filter to every DataFrame at once.
filtered_data_frames_list = filter_columns(data_frames_list)

# Verification: confirm each filtered DataFrame kept only the expected columns.
for frame in filtered_data_frames_list:
    print(frame.columns)

# Count, per dataset, how many rows report a negative AGE1 value.
for frame in filtered_data_frames_list:
    # First row's year labels the whole frame (every row shares one year).
    survey_year = frame['year'][0]
    negatives = frame[frame['AGE1'] < 0]
    print(str(survey_year) + " - " + str(len(negatives)) + " rows")
# Multiple Dataset Cleanup
def clean_rows(filtered_data_frames_list):
    """Drop rows whose AGE1 value is not positive from every DataFrame."""
    # Boolean-mask each frame; only rows with AGE1 > 0 survive.
    return [df[df['AGE1'] > 0] for df in filtered_data_frames_list]
# Run the positive-AGE1 row cleanup over every filtered DataFrame.
cleaned_data_frames_list = clean_rows(filtered_data_frames_list)
| true |
d4f9919f088817629690b6774974d18df6aa20c8 | Python | XMirtX/Implantacion-de-aplicaciones-web | /ut2/a3/Programa4.py | UTF-8 | 232 | 3.546875 | 4 | [] | no_license | import sys
# Upper bound comes from the first command-line argument.
number = int(sys.argv[1])
if number <= 0:
    print("Error es negativo")
else:
    # Build each factorial incrementally: i! = (i-1)! * i. The original
    # recomputed the whole product from 1 for every i (O(n^2) multiplies);
    # accumulating gives the same printed output in O(n).
    factor = 1
    for i in range(1, number + 1):
        factor *= i
        print(i, "!", factor)
| true |