blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
25395ce50679ac4cca22f0d53270436ad9edc654 | Python | MegyAnn/isacademy | /day2/dodawanie.py | UTF-8 | 106 | 3.53125 | 4 | [] | no_license | x = 0.3
# 0.1 has no exact binary (IEEE-754) representation, so summing it three
# times accumulates rounding error: 0.1 + 0.1 + 0.1 != 0.3 exactly.
y = 0.1 + 0.1 + 0.1
if x == y:
    print("x jest rowny y")
else:
    # This branch is the one actually taken -- compare floats with
    # math.isclose() (or an epsilon) instead of ==.
    print("x jest rozny od y")
| true |
57b62639d2212e3fd7962fd68a88b67e9f3b90ce | Python | Certinax/cst383-data-science | /spyderProjects/19-linear-regression.py | UTF-8 | 520 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 24 17:41:47 2019
@author: certinax
"""
import numpy as np
import pandas as pd
from scipy.stats import zscore
import matplotlib.pyplot as plt
import seaborn as sns
# Load the CPU-performance ("machine") dataset over the network.
df = pd.read_csv("https://raw.githubusercontent.com/grbruns/cst383/master/machine.csv")
# Index rows by "<vendor> <model>" and drop the now-redundant text columns.
df.index = df['vendor']+' '+df['model']
df.drop(['vendor', 'model'], axis=1, inplace=True)
# Derive clock speed from cycle time; per the inline comment 1e3/myct yields
# MHz, which implies myct is in nanoseconds -- TODO confirm against the dataset.
df['cs'] = np.round(1e3/df['myct'], 2) # clock speed in MHz (millions of cycles/sec)
# Scatter-matrix of all numeric column pairs.
sns.pairplot(df)
| true |
a7b180a702b2a0fd02d08a4bd7db72c0a862b4c8 | Python | laknath123/coverletter_generator_heinz | /job_descrip.py | UTF-8 | 775 | 2.96875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 12:30:30 2021
@author: lakna
"""
def enter_job_descrip():
    """Prompt for an internship description, strip punctuation and stopwords,
    then print the NLTK named-entity chunk tree of the remaining words.
    """
    import nltk
    from nltk import word_tokenize
    from nltk import pos_tag
    from nltk.corpus import stopwords
    sent = input("Enter the Internship Description :")
    words = word_tokenize(sent)
    # Keep alphabetic tokens only, lower-cased (drops punctuation/numbers).
    words_no_punc = [w.lower() for w in words if w.isalpha()]
    # Distinct name so the imported stopwords module is not shadowed.
    stop_words = stopwords.words("english")
    clean_words = [w for w in words_no_punc if w not in stop_words]
    # ne_chunk requires POS-tagged (word, tag) pairs, not bare strings;
    # binary=True collapses entity labels into a single NE class.
    descrip = nltk.ne_chunk(pos_tag(clean_words), binary=True)
    print(descrip)
if __name__ == "__main__": # two underscores each
enter_job_descrip() | true |
666e02cd5d9c0dce5dcf5837e17e250e50e8bccf | Python | jarvisteach/appJar | /examples/issues/issue235_lab.py | UTF-8 | 1,737 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
    # Open the pop-up variant named by the pressed button, echo the
    # dialog's return value, then recolour the "title" label red.
    result = app.popUp("a", "b", btn)
    print(result)
    app.label("title", bg="red")
def press2(btn):
    # Prompt-style dialog variant; echoes whatever the user entered.
    print(app.prompt("a", "b", btn))
def press3(btn):
    # Generic debug handler: just echo the widget/event name.
    print(str(btn))
def fonts(btn):
    # Map each font button to the matching appJar method name and invoke
    # it; unrecognised names are silently ignored (as in the elif chain).
    dispatch = {
        "BUT+": "increaseButtonFont",
        "BUT-": "decreaseButtonFont",
        "LAB+": "increaseLabelFont",
        "LAB-": "decreaseLabelFont",
        "ALL+": "increaseFont",
        "ALL-": "decreaseFont",
    }
    method_name = dispatch.get(btn)
    if method_name is not None:
        getattr(app, method_name)()
with gui("Simple Demo") as app:
    # Default font for labels etc.; buttons get their own font below.
    app.setFont(size=16, family="Times", underline=True, slant="italic")
    app.setButtonFont(size=14, family="Verdana", underline=False, slant="roman")
    # Labels exercising the appJar keyword features under test (issue 235):
    # tooltips, context menus, submit/over/change handlers, drag & drop.
    app.label("title", "TOOLTIP & MENU", bg="green", fg="blue", pos=(0, 0), tooltip="some info", menu=True, anchor="e")
    app.label("title2", "SUBMIT", bg="red", fg="white", pos=(0, 1), submit=press)
    app.label("title3", "CHANGE & OVER", bg="orange", fg="black", pos=(1,0), over=[press3, press3], change=press)
    app.label("title4", "DRAG & DROP", bg="pink", fg="yellow", pos=(1, 1), drop=True, drag=(press3, press3))
    app.label("title5", "FLASH", kind="flash", pos=(2, 0), bg="orange", drop=True, drag=press2)
    app.label("title6", "SELECTABLE", kind="selectable", pos=(2,1), bg="green", drop=True)
    app.message("mess1", pos=(None, 0, 2), drop=True, over=press3, bg="green", tooltip="message here")
    # Button banks: pop-up kinds -> press, prompt kinds -> press2,
    # font +/- buttons -> fonts.
    app.addButtons(["info", "error", "warning", "yesno", "question", "ok", "retry"], press, colspan=2)
    app.addButtons(["string", "integer", "float", "text", "number", "junk"], press2, colspan=2)
    app.addButtons(["BUT+", "BUT-", "LAB+", "LAB-", "ALL+", "ALL-"], fonts)
| true |
fb88ac412a804bdc40a79f7a6d9f44d4a412d194 | Python | liouver/motion_of_electrons | /motion_of_longitudinal.py | UTF-8 | 1,306 | 3.140625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
pi = np.pi
def cal_phase(phi_s, phi, H_phi):
    """Trace the phase-space contour DW^2 = phi*cos(phi_s) - sin(phi) + H_phi.

    Returns two parallel lists: phase values and the corresponding energy
    deviation DW, covering the positive branch (phi descending) followed by
    the mirrored negative branch (phi ascending).
    """
    DW2 = phi * np.cos(phi_s) - np.sin(phi) + H_phi
    keep = DW2 > 0
    # Positive branch, walked from the largest phi back down to the smallest.
    upper_phi = list(phi[keep][::-1])
    upper_DW = list(np.sqrt(DW2[keep])[::-1])
    # Negative branch: the same points in reverse order with DW mirrored.
    lower_phi = upper_phi[::-1]
    lower_DW = [-w for w in upper_DW[::-1]]
    return upper_phi + lower_phi, upper_DW + lower_DW
def main():
    """Plot phase-space trajectories for several values of the Hamiltonian
    H(phi): the separatrix-like contour for H(phi_s) and three offsets.
    """
    # Synchronous phase (radians); contours are computed about this value.
    phi_s = -1 * pi / 4
    phi = np.arange(-pi, pi / 2, 0.00001 * pi)
    # Baseline Hamiltonian value evaluated at phi_s.
    H_phi = - np.sin(phi_s) + phi_s * np.cos(phi_s)
    phi, DW = cal_phase(phi_s, phi, H_phi)
    # Same contour with H shifted by +0.1 (outside the bucket).
    phi1 = np.arange(-pi, pi / 2, 0.00001 * pi)
    H_phi1 = - np.sin(phi_s) + phi_s * np.cos(phi_s) + 0.1
    phi1, DW1 = cal_phase(phi_s, phi1, H_phi1)
    # Two inner contours (H shifted by -0.1 and -0.2) over a narrower range.
    phi2 = np.arange(-pi, pi / 6, 0.00001 * pi)
    H_phi2 = - np.sin(phi_s) + phi_s * np.cos(phi_s) - 0.1
    phi2, DW2 = cal_phase(phi_s, phi2, H_phi2)
    phi3 = np.arange(-pi, pi / 6, 0.00001 * pi)
    H_phi3 = - np.sin(phi_s) + phi_s * np.cos(phi_s) - 0.2
    phi3, DW3 = cal_phase(phi_s, phi3, H_phi3)
    fig, ax = plt.subplots()
    plt.plot(phi, DW, phi1, DW1, phi2, DW2, phi3, DW3)
    plt.show()
if __name__ == '__main__':
    main()
| true |
77b570028119193cca31edbb78ed18fa522e4a2f | Python | matthew-cheney/kattis-solutions | /solutions/elementarymath.py | UTF-8 | 1,599 | 2.890625 | 3 | [] | no_license | N = int(input())
# Build a bipartite graph: problem nodes 0..N-1 on the left, distinct answer
# values (nodes N..) on the right; each problem connects to its three
# possible answers l+r, l-r, l*r.
G = [[] for n in range(N)]
# NOTE(review): problemKey and answerKey are never used below; also the
# [[]]*N form would alias one shared list if they ever were used.
problemKey = [[]]*N
answerKey = [tuple()]*N
# Maps an answer value -> its node index (>= N).
revAnswerKey = dict()
# nodeKey[n] for n < N holds [l, r]; appended entries hold answer values.
nodeKey = [0]*N
ans_i = N
for n in range(N):
    l, r = [int(ea) for ea in input().split(' ')]
    nodeKey[n] = [l, r]
    # Allocate a node for each distinct answer value on first sight.
    if l + r not in revAnswerKey:
        revAnswerKey[l + r] = ans_i
        nodeKey.append(l + r)
        ans_i += 1
    if l - r not in revAnswerKey:
        revAnswerKey[l - r] = ans_i
        nodeKey.append(l - r)
        ans_i += 1
    if l * r not in revAnswerKey:
        revAnswerKey[l * r] = ans_i
        nodeKey.append(l * r)
        ans_i += 1
    G[n].append(revAnswerKey[l + r])
    G[n].append(revAnswerKey[l - r])
    G[n].append(revAnswerKey[l * r])
# problems on left
# answers on right
# matched[i] = partner node index, or -1 if currently unmatched.
matched = [-1]*len(nodeKey)
visited = [0]*N
def augmenting_path(left):
    # One DFS step of Kuhn's maximum-bipartite-matching algorithm over the
    # module-level graph G, using the module-level matched/visited arrays.
    # Returns 1 if `left` ends up matched, 0 otherwise.
    global visited
    if visited[left]:
        return 0
    visited[left] = True
    for right in G[left]:
        owner = matched[right]
        # Skip answers whose current owner cannot be re-matched elsewhere.
        if owner != -1 and not augmenting_path(owner):
            continue
        matched[right] = left
        matched[left] = right
        return 1
    return 0
# Run Kuhn's algorithm from every problem node to get the size of the
# maximum cardinality bipartite matching.
MCBM = 0
for left in range(N):
    visited = [0]*N
    MCBM += augmenting_path(left)
# Every problem must receive a distinct answer value.
if MCBM < N:
    print('impossible')
    exit(0)
# Append each matched answer value onto its problem's [l, r] entry.
for i, ans in enumerate(matched[N:]):
    if ans > -1:
        nodeKey[ans].append(nodeKey[N + i])
# Emit the operator that produces the assigned answer for each problem.
for l, r, ans in nodeKey[:N]:
    if l + r == ans:
        print(l, '+', r, '= ', ans)
    elif l - r == ans:
        print(l, '-', r, '= ', ans)
    else:
        print(l, '*', r, '= ', ans)
| true |
c10eee408612c6a418bb4dda87973aac97205f1b | Python | isimic95/simple_rest_api | /nest.py | UTF-8 | 1,293 | 3.5625 | 4 | [] | no_license | import json
def recursive_f(d, output, level, levels):
    """Nest one record in place.

    Finds the key ``levels[level]`` in ``d``, removes it, and files the rest
    of the record under that key's value inside ``output``.  At the deepest
    level the remaining fields of ``d`` are stored as a one-element list.

    Assumes every key in ``levels`` is present in ``d`` (validated by nest()).
    """
    # Snapshot the items: we delete from d while walking it, and mutating a
    # dict during iteration raises RuntimeError on the next step.
    for key, value in list(d.items()):
        if key == levels[level]:
            del d[key]
            output[value] = {}
            level += 1
            if level == len(levels):
                # Deepest level reached: store the leftover fields.
                output[value] = [d]
                return
            break
    recursive_f(d, output[value], level, levels)
def nest(json_data, levels):
    """Nest a list of flat JSON records by the given sequence of keys.

    Returns (nested_dict, True) on success, or (error_message, False) when
    the level list is too long or names a key missing from a record.
    """
    result = {}
    for record in json_data:
        # At least one non-level field must remain in every record.
        if len(levels) >= len(record):
            return "Too many nesting levels specified, for this input maximum is 3", False
        for lvl in levels:
            if lvl not in record:
                return f"Invalid key provided, '{lvl}' does not exist in input", False
        recursive_f(record, result, 0, levels)
    return result, True
if __name__ == "__main__":
    # CLI entry point: nesting keys come from argv, records from ./input.json.
    import argparse
    parser = argparse.ArgumentParser(
        description="""
        Create a nested dictionary of dictionaries of array from a JSON input
        """
    )
    parser.add_argument("levels", nargs="+",
                        help="Keys on which to nest starting from highest level")
    args = parser.parse_args()
    # Reads a hard-coded file relative to the working directory.
    with open('input.json') as js:
        result, successful = nest(json.load(js), args.levels)
    # NOTE(review): `successful` is ignored; errors print as plain strings.
    print(result)
| true |
84e197a8d77a79842e3b744d2b365b45140fbf4d | Python | ZeljkoLupsa/Domaci-5 | /03_zadatak.py | UTF-8 | 549 | 3.6875 | 4 | [] | no_license | """
Napisati kod koji za datu osnovicu a i krak b jednakokrakog trougla
racuna povrsinu i zapreminu tijela koje se dobija rotacijom trougla oko visine
spustene na osnovicu.
"""
import math

# Rotating an isosceles triangle (base a, legs b) about the altitude dropped
# onto the base sweeps out a right circular cone: radius r = a/2, slant b.
a = 4
b = 5
r = (1/2) * a
# Cone height via the Pythagorean theorem: h^2 + r^2 = b^2.
h = math.sqrt(b * b - r * r)
povrsina_baze = r * r * math.pi        # base disc area, pi*r^2
povrsina_tijela = b * r * math.pi      # lateral surface, pi*r*slant
povrsina_kupe = povrsina_baze + povrsina_tijela
print('POVRSINA KUPE IZNOSI: ', povrsina_kupe)
zapremina_kupe = (povrsina_baze * h)/3  # cone volume, base*height/3
print('ZAPREMINA KUPE IZNOSI:', zapremina_kupe)
49c7fa4693b991a3e4dfc9a2edaddfec6165c05e | Python | kyungjunleeme/CodingTest | /programmers/연습문제/[Lv2] 124 나라의 숫자.py | UTF-8 | 122 | 2.53125 | 3 | [] | no_license | def solution(n):
q,r=divmod(n,3)
if r==0: q,r=q-1,4
if q==0: return str(r)
else: return solution(q)+str(r) | true |
f757b8bf33748fa82dcb96e306cd3bb337806b21 | Python | zhengjunyue/speech-inversion-dnn | /contextualize.py | UTF-8 | 1,859 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 13:20:07 2016
@author: ganesh
"""
import numpy as np
#function mat_contxt = contextualize3(mat_inp,context,step)
# mat_inp - Input feature matrix for one utterance (feature_dimension x num_of_frames)
# context - Number of adjacent frames to concatenate.
# If you say N, then N frames from right and N from the left are concatenated with current frame
# skip - Number of frames to skip while doing concatenation.
# If you say 2, then every alterante frame will be concatenated.
# 2 is better because you are having overlap of half frame in every adjacent frame
def contextualize(mat_inp,context,step):
    """Concatenate each frame with its temporal context.

    mat_inp: feature matrix (feature_dimension x num_frames).
    context: number of neighbouring frames to append on EACH side.
    step:    frames skipped between concatenated neighbours (stride).

    Returns a ((2*context+1)*feature_dimension x num_frames) matrix where,
    for each original feature row, the 2*context+1 context copies are
    interleaved together.  Edge frames are padded by replicating the first /
    last column.
    """
    feat_dim = mat_inp.shape[0]
    if context == 0:
        mat_contxt = mat_inp
    else:
        mat_g = mat_inp
        mat_rshft = mat_g
        mat_lshft = mat_g
        # Each pass shifts by `step` and stacks one more left/right copy
        # above and below the accumulated matrix.
        for iter1 in range(context):
            buff_r = []
            buff_l = []
            # Edge padding: replicate the boundary column `step` times.
            for iter2 in range(step):
                buff_r.append(mat_rshft[:,0,None])
                buff_l.append(mat_lshft[:,-1,None])
            buff_r = np.concatenate(buff_r, axis=1)
            buff_l = np.concatenate(buff_l, axis=1)
            # Shift right (past context) and left (future context) by `step`.
            mat_rshft = np.concatenate((buff_r, mat_rshft[:,0:-step]),axis=1)
            mat_lshft = np.concatenate((mat_lshft[:,step:], buff_l),axis=1)
            mat_g = np.concatenate((mat_rshft, mat_g, mat_lshft), axis=0)
        # Reorder rows so all context copies of feature i sit contiguously.
        mat_contxt = np.zeros(mat_g.shape)
        cdim = 2*context+1
        cfeat_dim = cdim*feat_dim
        for i in range(feat_dim):
            st = cdim*i
            en = st+cdim
            mat_contxt[range(st,en),:] = mat_g[range(i,cfeat_dim,feat_dim),:]
    return mat_contxt
#if __name__ == "__main__":
# a = np.round(np.random.rand(4,10)*100)
# print(a)
# b = contextualize(a,1,2)
# print(b)
| true |
52ef42c05a9018c3f7bb3de5c04dca0bae66f704 | Python | ermongroup/rbpf_fireworks | /general_tracking/targets.py | UTF-8 | 18,542 | 2.671875 | 3 | [] | no_license | import os
import errno
from collections import defaultdict
from collections import deque
from global_params import *
class TargetState:
#everything that uniquely defines a target at a single instance in time
    def __init__(self, cur_time, id_, measurement):
        '''
        Inputs:
        - cur_time: float, time of the detection this target is created from
        - id_: unique identifier for this target
        - measurement: detection object; must expose .x, .y, .width, .height
          (bounding-box center position and size) -- assumed from usage below

        (A SPEC dictionary with 'P', theta_death/alpha_death, border death
        probabilities, R and Q used to be documented here; see the
        commented-out self.P line below.)
        '''
        assert(measurement != None)
        #target state, [x, x_vel, y, y_vel].T
        #apologies for using x as the traditional Kalman filter state
        #and also as one of the coordinates in image space
        self.x = np.array([[measurement.x], [0], [measurement.y], [0]])
        #error covariance matrix of our estimated target state, self.x
#        self.P = SPEC['P']
        # Bounding-box size, carried alongside the kinematic state.
        self.width = measurement.width
        self.height = measurement.height
        assert(self.x.shape == (4, 1))
        self.birth_time = cur_time
        #Time of the last measurement data association with this target
        #Or the last time this target produced a measurement for data generation
        self.last_measurement_association = cur_time
        self.id_ = id_ #named id_ to avoid clash with built in id
        self.death_prob = -1 #calculate at every time instance
        #if target's predicted location is offscreen, set to True and then kill
        self.offscreen = False
        self.updated_this_time_instance = True
        #set to false when death is sampled during data generation
        self.alive = True
def near_border(self):
near_border = False
x1 = self.x[0][0] - self.width/2.0 #left edge of bounding box
x2 = self.x[0][0] + self.width/2.0 #right edge of bounding box
y1 = self.x[2][0] - self.height/2.0 #top of bounding box, (I think, assuming images are 0 at top)
y2 = self.x[2][0] + self.height/2.0 #bottom of bounding box, (I think, assuming images are 0 at top)
if(x1 < 10 or x2 > (CAMERA_PIXEL_WIDTH - 15) or y1 < 10 or y2 > (CAMERA_PIXEL_HEIGHT - 15)):
near_border = True
return near_border
def is_offscreen(self):
is_offscreen = False
x1 = self.x[0][0] - self.width/2.0 #left edge of bounding box
x2 = self.x[0][0] + self.width/2.0 #right edge of bounding box
y1 = self.x[2][0] - self.height/2.0 #top of bounding box, (I think, assuming images are 0 at top)
y2 = self.x[2][0] + self.height/2.0 #bottom of bounding box, (I think, assuming images are 0 at top)
if(x2 < 10 or x1 > (CAMERA_PIXEL_WIDTH - 15) or y2 < 10 or y1 > (CAMERA_PIXEL_HEIGHT - 15)):
is_offscreen = True
return is_offscreen
    def target_death_prob(self, cur_time, prev_time, SPEC):
        """ Calculate death probability for this target.
        Input:
        - cur_time: The current measurement time (float)
        - prev_time: The previous time step when a measurement was received (float)
        - SPEC: firework spec, for death probabilities
        Return:
        - death_prob: Probability of target death if this is the only target (float)
        """
        # USE_POISSON_DEATH_MODEL comes from global_params (star import).
        if USE_POISSON_DEATH_MODEL:
            #scipy.special.gdtrc(b, a, x) calculates
            #integral(gamma_dist(k = a, theta = b))from x to infinity
            last_assoc = self.last_measurement_association
#            if USE_GENERATED_DATA:
            # NOTE(review): this /10 time rescaling was once guarded by
            # USE_GENERATED_DATA (commented out above) and now always runs.
            cur_time = cur_time/10.0
            prev_time = prev_time/10.0
            last_assoc = self.last_measurement_association/10.0
#            #I think this is correct
#            death_prob = gdtrc(SPEC['theta_death'], SPEC['alpha_death'], prev_time - last_assoc) \
#                       - gdtrc(SPEC['theta_death'], SPEC['alpha_death'], cur_time - last_assoc)
#            death_prob /= gdtrc(SPEC['theta_death'], SPEC['alpha_death'], prev_time - last_assoc)
#            return death_prob
            #this is used in paper's code
            #Basically this is predicting death over the next time step, as opposed
            #to over the previous time step, which is what I wrote above
            time_step = cur_time - prev_time
            death_prob = gdtrc(SPEC['theta_death'], SPEC['alpha_death'], cur_time - last_assoc) \
                       - gdtrc(SPEC['theta_death'], SPEC['alpha_death'], cur_time - last_assoc + time_step)
            death_prob /= gdtrc(SPEC['theta_death'], SPEC['alpha_death'], cur_time - last_assoc)
            assert(death_prob >= 0.0 and death_prob <= 1.0), (death_prob, cur_time, prev_time)
            return death_prob
        else:
            # Table-driven model: probability depends on how many frames have
            # passed since the last association, and on border proximity.
            if(self.offscreen == True):
                cur_death_prob = 1.0
            else:
                frames_since_last_assoc = int(round((cur_time - self.last_measurement_association)/SPEC['time_per_time_step']))
                assert(abs(float(frames_since_last_assoc) - (cur_time - self.last_measurement_association)/SPEC['time_per_time_step']) < .00000001)
                if(self.near_border()):
                    if frames_since_last_assoc < len(SPEC['BORDER_DEATH_PROBABILITIES']):
                        cur_death_prob = SPEC['BORDER_DEATH_PROBABILITIES'][frames_since_last_assoc]
                    else:
                        # Past the end of the table: reuse the final entry.
                        cur_death_prob = SPEC['BORDER_DEATH_PROBABILITIES'][-1]
#                        cur_death_prob = 1.0
                else:
                    if frames_since_last_assoc < len(SPEC['NOT_BORDER_DEATH_PROBABILITIES']):
                        cur_death_prob = SPEC['NOT_BORDER_DEATH_PROBABILITIES'][frames_since_last_assoc]
                    else:
                        cur_death_prob = SPEC['NOT_BORDER_DEATH_PROBABILITIES'][-1]
#                        cur_death_prob = 1.0
            assert(cur_death_prob >= 0.0 and cur_death_prob <= 1.0), cur_death_prob
            return cur_death_prob
#################### Inference Methods ####################
######### def kf_update(self, measurement, meas_noise_cov, SPEC):
######### """ Perform Kalman filter update step and replace predicted position for the current time step
######### with the updated position in self.all_states
######### Input:
######### - measurement: the measurement (numpy array)
######### - cur_time: time when the measurement was taken (float)
######### - SPEC: fireworks spec with any extras we need, clean this up sometime
######### Output:
######### -updated_x: updated state, numpy array with dimensions (4,1)
######### -updated_P: updated covariance, numpy array with dimensions (4,4)
#########
#########!!!!!!!!!PREDICTION HAS BEEN RUN AT THE BEGINNING OF TIME STEP FOR EVERY TARGET!!!!!!!!!
######### """
######### if SPEC['USE_CONSTANT_R']:
######### S = np.dot(np.dot(H, self.P), H.T) + SPEC['R']
######### else:
######### S = np.dot(np.dot(H, self.P), H.T) + meas_noise_cov
######### K = np.dot(np.dot(self.P, H.T), inv(S))
######### residual = measurement - np.dot(H, self.x)
######### updated_x = self.x + np.dot(K, residual)
######### # updated_self.P = np.dot((np.eye(self.P.shape[0]) - np.dot(K, H)), self.P) #NUMERICALLY UNSTABLE!!!!!!!!
######### updated_P = self.P - np.dot(np.dot(K, S), K.T) #not sure if this is numerically stable!!
######### assert(updated_P[0][0] > 0 and
######### updated_P[1][1] > 0 and
######### updated_P[2][2] > 0 and
######### updated_P[3][3] > 0), (self.P, SPEC['R'], SPEC['USE_CONSTANT_R'], meas_noise_cov, K, updated_P)
########## print "kf_update called :)"
######### return (updated_x, updated_P)
#########
#########
######### def update(self, measurement, cur_time, meas_noise_cov):
######### """ Perform update step and replace predicted position for the current time step
######### with the updated position in self.all_states
######### Input:
######### - measurement: the measurement (numpy array)
######### - cur_time: time when the measurement was taken (float)
#########!!!!!!!!!PREDICTION HAS BEEN RUN AT THE BEGINNING OF TIME STEP FOR EVERY TARGET!!!!!!!!!
######### """
######### reformat_meas = np.array([[measurement.x],
######### [measurement.y]])
######### assert(self.x.shape == (4, 1))
#########
######### (self.x, self.P) = self.kf_update(reformat_meas, meas_noise_cov)
#########
######### assert(self.x.shape == (4, 1))
######### assert(self.P.shape == (4, 4))
#########
######### self.width = measurement.width
######### self.height = measurement.height
######### assert(self.all_time_stamps[-1] == round(cur_time, 2) and self.all_time_stamps[-2] != round(cur_time, 2))
######### assert(self.x.shape == (4, 1)), (self.x.shape, np.dot(K, residual).shape)
#########
########## self.all_states[-1] = (self.x, self.width, self.height)
######### self.updated_this_time_instance = True
######### self.last_measurement_association = cur_time
#########
#########
#########
######### def kf_predict(self, dt):
######### """
######### Run kalman filter prediction on this target
######### Inputs:
######### -dt: time step to run prediction on
######### Output:
######### -x_predict: predicted state, numpy array with dimensions (4,1)
######### -P_predict: predicted covariance, numpy array with dimensions (4,4)
#########
######### """
######### F = np.array([[1.0, dt, 0.0, 0.0],
######### [0.0, 1.0, 0.0, 0.0],
######### [0.0, 0.0, 1.0, dt],
######### [0.0, 0.0, 0.0, 1.0]])
######### x_predict = np.dot(F, self.x)
######### P_predict = np.dot(np.dot(F, self.P), F.T) + SPEC['Q']
######### assert(P_predict[0][0] > 0 and
######### P_predict[1][1] > 0 and
######### P_predict[2][2] > 0 and
######### P_predict[3][3] > 0), (self.P, SPEC['Q'], P_predict[0][0])
########## print "kf_predict called :)"
#########
######### return (x_predict, P_predict)
#########
######### def predict(self, dt, cur_time):
######### """
######### Run prediction on this target
######### Inputs:
######### -dt: time step to run prediction on
######### -cur_time: the time the prediction is made for
######### """
######### assert(self.all_time_stamps[-1] == round((cur_time - dt), 2))
######### (self.x, self.P) = self.kf_predict(dt)
#########
######### assert(self.x.shape == (4, 1))
######### assert(self.P.shape == (4, 4))
#########
########## self.all_states.append((self.x, self.width, self.height))
########## self.all_time_stamps.append(round(cur_time, 2))
#########
########## if(self.x[0][0]<0 or self.x[0][0]>=CAMERA_PIXEL_WIDTH or \
########## self.x[2][0]<0 or self.x[2][0]>=CAMERA_PIXEL_HEIGHT):
######### self.offscreen = self.is_offscreen()
#########
######### self.updated_this_time_instance = False
#########
#########
#################### Data Generation Methods ####################
def move(self, dt, process_noise):
"""
Update target state according to the linear motion model of the
target's state over the specified time interval plus noise
Leaves bounding box size unchanged, consider changing in the future
Inputs:
- dt: float, time step movement corresponds to
- process_noise: numpy array (4x4), add noise to the new target state drawn
from a Gaussian with this covariance
Output:
- none, but update the target's state and whether the target is offscreen
"""
F = np.array([[1.0, dt, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, dt],
[0.0, 0.0, 0.0, 1.0]])
noise = np.random.multivariate_normal([0,0,0,0], process_noise)
noise.shape = (-1, 1) #reshape to be a column vector
assert(noise.shape == self.x.shape)
self.x = np.dot(F, self.x) + noise
self.offscreen = self.is_offscreen()
def sample_measurement(self, meas_noise, cur_time):
"""
Sample a measurement for this target and update last_measurement_association to
the current time
Inputs:
- cur_time: float, time measurement is produced
- meas_noise: numpy array (2x2), add noise to the measurement drawn
from a Gaussian with this covariance
Output:
- sampled measurement (numpy array with shape (2,))
"""
self.last_measurement_association = cur_time
sampled_noise = np.random.multivariate_normal([0,0], meas_noise)
true_position = np.dot(H, self.x).reshape(-1)
measurement = true_position + sampled_noise
return measurement
class TargetTrack:
    """A single target's trajectory: a chronological sequence of bounding
    boxes, optionally continued from a parent track."""

    def __init__(self, parent_track = None):
        # Reference (not a copy) to the TargetTrack this one continues; the
        # parent's positions logically precede ours but are never copied.
        self.parent_track = parent_track
        # List of BoundingBox objects belonging to this track.
        self.bounding_boxes = []

    def add_bb(self, bounding_box):
        '''
        Append a bounding box (stored by reference) to this TargetTrack.
        Inputs:
        - bounding_box: type BoundingBox, the box being appended
        '''
        self.bounding_boxes.append(bounding_box)
#class TargetSet:
# #A sequence of positions for a single target
#
# def __init__(self, parent_track = None):
class TargetSet:
    """
    Contains ground truth states for all targets. Also contains all generated measurements.

    NOTE(review): this class uses Python 2 syntax (print statements,
    dict.iteritems) unlike the rest of the repo's py3-style files.
    """
    def __init__(self):
        '''
        Initialize an empty target set with no targets and no measurements.
        '''
        #list of type Target containing targets currently alive
        self.living_targets = []
        self.measurements = [] #generated measurements for a generative TargetSet
    def plot_all_target_locations(self, title):
        # Plot each target's 1-D location over time.
        # NOTE(review): relies on self.total_count / self.all_targets /
        # self.living_count, which __init__ never sets -- presumably assigned
        # elsewhere; confirm before calling.  plt is assumed to come in via
        # the global_params star import.
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        for i in range(self.total_count):
            life = len(self.all_targets[i].all_states) #length of current targets life
            locations_1D = [self.all_targets[i].all_states[j][0] for j in range(life)]
            ax.plot(self.all_targets[i].all_time_stamps, locations_1D,
                    '-o', label='Target %d' % i)
        legend = ax.legend(loc='lower left', shadow=True)
        plt.title('%s, unique targets = %d, #targets alive = %d' % \
            (title, self.total_count, self.living_count)) # subplot 211 title
    def plot_generated_measurements(self):
        # Scatter every generated measurement's first coordinate vs. time.
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # One (time, location) point per individual measurement value.
        time_stamps = [self.measurements[i].time for i in range(len(self.measurements))
                       for j in range(len(self.measurements[i].val))]
        locations = [self.measurements[i].val[j][0] for i in range(len(self.measurements))
                     for j in range(len(self.measurements[i].val))]
        ax.plot(time_stamps, locations,'o')
        plt.title('Generated Measurements')
    def write_measurements_to_KITTI_format(self, results_filename, SPEC, gt = False, plot_filename = None, plot_target_locations = False):
        '''
        Write all measurements as KITTI tracking-format "Car" rows, one line
        per bounding box, optionally plotting per-target trajectories.
        Inputs:
        - results_filename: path of the output file (parent dirs are created)
        - SPEC: dict; 'time_per_time_step' converts times to frame indices
        - gt: boolean, if true write ground truth target id's for each bounding box
        - plot_filename / plot_target_locations: enable and target the plot
        '''
        x_locations_all_targets = defaultdict(list)
        y_locations_all_targets = defaultdict(list)
        if not os.path.exists(os.path.dirname(results_filename)):
            try:
                os.makedirs(os.path.dirname(results_filename))
            except OSError as exc: # Guard against race condition
                # NOTE(review): errno must be imported at module level for
                # this handler to work -- confirm.
                if exc.errno != errno.EEXIST:
                    raise
        f = open(results_filename, "w")
        for measurement in self.measurements:
            frame_idx = int(round(measurement.time/SPEC['time_per_time_step']))
            for idx in range(len(measurement.val)):
                # Measurements store box centers; KITTI wants corner coords.
                x_pos = measurement.val[idx][0]
                y_pos = measurement.val[idx][1]
                width = measurement.widths[idx]
                height = measurement.heights[idx]
                if gt:
                    cur_id = measurement.ids[idx]
                else:
                    cur_id = -1
                left = x_pos - width/2.0
                top = y_pos - height/2.0
                right = x_pos + width/2.0
                bottom = y_pos + height/2.0
                f.write( "%d %d Car -1 -1 2.57 %d %d %d %d -1 -1 -1 -1000 -1000 -1000 -10 1\n" % \
                    (frame_idx, cur_id, left, top, right, bottom))
                x_locations_all_targets[cur_id].append(x_pos)
                y_locations_all_targets[cur_id].append(y_pos)
        f.close()
        #plot target locations
        if(plot_target_locations):
            assert(len(x_locations_all_targets) == len(y_locations_all_targets))
            print "plotting target locations, ", len(x_locations_all_targets), " targets"
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            for target_id, x_locations in x_locations_all_targets.iteritems():
                print "target", target_id, "is alive for", len(x_locations), "time instances"
                y_locations = y_locations_all_targets[target_id]
                ax.plot(x_locations, y_locations,
                        '-o', label='Target %d' % target_id)
#            legend = ax.legend(loc='lower left', shadow=True)
#            plt.title('%s, unique targets = %d, #targets alive = %d' % \
#                (title, self.total_count, self.living_count)) # subplot 211 title
            print "ABOUT TO TRY TO SAVE FIG!!!!!!!!!"
            fig.savefig(plot_filename)
            print "CALL MADE TO TRY TO SAVE FIG!!!!!!!!!"
| true |
34953e98429f1415b3fa16f2a491dce5dc3bbf5e | Python | jcostaroberts/mouseloaf | /mouseloaf/actor.py | UTF-8 | 999 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
class Actor(object):
def __init__(self, coordinator):
#self.name = "%s-%s" % \
# (self.__class__.__bases__[0].__name__.replace("Base", ""),
# self.__class__.__name__)
self.name = self.__class__.__name__
self.coordinator = coordinator
""" API exposed to subclasses """
def _subscribe(self, publisher, handler):
self.coordinator.subscribe(self.name, publisher, handler)
def _publish(self, message):
self.coordinator.publish(self.name, message)
def _register_activity(self, activity_name, activity):
self.coordinator.register_activity(activity_name, self.name, activity)
def _register_auxiliary_data(self, name, data):
self.coordinator.register_auxiliary_data(name, data)
def _mount_auxiliary_data(self, name, ready_callback):
return self.coordinator.mount_auxiliary_data(self.name, name,
ready_callback)
| true |
1cebc6ce597683eec91b2bf23b3ea2cc1286d4fd | Python | NCDCCC/matplot_learning | /numpy2.py | UTF-8 | 492 | 3.03125 | 3 | [] | no_license | import numpy as np
scores = np.array([[80,85],
[82,81],[67,90],[65,76],[88,96]],dtype='float64')
print(scores)
print(scores>80)
print(np.where(scores<80,0,100))
print(np.amax(scores,axis=0))#column
print(np.amin(scores,axis=1))#row
print(np.mean(scores,axis=0))#average
print(np.std(scores,axis=1))#s2
scores[:,0] /= 2
print(scores)
A = np.array([[2,0,4],[3,4,2]])
print(scores@A)
print(np.dot(scores,A))
B = A.T
print(B)
print(np.vstack((scores,B)))
print(np.hstack((scores,np.eye(5)))) | true |
bd508fe49293d5804e2daaf0e87e42ce4de2f1ca | Python | russell-stewart/pubMedTextMiner | /pmcTextMiner.py | UTF-8 | 8,610 | 2.90625 | 3 | [] | no_license | #pmcTextMiner.py
#Russell O. Stewart
#8/9/2017
#A text miner for PubMed
#Mines PubMed's API for a query (eg. a couple of gene names, proteins, etc...)
#runs the resulting abstracts through Neji (a biomedical named entity
#recognizer), tallies the named entities into an .xlsx file, and creates pretty
#word clouds for found genes, anatomy features, and disorders/diseases.
#
#Usage:
#python pmcTextMiner.py --query <string> --ofilepath <path> [--mineBodies] [--retmax <int>] [--email <string>] [--nerPath <path>] [--threads <int>]
#--query: the query string to search in PubMed.
#--ofilepath: the directory to output results to. Mac users: don't use ~/
#--email: optional (if not given, uses value specified in this file).
# email required for PubMed API access (idk why). Probably a good idea to
# update this for your machine.
#--nerPath: optional (if not given, uses value specified in this file).
# the path to Neji. Probably a good idea to update this for your machine.
#--threads: optional (if not given, uses value specified in this file).
# the number of threads available for your task. more threads will speed up Neji named entity recognition.
#--mineBodies: optional. if specified, program will mine pmc for full articles,
# instead of just mining PubMed for abstracts.
#--retmax: optional, default is 20 (PubMed API's standard). number of articles to mine.
#
#Dependencies
#
#Non-standard Python packages (all available from pip):
#BeautifulSoup: xml parsing
#unirest: api handling
#lxml: xml parsing (required by BeautifulSoup)
#wordcloud: for word cloud output
#xlsxwriter: for xlsx output
#
#Other Programs
#Neji: biomedical named entity recognition. For documentation and download, see:
#https://github.com/BMDSoftware/neji
try:
from bs4 import BeautifulSoup
import unirest
import lxml
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import getopt
import os
from PIL import Image
from wordcloud import WordCloud
from xlsxwriter import Workbook
except ImportError:
raise Exception('Not all packages installed! Please run sudo pip packagehelper.py')
# -*- coding: utf-8-*-
#Default parameters
#It might be useful to update nerPath and threads, and email for your machine,
#as these likely won't change every time you run the script.
nerPath = '/Users/russellstewart/Documents/NationalJewish/Seibold/neji'
threads = 4
email = 'russells98@gmail.com'
#stores freqencies of all NamedEntities associated with one tag
class Tag:
def __init__(self , classification , firstEntity):
self.classification = classification
self.entities = [NamedEntity(firstEntity)]
def add(self , new):
found = False
for entity in self.entities:
if new == entity.text:
entity.increment()
found = True
if not found:
self.entities.append(NamedEntity(new))
def toString(self):
self.sort()
string = u'Classification: %s\n' % self.classification
for entity in self.entities:
string += ' %s\n' % entity.toString()
string += '\n'
return string
def sort(self):
self.entities.sort(key = lambda x: x.occurances , reverse = True)
def EntityString(self):
string = u''
for entity in self.entities:
for i in range(0 , len(self.entities)):
string += entity.text + ' '
return string
#stores a named entity's text and its number of occurances
class NamedEntity:
def __init__(self , text):
self.text = text
self.occurances = 1
def increment(self):
self.occurances += 1
def toString(self):
return '%s: %d' % (self.text , self.occurances)
#Get parameters from program call
#Recognised flags: --query and --ofilepath are mandatory (checked below);
#--mineBodies switches the search from PubMed abstracts to PubMed Central
#full texts; --retmax caps how many articles are fetched (default 20).
#NOTE(review): email, nerPath and threads are only bound when their flag is
#present; later code assumes they exist (NameError otherwise) — confirm the
#intended required flags.
opts = getopt.getopt(sys.argv[1:] , '' , ['query=' , 'ofilepath=' , 'email=' , 'nerPath=' , 'threads=' , 'mineBodies' , 'retmax='])
query = None
ofilepath = None
db = 'pubmed'
retmax = 20
for opt , arg in opts[0]:
    if opt == '--query':
        query = arg
    if opt == '--ofilepath':
        ofilepath = arg
    if opt == '--email':
        email = arg
    if opt == '--nerPath':
        nerPath = arg
    if opt == '--threads':
        threads = arg
    if opt == '--mineBodies':
        db = 'pmc'
    if opt == '--retmax':
        retmax = arg
if query == None or ofilepath == None:
    raise Exception('Remember to specify --query and --ofilepath!!!!')
#Search PubMed (w/ user specifed query) and extract all resulting article IDs
#via the NCBI E-utilities ESearch endpoint.
print 'Finding relevant articles on PubMed...'
searchURL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
searchResponse = unirest.get(
    searchURL,
    params = {
        'db' : db,
        'term' : query,
        'tool' : 'pmcTextMiner',
        'email' : email,
        'retmax' : retmax
    }
)
#ESearch returns the matching article ids inside <id> elements
ids = BeautifulSoup(searchResponse.body , 'lxml').find_all('id')
print ' %d results found.' % len(ids)
ids = [current.get_text() for current in ids]
#EFetch accepts a single comma-separated id list
ids = ','.join(ids)
#Retrieve the abstracts for all article ids obtained above (EFetch endpoint)
print 'Retrieving article abstracts...'
getRecordURL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
getRecordResponse = unirest.get(
    getRecordURL,
    params = {
        'db' : db,
        'id' : ids,
        'retmode' : 'xml'
    }
)
#concatenate every abstract's plain text into one blob
response = BeautifulSoup(getRecordResponse.body , 'lxml').find_all('abstract')
abstracts = ''
for abstract in response:
    abstracts += abstract.get_text().strip()
#when mining PMC full texts, also append every <sec> body section
if db == 'pmc':
    bodies = BeautifulSoup(getRecordResponse.body , 'lxml').find_all('sec')
    for body in bodies:
        abstracts += body.get_text().strip()
#write abstracts to files for use by the named entity
#recognition pipeline
abstractsPath = ofilepath + '/abstracts.txt'
abstractFile = open(abstractsPath , 'w')
abstractFile.write(abstracts.encode('utf8'))
abstractFile.close()
#run named entity recognition pipeline on titles and on abstracts
print 'Running NER on abstracts...'
os.chdir(nerPath)
command = '%s/neji.sh -i %s -o %s -d %s/resources/dictionaries -m %s/resources/models -t %d -if RAW -of XML' %(nerPath , ofilepath , ofilepath , nerPath , nerPath , threads)
print '\n' + command + '\n'
returncode = os.system(command)
#after pipeline finishes, import the Neji results and xml parse them.
#(I chose XML because I already had to import XML parsers to deal with PubMed.)
print '\nNER done! Importing results...'
annotatedAbstracts = BeautifulSoup(open(ofilepath + '/abstracts.xml' , 'r').read().encode('UTF-8') , 'lxml')
#iterate over every named entity in every sentence, and parse the tag from
#the id. see if the tag exists in the tags database, add the entity to that Tag.
#if the tag is not found, create a new Tag in tags
tags = []
for sentence in annotatedAbstracts.find_all('s'):
    for annotation in sentence.find_all('e'):
        classification = annotation['id']
        #strip everything up to the last ':' and everything from the first
        #'|' or ')' so only the semantic-group code (e.g. PRGE) remains.
        #NOTE(review): after a ':' trim, i is set to -1 so the very next
        #test reads classification[-1] (the last character) — looks
        #accidental but appears harmless for well-formed Neji ids; confirm.
        i = 0
        while i < len(classification):
            if classification[i] == ':':
                classification = classification[(i + 1):]
                i = -1
            if classification[i] == '|' or classification[i] == ')':
                classification = classification[:i]
                i = len(classification)
            i+= 1
        #bucket the entity under its classification, creating the Tag on
        #first sight (linear scan; fine for the handful of Neji groups)
        found = False
        for currentTag in tags:
            if currentTag.classification == classification:
                currentTag.add(annotation.get_text())
                found = True
        if not found:
            tags.append(Tag(classification , annotation.get_text()))
#generate word clouds and output .xlsx file
print 'Writing output files...'
ofile = Workbook((ofilepath + '/' + query + '_' + 'results.xlsx'))
for tag in tags:
if tag.classification == 'PRGE':
tag.classification = 'genes'
elif tag.classification == 'ANAT':
tag.classification = 'anatomy'
elif tag.classification == 'DISO':
tag.classification = 'diseases'
worksheet = ofile.add_worksheet(tag.classification.encode('UTF-8'))
worksheet.write_string(0 , 0 , 'Named Entity')
worksheet.write_string(0 , 1 , 'Occurances')
i = 0
tag.sort()
for entity in tag.entities:
worksheet.write_string(i , 0 , entity.text.encode('UTF-8'))
worksheet.write_number(i , 1 , entity.occurances)
i += 1
image = WordCloud().generate(tag.EntityString()).to_image()
image.save(ofilepath + '/' + query + '_' + tag.classification + '.bmp')
ofile.close()
#delete the temporary abstracts files used for communication with neji
#(both the raw text we wrote and the XML Neji produced)
print 'Cleaning up...'
os.remove(ofilepath + '/' + 'abstracts.txt')
os.remove(ofilepath + '/' + 'abstracts.xml')
| true |
89aa021911400d0d63d8710f68c3e22d27bf9385 | Python | sativa/Tikon | /COSO.py | UTF-8 | 8,083 | 3.15625 | 3 | [] | no_license | import os
import shutil
import numpy as np
import json
import random as aleatorio
import datetime as ft
from Controles import directorio_base
"""
Un "coso", por falta de mejor palabra, se refiere a todo, TODO en el programa
Tikon que representa un aspecto físico del ambiente y que tiene datos. Incluye
paisajes parcelas, variedades de cultivos, suelos, insectos, etc. Todos tienen la misma
lógica para leer y escribir sus datos en carpetas externas, tanto como para la
su calibración.
"""
class Coso(object):
    # Base class for every "thing" in Tikon that owns a data dictionary and
    # knows how to persist itself (plus its uncertainty data) as JSON files.
    # Note: methods use "símismo" where English code would use "self".
    def __init__(símismo, nombre, ext, dic, directorio, reinic=False):
        símismo.nombre = nombre  # the object's name
        símismo.ext = ext  # file extension for this document type (used to save/load data)
        símismo.dic = dic
        símismo.dic_incert = {}
        símismo.objetos = {}
        # The folder where this object lives
        símismo.directorio = os.path.join(directorio_base, "Proyectos", directorio)
        # The file name used to store this object
        símismo.dirección = os.path.join(símismo.directorio, '%s.%s' % (símismo.nombre, símismo.ext))
        # reinic tells the program whether to reinitialise or reuse existing folders.
        # Delete/recreate, or find/read, the data folder if it exists
        if reinic:  # If we are reinitialising the object, delete and recreate the directory
            if os.path.isdir(símismo.dirección):
                shutil.rmtree(símismo.dirección)
            if os.path.isfile(símismo.dirección):
                os.remove(símismo.dirección)
            # And create the directory/document again
            # NOTE(review): os.getcwd() has no effect here — looks vestigial
            os.getcwd()
            os.makedirs(símismo.directorio)
        else:  # If we are not reinitialising the object, read the document if it exists
            if os.path.isfile(símismo.dirección):
                símismo.cargar(símismo.dirección)
            else:  # If it does not exist, create the directory
                if not os.path.isdir(símismo.directorio):
                    os.makedirs(símismo.directorio)
    # Writes the object's data to an external JSON document
    def guardar(símismo, documento=""):
        if not len(documento):
            documento = símismo.dirección
        # If needed, append the document name and extension to the folder path
        if símismo.ext not in documento.lower():
            if símismo.nombre not in documento:
                documento += "\\%s.%s" % (símismo.nombre, símismo.ext)
            else:
                documento += '.%s' % símismo.ext
        # Path used to store the uncertainty dictionary (same name + "i")
        documento_incert = "%si" % documento
        # Object dictionaries are saved with the JSON module, a format that is
        # easy for humans and for other programs (JavaScript, Python, etc.).
        # First, convert datetime objects into string form
        dic_temp = símismo.dic.copy()  # copy so the object's own dict is untouched
        # Recursively sanitise the structure so it is JSON-serialisable:
        # dates -> 'YYYY-MM-DD' strings, Coso objects -> '' (with a warning),
        # numpy arrays -> plain lists.
        def convertir_fechas(obj):
            n = -1
            for i in obj:
                if type(obj) is list:
                    n += 1
                elif type(obj) is dict:
                    n = i
                if type(obj[n]) is list or type(obj[n]) is dict:
                    convertir_fechas(obj[n])  # recurse into each sub-dict or sub-list
                elif type(obj[n]) is ft.datetime:
                    obj[n] = obj[n].strftime('%Y-%m-%d')  # convert dates to string form
                elif type(obj[n]) is Coso:
                    obj[n] = ''
                    print('Aviso: objeto en diccionario de objeto %s.' % símismo.nombre)
                elif type(obj[n]) is np.ndarray:
                    obj[n] = list(obj[n])
        convertir_fechas(dic_temp)
        try:
            with open(documento, mode="w", encoding='utf8') as d:
                json.dump(dic_temp, d, ensure_ascii=False, indent=2, sort_keys=True)
        except IOError:
            print("Documento " + documento + " no se pudo abrir para guadar datos.")
        # Save the uncertainty dictionary only when it has content
        if len(símismo.dic_incert):
            try:
                dic_incert_temp = símismo.dic_incert.copy()
                convertir_fechas(dic_incert_temp)
                try:
                    with open(documento_incert, encoding='utf8', mode="w") as d:
                        json.dump(dic_incert_temp, d, ensure_ascii=False, sort_keys=True, indent=2)
                except IOError:
                    print("Documento " + documento + " no se pudo abrir para guadar datos de incertidumbre.")
            except AttributeError:
                pass
    # Reads the object's data back from an external JSON document
    def cargar(símismo, documento=""):
        if not len(documento):
            documento = símismo.dirección
        documento_incert = documento + "i"
        try:
            with open(documento, mode="r", encoding='utf8') as d:
                try:
                    símismo.dic = json.load(d)
                except ValueError:
                    # corrupt JSON: report and discard the file
                    print('Error en documento %s.' % documento)
                    os.remove(documento)
        except IOError:
            print("Documento " + documento + " no se pudo abrir para leer datos.")
        try:
            with open(documento_incert, mode="r", encoding='utf8') as d:
                try:
                    símismo.dic_incert = json.load(d)
                except ValueError:
                    print('Error en documento %s.' % documento_incert)
                    os.remove(documento_incert)
        except IOError:
            # NOTE(review): returns a message string here while other failure
            # paths only print — callers appear to ignore the return value
            return "Documento " + documento + " no se pudo abrir para leer datos de incertidumbre."
        # Turn 'YYYY-MM-DD' strings back into Python datetime objects
        def convertir_fechas(a):
            if type(a) is dict:
                i = [x for x in sorted(a.items())]
            elif type(a) is list:
                i = enumerate(a)
            else:
                raise ValueError('convertir_fechas() necesita una lista o diccionario como parámetro.')
            f = ft.datetime(1, 1, 1)
            for ll, v in i:
                if type(v) is list or type(v) is dict:
                    convertir_fechas(v)
                elif type(v) is str:
                    try:  # see whether the string can be parsed as a date
                        a[ll] = f.strptime(v, '%Y-%m-%d')
                    except ValueError:  # not a date: leave it unchanged
                        pass
        convertir_fechas(símismo.dic)
        convertir_fechas(símismo.dic_incert)
    def inic_incert(símismo):
        # If no uncertainty dictionary exists yet, mirror the structure of the
        # ordinary coefficients dictionary
        if not len(símismo.dic_incert):
            # For every numeric variable in the dictionary, create a list able
            # to hold several possible values of the variable
            # (uncertainty analysis)
            def dic_lista(d, d_i):
                for ll, v in d.items():
                    if type(v) is float or type(v) is int:  # if the value is numeric
                        d_i[ll] = [v]  # wrap the value in a list
                    # If the element is a non-empty list holding numeric values
                    if type(v) is list and len(v) and (type(v[0]) is float or type(v[0]) is int):
                        d_i[ll] = [v]
                    elif type(v) is dict:
                        d_i[ll] = {}
                        dic_lista(v, d_i[ll])
            dic_lista(símismo.dic['coefs'], símismo.dic_incert)
# A subclass of Coso that can be simulated as a standalone model
# (e.g. agro-ecological networks and plots, but not soils).
class Simulable(Coso):
    def simul(símismo, **kwargs):
        # Subclasses must provide their own simulation entry point.
        raise NotImplementedError
'''
def calib(símismo, opciones_simul, it, quema, espacio):
genmodbayes(símismo, opciones_simul)
guardar(símismo, calib(símismo.simul, it=it, quema=quema, espacio=espacio))
''' | true |
7a30a3dfb23d87ec9f68ba46f1bc32350480463e | Python | titodeal/desktop | /app/models/root/root_model.py | UTF-8 | 673 | 2.71875 | 3 | [] | no_license | class RootModel:
def __init__(self, _id, owner_id, root_folder, sharing):
self.id = _id
self.owner_id = owner_id
self.root_folder = root_folder
self.sharing = sharing
@staticmethod
def get_user_roots(server, user_id):
respone = server.get_user_roots(user_id)
if not respone[0]:
return []
roots = []
for root_data in respone[1]:
root = RootModel(root_data["root_id"],
f"{user_id}",
root_data["root_folder"],
root_data["sharing"])
roots.append(root)
return roots
| true |
ae3c32ba4c80647df5c3a1173d4ad91ce26ddf36 | Python | joon2974/python_study | /practice/Chap5/5_1_if.py | UTF-8 | 202 | 3.984375 | 4 | [] | no_license | car = 'Kia'
# Conditional-test practice: `car` is set above to 'Kia', so the first
# comparison demonstrates case sensitivity.
number = 20
print("Is car == 'kia'? I predict True.")
print(car == 'kia')  # False: 'Kia' != 'kia' (string comparison is case-sensitive)
print(car.lower() == 'kia')  # True once normalised to lower case
print("Is car == 'kia' and number == 20?")
print(car.lower() == 'kia' and number == 20)  # True: both operands hold
| true |
a8b50d99d63e6ddd3fc082d51b976f1233dc8195 | Python | colobas/track-and-field-data | /notebooks/example-notebook.py | UTF-8 | 7,840 | 2.5625 | 3 | [] | no_license | # %%
from IPython.core.display import display, HTML
# Widen Jupyter's notebook cells to 90% of the browser window.
display(HTML("<style>.container { width:90% !important; }</style>"))
# %%
import pandas as pd
import numpy as np
import math
# %%
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %%
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# %%
# Raw 1500m race data: one row per race, one column per 100m split.
df = pd.read_csv("1500m.csv")
# %%
# Column names of the 100m split points: "100", "200", ..., "1500".
split_points = [str(i*100) for i in range(1, 16)]
split_points
def convert(x):
    """Parse one split time into seconds.

    Accepts 'm:ss.s' strings (e.g. '3:51.2'), plain numeric strings, or
    numbers; anything unparseable becomes NaN so pandas can interpolate
    over the gap later.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only conversion failures should map to NaN.
    """
    if ":" in str(x):
        m, s = str(x).split(":")
        return int(m)*60 + float(s)
    try:
        return float(x)
    except (TypeError, ValueError):
        return np.nan
# Numeric seconds column for every split point ("100_sec", ..., "1500_sec").
for pt in split_points:
    df[pt+"_sec"] = df[pt].apply(convert)
# %%
# Segment durations between every ordered pair of split points, e.g. "1500-1300".
for i, pt1 in enumerate(reversed(split_points)):
    for pt2 in list(reversed(split_points))[i+1:]:
        df[pt1+"-"+pt2] = df[pt1+"_sec"] - df[pt2+"_sec"]
# %%
# Each segment duration as a fraction of the total race time.
for c in df.columns.tolist():
    if "-" in c:
        df[c+"_normed"] = df[c]/df["1500_sec"]
# %%
# Fill missing splits by interpolating across each row's split columns.
sec_cols = [col for col in df.columns.tolist() if "sec" in col]
df.loc[:, sec_cols] = df[sec_cols].interpolate(axis=1)
# %%
# Three performance tiers by finishing time; labels double as plot colours.
df["categ"] = pd.qcut(df["1500_sec"], 3, labels=["blue", "green", "orange"])
# %%
# Parallel-coordinates plot of cumulative split times, coloured by tier.
init_notebook_mode(connected=True)
layout = go.Layout(
    autosize=False,
    width=1000,
    height=500,
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
)
dimensions = []
for col in ["300_sec", "500_sec", "700_sec", "900_sec", "1100_sec", "1300_sec", "1500_sec"]:
    dimensions += [
        dict(range=[df[col].min(), df[col].max()],
             label=col,
             values=df[col].values)
    ]
data = [
    go.Parcoords(
        line=dict(color=df["categ"].astype("category").cat.codes/2,
                  colorscale=[[0, "orange"], [0.5, "green"], [1, "blue"]]),
        dimensions=dimensions
    )
]
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename = 'parcoord-dimensions')
# %%
# Interpolated copy so every cumulative split column is gap-free.
interpolated_df = df.copy()
interpolated_df[[col for col in df.columns if ("_sec" in col) and ("_normed" not in col)]] = df[[col for col in df.columns if ("_sec" in col) and ("_normed" not in col)]].interpolate(method="linear", axis=1)
# %%
cols = [col for col in df.columns if ("_sec" in col) and ("_normed" not in col)]
# %%
# Parallel coordinates with padded, evenly spaced axis ticks; axes past 400m
# get mm:ss tick labels.
init_notebook_mode(connected=True)
layout = go.Layout(
    autosize=False,
    width=1000,
    height=500,
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
)
dimensions = []
for col in cols[2:]:
    m = interpolated_df[col].min()
    M = interpolated_df[col].max()
    # pad so every axis spans exactly 12 seconds (comparable scales)
    pad = (12 - (M - m))/2
    tickvals = [m-pad] + list(range(math.ceil(m-pad), math.floor(M+pad)+1)) + [M+pad]
    if col not in ("300_sec", "400_sec"):
        ticktext = [f"{int(tick//60)}:{int(tick%60):02}" for tick in tickvals]
        dimensions += [
            dict(range=[m-pad, M+pad],
                 label=col,
                 values=interpolated_df[col].values,
                 tickvals=tickvals,
                 ticktext=ticktext)
        ]
    else:
        dimensions += [
            dict(range=[m-pad, M+pad],
                 label=col,
                 values=interpolated_df[col].values,
                 tickvals=tickvals)
        ]
data = [
    go.Parcoords(
        #line=dict(color=interpolated_df["categ"].astype("category").cat.codes/2,
        #          colorscale=[[0, "orange"], [0.5, "green"], [1, "blue"]]),
        dimensions=dimensions
    )
]
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename = 'parcoord-dimensions')
# %%
# Swarm plots of selected splits.
# NOTE(review): `loc` filters races under 4:06 but is never used below;
# the swarms plot the full interpolated_df — confirm intent.
loc = interpolated_df.loc[lambda x: x["1500_sec"] <= 246]
f, axs = plt.subplots(1, 7, figsize=(28, 5))
for ax, col in zip(axs, [
        "300_sec",
        "400_sec",
        "700_sec",
        "800_sec",
        "1100_sec",
        "1200_sec",
        "1500_sec"]):
    sns.swarmplot(y=interpolated_df[col], ax=ax)
    if col not in ("300_sec", "400_sec"):
        # show the y axis as mm:ss once times exceed a minute
        formatter = mpl.ticker.FuncFormatter(
            lambda sec, x: f"{int(sec//60)}:{int(sec%60)}")
        ax.yaxis.set_major_formatter(formatter)
plt.show()
# %%
# Distribution of cumulative split times.
plt.figure(figsize=(20, 10))
sns.violinplot(data=df[["300_sec", "500_sec", "700_sec", "900_sec", "1100_sec", "1300_sec", "1500_sec"]])
# %%
# Parallel coordinates over 200m segment durations plus finishing time.
init_notebook_mode(connected=True)
layout = go.Layout(
    autosize=False,
    width=1000,
    height=500,
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
)
dimensions = []
for col in ["300-100", "500-300", "700-500", "900-700", "1100-900","1300-1100", "1500-1300", "1500_sec"]:
    dimensions += [
        dict(range=[df[col].min(), df[col].max()],
             label=col,
             values=df[col].values)
    ]
data = [
    go.Parcoords(
        line=dict(color=df["categ"].astype("category").cat.codes/2,
                  colorscale=[[0, "orange"], [0.5, "green"], [1, "blue"]]),
        dimensions=dimensions
    )
]
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename = 'parcoord-dimensions')
# %%
# Share of total race time spent in each 200m segment (percent).
plt.figure(figsize=(20, 10))
plt.title("percentage of time spent in segment")
sns.violinplot(data=df[[
    "300-100_normed",
    "500-300_normed",
    "700-500_normed",
    "900-700_normed",
    "1100-900_normed",
    "1300-1100_normed",
    "1500-1300_normed"]]*100)
# %%
# Normalised opening splits (fractions of total time).
df["100_sec_normed"] = df["100_sec"]/df["1500_sec"]
df["300_sec_normed"] = df["300_sec"]/df["1500_sec"]
# %%
# One line per race across three normalised race phases, coloured by tier.
cols1 = ["300_sec_normed", "700-300_normed", "1100-700_normed"]
cols = cols1+["categ"]
plt.figure(figsize=(20, 10))
def plot_row(row):
    plt.plot(row[cols1], color=row["categ"], alpha=0.3, marker="o")
(df[cols]).apply(plot_row, axis=1)
plt.show()
# Average speeds (m/s): opening 300m, whole race, and each 400m segment.
df["300_speed"] = df["300_sec"].apply(lambda x: 300/x)
# Fix: the original divided 1500 by the *300m* split time (copy-paste slip),
# inflating every whole-race speed roughly fivefold.
df["1500_speed"] = df["1500_sec"].apply(lambda x: 1500/x)
for col in ["700-300", "1100-700", "1500-1100"]:
    df[col+"_speed"] = df[col].apply(lambda x: 400/x)
# %%
# Parallel coordinates over the segment speeds plus finishing time.
cols1 = ["300_speed", "700-300_speed", "1100-700_speed", "1500-1100_speed", "1500_sec"]
init_notebook_mode(connected=True)
layout = go.Layout(
    autosize=False,
    width=1000,
    height=500,
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
)
dimensions = []
for col in cols1:
    dimensions += [
        dict(range=[df[col].min(), df[col].max()],
             label=col,
             values=df[col].values)
    ]
data = [
    go.Parcoords(
        line=dict(color=df["categ"].astype("category").cat.codes/2,
                  colorscale=[[0, "orange"], [0.5, "green"], [1, "blue"]]),
        dimensions=dimensions
    )
]
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename = 'parcoord-dimensions')
# %%
df[cols1].dtypes
# %%
# Speed change between consecutive race segments (positive = speeding up).
cols = ["300_speed", "700-300_speed", "1100-700_speed", "1500-1100_speed"]
new_cols = []
for i in range(1, len(cols)):
    df[f"{cols[i]}_{cols[i-1]}_diff"] = df[cols[i]] - df[cols[i-1]]
    new_cols.append(f"{cols[i]}_{cols[i-1]}_diff")
new_cols
# %%
# Same parallel-coordinates view, now over the speed-difference columns.
cols1 = new_cols
init_notebook_mode(connected=True)
layout = go.Layout(
    autosize=False,
    width=1000,
    height=500,
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5',
)
dimensions = []
for col in cols1:
    dimensions += [
        dict(range=[df[col].min(), df[col].max()],
             label=col,
             values=df[col].values)
    ]
data = [
    go.Parcoords(
        line=dict(color=df["categ"].astype("category").cat.codes/2,
                  colorscale=[[0, "orange"], [0.5, "green"], [1, "blue"]]),
        dimensions=dimensions
    )
]
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename = 'parcoord-dimensions')
# %%
# Scatter of finishing time against early splits/segments.
for col in ["300_sec", "700-300", "1100-700"]:
    plt.figure(figsize=(20, 10))
    plt.title(f"1500_sec x {col}")
    plt.scatter(df[col], df["1500_sec"], s=10)
    plt.show()
| true |
bb519b675fc93dcfff8e268f655ef2108698edda | Python | debugvelop/PySoesoe | /Crypto/ReadFromCaesar.py | UTF-8 | 759 | 3.796875 | 4 | [
"MIT"
] | permissive | gibber = input("What's the gibberish?: ")
direction = input("Left/Right?: ")
retry = int(input("How much do you want to try?: "))
charlist = "abcdefghijklmnopqrstuvwxyz "
dictionary = {}
index = 0
for e in charlist:
dictionary[e] = index
index += 1
jump = 1
if direction == "Right":
for cycle in range(retry):
notsecret = ""
for char in gibber:
index = dictionary[char]
notsecret += charlist[(index - jump) % len(dictionary)]
jump += 1
print(notsecret)
else:
for cycle in range(retry):
notsecret = ""
for char in gibber:
index = dictionary[char]
notsecret += charlist[(index + jump) % len(dictionary)]
jump += 1
print(notsecret)
| true |
325a14469ccf7730e0b833c61d1be645e82c38a1 | Python | GrailFinder/questionnaire | /services/questmaker/tests/test_inquery.py | UTF-8 | 5,507 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | import json
from services.questmaker.tests.base import BaseTestCase
from services.questmaker.tests.utils import add_quest, add_inquiry, add_user, add_choice, add_answer
class TestinquiryService(BaseTestCase):
    """Tests for the inquiries"""
    def test_add_inquiry(self):
        """Ensure that creating a new inquiry behaves normally."""
        # create a user and log in to obtain a JWT auth token
        user = add_user('test', 'test@test.com', 'test')
        resp_login = self.client.post(
            '/auth/login',
            data=json.dumps(dict(
                email='test@test.com',
                password='test'
            )),
            content_type='application/json'
        )
        with self.client:
            # authenticated POST creating the inquiry
            response = self.client.post(
                "/inquiries",
                data=json.dumps(dict(
                    title="testone",
                    user_id=user.id,
                )),
                content_type='application/json',
                headers=dict(
                    Authorization='Bearer ' + json.loads(
                        resp_login.data.decode()
                    )['auth_token']
                )
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 201)
            self.assertIn('success', data['status'])
    def test_add_public_inquiry(self):
        """Create an inquiry open for everyone to see results (no auth)."""
        with self.client:
            resp = self.client.post(
                "/inqs/",
                data=json.dumps(dict(
                    title="public test",
                )),
                content_type='application/json',
            )
            data = json.loads(resp.data.decode())
            self.assertEqual(resp.status_code, 201)
            self.assertIn('id', data)
    def test_single_inquiry(self):
        """Test getting an inquiry by id."""
        inq = add_inquiry(title="Are you even test?")
        with self.client:
            response = self.client.get(f'inq/{inq.id}')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertTrue('created_at' in data)
            self.assertIn('Are you even test?', data['title'])
    def test_inq_view(self):
        """
        get all questions with all choices and other stuff
        {question: (multichoice, choice)}
        """
        # user
        u0 = add_user(username='grail', email="test@example.com", password='test')
        # inquiry with two questions, four choices each
        i1 = add_inquiry(title="Who are you from the star wars?", user=u0)
        q1 = add_quest(title="How was your day, sweety?", inq=i1)
        a1 = add_choice("Okay", quest=q1)
        a2 = add_choice("Good", quest=q1)
        a3 = add_choice("Bad", quest=q1)
        a4 = add_choice("Who are you again?", quest=q1)
        q2 = add_quest(title="Anyway how is your sex life?", inq=i1)
        add_choice("You're just a little chicken", quest=q2)
        add_choice("Its not true, I did not hit her. I did not", quest=q2)
        add_choice("I am so happy to have you as my best friend and I love Lisa so much", quest=q2)
        add_choice("If a lot of people would love each other, the world would be a better place to live", quest=q2)
        with self.client:
            response = self.client.get(f'inq-view/{i1.id}')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            print(data["data"])
            # payload maps each question title to its choices + multichoice flag
            self.assertIn('How was your day, sweety?', data['data'])
            self.assertIn('success', data['status'])
            self.assertIn('Good', data['data']['How was your day, sweety?']["choice"].values())
            self.assertFalse(data['data']['How was your day, sweety?']["multichoice"])
    def test_inq_passing(self):
        '''user doesnt see inqs if he answered on them before'''
        # user
        u0 = add_user(username='grail', email="test@example.com", password='test')
        # inquiry with two questions and their choices
        i1 = add_inquiry(title="Who are you from the star wars?", user=u0)
        q1 = add_quest(title="How was your day, sweety?", inq=i1)
        a1 = add_choice("Okay", quest=q1)
        a2 = add_choice("Good", quest=q1)
        a3 = add_choice("Bad", quest=q1)
        a4 = add_choice("Who are you again?", quest=q1)
        q2 = add_quest(title="Anyway how is your sex life?", inq=i1)
        ch1 = add_choice("You're just a little chicken", quest=q2)
        add_choice("Its not true, I did not hit her. I did not", quest=q2)
        add_choice("I am so happy to have you as my best friend and I love Lisa so much", quest=q2)
        add_choice("If a lot of people would love each other, the world would be a better place to live", quest=q2)
        # create user, inqs, questions, choices
        # user gets list of inqs
        with self.client:
            response = self.client.get(f"/uinqs/{u0.id}")
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            inq_num = len(data)
            self.assertTrue(inq_num > 0)
            # answers to one of them
            add_answer(inq_id=i1.id, quest_id=q1.id,
                       choice_id=ch1.id, user_id=u0.id)
            # gets list of inqs without one he answered
            response = self.client.get(f"/uinqs/{u0.id}")
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertTrue(inq_num > len(data))
| true |
0c5063962ca5b452f6e9d5b2fad1e2fd0ef991af | Python | haodongxi/leetCode | /jzoffer/22.py | UTF-8 | 673 | 3.625 | 4 | [] | no_license | # -*- coding:utf-8 -*-
class TreeNode:
    """A binary-tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        # children are unset until the tree is built
        self.left = self.right = None
class Solution:
    # Returns the node values from top to bottom, e.g. [1, 2, 3]
    def PrintFromTopToBottom(self, root):
        """Breadth-first (level-order) traversal of a binary tree.

        Returns every node's value top-to-bottom, left-to-right.

        Fix: the original used a list with ``del pool[0]``, which is O(n)
        per node (O(n^2) overall); collections.deque gives O(1) pops from
        the front. ``== None`` comparisons replaced with the ``is None``
        idiom.
        """
        if root is None:
            return []
        from collections import deque
        queue = deque([root])
        result = []
        while queue:
            node = queue.popleft()
            result.append(node.val)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)
        return result
5cd49341b867714b9e62472d5f696c341f99cf60 | Python | sgricci/port_scanner | /Con.py | UTF-8 | 524 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
class Con:
    """Tiny ``name: value`` configuration-file reader.

    Each line of the file is parsed as ``key: value``; lookups go through
    :meth:`get`, which answers ``'unknown'`` for missing keys.
    """
    def __init__(self, file):
        self.file = file
        # Read everything up front. `fh` is kept as an attribute for
        # backward compatibility, but the handle is closed afterwards —
        # the original leaked it for the object's lifetime.
        self.fh = open(file)
        self.buffer = self.fh.readlines()
        self.fh.close()
        self.mem = {}
        self.parse()
    def get(self, config_name):
        """Return the value stored under *config_name*, or 'unknown'."""
        # dict.has_key() was removed in Python 3; `in` works everywhere.
        if str(config_name) not in self.mem:
            return 'unknown'
        return self.mem[str(config_name)]
    def total(self):
        """Number of parsed configuration entries."""
        return len(self.mem)
    def parse(self):
        """Fill self.mem from self.buffer.

        Splits on the *first* ':' only, so values containing colons
        (e.g. URLs) survive intact — the original's plain split(':')
        truncated them. Lines without a ':' are skipped instead of
        raising IndexError.
        """
        mem = dict()
        for line in self.buffer:
            sp = line.split(':', 1)
            if len(sp) < 2:
                continue
            mem[str(sp[0])] = sp[1].strip()
        self.mem = mem
| true |
204acd806c622959da7255708d0325115dbf3fc7 | Python | Gmbzhp/Music-Code | /scripts/music_code/analytics.py | UTF-8 | 6,497 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | import mysql.connector
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sql_kit import SQL_Kit
plt.style.use('seaborn-dark-palette')  # global matplotlib theme for all dashboard plots
import getpass
# Pandas settings
# display untruncated data from pandas (no column-width cap, up to 999
# columns and 100 rows shown)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', 999)
pd.set_option('display.max_rows', 100)
# this pulls data from the SQL database, then displays a dashboard of interactive plots, widgets and animations!
class Dashboard:
    """ This dashboard is designed to visualize trends in the Music-Code database """
    def __init__(self, database):
        # sql info — credentials are collected interactively at construction
        self.userID = input('User ID: ')
        self.password = getpass.getpass('Password: ')
        self.database = database
    """ SELECT * FROM table """
    def select_table(self, table):
        # Convenience passthrough to SQL_Kit; returns the table's contents.
        s = SQL_Kit(self.userID, self.password, self.database)
        data = s.select_table(table)
        return data
    def display(self):
        """ visualize Music-Code database data """
        # initialize SQL kit to access database
        s = SQL_Kit(self.userID, self.password, self.database)
        """ Total Activity by hour """
        # get activity data: one datetime per logged event (see activity())
        all_date_times = self.activity().index
        all_days = []
        all_hours = []
        for item in all_date_times:
            all_days.append((item.timetuple().tm_yday))  # day of year 1..366
            all_hours.append(item.hour)
        x = all_days
        y = all_hours
        x_labels = pd.Series(all_days).unique()
        fig1, ax1 = plt.subplots()
        ax1.set_title('Hourly Activity')
        ax1.scatter(x,y,color='mediumspringgreen',linewidths=1)
        ax1.set_xlabel('day of year')
        ax1.set_ylabel('hour')
        ax1.xaxis.grid(True)
        # with many distinct days only label the first and last
        if len(x_labels) > 5:
            ax1.xaxis.set_ticks([min(all_days), max(all_days)])
        else:
            ax1.xaxis.set_ticks(x_labels)
        ax1.yaxis.grid(False)
        plt.show()
        """ MOVING AVERAGE """
        # cumulative mean of daily event counts over the observed days
        df = self.activity().reset_index()
        def day_of_year(datetime_entry):
            return datetime_entry.timetuple().tm_yday
        df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']),axis=1))
        daily_count = df['day_of_year'].value_counts().sort_index()
        averages = []
        i=1
        for value_count in daily_count:
            # mean of the first i daily counts (expanding average)
            values = daily_count[:i]
            average = round(sum(values)/len(values),2)
            averages.append(average)
            i+=1
        day_list = list(df['day_of_year'].unique())
        avg_move_df = pd.DataFrame([day_list,averages]).T
        avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)
        avg_move_df.set_index('day_id',inplace=True)
        fig1, ax1 = plt.subplots()
        ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')
        ax1.set_title('Moving AVG')
        ax1.set_xlabel('day_of_year')
        ax1.xaxis.set_ticks([min(all_days), max(all_days)])
        ax1.set_ylabel('Daily Activity')
        plt.show()
        """ Top 5 Samples """
        # most-used sample sound categories
        data = s.select_table('sample')['SoundCategory'].value_counts()
        objects = list(data)[:5]
        y_pos = list(data.index)[:5]
        # get class info from class_absence_stats dataframe
        #fig2 = plt.figure(2)
        plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')
        plt.ylabel('Usage')
        plt.xlabel('Sound Category')
        plt.title('Top 5 Samples')
        plt.show()
        """ Top 3 Chords """
        data = s.select_table('chord')['ChordLabel'].value_counts()
        objects = list(data)[:3]
        y_pos = list(data.index)[:3]
        # get class info from class_absence_stats dataframe
        #fig2 = plt.figure(2)
        plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')
        plt.ylabel('Usage')
        plt.xlabel('Chord Label')
        plt.title('Top 3 Chords')
        plt.show()
        """ Top 3 Wave Types """
        # get SQL table data from every table that records a WaveType
        set_1 = s.select_table('createwave')
        set_2 = s.select_table('sequence')
        set_3 = s.select_table('arpeggio')
        set_4 = s.select_table('chord')
        # concat tables into single pandas series
        all_wave_types = pd.concat([set_1['WaveType'], set_2['WaveType'], set_3['WaveType'], set_4['WaveType']])
        # sort values, show top 3
        top_3 = all_wave_types.value_counts().head(3)
        # Pie chart, where the slices will be ordered and plotted counter-clockwise:
        labels = list(top_3.index)
        sizes = list(top_3.values)
        explode = (0, 0, 0)  # only "explode" the 2nd slice (i.e. 'Hogs')
        fig1, ax1 = plt.subplots()
        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
                shadow=True, colors=['g','b','r'], startangle=90)
        ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
        ax1.set_title('Top Wave Types')
        plt.show()
    def activity(self):
        # Build a chronological activity log: one row per event across every
        # method table, indexed by EventDateTime.
        s = SQL_Kit(self.userID, self.password, self.database)
        # create chronological activity log
        # all sql tables in database
        all_tables = ['addwaves','arpeggio','bounce','chord','createwave','delay','fade','hpf',
                      'joinwaves','lfo','loopmethod','lpf','pan','rest','reverse','sample','sequence',
                      'timeedit','timemethod','viewmethod','volume']
        activity_log = pd.DataFrame(columns=['EventDateTime','method'])
        for table in all_tables:
            # get data from sql
            data = s.select_table(table)['EventDateTime']
            # convert to pandas dataframe
            data = pd.DataFrame(data)
            # create method column so each row remembers its source table
            data['method'] = table
            # append activity to master dataframe
            activity_log = activity_log.append(data).sort_values('EventDateTime')
        activity_log.reset_index(inplace=True)
        activity_log.drop('index',axis=1, inplace=True)
        activity_log.set_index('EventDateTime', inplace=True)
        return activity_log
| true |
2632649e128773ee32f73d5fbf91faf140fc4798 | Python | adneena/think-python | /Conditionals-recursion/excercise2.py | UTF-8 | 1,507 | 5.25 | 5 | [] | no_license | '''
If you are given three sticks, you may or may not be able to arrange them in a triangle.
For example, if one of the sticks is 12 inches long and the other two are one inch long, it is clear that
you will not be able to get the short sticks to meet in the middle. For any three lengths, there is a
simple test to see if it is possible to form a triangle:
If any of the three lengths is greater than the sum of the other two, then you cannot
form a triangle. Otherwise, you can. (If the sum of two lengths equals the third, they
form what is called a “degenerate” triangle.)
1. Write a function named is_triangle that takes three integers as arguments, and that prints
either “Yes” or “No,” depending on whether you can or cannot form a triangle from sticks
with the given lengths.
2. Write a function that prompts the user to input three stick lengths, converts them to integers,
and uses is_triangle to check whether sticks with the given lengths can form a triangle.
'''
# 1
def is_triangle(a, b, c):
    """Print 'Yes' if sticks of lengths a, b, c can form a triangle
    (degenerate ones included), otherwise print 'No'.

    Only the longest stick can possibly exceed the sum of the other two,
    so a single comparison against the maximum suffices.
    """
    longest = max(a, b, c)
    if longest > (a + b + c) - longest:
        print('No')
    else:
        print('Yes')
is_triangle(1, 2, 3) # degenerate (1 + 2 == 3) but still allowed -> "Yes"
is_triangle(1, 2, 9) # 9 > 1 + 2, sticks cannot meet -> "No"
print()
# 2
def triangle():
    """Prompt the user for three stick lengths (converted to integers) and
    delegate the triangle check to is_triangle()."""
    first = int(input('Please enter the length of the 1st stick:\n'))
    second = int(input('Please enter the length of the 2nd stick:\n'))
    third = int(input('Please enter the length of the 3rd stick:\n'))
    is_triangle(first, second, third)
triangle()
| true |
7c49400e9d5a4b9c8377421a82b01a84c04f1362 | Python | sdpython/mathenjeu | /src/mathenjeu/apps/common/auth_app.py | UTF-8 | 8,466 | 2.5625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
@file
@brief Starts an application.
"""
import hashlib
from starlette.responses import RedirectResponse
from itsdangerous import URLSafeTimedSerializer
import ujson
class AuthentificationAnswers:
"""
Defines answers for an application with authentification.
It stores a cookie with only the user alias.
The method `authentify_user <mathenjeu.apps.common.auth_app.AuthentificationAnswers.authentify_user>`_
must be overwritten. The method
`page_context <mathenjeu.apps.qcm.acm_app.ACMApp.page_context>`_
returns additional information to add before applying any template.
"""
    def __init__(self, app,
                 login_page="login.html",
                 notauth_page="notauthorized.html",
                 auth_page="authorized.html",
                 redirect_logout="/", max_age=14 * 24 * 60 * 60,
                 cookie_key=None, cookie_name="mathenjeu",
                 cookie_domain="127.0.0.1", cookie_path="/",
                 secure=False, page_context=None, userpwd=None):
        """
        @param      app             :epkg:`starlette` application
        @param      login_page      name of the login page
        @param      notauth_page    page displayed when a user is not authorized
        @param      auth_page       page displayed when a user is authorized
        @param      redirect_logout a not authorized used is redirected to this page
        @param      max_age         cookie's duration in seconds
        @param      cookie_key      to encrypt information in the cookie (cannot be None)
        @param      cookie_name     name of the session cookie
        @param      cookie_domain   cookie is valid for this path only
        @param      cookie_path     path of the cookie once storeds
        @param      secure          use secured connection for cookies
        @param      page_context    to retrieve additional context
                                    before rendering the pages (as a function
                                    which returns a dictionary)
        @param      userpwd         users are authentified with any alias but a common password
        """
        # the cookie key signs/encrypts session data; refusing None avoids
        # silently unsigned cookies
        if cookie_key is None:
            raise ValueError("cookie_key cannot be None")
        self.app = app
        self.login_page = login_page
        self.notauth_page = notauth_page
        self.auth_page = auth_page
        self.redirect_logout = redirect_logout
        self.cookie_name = cookie_name
        self.cookie_domain = cookie_domain
        self.cookie_path = cookie_path
        self.cookie_key = cookie_key
        self.max_age = max_age
        self.secure = secure
        # signer serialises + signs the session cookie payload (itsdangerous)
        self.signer = URLSafeTimedSerializer(self.cookie_key)
        self.userpwd = userpwd
        # store only the SHA-256 of the shared password, never the clear text
        self.hashed_userpwd = None if userpwd is None else self.hash_pwd(
            userpwd)
        self._get_page_context = page_context
        # expose selected hooks on the starlette app so routes can call them
        app._get_session = self.get_session
        for method in ['log_event', 'log_any']:
            if hasattr(self, method):
                setattr(app, '_' + method, getattr(self, method))
async def login(self, request):
"""
Login page. If paramater *returnto* is specified in the url,
the user will go to this page after being logged.
"""
ps = request.query_params
context = {'request': request, 'returnto': ps.get('returnto', '/')}
context.update(self._get_page_context())
return self.templates.TemplateResponse(self.login_page, context) # pylint: disable=E1101
def hash_pwd(self, pwd):
"""
Hashes a password.
@param pwd password
@return hashed password in hexadecimal format
"""
m = hashlib.sha256()
m.update(pwd.encode("utf-8"))
return m.hexdigest()
async def authenticate(self, request):
"""
Authentification.
@param request request
@return response
"""
try:
fo = await request.form()
except Exception as e:
raise RuntimeError( # pylint: disable=W0707
"Unable to read login and password due to '{0}'".format(e))
if 'alias' not in fo:
return self.is_allowed(
alias=None, pwd=None, request=request)
ps = request.query_params
loge = getattr(self, 'logevent', None)
if loge:
loge("authenticate", request, session={}, # pylint: disable=E1102
alias=fo['alias'])
res = self.is_allowed(alias=fo['alias'], pwd=fo['pwd'],
request=request)
if res is not None:
return res
data = dict(alias=fo['alias'], hashpwd=self.hash_pwd(fo['pwd']))
returnto = ps.get('returnto', '/')
context = {'request': request,
'alias': fo['alias'], 'returnto': returnto}
context.update(self._get_page_context())
response = self.templates.TemplateResponse( # pylint: disable=E1101
'authorized.html', context)
self.save_session(response, data)
return response
# response = RedirectResponse(url=returnto)
# return response
async def logout(self, request):
"""
Logout page.
"""
response = RedirectResponse(url=self.redirect_logout)
response.delete_cookie(self.cookie_name, domain=self.cookie_domain,
path=self.cookie_path)
return response
def save_session(self, response, data):
"""
Saves the session to the response in a secure cookie.
@param response response
@param data data
"""
data = ujson.dumps(data) # pylint: disable=E1101
signed_data = self.signer.dumps([data]) # pylint: disable=E1101
response.set_cookie(self.cookie_name, signed_data,
max_age=self.max_age,
httponly=True, domain=self.cookie_domain,
path=self.cookie_path, secure=self.secure)
def get_session(self, request, notnone=False):
"""
Retrieves the session.
@param request request
@param notnone None or empty dictionary
@return session
"""
cook = request.cookies.get(self.cookie_name)
if cook is not None:
unsigned = self.signer.loads(cook)
data = unsigned[0]
jsdata = ujson.loads(data) # pylint: disable=E1101
# We check the hashed password is still good.
hashpwd = jsdata.get('hashpwd', '')
if not self.authentify_user(jsdata.get('alias', ''), hashpwd, False):
# We cancel the authentification.
return {}
return jsdata
return {} if notnone else None
def is_allowed(self, alias, pwd, request):
"""
Checks that a user is allowed. Returns None if it is allowed,
otherwise an page with an error message.
@param alias alias or iser
@param pwd password
@param request received request
@return None if allowed, *HTMLResponse* otherwise
"""
if not self.authentify_user(alias, pwd):
context = {'request': request, 'alias': alias}
context.update(self._get_page_context())
return self.templates.TemplateResponse('notauthorized.html', context) # pylint: disable=E1101
return None
def authentify_user(self, alias, pwd, hash_before=True):
"""
Overwrites this method to allow or reject users.
@param alias alias or user
@param pwd password
@param hash_before hashes the password before comparing, otherwise,
the function assumes it is already hashed
@return boolean
The current behavior is to allow anybody if the alias is longer
than 3 characters.
"""
if alias is None or len(alias.strip()) <= 3:
return False
if self.hashed_userpwd is None:
return True
if hash_before:
hashed_pwd = self.hash_pwd(pwd)
return hashed_pwd == self.hashed_userpwd
return pwd == self.hashed_userpwd
| true |
5a7a244f54189ab776cc7c2be790dc7863eb70f4 | Python | greenteasocha/Gitlog | /tests/Objects/commit_object_test.py | UTF-8 | 2,065 | 2.5625 | 3 | [] | no_license | import pytest
from Objects.commit_object import CommitObject
from Objects.git_object import GitObject
def test_get_commit():
    """A commit with two parents: tree, both parents, author, committer and
    the raw message must all be parsed out of the object data."""
    o = GitObject()
    o.obj_type = "commit"
    o.obj_size = "265"
    o.data = 'tree de7dfe27b9f4a4621c067b045cf2101a76440e35\n' \
             'parent af80478c1338269ce4b3ac0106da3d3a5ef6e6f9\n' \
             'parent b3ac0106da3d3a5ef6e6f9af80478c1338269ce4\n' \
             'author abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             'committer abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             '\n' \
             'removing pycache\n' \
             '\n'
    c = CommitObject()
    c.get_commit(o)
    assert c.tree == "de7dfe27b9f4a4621c067b045cf2101a76440e35"
    assert c.parents == [
        "af80478c1338269ce4b3ac0106da3d3a5ef6e6f9",
        "b3ac0106da3d3a5ef6e6f9af80478c1338269ce4"
    ]
    assert c.author == "abe <kouheiatts@gmail.com> 1625495822 +0900"
    assert c.committer == "abe <kouheiatts@gmail.com> 1625495822 +0900"
    # NOTE(review): here the message keeps its leading '\n', but
    # test_get_commit_without_parent expects no leading blank line --
    # confirm that asymmetry is intended in CommitObject.get_commit.
    assert c.message == "\nremoving pycache\n\n"
def test_get_commit_without_parent():
    """A root commit (no parent lines) still yields tree, author and message."""
    o = GitObject()
    o.obj_type = "commit"
    o.obj_size = "217"
    o.data = 'tree de7dfe27b9f4a4621c067b045cf2101a76440e35\n' \
             'author abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             'committer abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             '\n' \
             'removing pycache\n' \
             '\n'
    c = CommitObject()
    c.get_commit(o)
    assert c.tree == "de7dfe27b9f4a4621c067b045cf2101a76440e35"
    assert c.author == "abe <kouheiatts@gmail.com> 1625495822 +0900"
    assert c.message == "removing pycache\n\n"
def test_get_commit_invalid_fail():
    """Object data whose first line is not a 'tree ...' header must make
    get_commit raise."""
    o = GitObject()
    o.obj_type = "commit"
    o.obj_size = "148"
    o.data = 'invalid something value\n' \
             'author abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             'committer abe <kouheiatts@gmail.com> 1625495822 +0900\n' \
             '\n' \
             'removing pycache\n' \
             '\n'
    # Any exception type is accepted here -- the parser just has to refuse it.
    with pytest.raises(Exception):
        c = CommitObject()
        c.get_commit(o)
| true |
ce05cee052167242d179a8c52155ca7821b3c4bf | Python | Anjana-5856/Image-Processing-Tutorial | /dct.py | UTF-8 | 878 | 3.09375 | 3 | [] | no_license | from scipy.fftpack import dct, idct
# implement 2D DCT
def dct2(a):
    """Orthonormal 2-D DCT-II of the 2-D array *a* (transform both axes)."""
    first_axis = dct(a.T, norm='ortho')
    return dct(first_axis.T, norm='ortho')
# implement 2D IDCT
def idct2(a):
    """Orthonormal 2-D inverse DCT (DCT-III) of the 2-D array *a*."""
    first_axis = idct(a.T, norm='ortho')
    return idct(first_axis.T, norm='ortho')
from skimage.io import imread
from skimage.color import rgb2gray
import numpy as np
import matplotlib.pylab as plt
# read lena RGB image and convert to grayscale
# NOTE(review): the Windows path below is not a raw string; '\I' and '\l'
# happen not to be escape sequences today, but r'E:\...' would be safer.
im = rgb2gray(imread('E:\ImageProcessing\lena.png'))
imF = dct2(im)
im1 = idct2(imF)
# check if the reconstructed image is nearly equal to the original image
# NOTE(review): the boolean result below is discarded; in a script an
# assert (or a print) would actually surface a failure.
np.allclose(im, im1)
# True
# plot original and reconstructed images with matplotlib.pylab
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original image', size=20)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('reconstructed image (DCT+IDCT)', size=20)
plt.show()
815175b83e0cbe1715902d86ac66623c4ddc9f2b | Python | su795/Bigdata | /Ch01/weather.py | UTF-8 | 2,701 | 2.734375 | 3 | [] | no_license | """
Date    : 2020/07/15
Author  : Gwon Gi-min
Subject : Scraping current weather observations with a virtual web browser in Python
"""
import os
import requests as req
from datetime import datetime
from bs4 import BeautifulSoup as bs
# Start an HTTP session
sess = req.session()
# Request the current-weather observation page (Korea Meteorological Administration)
html = sess.get('https://www.weather.go.kr/weather/observation/currentweather.jsp')
# Parse the HTML
dom = bs(html.text, 'html.parser')
# Columns: region, visibility, current temperature, dew point, feels-like
# temperature, daily precipitation, humidity, wind direction, sea-level pressure
locals = dom.select('#content_weather > table > tbody > tr > td > a')
visibilities = dom.select('#content_weather > table > tbody > tr > td:nth-child(3)')
temps = dom.select('#content_weather > table > tbody > tr > td:nth-child(6)')
dews = dom.select('#content_weather > table > tbody > tr > td:nth-child(7)')
sens_temps = dom.select('#content_weather > table > tbody > tr > td:nth-child(8)')
precipitations = dom.select('#content_weather > table > tbody > tr > td:nth-child(9)')
humidities = dom.select('#content_weather > table > tbody > tr > td:nth-child(10)')
direction_winds = dom.select('#content_weather > table > tbody > tr > td:nth-child(11)')
sea_pressures = dom.select('#content_weather > table > tbody > tr > td:nth-child(13)')
# Create the output directory (one per day)
dir = '/home/bigdata/weather/weather-{:%y-%m-%d}'.format(datetime.now())
if not os.path.exists(dir):
    os.mkdir(dir)
# Save to a CSV file named like '20-07-15-16.csv' (one per hour)
fname = "{:%y-%m-%d-%H}.csv".format(datetime.now())
# NOTE(review): the file is never close()d and no 'with' block is used.
file = open(dir+'/'+fname, mode='w', encoding='utf8')
# CSV header row (region, visibility, temperature, dew point, feels-like,
# precipitation, humidity, wind direction, sea-level pressure -- 9 columns)
file.write('지역,시정,현재기온,이슬점온도,체감온도,일강수,습도,풍향,해면기압\n')
# NOTE(review): `temps` is collected above but never written, so each data
# row has only 8 fields against the 9-column header -- confirm whether the
# current-temperature column was meant to be rs3.
for i in range(0, len(locals)):
    rs1 = locals[i].text
    rs2 = visibilities[i].text if visibilities[i].text.strip() else 'NA'
    rs3 = dews[i].text if dews[i].text.strip() else 'NA'
    rs4 = sens_temps[i].text if sens_temps[i].text.strip() else 'NA'
    rs5 = precipitations[i].text if precipitations[i].text.strip() else 'NA'
    rs6 = humidities[i].text if humidities[i].text.strip() else 'NA'
    rs7 = direction_winds[i].text if direction_winds[i].text.strip() else 'NA'
    rs8 = sea_pressures[i].text if sea_pressures[i].text.strip() else 'NA'
    file.write(rs1+','+
               rs2+','+
               rs3+','+
               rs4+','+
               rs5+','+
               rs6+','+
               rs7+','+
               rs8+'\n')
# Registering the cron job
# crontab -e
# * * * * * python3 /root/naver.py
# (minute, hour, day, month, weekday)
# Runs python3 /root/naver.py every minute
# Start/stop the cron daemon service
# systemctl start crond
# systemctl stop crond
# List cron jobs
# crontab -l
# Remove cron jobs
# crontab -r
| true |
57584eadd09bdec01d621099426420977dd197b7 | Python | potatoes-and-molasses/ml-course-project | /utils/misc.py | UTF-8 | 423 | 2.765625 | 3 | [] | no_license |
def sentencify(inference_results, dct):
sentences = [[] for i in inference_results[0]]
reverse_dict = {dct[j]:j for j in dct}
for i in range(len(inference_results)):
for j in range(len(inference_results[0])):
sentences[j].append(reverse_dict[inference_results[i][j].item()])
return sentences
def kld_coef(i):
import math
return (math.tanh((i - 3500)/1000) + 1)/2 | true |
145de571ce603b7edcb6242586128229ffc2fc26 | Python | b51/bitbots_navigation | /bitbots_visual_compass/src/videocv.py | UTF-8 | 723 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python2
import time
import cv2
from threading import Thread
class Videocv():
    """
    Ensures constant frame rates for the CV2 video input.

    A background thread re-reads frames from the capture device at roughly
    `fps` frames per second; consumers simply read the `frame` attribute.
    """
    def __init__(self, src=0, fps=30):
        # fps stored as float so 1/self.fps below is a true division even
        # under the Python 2 interpreter named in the file's shebang.
        self.fps = float(fps)
        self._vc = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self._vc.read()
        self.ended = False
    def run(self):
        # Fire-and-forget reader thread; returns immediately.
        # NOTE(review): the thread is non-daemon and never joined, so the
        # process cannot exit until stop() is called -- confirm intended.
        Thread(target=self.get, args=()).start()
        return
    def get(self):
        # Reader loop: pace reads at ~fps; a failed grab ends the loop.
        while not self.ended:
            time.sleep(1/self.fps)
            if not self.grabbed:
                self.stop()
            else:
                (self.grabbed, self.frame) = self._vc.read()
    def stop(self):
        self.ended = True
| true |
0e69144180fc70e5942eb1b0e80ed05eb4524289 | Python | vkinakh/Die-modeling | /die.py | UTF-8 | 273 | 4.03125 | 4 | [] | no_license | from random import randint
class Die():
    """A die with a configurable number of faces (six by default)."""

    def __init__(self, num_sides=6):
        """Remember how many faces this die has."""
        self.num_sides = num_sides

    def roll(self):
        """Return a uniformly random face value in [1, num_sides]."""
        return randint(1, self.num_sides)
| true |
712a161304c9c58a9289d3a92e1af96d0ec667d9 | Python | mikronavt/pycodes | /string_changer.py | UTF-8 | 369 | 3.15625 | 3 | [] | no_license | s = input()
a = input()
b = input()
# Replacing a with b can never terminate when a is a substring of b (each
# replacement re-creates an occurrence of a), so bail out up front -- but
# only if a actually occurs in s.
if (a in b) and (s.find(a) >= 0):
    print("Impossible")
else:
    counter = 0
    # Repeatedly replace every non-overlapping occurrence of a with b,
    # counting the passes until a no longer occurs in s.
    while True:
        if s.find(a) < 0:
            print(counter)
            break
        else:
            s = s.replace(a,b)
            counter += 1
            # NOTE(review): inputs where the replacement keeps re-creating a
            # without a being a substring of b are not guarded against --
            # confirm such inputs cannot occur, or restore cycle detection.
4f50cd27717e90876d435744a21be4dce893f078 | Python | Great-designer/BUAA-OJ-Project | /python/3847 最大公约数与最小公倍数.py | UTF-8 | 327 | 3.03125 | 3 | [] | no_license | import math
n = int(input())
for i in range(n):
    x, y = map(int, input().split())
    # A pair (a, b) with gcd(a, b) == x and lcm(a, b) == y exists only if x | y.
    if y % x != 0:
        print(0)
        continue
    m = y // x
    summ = 0
    # Write a = x*j, b = x*k; then the constraints reduce to j*k == m with
    # gcd(j, k) == 1.  Count the ordered coprime factorisations of m.
    for j in range(1, m + 1):
        if m % j == 0:
            k = m // j
            if math.gcd(j, k) == 1:
                summ = summ + 1
    print(summ)
| true |
30ed19d26532b2cb3bc1a7aeef578a5615b89d35 | Python | auttij/aoc2019 | /10/puzzle.py | UTF-8 | 1,831 | 3.25 | 3 | [] | no_license | from math import gcd, atan2, pi
import math
import operator
filepath = "./input.txt"
def read_file_to_arr(filepath):
with open(filepath) as fp:
return [line.strip() for line in fp.readlines()]
def asteroids(arr):
ast = {}
for yi, y in enumerate(arr):
for xi, x in enumerate(y):
if x == "#":
ast[(xi, yi)] = "#"
return ast
def dist(x1, y1, x2, y2):
ax, ay = x2-x1, y2-y1
g = gcd(ax, ay)
return int(ax/g), int(ay/g)
def behind(i, p, l):
xi, yi = i
xp, yp = p
xlen, ylen = l
out = []
xi += xp
yi += yp
while 0 <= xi and xi < xlen and 0 <= yi and yi < ylen:
out.append((xi, yi))
xi += xp
yi += yp
return out
def diff(li1, li2):
li_dif = [i for i in li1 if i not in li2]
return li_dif
def get_visible(ast, a, xlen, ylen):
x, y = a
others = [ai for ai in ast if ai != a]
d = [dist(x, y, xi, yi) for xi, yi in others]
b = [behind(others[i], p, (xlen, ylen)) for i, p in enumerate(d)]
flat = [item for sublist in b for item in sublist]
return diff(others, flat)
def part1(ast, xlen, ylen):
visible = {}
for a in ast:
vis = get_visible(ast, a, xlen, ylen)
visible[a] = len(vis)
return max(visible, key=visible.get), visible[max(visible, key=visible.get)]
def ang(angle):
x = angle - pi/2
return x if x >= -pi else x + 2*pi
def part2(ast, res1, xlen, ylen):
angles = {}
a = res1[0]
x, y = a
vis = get_visible(ast, a, xlen, ylen)
for v in vis:
xi, yi = v
dx, dy = xi-x, yi-y
angle = ang(atan2(dy, dx))
angles[v] = angle
sor = [k for k, v in sorted(angles.items(), key=lambda item: item[1])]
xv, xy = sor[199]
return 100*xv + xy
if __name__ == "__main__":
arr = read_file_to_arr(filepath)
ast = asteroids(arr)
result = part1(ast, len(arr[0]), len(arr))
print("part A:", result)
result = part2(ast, result, len(arr[0]), len(arr))
print("Part B:", result)
| true |
92e663c63b94e5c26116ae05b6b774ea50f163bf | Python | daniel-reich/ubiquitous-fiesta | /L2nw2N2YqZCboiaYM_8.py | UTF-8 | 117 | 2.921875 | 3 | [] | no_license |
def repeated(s):
    """Return True if *s* consists of some substring repeated >= 2 times.

    Bug fix: the original prefix/count scan started at prefix length 2, so
    single-character repetitions ('aa', 'aaa', ...) were wrongly reported
    as False.  The classic rotation trick below covers every unit length:
    s is periodic iff it occurs inside (s + s) with both trivial
    alignments removed.
    """
    return len(s) > 1 and s in (s + s)[1:-1]
| true |
11290359f6c2560ea8250dfbf6d2836cccfcbb61 | Python | shyukahn/BOJ | /Python/2000~2999/2839.py | UTF-8 | 130 | 3.015625 | 3 | [] | no_license | N = int(input())
# BOJ 2839-style sugar problem: express N as 5a + 3b minimising the bag count.
a = N // 5
# N = 4 and N = 7 have no representation with non-negative a, b.
if N != 4 and N != 7:
    # Identity: if 5a + 3b = N then (N - 2a) / 3 = a + b, the total number of
    # bags.  Start with the maximum number of 5s and trade down until the
    # remainder is divisible by 3.
    # Bug fix: guard a >= 0 -- for N = 1 or N = 2 the original let a go
    # negative and printed a bogus positive count instead of -1.
    while a >= 0 and (N - 2 * a) % 3 != 0:
        a -= 1
    print((N - 2 * a) // 3 if a >= 0 else -1)
else:
    print(-1)
f27644c290d7fce5453eb09ce5de83469ec0c204 | Python | Weidaoqin/Pythom.work | /ex1_1.py | UTF-8 | 173 | 3.453125 | 3 | [] | no_license | stril1=input("请输入一个人的名字")
stril2=input("请输入一个国家名字")
print("世界那么大我想去看看,{}想去{}看看。".format(stril1,stril2))
| true |
a7a8182dcfa242d624d8d541c81aeec031bf89c6 | Python | FrankLeeC/MachineLearning | /svm/test.py | UTF-8 | 8,373 | 3.078125 | 3 | [
"MIT"
] | permissive | import svm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Circle
import random
import math
# rbf
# rbf
def get_rbf_training_examples():
    """Random sample for the RBF demo: X2 holds points strictly inside the
    circle x**2 + y**2 = 25, X1 holds points outside it (plus the |x| > 5
    fringe).  Points that land exactly on the circle are silently dropped."""
    X1, X2 = [], []
    y1, y2 = [], []
    for i in range(110):
        x = -5.5 + 0.1 * i
        if x < -5 or x > 5:
            # Outside the circle's x-range: always class X1.
            # NOTE(review): these get label +1 while the other outside-circle
            # points below get -1, so y1 mixes both labels -- confirm this is
            # intended for a two-class SVM (it looks like a labelling bug).
            X1.append([x, random.random() * 6 - 3])
            y1.append(1)
        else:
            # f / fm are the upper / lower arcs of the circle at this x;
            # jitter a point around one of them.
            f = math.sqrt(25-x**2)
            fm = -f
            if i % 2 == 0:
                y = f + random.random() * 6 - 3
            else:
                y = fm + random.random() * 6 - 3
            if (y > f or y < fm):
                X1.append([x, y])
                y1.append(-1)
            elif (y < f and y > fm):
                X2.append([x, y])
                y2.append(-1)
    return X1, y1, X2, y2
# rbf
def get_rbf_test_examples():
    """Draw a fresh random sample from the same ring/disc distribution used
    for training.

    The original body duplicated get_rbf_training_examples line for line
    (same range(110), same branches, same labels), so delegate to it
    instead of maintaining two copies.
    """
    return get_rbf_training_examples()
# polynomial
# polynomial
def get_polynomial_training_examples():
    """Random sample of 50 x-positions separated by the hyperbola y = 1/x:
    class +1 (X1) below the curve, class -1 (X2) above it; points landing
    exactly on the curve are dropped."""
    X1, X2 = [], []
    y1, y2 = [], []
    for i in range(50):
        x = 0.1 + 0.1 * i
        f = 1.0 / x
        # Jitter the point up to +/-4 around the curve.
        y = f + random.random() * 8 - 4
        if y < f:
            X1.append([x, y])
            y1.append(1)
        elif y > f:
            X2.append([x, y])
            y2.append(-1)
    return X1, y1, X2, y2
# polynomia
# polynomia
def get_polynomial_test_examples():
    """Same hyperbola-separated distribution as the training sampler, but only
    20 x-positions instead of 50 (the only difference between the two)."""
    X1, X2 = [], []
    y1, y2 = [], []
    for i in range(20):
        x = 0.1 + 0.1 * i
        f = 1.0 / x
        y = f + random.random() * 8 - 4
        if y < f:
            X1.append([x, y])
            y1.append(1)
        elif y > f:
            X2.append([x, y])
            y2.append(-1)
    return X1, y1, X2, y2
# outlier
# outlier
def get_linear_outlier_training_examples():
    """Fixed 2-D training points: class +1 (upper region), class -1 (lower),
    with one deliberate outlier ([7, 8]) planted in the -1 set to exercise
    the SVM's soft margin."""
    X1 = np.array([[8, 7], [4, 10], [9, 7], [7, 10],
                   [9, 6], [4, 8], [10, 10]])
    y1 = np.ones(len(X1))
    X2 = np.array([[2, 7], [8, 3], [7, 5], [4, 4],
                   [7, 8],  # the outlier
                   [4, 6], [1, 3], [2, 5]])
    y2 = np.ones(len(X2)) * -1
    return X1, y1, X2, y2
# outlier
# outlier
def get_linear_outlier_test_examples():
    """Fixed evaluation points for the linear/outlier scenarios: class +1
    above the separating band, class -1 below it."""
    X1 = np.array([[2, 9], [1, 10], [1, 11], [3, 9], [11, 5],
                   [10, 6], [10, 11], [7, 8], [8, 8], [4, 11],
                   [9, 9], [7, 7], [11, 7], [5, 8], [6, 10]])
    X2 = np.array([[11, 2], [11, 3], [1, 7], [5, 5], [6, 4],
                   [9, 4], [2, 6], [9, 3], [7, 4], [7, 2], [4, 5],
                   [3, 6], [1, 6], [2, 3], [1, 1], [4, 2], [4, 3]])
    y1 = np.ones(len(X1))
    y2 = np.ones(len(X2)) * -1
    return X1, y1, X2, y2
# linear
# linear
def get_linear_training_examples():
    """Same fixed training points as the outlier variant, but without the
    planted [7, 8] outlier -- a cleanly separable set."""
    X1 = np.array([[8, 7], [4, 10], [9, 7], [7, 10],
                   [9, 6], [4, 8], [10, 10]])
    y1 = np.ones(len(X1))
    X2 = np.array([[2, 7], [8, 3], [7, 5], [4, 4],
                   [4, 6], [1, 3], [2, 5]])
    y2 = np.ones(len(X2)) * -1
    return X1, y1, X2, y2
# linear
def get_linear_test_examples():
    """Fixed evaluation points for the clean linear scenario.

    The original body duplicated get_linear_outlier_test_examples array for
    array; both return the same deterministic data, so delegate instead of
    maintaining two copies.
    """
    return get_linear_outlier_test_examples()
def linear():
    """Train a linear-kernel SVM on the outlier data set, draw the decision
    line, score the model on the held-out points and plot hits (class
    colours) vs misses (black)."""
    x1, y1, x2, y2 = get_linear_outlier_training_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    model = svm.SVM(kernel='linear', C=0.0)
    model.fit(x, y)
    for p in x1:
        plt.scatter(p[0], p[1], c='blue', marker='o', alpha=1, edgecolors='none')
    for p in x2:
        plt.scatter(p[0], p[1], c='red', marker='o', alpha=1, edgecolors='none')
    w, b = model.get_model()
    print(w)
    print(b)
    # Decision boundary w . p + b = 0 rewritten as y = k*x + intercept.
    k = float(-w[0:1,0:1]/w[0:1,1:2])
    intercept = float(b/w[0:1,1:2])
    print(k, intercept)
    p1 = [0, 10]
    p2 = [float(b), k*10+intercept]
    plt.plot(p1, p2, c='black')
    x1, y1, x2, y2 = get_linear_outlier_test_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    succ = 0
    total = 0
    # s_set remembers the indices that were classified correctly.
    s_set = set()
    for i in range(x.shape[0]):
        total += 1
        pred = model.predict(x[i])
        if pred == y[i]:
            s_set.add(i)
            succ += 1
    print('accuracy:', succ / total)
    c = 0
    for p in x1:
        if c in s_set:
            plt.scatter(p[0], p[1], c='blue', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    for p in x2:
        if c in s_set:
            plt.scatter(p[0], p[1], c='red', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    plt.grid(True)
    plt.show(block=True)
def polynomial():
    """Train a polynomial-kernel SVM on the hyperbola data, score it on a
    fresh sample and plot hits (class colours) vs misses (black)."""
    x1, y1, x2, y2 = get_polynomial_training_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    model = svm.SVM(kernel='polynomial', C=0.0)
    model.fit(x, y)
    for p in x1:
        plt.scatter(p[0], p[1], c='blue', marker='o', alpha=1, edgecolors='none')
    for p in x2:
        plt.scatter(p[0], p[1], c='red', marker='o', alpha=1, edgecolors='none')
    w, b = model.get_model()
    print(w)
    print(b)
    x1, y1, x2, y2 = get_polynomial_test_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    succ = 0
    total = 0
    # s_set remembers the indices that were classified correctly.
    s_set = set()
    for i in range(x.shape[0]):
        total += 1
        pred = model.predict(x[i])
        if pred == y[i]:
            s_set.add(i)
            succ += 1
    print('accuracy:', succ / total)
    c = 0
    for p in x1:
        if c in s_set:
            plt.scatter(p[0], p[1], c='blue', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    for p in x2:
        if c in s_set:
            plt.scatter(p[0], p[1], c='red', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    plt.grid(True)
    plt.show(block=True)
def rbf():
    """Train an RBF-kernel SVM on the circle data (the shaded circle of
    radius 5 is the true boundary), score it on a fresh sample and plot
    hits (class colours) vs misses (black)."""
    x1, y1, x2, y2 = get_rbf_training_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    model = svm.SVM(kernel='rbf', C=0.0)
    model.fit(x, y)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # The translucent circle marks the true class boundary x**2 + y**2 = 25.
    circle = Circle(xy=(0.0, 0.0), radius=5, alpha=0.3)
    ax.add_patch(circle)
    for p in x1:
        plt.scatter(p[0], p[1], c='blue', marker='o', alpha=1, edgecolors='none')
    for p in x2:
        plt.scatter(p[0], p[1], c='red', marker='o', alpha=1, edgecolors='none')
    w, b = model.get_model()
    print(w)
    print(b)
    x1, y1, x2, y2 = get_rbf_test_examples()
    x = np.vstack((x1, x2))
    y = np.hstack((y1, y2))
    succ = 0
    total = 0
    # s_set remembers the indices that were classified correctly.
    s_set = set()
    for i in range(x.shape[0]):
        total += 1
        pred = model.predict(x[i])
        if pred == y[i]:
            s_set.add(i)
            succ += 1
    print('accuracy:', succ / total)
    c = 0
    for p in x1:
        if c in s_set:
            plt.scatter(p[0], p[1], c='blue', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    for p in x2:
        if c in s_set:
            plt.scatter(p[0], p[1], c='red', marker='^', alpha=1, edgecolors='none')
        else:
            plt.scatter(p[0], p[1], c='black', marker='^', alpha=1, edgecolors='none')
        c += 1
    plt.grid(True)
    plt.show(block=True)
def main():
    """Demo entry point: runs exactly one of the three kernel demos.
    Uncomment the call you want; each one opens a matplotlib window."""
    # linear()
    polynomial()
    # rbf()
if __name__ == '__main__':
    main()
0f3d87bc3339e2a460ffd680fb961d1df226c7f9 | Python | faizanzafar40/Intro-to-Programming-in-Python | /5. Full Exercises/e2_factor_dictionary.py | UTF-8 | 1,441 | 4.875 | 5 | [] | no_license | """
Problem
-------
This is an easy one. Take a number as input (say number 10)
For each number between 2 and the given number (i.e. 10) generate a dictionary of factors for non-prime numbers.
Put the code in a function and return the dictionary.
Note: dictionary is a container like list but with key value pair and we use curly brackets instead of square e.g
names_list = ['john', 'tom', 'sam']
names_dict = { 'name':'john', 'name':'tom', 'name':'sam' }
You can import the factors function, created previously.
For example, if the given input is 10, the output should be:
{
4: [2, 2],
6: [2, 3],
8: [2, 2, 2],
9: [3, 3],
10: [2, 5]
}
Notice that each number in the keys is a non-prime number, and it is associated with a list of its factors.
Hints:
1. A number is a prime number if it has only one factor, i.e. itself.
"""
def isPrime(number):
    """Return True if *number* is prime, False otherwise.

    Fixes two defects in the original: numbers below 2 (0, 1, negatives)
    were reported as prime, and an unreachable `break` sat after a
    `return`.  Trial division only needs candidates up to sqrt(number).
    """
    if number < 2:
        return False
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
def get_factors(number):
    """Return the prime factorisation of *number* as an ascending list.

    e.g. get_factors(12) -> [2, 2, 3]; get_factors(7) -> [7];
    get_factors(1) -> [].
    """
    prime_factors = []
    divisor = 2
    while divisor <= number:
        # Pull out every power of the current divisor before moving on,
        # which guarantees only primes ever divide the remainder.
        while number % divisor == 0:
            number //= divisor
            prime_factors.append(divisor)
        divisor += 1
    return prime_factors
def get_non_prime_factors(number):
    """Map every composite n in [2, number] to its list of prime factors,
    e.g. 10 -> {4: [2, 2], 6: [2, 3], 8: [2, 2, 2], 9: [3, 3], 10: [2, 5]}."""
    return {n: get_factors(n) for n in range(2, number + 1) if not isPrime(n)}
# Read the inclusive upper bound, then print the composite -> factors map.
limitRange=int(input("Enter the range: "))
res=get_non_prime_factors(limitRange)
print(res)
| true |
48c75be3e30187323ab138428aaa2724b3a9d7f1 | Python | mapaction/mapactionpy_controller | /mapactionpy_controller/xml_exporter.py | UTF-8 | 4,213 | 2.671875 | 3 | [] | no_license | import os
from dicttoxml import dicttoxml
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
def _check_for_export_metadata(recipe):
"""
Checks for the presence of a number of keys in the `recipe.export_metadata` returned by `_do_export`.
This method does not check for the validity/sanity of any of the values.
raises ValueError: If any of the requried keys are missing.
"""
minimal_keys = {
'themes',
'pdffilename',
'jpgfilename',
'mapNumber',
'title',
'versionNumber',
'summary',
"xmin",
"ymin",
"xmax",
"ymax",
'product-type'
}
missing_keys = minimal_keys.difference(set(recipe.export_metadata.keys()))
if missing_keys:
if len(missing_keys) > 0:
raise ValueError(
'Error creating xml file: `recipe.export_metadata` did not contain the required export_parameters.'
' The missing parameter(s) is/are: {}'.format(', '.join(missing_keys)))
def write_export_metadata_to_xml(recipe):
    """Serialise the recipe's export metadata to `<core_file_name>.xml`
    inside `recipe.export_path` and return the full path written."""
    xml_fname = recipe.core_file_name+".xml"
    xml_fpath = os.path.join(recipe.export_path, xml_fname)
    xmls = _export_metadata_to_xmls(recipe)
    # _export_metadata_to_xmls returns utf-8 bytes, hence mode "wb".
    with open(xml_fpath, "wb") as xml_file:
        xml_file.write(xmls)
    return xml_fpath
def _sort_xml_by_element(xml_str):
"""
Sorts a string represenation of a XML and sorts it by elment name.
Used to make comparision and testing of XML output easier
Based on https://stackoverflow.com/a/47097105
"""
def sort_layer(node):
"""
Recurvisely sort node
"""
# sort the first layer
temp_node = sorted(node, key=lambda child: child.tag)
# sort the second layer
for sub_node in temp_node:
sub_node[:] = sort_layer(sub_node)
return temp_node
tree = ET.ElementTree(ET.fromstring(xml_str))
root = tree.getroot()
# Call recurvise function to sort xml tree starting at root
root[:] = sort_layer(root)
result_xml_str = ET.tostring(root, encoding="utf-8", method="xml")
return result_xml_str.decode("utf-8")
def _export_metadata_to_xmls(recipe):
    """
    returns: a pretty-printed XML representation of the export metadata,
    as utf-8 encoded bytes (elements sorted for deterministic output).
    """
    # First check that the necessary params are included:
    _check_for_export_metadata(recipe)
    # Now create an xml-ready dict:
    export_params_dict = _create_export_params_dict(recipe)
    def get_list_item_name(item_name):
        """
        Returns a custom list item name for know cases
        (e.g. each entry of the 'themes' list becomes a <theme> element).
        """
        if item_name == 'themes':
            return 'theme'
        return item_name
    xml = dicttoxml(export_params_dict, attr_type=False, custom_root='mapdoc', item_func=get_list_item_name)
    # Sorting makes the serialisation order-independent and test friendly.
    xml = _sort_xml_by_element(xml)
    return parseString(xml).toprettyxml(encoding='utf-8')
def _create_export_params_dict(recipe):
    """Merge hard-coded defaults with `recipe.export_metadata` into the
    {'mapdata': {...}} dict expected by the XML serialiser.

    NOTE(review): this MUTATES recipe.export_metadata in place (the
    'exportemf'/'exportDirectory' keys are deleted) -- confirm callers do
    not rely on those keys afterwards.
    """
    # Hard coded default values:
    all_export_metadata = {
        'imagerydate': "",
        'papersize': "A3",
        'access': "MapAction",
        'accessnotes': "",
        'location': "",
        'qclevel': "Automatically generated",
        'proj': "",
        'datasource': "",
        'createdate': "",
        'createtime': "",
        'scale': "",
        'datum': ""
        # ,
        # "language-iso2": recipe.hum_event.language_iso2,
        # "pdfresolutiondpi": recipe.hum_event.default_pdf_res_dpi,
        # "jpgresolutiondpi": recipe.hum_event.default_jpeg_res_dpi,
        # "countries": recipe.hum_event.country_name,
        # "glideno": recipe.hum_event.glide_number,
        # "operationID": recipe.hum_event.operation_id,
        # "sourceorg": recipe.hum_event.default_source_organisation
    }
    # These keys must never leak into the XML output.
    for propertyToRemove in ["exportemf", "exportDirectory"]:
        if propertyToRemove in (recipe.export_metadata):
            del recipe.export_metadata[propertyToRemove]
    # Copy from params
    all_export_metadata.update(recipe.export_metadata)
    # A first version (default when absent) is flagged as a brand-new map.
    versionNumber = int(all_export_metadata.get("versionNumber", 1))
    if (versionNumber == 1):
        all_export_metadata["status"] = "New"
    return {'mapdata': all_export_metadata}
| true |
2a5f31c989365942bfcb96e5f11950250e2a27c1 | Python | tobymyers/codingthematrix | /chapter6/chapter6_problems.py | UTF-8 | 5,495 | 3.109375 | 3 | [] | no_license | #7.5 show row space and column space to be equivalent
from triangular import *
from GF2 import *
from matutil import *
from solver import solve
from The_Basis_problems import *
from independence import *
from vecutil import *
"""A [120]
[021]
row space == [1,0][0,1]
column space == [1,0][0,1]
row space == column space therefore they have the same span
and both span R2
B
[1400]
[0220]
[0011]
row space == [140][020][001]
column space == [100][420][001]
rank rs == rank cs because both are bases therefore linearly indpendent
therefore not superfluous. definition of row rank is rank of basis of rows
C
[1]
[2]
[3]
row basis == [1]
column basis == [1]
this is a point, I think"""
#6.7.5
def morph(S, B):
    """procedure that morphs one set into another using the exchange lemma"""
    # TODO: never implemented (exercise 6.7.5); calling it just returns None.
#6.7.6
def my_is_independent(L):
    """Use rank to test whether the list of vectors L is linearly independent.
    Every basis of Span L has size rank(L), so if len(L) exceeds rank(L)
    some vector in L is superfluous, i.e. there is a linear dependency;
    L is independent exactly when rank(L) == len(L)."""
    return rank(L) == len(L)
L = [list2vec(l) for l in [[2,4,0],[4,5,6],[0,0,7]]]
print(my_is_independent(L))
#6.7.7
def my_rank(L):
    """Use subset_basis to extract a basis for Span L, then return its length.
    Because all bases of a space have the same size, that length is the
    dimension of Span L and therefore rank L."""
    return len(subset_basis(L))
L = [list2vec(l) for l in [[1,2,3],[4,5,6],[1.1,1.1,1.1]]]
print(my_rank(L))
#6.7.8 prove that if a vector space has dimension N then N+1 of it's vectors are linearly dependent.
"""let V be a vector space of dimension N and S be a basis for V. By the morphing lemma, a set of linearly independent vectors contained in the generators for V will have at most
|generators|. By theorem 6.1.3, the smallest set of generators must be a basis. By proposition 6.2.4, rank S (the dimension of the vector space V) <= |S|. So, dim V == dim S == N
All bases have the same size (rank N), and by definition span the entire vector space V. Therefore, any additional vector added to S already exists in the span, and by the definition of
linear dependence is linearly dependent. Any such vector added to S would bring make rank S == N+1. All of S in contained in V, therefore this holds for V as well as S"""
#6.7.9
def is_solution(A, x, b):
    """Return True when x approximately solves A*x = b, i.e. the squared
    residual (b - A*x) . (b - A*x) is within 1e-14 of zero."""
    residual = b - A * x
    return residual * residual <= 10**-14
def direct_sum_decompose(U_basis, V_basis, w):
    """finds the u in U and v in V that were added to create the vector w in the direct sum
    uses the fact that U_basis union V_basis is a basis for V direct sum U """
    U_V_basis = U_basis + V_basis
    A = coldict2mat(U_V_basis)
    u_plus_v = solve(A,w)
    print(u_plus_v)
    print(list(u_plus_v.D))
    # solve() gives a least-squares answer, so confirm it is an exact
    # (to tolerance) solution before attempting to decompose it.
    if not is_solution(A,u_plus_v, w):
        return "not a solution"
    else:
        print(A*u_plus_v)
        list_D = list(u_plus_v.D)
        u_D = set(list_D[:len(U_basis)])
        # NOTE(review): v_D is also built from the FIRST len(V_basis) labels;
        # the offset into the solution happens below via f[k+len(u_D)].  That
        # silently assumes the labels are 0..n-1 and that list(u_plus_v.D)
        # iterates in sorted order -- confirm; it may explain the "weird
        # rounding errors" remarked on at the bottom.
        v_D = set(list_D[:len(V_basis)])
        print(u_D, v_D)
        u_coeffs = Vec((u_D),{k:u_plus_v.f[k] for k in u_D})
        v_coeffs = Vec(v_D,{k:u_plus_v.f[k+len(u_D)] for k in v_D})
        print(u_coeffs, v_coeffs)
        u = coldict2mat(U_basis) * u_coeffs
        v = coldict2mat(V_basis) * v_coeffs
        return [u, v] #running into some very weird rounding errors here, but theory is correct.
U_basis = [list2vec(l) for l in [[2,1,0,0,6,0],[11,5,0,0,1,0],[3,1.5, 0,0,7.5,0]]]
V_basis = [list2vec(l) for l in [[0,0,7,0,0,1],[0,0,15,0,0,2]]]
print(direct_sum_decompose(U_basis, V_basis, list2vec([2,5,0,0,1,0])))
#6.7.10
def is_invertible(M):
    """tests to see if a matrix is invertible based on the criteria that it's square
    and that the columns are linearly independent. Implied is that the rows are also linearly
    independent"""
    cols = mat2coldict(M)
    rows = mat2rowdict(M)
    col_vals = [vec for vec in cols.values()]
    # Square check (#cols == #rows) plus full column rank == invertibility.
    return len(cols) == len(rows) and my_is_independent(col_vals)
m1 = listlist2mat([[1,2,3],[3,1,1]])
m2 = listlist2mat([[1,0,1,0],[0,2,1,0],[0,0,3,1],[0,0,0,4]])
print(is_invertible(m2))
#6.7.13
def find_inverse(GF2_M):
    """finds the inverse of a matrix over GF2, using the fact that a matrix * it's inverse is the identity matrix

    Solves GF2_M * x = e_i for each standard basis vector e_i; the solutions
    are the columns of the inverse. Returns (inverse, identity) where the
    identity product is included so the caller can eyeball correctness.
    """
    assert is_invertible(GF2_M) == True
    cols = mat2coldict(GF2_M)
    # one GF(2) standard-basis vector per column of the matrix
    b_list = [Vec({i for i in range(len(cols))},{i:one}) for i in range(len(cols))]
    print(b_list)
    # NOTE(review): `col` from zip(cols, b_list) is unused; the zip only
    # limits the iteration length.
    inverse_vecs = [solve(GF2_M, b) for (col, b) in zip( cols,b_list)]
    inverse = coldict2mat(inverse_vecs)
    identity = GF2_M * inverse
    return inverse, identity
# permutation-style matrix over GF(2): its inverse is itself
mat = listlist2mat([[0,one,0],[one, 0,0],[0,0,one]])
print(find_inverse(mat))
#6.7.14
def find_triangular_matrix_inverse(A):
    """Invert an upper-triangular matrix A.

    Uses the fact that A * A^-1 is the identity: solves A * x_j = e_j with
    triangular_solve for each standard basis vector e_j; the solutions are
    the columns of the inverse.
    """
    rows = [row for row in mat2rowdict(A).values()]
    labels = {d for d in range(len(rows))}
    b = Vec(labels, {i:1 for i in labels})
    # one right-hand side per column of the identity matrix
    new_b = [Vec({i for i in range(len(rows))},{i:1}) for i in range(len(rows))]
    print(rows, 'rows',list(labels), b)
    result = [triangular_solve(rows, list(labels), b) for b in new_b]
    print(result)
    res_A = coldict2mat(result)
    # res_A * A should print as (approximately) the identity matrix
    print(res_A*A)
    # Fix: the original computed the inverse but never returned it, so the
    # driver below printed None.
    return res_A
A = listlist2mat([[1, .5, .2, 4], [0, 1,.3 ,.9], [0, 0, 1, .1 ], [0,0,0,1]])
print(find_triangular_matrix_inverse(A))
| true |
bfdb488db06a80e55c99deb5dc7d3c338719229b | Python | ryanh153/Morsels | /53_78/72_track_instances/tracker2.py | UTF-8 | 1,075 | 3.0625 | 3 | [] | no_license | from weakref import WeakSet
from functools import partial
def track_instances(cls_or_str):
    """Class decorator that records live instances in a WeakSet attribute.

    Usable bare (``@track_instances`` -> attribute ``instances``) or with a
    custom attribute name (``@track_instances("registry")``).
    """
    def decorate(cls, name):
        # Attach the registry, then wrap __init__ so every new instance
        # registers itself after normal construction.
        setattr(cls, name, WeakSet())
        wrapped_init = cls.__init__

        def __init__(self, *args, **kwargs):
            wrapped_init(self, *args, **kwargs)
            getattr(self, name).add(self)

        cls.__init__ = __init__
        return cls

    if isinstance(cls_or_str, str):
        # Called with a name: hand back a decorator expecting the class.
        return partial(decorate, name=cls_or_str)
    # Called directly on a class: decorate now with the default name.
    return decorate(cls_or_str, name='instances')
class InstanceTracker(type):
    """Metaclass that keeps a WeakSet of every live instance of its classes.

    Iterating a class that uses this metaclass yields its tracked instances.
    """

    def __init__(cls, name, bases, attrs):
        # Let `type` finish building the class, then attach the registry.
        super().__init__(name, bases, attrs)
        cls._instances = WeakSet()

    def __call__(cls, *args, **kwargs):
        # Runs whenever an instance is constructed: build it normally via
        # `type`, then record it before handing it back.
        obj = super().__call__(*args, **kwargs)
        cls._instances.add(obj)
        return obj

    def __iter__(cls):
        return iter(cls._instances)
| true |
3a01bb5caadb454b3664bbaa69597e282773673c | Python | bpowning/SI206-FinalProject-TechYeah | /githubjobs-api-reader.py | UTF-8 | 9,188 | 2.578125 | 3 | [] | no_license | import json
import os
import requests
import sqlite3
import re
from bs4 import BeautifulSoup
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# get directory path and create cache file
# The cache lives next to this script so repeated runs reuse downloads.
path = os.path.dirname(os.path.realpath(__file__))
jobs_file = path + '/' + "cache_jobs.json"
# if there is data in the cache file, load the content into a dictionary or leave it empty
def read_cache(cache_file):
    """Return the parsed JSON cache as a dict; {} if the file is missing or corrupt.

    :param cache_file: path to the JSON cache file
    """
    try:
        # 'with' fixes the original's leaked handle when read()/loads() raised
        with open(cache_file, 'r') as file:
            return json.loads(file.read())
    except (OSError, ValueError):
        # narrows the original bare 'except:': missing/unreadable file or
        # invalid JSON both mean "start with an empty cache"
        return {}
# writes the JSON to the cache file and saves the results
def write_cache(cache_file, cache_dict):
    """Serialize cache_dict as JSON to cache_file (resolved next to this module).

    :param cache_file: file name or path (joined onto this module's directory)
    :param cache_dict: the cache mapping to persist
    """
    file_path = os.path.join(os.path.dirname(__file__), cache_file)
    # 'with' guarantees the handle is flushed and closed; the original
    # opened the file and never closed it.
    with open(file_path, 'w') as file:
        file.write(json.dumps(cache_dict))
# functions to create a url based on location
def url_by_location(location):
    """GitHub Jobs query URL filtering positions by location."""
    return "https://jobs.github.com/positions.json?location={}".format(location)
def url_by_description(description):
    """GitHub Jobs query URL filtering positions by description keyword."""
    return "https://jobs.github.com/positions.json?description={}".format(description)
# get data from an API call
def get_data(base_url, cache_file = jobs_file):
    """Return the JSON payload for base_url, fetching and caching on a miss."""
    cache_dict = read_cache(cache_file)
    if base_url not in cache_dict:
        # cache miss: hit the API once and persist the parsed response
        response = requests.get(base_url)
        cache_dict[base_url] = json.loads(response.text)
        write_cache(cache_file, cache_dict)
    return cache_dict[base_url]
# add data to cache_jobs.json
# Warm the cache with one query per city and one per keyword; get_data only
# hits the network for URLs not already cached.
cities = ["newyork", "boston", "chicago", "sanfrancisco", "losangeles", "seattle", "phillidelphia", "newjersey", "detroit", "texas"]
for city in cities:
    base_url = url_by_location(city)
    get_data(base_url)
types = ["engineer", "developer", "remote"]
for typ in types:
    base_url = url_by_description(typ)
    get_data(base_url)
# read the cache file
# NOTE(review): this uses the relative name while jobs_file is absolute —
# works only when run from the script's directory; confirm intended.
job_data = read_cache('cache_jobs.json')
# =======================================================================
# set up the database
path = os.path.dirname(os.path.abspath(__file__))
conn = sqlite3.connect(path + '/' + "coronavirus.db")
cur = conn.cursor()
# set up a table called Job Type for all types of job listings
# (dropped and rebuilt on every run so ids stay stable)
cur.execute("DROP TABLE IF EXISTS JobType")
cur.execute("CREATE TABLE IF NOT EXISTS JobType (id INTEGER PRIMARY KEY, type TEXT)")
job_types = ["analyst", "python", "developer", "engineer"]
for i in range(len(job_types)):
    cur.execute("INSERT INTO JobType (id,type) VALUES (?,?)",(i,job_types[i]))
conn.commit()
# set up a table called JobListings for all 2020 job listings
cur.execute("DROP TABLE IF EXISTS JobListings")
cur.execute("CREATE TABLE IF NOT EXISTS JobListings (job_id TEXT PRIMARY KEY, title TEXT, company TEXT, location TEXT, type_id INTEGER, date TEXT, description TEXT, application TEXT, remote BOOL)")
job_ids = [] # making sure there is no repeat of jobs
for city in job_data:
    for job in job_data[city]:
        if job['id'] in job_ids:
            continue
        job_id = job['id']
        title = job['title']
        company = job['company']
        location = job['location']
        # date is a string of month, year (e.g. "Apr, 2020")
        date_list = job['created_at'].split()
        date = date_list[1] + ", " + date_list[-1]
        # classify the listing by keywords in its title
        type_name = job['title'].lower()
        # Fix: 'typ' was previously left unbound (NameError) or stale from
        # the prior iteration when no keyword matched.
        typ = None
        if "analyst" in type_name:
            typ = "analyst"
        elif "python" in type_name:
            typ = "python"
        elif "engineer" in type_name:
            typ = "engineer"
        elif "developer" in type_name:
            typ = "developer"
        if typ is None:
            # no recognized category: skip instead of crashing or
            # misclassifying with the previous job's type
            continue
        cur.execute("SELECT id FROM JobType WHERE type = ?", (typ,))
        type_id = cur.fetchone()[0]
        # strip HTML from the description and application instructions
        soup = BeautifulSoup(job['description'], 'html.parser')
        description = soup.get_text()
        soup = BeautifulSoup(job['how_to_apply'], 'html.parser')
        application = soup.get_text()
        # a listing counts as remote if the word appears in description or title
        remote = 'remote' in description or 'remote' in title
        cur.execute("INSERT INTO JobListings (job_id, title, company, location, type_id, date, description, application, remote) VALUES (?,?,?,?,?,?,?,?,?)", (job_id, title, company, location, type_id, date, description, application, remote))
        job_ids.append(job_id)
conn.commit()
# =======================================================================
# functions for calculations
# takes a str month and an int year and returns a list of the job titles
# NOTE: the cur=cur / conn=conn defaults bind the module-level cursor and
# connection at definition time.
def get_listings_by_date(month, year, cur= cur, conn = conn):
    # dates are stored as "Mon, YYYY" (first three letters of the month)
    month_code = month[:3] + ", " + str(year)
    cur.execute("SELECT title FROM JobListings WHERE date = ?", (month_code,))
    conn.commit()  # no-op after a SELECT; kept for byte-compatibility
    return cur.fetchall()
# takes a str month and an int year and returns a list of remote job titles
def remote_listings_by_date(month, year, cur= cur, conn = conn):
    month_code = month[:3] + ", " + str(year)
    cur.execute("SELECT title FROM JobListings WHERE remote = 1 AND date = ?", (month_code,))
    return cur.fetchall()
# Number of remote listings posted in a given month.
def remote_total_by_month(month, year):
    return len(remote_listings_by_date(month, year))
# Monthly remote-listing counts, in the order the months are given.
def month_totals_list(month_list, year):
    return [remote_total_by_month(month, year) for month in month_list]
# Total remote listings across all the given months of a year.
def yearly_total(month_list, year):
    return sum(month_totals_list(month_list, year))
# returns total remote listings
def get_total_remote():
    cur.execute("SELECT title FROM JobListings WHERE remote = 1")
    return len(cur.fetchall())
# returns percentage of remote listings (as a 0..1 fraction)
# NOTE(review): raises ZeroDivisionError if JobListings is empty.
def get_percent_remote():
    total_remote = get_total_remote()
    cur.execute("Select * FROM JobListings")
    total_listings = len(cur.fetchall())
    return total_remote/total_listings
# returns job listings by job type (joins through the JobType lookup table)
def get_listings_by_type(typ):
    cur.execute("SELECT JobListings.title FROM JobListings, JobType WHERE JobType.id = JobListings.type_id AND JobType.type = ?", (typ,))
    return cur.fetchall()
# returns number of job listings by job type
def num_listings_by_type(typ):
    return len(get_listings_by_type(typ))
#========================================================================
# code to use these functions to calculate stuff from the data base
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
remote2018_list = month_totals_list(months, 2018)
remote2019_list = month_totals_list(months, 2019)
remote2020_list = month_totals_list(months[:4], 2020)
remote2018 = yearly_total(months, 2018)
remote2019 = yearly_total(months, 2019)
remote2020 = yearly_total(months[:4], 2020)
total_remote = get_total_remote()
percent_remote = get_percent_remote()
total_analyst = num_listings_by_type("analyst")
total_python = num_listings_by_type("python")
total_developer = num_listings_by_type("developer")
# Fix: this previously queried "analyst" again, so total_engineer silently
# duplicated total_analyst instead of counting engineer listings.
total_engineer = num_listings_by_type("engineer")
# =======================================================================
# creating visualizations
# remote jobs by month 2020 (bar chart, saved then shown)
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
ax.bar(months[:4], remote2020_list, color = 'lightblue')
ax.set_xlabel('month')
ax.set_ylabel('number of remote job listings')
ax.set_title('Remote job listings in 2020 by month ')
fig.savefig('remote2020.png')
plt.show()
# remote jobs by month 2020 and NYT covid mentions by month 2020 (in hundreds)
cur.execute('SELECT coronavirus_hits FROM NYTPostData')
hits_by_month = cur.fetchall()
hits = []
for hit in hits_by_month[3:]:
    # scale mention counts to hundreds so both series share one axis
    hits.append(hit[0]/100)
fig, ax = plt.subplots()
N = 4  # Jan-Apr 2020
width = 0.35
ind = np.arange(N)
b1 = ax.bar(ind, remote2020_list, width, color='lightblue')
b2 = ax.bar(ind + width, hits, width, color='darkblue')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('Jan', 'Feb', 'Mar', 'Apr'))
# Fix: the labels were swapped relative to the handles — b1 is the remote
# listings series and b2 is the NYT mention counts.
ax.legend((b1[0],b2[0]), ('# of remote job listings', '# of NYT COVID-19 Mentions'))
ax.autoscale_view()
ax.set(xlabel='month', title='Remote job listings vs NYT mentions (in hundreds) by month 2020')
fig.savefig("remoteListings_NYTmentions.png")
plt.show()
d1fcdce735c3f2b86e7828c8f24c1cf4d10670c8 | Python | OptimalDesignLab/ElasticNozzleMDO | /plots/nozzle.py | UTF-8 | 3,173 | 2.75 | 3 | [] | no_license | import argparse
import numpy as np
import matplotlib.pyplot as plt
# Render all text with LaTeX using the Computer Modern sans-serif fonts.
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [r'\usepackage[cm]{sfmath}']
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'cm'
plt.rcParams['axes.facecolor']='white'
plt.rcParams['savefig.facecolor']='white'
parser = argparse.ArgumentParser(description='Plot initial or final nozzle.')
parser.add_argument('--init', action='store_true')
parser.add_argument('--show', action='store_true')
args = parser.parse_args()
# set some formating parameters
axis_fs = 8 # axis title font size
axis_lw = 1.0 # line width used for axis box, legend, and major ticks
label_fs = 8 # axis labels' font size
# get data to plot: --init selects the initial design, otherwise the solution
file_dir = '../IDF/121node/20dv'
if args.init:
    in_file = '%s/init_pressure_area.dat'%file_dir
else:
    in_file = '%s/quasi1d.dat'%file_dir
# NOTE(review): file handle is never closed; harmless for a one-shot script.
data = open(in_file, 'r')
# columns: position, area, density, momentum, energy, pressure, target
# pressure, velocity, Mach number, exact Mach number
[x, A, rho, rhou, e, p, p_targ, u, Mach, Mach_exact] = \
    np.loadtxt(data, skiprows=3, unpack=True)
# set figure size in inches, and create a single set of axes
fig = plt.figure(figsize=(4, 3), dpi=300)
ax = fig.add_subplot(111)
# plot the data
ms = 4.0 # marker size
mew = 0.75 # marker edge width
lw = 0.75 # line width
press, = ax.plot(x, p, '-k', linewidth=lw)
# target pressure: dotted line with open square markers
press_targ, = ax.plot(
    x, p_targ, ':ks', linewidth=2*lw, ms=ms, mfc='w', mew=mew, markevery=1)
# Tweak the appearance of the axes
ax.set_xlabel('x', fontsize=axis_fs, weight='bold')
ax.set_ylabel('Pressure', fontsize=axis_fs, weight='bold', labelpad=6)
ax.grid(which='major', axis='y')
ax.axis([0.0, 1.0, 0.99*min(p_targ), 1.01*max(p_targ)]) # axes ranges
# adjust gridlines: dotted horizontal grid only
gridlines = ax.get_ygridlines()
for line in gridlines:
    line.set_linestyle(':')
# ticks on bottom and left only
ax.xaxis.tick_bottom() # use ticks on bottom only
ax.yaxis.tick_left()
for label in ax.xaxis.get_ticklabels():
    label.set_fontsize(label_fs)
for label in ax.yaxis.get_ticklabels():
    label.set_fontsize(label_fs)
# define and format the minor ticks
ax.xaxis.set_ticks(np.arange(0, 1.0, 0.1), minor=True)
ax.xaxis.set_tick_params(which='minor', length=3, width=2.0*axis_lw/3.0)
# Now add second axis for the nozzle area (dashed line, right-hand scale)
ax2 = ax.twinx()
area, = ax2.plot(x, A, '--k', linewidth=lw)
ax2.set_ylabel('Area', fontsize=axis_fs, weight='bold', labelpad=12, rotation=270)
ax2.axis([0.0, 1.0, 0.95*1.49, 1.05*2.])
# area ticks on the right only
ax2.yaxis.tick_right()
for label in ax2.yaxis.get_ticklabels():
    label.set_fontsize(label_fs)
# plot and tweak the legend (one entry per plotted series)
ax.legend(
    (press, press_targ, area),
    ('Pressure', 'Targ. press.', 'Nozzle Area'),
    loc='upper right', numpoints=1, fontsize=label_fs, labelspacing=0.75,
    borderpad=0.75, handlelength=3, fancybox=False, framealpha=1.0, edgecolor='k')
# NOTE(review): hard-coded absolute path to the author's machine — adjust
# before running elsewhere.
save_dir = '/Users/denera/Documents/RPI/Optimal Design Lab/IDF-RSNK-journal'
if args.init:
    out_name = 'init_nozzle'
else:
    out_name = 'final_nozzle'
# save both vector (eps) and raster (png) versions of the figure
plt.savefig('%s/%s.eps'%(save_dir, out_name), format='eps', dpi=300,
    bbox_inches='tight')
plt.savefig('%s/%s.png'%(save_dir, out_name), format='png',
    bbox_inches='tight')
if args.show:
    plt.show()
plt.close() | true |
c840dfd749f5b168417c594ba509e40848f5ebeb | Python | TAPAlves/Trabalho-Lab-Algoritmos | /blast_analise.py | UTF-8 | 1,510 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 06 17:44:27 2015
@author: Tiago
"""
#importacao de dados
from Bio.Blast import NCBIXML
from Bio import SeqIO
# Load the GenBank record once; all features are partitioned below.
record = SeqIO.read("sequence.gb", "genbank")
# sort the record's features into separate lists: CDS / gene / everything else
featcds = [ ]
featgene=[]
outrasfeat=[]
for feat in range(len(record.features)):
    if record.features[feat].type == "CDS":
        featcds.append(record.features[feat])
    elif record.features[feat].type=="gene":
        featgene.append(record.features[feat])
    else:
        outrasfeat.append(record.features[feat])
tamanhos=[]#tamanhos das sequencias da query
for i in range(len(featcds)):
result_handle = open("Blast/ficheiro_blast_proteinas"+str(i)+".xml","r")
tamanhos.append(len(featcds[i].qualifiers["translation"][0]))
E_VALUE_THRESH = 1
blast_record = NCBIXML.read(result_handle)
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
if hsp.expect < E_VALUE_THRESH:
print "****Alinhamento****"
print "####Proteina %s- locus_tag: %s####"%(i,featcds[i].qualifiers["locus_tag"][0])
print 'Sequencia:', alignment.title
print 'Score:', hsp.bits
print 'e-value:', hsp.expect
print "Tamanho do alinhamento:",hsp.align_length
print "Cobertura da query:",float(hsp.align_length)/tamanhos[i]
result_handle.close()
| true |
8c6913795ab9650f61b7e6209bac5a82f5b3a63a | Python | dyanfee/code | /84.柱状图中最大的矩形.py | UTF-8 | 342 | 2.984375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=84 lang=python3
#
# [84] 柱状图中最大的矩形
#
# @lc code=start
class Solution:
    def largestRectangleArea(self, heights: List[int]) -> int:
        """Largest rectangle area in a histogram via a monotonic index stack, O(n).

        The original version only pushed heights onto a stack and never
        computed or returned an answer; this completes the standard algorithm.
        """
        best = 0
        stack = []  # indices of bars with non-decreasing heights
        for i in range(len(heights) + 1):
            # a height-0 sentinel past the end flushes every bar off the stack
            h = heights[i] if i < len(heights) else 0
            while stack and heights[stack[-1]] > h:
                height = heights[stack.pop()]
                # the popped bar spans from just after the new stack top to i-1
                left = stack[-1] if stack else -1
                best = max(best, height * (i - left - 1))
            stack.append(i)
        return best
# @lc code=end
| true |
caec4148e2384195522b01ff01766f0be404d15f | Python | microsoft/ContextualSP | /lemon/executor/strongsup/tests/test_value_function.py | UTF-8 | 2,320 | 2.59375 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | import numpy as np
import pytest
import tensorflow as tf
from gtd.ml.framework import Feedable
from gtd.ml.utils import guarantee_initialized_variables
from strongsup.value_function import LogisticValueFunction, ValueFunctionExample
from strongsup.utils import OptimizerOptions
class DummyParseModel(Feedable):
    """Minimal stand-in parse model: encodings = fixed inputs @ weights."""
    def __init__(self, weights):
        self._weights = tf.Variable(weights, dtype=tf.float32)
        # Batch size x Embedding size
        self._placeholder = tf.placeholder(tf.float32, shape=[None, 2])
        self._case_encodings = tf.matmul(self._placeholder, self._weights)

    @property
    def case_encodings(self):
        # Tensor of per-case encodings consumed by the value function.
        return self._case_encodings

    def inputs_to_feed_dict(self, cases, ignore_previous_utterances, caching):
        # Ignore cases, ignore_previous_utterances, and caching — always feed
        # the same fixed 2x2 batch so tests are deterministic.
        dummy_parse_model_inputs = np.array([[1.0, 2.0], [2.0, 3.0]])
        return {self._placeholder: dummy_parse_model_inputs}
# pytest fixtures shared by the tests below
@pytest.fixture
def weights():
    # 2x2 permutation matrix used to initialize the dummy parse model
    return np.array([[0.0, 1.0], [1.0, 0.0]])

@pytest.fixture
def dummy_cases():
    # Never gets used — DummyParseModel ignores its cases argument.
    return [1, 2]

@pytest.fixture
def rewards():
    # one reward per dummy case
    return [1.0, 0.0]

@pytest.fixture
def value_function(weights):
    # value function under test: logistic head over the dummy encodings
    return LogisticValueFunction(
        DummyParseModel(weights), 0.01, OptimizerOptions("adam"))
def test_value_function(value_function, weights, dummy_cases, rewards):
    """Training should reduce the loss without touching the parse model weights."""
    sess = tf.InteractiveSession()
    guarantee_initialized_variables(sess)

    fetch = {
        "loss": value_function._loss
    }
    feed_dict = value_function.inputs_to_feed_dict(dummy_cases, rewards)

    # Test that the loss decreases after taking a train step
    loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
    values = value_function.values(dummy_cases)
    for i in range(10):
        vf_examples = [ValueFunctionExample(c, r) for c, r in zip(dummy_cases, rewards)]
        value_function.train_step(vf_examples)
    new_loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
    # NOTE(review): `values`/`new_values` are computed but never asserted on.
    new_values = value_function.values(dummy_cases)
    assert new_loss < loss

    # Test that the weights didn't propagate to the ParseModel
    fetch = {
        "weights": value_function._parse_model._weights
    }
    model_weights = sess.run(fetch, feed_dict=feed_dict)["weights"]
    assert np.array_equal(model_weights, weights)
| true |
3d256dd9637688ccd391d001762f495397888ccc | Python | MartinKlapacz/PacManHunter | /window_var.py | UTF-8 | 521 | 2.546875 | 3 | [] | no_license | import pygame
#alle möglichen Variablen sind in diesem Modul
game_size = (750,500)
game_running = True
time = 0
game_window = pygame.display.set_mode(game_size)
y_position_bottom = int(0.8 * game_size[1])
flight_level = int(0.75 * game_size[1])
FPS = 60
#Farben
red = (255,0,0)
green = (0,255,0)
blue = (100,0,255)
black = (0,0,0)
white = (255,255,255)
player_color = red
enemy_color = black
ground_color = green
x_change_player = 0
y_change_player = 0
last_collision_player_enemy = 0
| true |
85805c1fe6c66be065bbdd12117046ff201892d2 | Python | JeromeBlanchet/StatCan-WDS | /scripts/GetTable.py | UTF-8 | 967 | 2.828125 | 3 | [] | no_license | """
Short Python script to download StatCan data tables using the Web Data Service.
Last modified by Joseph Kuchar, Oct 1, 2020
"""
import requests
#helper function to clean the product id a bit (remove hyphens, cut trailing characters)
def PID_Cleaner(s):
    """Normalize a StatCan product id to its 8-digit string form.

    Accepts an int or str; strips hyphens, and drops the trailing two
    characters of a 10-character id (the vector/coordinate suffix).
    """
    # isinstance is the idiomatic check (the original used type(s)==int)
    if isinstance(s, int):
        s = str(s)
    s = s.replace('-', '')
    if len(s) == 10:
        s = s[0:8]
    return s
def getFullTable(pid,path=''):
    """Download the full-table CSV zip for a StatCan product id and save it.

    :param pid: product id (int or str, hyphens allowed)
    :param path: optional output directory; the file is saved as <path><pid>.zip
    :raises ValueError: if the WDS does not report SUCCESS
    """
    pid = PID_Cleaner(pid)
    print('cleaning pid {}'.format(pid))
    url='https://www150.statcan.gc.ca/t1/wds/rest/getFullTableDownloadCSV/{}/en'.format(pid)
    R=requests.get(url)
    response=R.json()
    if response['status']!='SUCCESS':
        raise ValueError('Failed to retrieve valid response')
    # follow the download link the WDS returned
    url2=response['object']
    # Fix: removed proxies=credentials.proxy — `credentials` is never
    # defined/imported in this module, so that call raised NameError.
    T=requests.get(url2)
    # Fix: the original used bitwise '~' instead of 'not', so the endswith
    # check was always truthy and directories ending in '/' got a double slash.
    if path != '' and not path.endswith('/'):
        path+='/'
    # 'with' closes the output file even if the write fails
    with open(path+pid+'.zip','wb') as g:
        g.write(T.content)
#S=2510005501
#getFullTable(S)
| true |
8a044b3505a4391028024146253b3582959ee18a | Python | jana0601/icu-predictions | /preprocessing.py | UTF-8 | 5,963 | 3.15625 | 3 | [] | no_license | from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, RobustScaler
import data
import pandas as pd
import numpy as np
import warnings
# We need to standardize them together
def standardize_features(train_features: pd.DataFrame, test_features: pd.DataFrame):
    """
    Scale train and test features jointly with a RobustScaler so both sets
    share the same centering/scaling statistics.
    :param train_features: training features, indexed by pid
    :param test_features: test features, indexed by pid
    :return: (train_features_scaled, test_features_scaled) as dataframes,
        split back out of the jointly scaled set by their original indices
    """
    scaler = RobustScaler()
    large_set = pd.concat([train_features, test_features])
    matrix = scaler.fit_transform(large_set)
    # write the scaled values back so index/columns are preserved
    large_set.loc[:] = matrix
    train_features_imputed = large_set.loc[train_features.index]
    test_features_imputed = large_set.loc[test_features.index]
    return train_features_imputed, test_features_imputed
# We should impute them together to get a more general mean for missing values
def impute_features(train_features: pd.DataFrame, test_features: pd.DataFrame):
    """Median-impute remaining NaNs over the concatenated train+test set,
    then split the result back out by the original indices."""
    large_set = pd.concat([train_features, test_features])
    imp = SimpleImputer(strategy='median', missing_values=np.nan)
    matrix = imp.fit_transform(large_set)
    # write imputed values back in place to keep the index/columns
    large_set.loc[:] = matrix
    train_features_imputed = large_set.loc[train_features.index]
    test_features_imputed = large_set.loc[test_features.index]
    return train_features_imputed, test_features_imputed
def verify_mean(processed_features, target_mean=0):
    """
    Check the per-column mean against target_mean and warn on deviations.
    :param processed_features: the features as a dataframe (uses .values)
    :param target_mean: the mean you want to have. Default=0
    """
    column_means = np.average(processed_features.values, axis=0)
    # count columns whose mean is off by more than the tolerance
    off_count = sum(1 for mean in column_means if np.abs(mean - target_mean) > 1e-3)
    if off_count != 0:
        warnings.warn('%.d columns have not target mean' % off_count)
def verify_std(processed_features, target_std=1):
    """
    Check the per-column standard deviation against target_std and warn on
    deviations.
    :param processed_features: the features as a dataframe (uses .values)
    :param target_std: the std you want to have. Default = 1
    """
    column_stds = np.std(processed_features.values, axis=0)
    # count columns whose std is off by more than the tolerance
    off_count = sum(1 for std in column_stds if np.abs(std - target_std) > 1e-3)
    if off_count != 0:
        warnings.warn('%.d columns have not target standard deviation' % off_count)
# this only takes one set of features because we impute every patient by themselves.
def prepare_features(features: pd.DataFrame = None, appendix: str = None, read_from_file= False):
    """
    Flatten the measurements for every patient
    :param features: features as dataframe
    :param appendix: for the filename when the features are being saved
    :param read_from_file: if True, load the reshaped features from csv instead
    :return: the prepared features as dataframe, one row per pid:
        ['Age', <label>_0 ... <label>_11]
    """
    if read_from_file:
        df = pd.read_csv('transformed_data/reshaped_features_' + appendix + '.csv', index_col='pid')
        return df
    pids = features['pid'].drop_duplicates()
    labels_per_patient = features.columns.drop(['Age','pid', 'Time'])
    # get one row per patient
    ages = features.drop_duplicates(subset='pid')
    ages.set_index('pid', inplace=True)
    # now only keep one column, the age.
    ages = ages.loc[:,['Age']]
    # From the task description we know that every patient has 12 timestamps but let's check
    # (the first patient's row count is assumed representative of all patients)
    data_for_patient_1 = features.loc[features['pid'] == pids[0], :]
    number_of_timestamps = data_for_patient_1.shape[0]
    # build the flattened column labels: Age, then <label>_<timestep>
    labels = ['Age']
    for index in range(number_of_timestamps):
        new_labels = [ label + "_" + str(index) for label in labels_per_patient.values]
        labels.extend(new_labels)
    new_features = pd.DataFrame(index=pids, columns=labels)
    for patient in pids.values:
        imp = SimpleImputer(strategy='mean', missing_values=np.nan)
        # first, retrieve the rows that are for this patient
        sub_frame = features.loc[features['pid'] == patient]
        # now lets find how many nans we have for every measurement
        # NOTE(review): the comprehension variable `sum` shadows the builtin
        sum_of_nans = sub_frame.isna().sum()
        # for every measuremnt, determine if we need to impute in the first place:
        # if we have all nans, this column would be dropped which is not what we want.
        impute_indices = [False if sum==number_of_timestamps else True for sum in sum_of_nans]
        mask = pd.array(impute_indices, dtype="boolean")
        # this has only the columns where not all the values are nan, this is where we will impute
        frame_to_impute = sub_frame.iloc[:,mask]
        imputed_matrix = imp.fit_transform(frame_to_impute)
        imputed_frame = pd.DataFrame(imputed_matrix, index=frame_to_impute.index, columns=frame_to_impute.columns)
        # now insert the imputed columns in the old dataframe. We will still have nans here, but they will be imputed
        # at a later stage when all patients are combined.
        new_sub_frame = sub_frame.copy()
        for index,label in enumerate(sub_frame.columns):
            if impute_indices[index]:
                new_sub_frame.loc[:,label] = imputed_frame[label]
        # sort and flatten for this patient
        # NOTE(review): `sorted` shadows the builtin here
        sub_frame = new_sub_frame
        sorted = sub_frame.sort_values('Time')
        sorted = sorted.drop(['pid', 'Time', 'Age'], axis='columns')
        matrix = sorted.values
        vector = matrix.flatten()
        # prepend the patient's (time-invariant) age
        vector = np.insert(vector,0,ages.at[patient,'Age'])
        # insert this patient into the dataframe
        new_features.loc[patient,:] = vector
    return new_features
# raw_data = data.get_training_features()
# prepared_features = prepare_features(features=raw_data, appendix = 'train', read_from_file=False)
| true |
501522d72623e762b9a3705ec15852a25dc0d996 | Python | timota/webapp | /app.py | UTF-8 | 1,512 | 3 | 3 | [] | no_license | # flask app
from flask import Flask
# need to parse the request data
from flask import request
# connect Python Flask with MongoDB
from pymongo import MongoClient
from bson.json_util import dumps
# import json module to load the request.data
import json
# date module
import datetime
# read config file (expects JSON with "server" and "port" keys)
# NOTE(review): if the read fails, only a message is printed and `data`
# stays undefined, so the MongoClient line below raises NameError.
try:
    with open('app.conf') as json_data_file:
        data = json.load(json_data_file)
except:
    print('Cannot read config file')
# MongoDB connector
client = MongoClient(data['server'], int(data['port']))
db = client.ContactDB
# start Flask
app = Flask(__name__)
# add routers
# POST - add data to db
@app.route("/add", methods = ['POST'])
def add():
    """Store an event (JSON body: {"event": ...}) with a UTC timestamp."""
    try:
        # get our data from request and decode it
        # (renamed from `data`, which shadowed the module-level config dict)
        payload = json.loads(request.data.decode('utf-8'))
        event = payload['event']
        # Fix: the original returned SUCCESS even when the event was falsy
        # and nothing was inserted.
        if not event:
            return dumps({'error' : 'empty "event" field'})
        db.Contacts.insert_one({
            "event" : event,
            "date" : datetime.datetime.utcnow()
        })
        # insert_one raises on failure, so reaching here means success
        return dumps({'message' : 'SUCCESS'})
    except Exception as e:
        # covers malformed JSON, a missing "event" key and database errors
        return dumps({'error' : str(e)})
# GET - lets get our data from DB
@app.route("/get_all_events", methods = ['GET'])
def get_all_events():
    """Return every stored event as a BSON-encoded JSON array."""
    try:
        contacts = db.Contacts.find()
        return dumps(contacts)
    except Exception as e:
        # broad catch is acceptable at this API boundary: report, don't crash
        return dumps({'error' : str(e)})
| true |
e8c5cbf99e1a2c50a4d6cdefade913ad1d594569 | Python | jiadaizhao/LeetCode | /1001-1100/1065-Index Pairs of a String/1065-Index Pairs of a String.py | UTF-8 | 745 | 3.171875 | 3 | [
"MIT"
] | permissive | import collections
class TrieNode:
    """Trie node: child map (auto-creating) plus an end-of-word flag."""
    def __init__(self):
        self.children = collections.defaultdict(TrieNode)
        self.isEnd = False

class Solution:
    def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:
        """All [i, j] with text[i..j] equal to some word, found via a trie."""
        # Build a trie over the words so every start index needs one scan.
        root = TrieNode()
        for word in words:
            node = root
            for ch in word:
                node = node.children[ch]
            node.isEnd = True

        pairs = []
        n = len(text)
        for start in range(n):
            # Walk the trie as far as text matches; every end-marker hit
            # along the way is a complete word.
            node = root
            end = start
            while end < n and text[end] in node.children:
                node = node.children[text[end]]
                if node.isEnd:
                    pairs.append([start, end])
                end += 1
        return pairs
| true |
9e7ab12a590bc2490d02269c490583c2dbee0734 | Python | 0xShone/PythonWeb_origin | /String/test11-1.py | UTF-8 | 223 | 3.4375 | 3 | [] | no_license | #coding: UTF-8
# Python 2 demo of string slicing semantics.
# NOTE(review): the variable name `str` shadows the builtin type.
str = "ABCDE"
print u"対象文字列 " + str
print "[1:3]  " + str[1:3]
print "[1:-1] " + str[1:-1]
print "[1:]   " + str[1:]
print "[:2]   " + str[:2]
print "[:]    " + str[:]
print "[-2:5] " + str[-2:5]
| true |
0c942fc4c7ff9e6969442d20bb7c5c96525c0f40 | Python | HaoyangCui0830/SearchingAgent | /search/search.py | UTF-8 | 11,670 | 3.390625 | 3 | [] | no_license | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from util import Stack
from util import PriorityQueue
from util import manhattanDistance
# iterative-deepening depth cap and weighted-A* constants
maxDepth = 500
infinit = 10000000
Weight = 2
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        # every method raises: concrete problems must override all four
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    # hard-coded action list; `problem` is intentionally ignored
    from game import Directions
    s = Directions.SOUTH
    w = Directions.WEST
    return [s, s, w, s, w, w, s, w]
# Unimplemented practice stubs: each raises via util.raiseNotDefined().
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.
    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.
    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    print("Start:", problem.getStartState())
    print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
    print("Start's successors:", problem.getSuccessors(problem.getStartState()))
    """
    "*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***"
    util.raiseNotDefined()
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    "*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***"
    util.raiseNotDefined()
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***"
    util.raiseNotDefined()
def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the nearest
    goal in the provided SearchProblem. This heuristic is trivial.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***"
    util.raiseNotDefined()
# module-level visited set shared with deepthLimitSearch; cleared per depth
visited_list = []
def iterativeDeepeningSearch(problem):
    """Search the deepest node in an iterative manner.

    Runs depth-limited DFS with limits 0..maxDepth-1; the first limit that
    reaches a goal yields the action list (actions are collected on `stack`
    in reverse order by the recursive helper). Returns None if no solution
    is found within maxDepth.
    """
    "*** YOUR CODE HERE FOR TASK 1 ***"
    #print("Start:", problem.getStartState())
    #print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
    ##print("Start's successors:", problem.getSuccessors((22,16)))
    ActionList = []
    ##return ['East','West', 'East','West', 'East','West', 'East']
    for limit in range(maxDepth):
        stack = Stack()
        #visited_list = Stack()
        # reset the shared visited set before each deeper pass
        visited_list.clear()
        #print(limit)
        if deepthLimitSearch(stack,problem, problem.getStartState(), limit ) == True:
            # drain the stack (goal-to-start order), then reverse it
            while stack.isEmpty() == False:
                ActionList.append(stack.pop())
            ActionList.reverse()
            #print(ActionList)
            return ActionList
    ##util.raiseNotDefined()
""" should add visited list """
def deepthLimitSearch(stack, problem, state, limit):
if state in visited_list and visited_list is not None:
return False
else:
#visited_list.push(state)
visited_list.append(state)
if problem.isGoalState(state):
return True
if limit <= 0 :
return False
for i in problem.getSuccessors(state):
stack.push(i[1])
if deepthLimitSearch(stack, problem, i[0], limit-1 )==True:
return True
else:
stack.pop()
#visited_list.pop()
visited_list.pop()
return False
class NodeState:
    """Search-tree bookkeeping record: a state, its parent, and path-cost data."""

    def __init__(self, node_state, parent_state, parent_g, cost, action):
        self.state = node_state      # the state this record describes
        self.father = parent_state   # parent state (None for the root)
        self.cost = cost             # step cost of the edge from the parent
        self.action = action         # action taken from the parent to get here
        self.g = parent_g + cost     # accumulated path cost from the start
def Get_g(NodeStateList, node_state):
    """Return the g-value of the first record matching node_state (None if absent)."""
    return next((record.g for record in NodeStateList if record.state == node_state),
                None)
def Get_Parent(NodeStateList, node_state):
    """Return the parent state of the first record matching node_state (None if absent)."""
    return next((record.father for record in NodeStateList if record.state == node_state),
                None)
def Get_Action(NodeStateList, node_state, node_father):
    """Return the action of the first record matching both state and parent (None if absent)."""
    return next((record.action
                 for record in NodeStateList
                 if record.state == node_state and record.father == node_father),
                None)
def ReverseAction(action):
    """Return the opposite compass direction (None for unrecognised input)."""
    opposites = {"West": "East", "East": "West", "North": "South", "South": "North"}
    return opposites.get(action)
def getLocalSuccessors(NodeStateList, node_father):
    """Return (state, action, cost) triples for every record whose parent is node_father."""
    return [(record.state, record.action, record.cost)
            for record in NodeStateList
            if record.father == node_father]
def waStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has has the weighted (x 2) lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE FOR TASK 2 ***"
    # NOTE(review): `Weight`, `infinit` and `PriorityQueue` are not defined in
    # this chunk -- presumably module-level globals / util imports; confirm.
    TotalActions = []
    solved = False
    asked_nodes = []      # states whose successors were already queried once
    recorded_nodes = []   # NodeState records cached from those queries
    # Outer loop: repeat legs of weighted A* until the real goal is reached
    # (problems exposing intermediate goals restart from the last goal found).
    while True:
        ActionList = []
        open = PriorityQueue()   # NOTE(review): shadows the builtin open()
        node = None
        nodes = []
        nodes.append( NodeState(problem.getStartState(), None, 0, 0, None) )
        open.push(problem.getStartState(), 0 + Weight * heuristic(problem.getStartState(), problem) )
        closed = []
        best_g = {problem.getStartState():0}
        while open.isEmpty() == False:
            node = open.pop()
            # Expand if unseen, or re-open if reached with a better g
            # (standard re-opening rule for weighted A*).
            if node not in closed or Get_g(nodes, node) < best_g.get(node):
                closed.append(node)
                best_g[node] = Get_g(nodes, node)
                if problem.isGoalState(node):
                    break
                if node in asked_nodes:
                    # Successors were cached earlier: reuse them instead of
                    # re-querying the problem (avoids side effects of getSuccessors).
                    for sub_node in getLocalSuccessors(recorded_nodes,node):
                        nodes.append( NodeState(sub_node[0], node, Get_g(nodes, node), sub_node[2], sub_node[1]) )
                        if heuristic(sub_node[0],problem) < infinit:
                            open.push(sub_node[0], Get_g(nodes, sub_node[0]) + Weight * heuristic(sub_node[0],problem) )
                else:
                    # sub_node is a (state, action, cost) triple.
                    for sub_node in problem.getSuccessors(node):
                        asked_nodes.append(node)
                        recorded_nodes.append( NodeState(sub_node[0], node, Get_g(nodes, node), sub_node[2], sub_node[1]) )
                        nodes.append( NodeState(sub_node[0], node, Get_g(nodes, node), sub_node[2], sub_node[1]) )
                        if heuristic(sub_node[0],problem) < infinit:
                            open.push(sub_node[0], Get_g(nodes, sub_node[0]) + Weight * heuristic(sub_node[0],problem) )
        # Decide whether the node we stopped on is the final goal.
        if hasattr(problem, 'goal'):
            if problem.goal == node:
                solved = True
        else:
            if problem.isGoalState(node):
                solved = True
        # Walk the parent links back to the start to recover this leg's actions.
        while Get_Parent(nodes, node) is not None:
            parent_node = Get_Parent(nodes, node)
            ActionList.append(Get_Action(nodes, node, parent_node))
            node = parent_node
        ActionList.reverse()
        TotalActions.extend(ActionList)
        if solved:
            break
    # (A large commented-out CapsuleSearchProblem multi-goal variant was removed here.)
    return TotalActions
    util.raiseNotDefined()   # unreachable: kept from the assignment template
# Abbreviations
# Short aliases -- presumably referenced by the Pacman command-line option
# parser (e.g. `-a fn=astar`); confirm against the agent configuration.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
ids = iterativeDeepeningSearch
wastar = waStarSearch
| true |
d337ab488ab27c43ca655a57dd306033fc16d68c | Python | Aasthaengg/IBMdataset | /Python_codes/p02886/s644787791.py | UTF-8 | 138 | 2.8125 | 3 | [] | no_license | import itertools
N = int(input())
d = list(map(int, input().split()))
print(sum([i * j for i, j in list(itertools.combinations(d, 2))])) | true |
c2d31952a133b6a6d1c0fa0f92ff132ed6fec03e | Python | NickChlam/nflScores | /web_scraper/getGames.py | UTF-8 | 8,214 | 2.640625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
import json
import pprint
from pymongo import MongoClient
import platform
import datetime
# TODO load conn string in env variables
conn = 'mongodb://localhost:27017/scores'
# is platform windows
# NOTE(review): both branches assign the identical connection string, so this
# platform check currently has no effect -- confirm whether a different URI
# was intended for Windows.
if platform.system() == 'Windows':
    conn = 'mongodb://localhost:27017/scores'
client = MongoClient(conn)
class NFLScores:
    """Scrapes 2019 NFL score/matchup data from foxsports.com with a headless
    Chrome driver and accumulates per-game records in self.matches."""

    def __init__(self, *args, **kwargs):
        # Headless Chrome, sized wide enough for the desktop scoreboard layout.
        options = webdriver.ChromeOptions()
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--no-sandbox')
        options.add_argument('window-size=1400,650')
        options.add_argument('headless')
        self.bot = webdriver.Chrome(options=options)
        # TODO: create function to chnage array to object
        # Each entry is a 3-element list: [home dict, away dict, meta dict].
        self.matches = []

    def getGames(self, week):
        """Scrape every game chip for the given week and append the parsed
        [home, away, meta] records to self.matches."""
        print('please wait scraping data .... ')
        Matches = []   # NOTE(review): unused -- results go to self.matches
        matchup = []
        home = {
            "name" : '',
            "rank" : '',
            "PPG" : '',
            "Opp_PPG" : '',
            "Pass_YPG" : '',
            "Rush_YPG" : '',
            "url" : '',
            "style" : ''
        }
        away = {
        }
        bot = self.bot
        bot.get(f'https://www.foxsports.com/nfl/scores?season=2019&seasonType=1&week={week}')
        scoreContainer = bot.find_element_by_id('wisbb_scoresContainer')
        games = bot.find_elements_by_class_name('wisbb_scoreChip')
        for game in games:
            generalInfo = []   # NOTE(review): unused
            # getting stats -- flat list indexed per team (see getMatchup)
            stats = self.getMatchup(game)
            # get away info --------------------------------------------------------------------->
            teams = game.find_element_by_class_name('wisbb_teams')
            teamA = teams.find_element_by_class_name('wisbb_teamA')
            awayTeamName = teamA.find_element_by_class_name('wisbb_name').get_attribute('innerHTML')
            awayRecord = teamA.find_element_by_class_name('wisbb_record').get_attribute('innerHTML')
            away["name"] = awayTeamName
            away["rank"] = awayRecord
            # away URL and Style
            url = self.getUrl(game, 'wisbb_teamA')
            away["url"] = url[0]
            away["style"] = url[1]
            # matchup stats (away uses even/odd positions 0,3,6,9 of the table)
            away["PPG"] = stats[0]
            away["Opp_PPG"] = stats[3]
            away["Pass_YPG"] = stats[6]
            away["Rush_YPG"] = stats[9]
            # GameTime
            # get home info --------------------------------------------------------------------->
            teamB = teams.find_element_by_class_name('wisbb_teamB')
            homeTeamName = teamB.find_element_by_class_name('wisbb_name').get_attribute('innerHTML')
            homeRecord = teamB.find_element_by_class_name('wisbb_record').get_attribute('innerHTML')
            home["name"] = homeTeamName
            home["rank"] = homeRecord
            # Home URL and Style
            url = self.getUrl(game, 'wisbb_teamB')
            home["url"] = url[0]
            home["style"] = url[1]
            # matchup stats (home uses positions 2,5,8,11)
            home["PPG"] = stats[2]
            home["Opp_PPG"] = stats[5]
            home["Pass_YPG"] = stats[8]
            home["Rush_YPG"] = stats[11]
            # append data to matches
            matchup.append(home)
            matchup.append(away)
            # Headline may be absent on some chips: fall back to empty string.
            try:
                headline = self.getHeadline(game)
            except Exception as ex:
                headline = ''
            matchup.append(
                {
                    "gameTime" : self.getAirTime(game),
                    "headline" : headline
                }
            )
            self.matches.append(matchup)
            # Reset accumulators for the next game chip.
            matchup = []
            home = {}
            away = {}
            # TODO REMOVE PRINT
            print(awayTeamName + ' ' + awayRecord + ' vs. ' + homeTeamName + ' ' + homeRecord)

    def getMatchup(self, game):
        """Return the matchup table's <td> inner-HTML values as a flat list,
        with the first three header cells dropped."""
        match = []
        stats = game.find_element_by_class_name('wisbb_matchup')
        data = stats.find_elements_by_tag_name('tr')
        for row in data:
            data2 = row.find_elements_by_tag_name('td')
            for d in data2:
                match.append(d.get_attribute('innerHTML'))
        # Drop the three leading label cells so stats[0] is the first number.
        match.pop(0)
        match.pop(0)
        match.pop(0)
        return match

    def getUrl(self, game, team):
        """Return [logo image src, stripe inline style] for the given team side
        ('wisbb_teamA' or 'wisbb_teamB')."""
        logos = []
        branding = game.find_element_by_class_name('wisbb_teamBranding')
        teamA = branding.find_element_by_class_name(team)
        urlA = teamA.find_element_by_tag_name('img').get_attribute('src')
        color = teamA.find_element_by_class_name('wisbb_stripe').get_attribute('style')
        logos.append(urlA)
        logos.append(color)
        return logos

    def getAirTime(self, game):
        """Return {'network', 'day', 'time'} parsed from the status block.
        'time' is empty when only one wisbb_gameTime element exists."""
        gameTime = {}
        info = game.find_element_by_class_name('wisbb_status')
        network = info.find_element_by_class_name('wisbb_network').get_attribute('innerHTML')
        day = info.find_element_by_class_name('wisbb_gameTime').get_attribute('innerHTML')
        try:
            time = info.find_elements_by_class_name('wisbb_gameTime')[1].get_attribute('innerHTML')
        except Exception as ex:
            time = ''
        gameTime = {"network" : network, "day" : day, "time" : time}
        return gameTime

    def getHeadline(self, game):
        """Return the chip's headline text, stripped; raises when the element
        is missing (caller catches and substitutes '')."""
        headline = game.find_element_by_class_name('wisbb_headlines').get_attribute('innerHTML')
        return headline.strip()

    def getWeek(self, date):
        """Map a calendar date to the 2019 NFL week number ('10'..'17') using
        hard-coded Tue-Thu scraping windows; None outside every window."""
        week10 = datetime.datetime(2019, 11, 5).date()
        week10End = datetime.datetime(2019, 11, 7).date()
        week11 = datetime.datetime(2019, 11, 12).date()
        week11End = datetime.datetime(2019, 11, 14).date()
        week12 = datetime.datetime(2019, 11, 19).date()
        week12End = datetime.datetime(2019, 11, 21).date()
        week13 = datetime.datetime(2019, 11, 26).date()
        week13End = datetime.datetime(2019, 11, 29).date()
        week14 = datetime.datetime(2019, 12, 3).date()
        week14End = datetime.datetime(2019, 12, 5).date()
        week15 = datetime.datetime(2019, 12, 10).date()
        week15End = datetime.datetime(2019, 12, 12).date()
        week16 = datetime.datetime(2019, 12, 17).date()
        week16End = datetime.datetime(2019, 12, 19).date()
        week17 = datetime.datetime(2019, 12, 24).date()
        week17End = datetime.datetime(2019, 12, 26).date()
        # Leftover debug output.
        print(date)
        print(week10)
        print(week10End)
        if(date >= week10 and date < week10End):
            return '10'
        if(date >= week11 and date < week11End):
            return '11'
        if(date >= week12 and date < week12End):
            return '12'
        if(date >= week13 and date < week13End):
            return '13'
        if(date >= week14 and date < week14End):
            return '14'
        if(date >= week15 and date < week15End):
            return '15'
        if(date >= week16 and date < week16End):
            return '16'
        if(date >= week17 and date < week17End):
            return '17'
        # if no matches return none
        return None
# Script entry: scrape the current week's games unless they are already stored.
scores = NFLScores()
db = client.scores
posts = db.games
CurrentDate = datetime.datetime.today().date()
week = scores.getWeek(CurrentDate)
print(week)
# NOTE(review): `week == None` should be `week is None`.
if week == None :
    quit()
#sys.argv[1]
#
data = posts.find({'week' : week})
# see if there is data for week. If not get it
# The cursor raises StopIteration (or pymongo's equivalent) when empty, which
# lands us in the except branch that performs the scrape.
try:
    data.next()
except Exception as ex:
    scores.getGames(week)
    # Retry once if the first scrape produced nothing.
    if scores.matches == []:
        scores.getGames(week)
    for game in scores.matches:
        pprint.pprint(game)
    # Shut the browser down before persisting.
    scores.bot.close()
    scores.bot.quit()
    for game in scores.matches:
        post = {
            "week" : week,
            "game" : game
        }
        save = posts.insert_one(post)
    quit()
print(f'data for week {week} already exists')
quit()
# bills_post = posts.find()
# for post in bills_post:
#     print(post)
# result = db.posts.delete_many({'author': 'jacl'})
# print(result.deleted_count)
4128ac81be8902d125ae865147dcfae13380252a | Python | yashbonde/chessshhh | /chess_engine/ai_move.py | UTF-8 | 1,445 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | """
This is the ai_move.py file which has the AI model
17.11.2019 - @yashbonde
"""
# chess
import chess
# custom
from chess_engine.ai_model import make_random_move
from chess_engine.utils import board_position_to_int, int_to_board_position
def move_orchestrator(prev_board_state):
    """
    Take the previous board state and play the AI's move.

    :param prev_board_state: FEN string describing the current board
    :return: dict with keys
        "new_state": FEN string after the AI's move (or terminal state)
        "from"/"to": board squares of the move, or None on game over
        "content":   user-facing message on checkmate/stalemate, else None
        "san":       SAN notation of the move played, else None
    """
    # default setup for the branches that do not produce a move
    from_ = None
    to_ = None
    content = None
    san = None  # BUGFIX: was `san = None,` -- the trailing comma made this the tuple (None,)
    new_state = prev_board_state
    # to understand the FEN: https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation
    # feed the state to the model and get the move and updated state
    move_obj, game_state, san_move = make_random_move(
        board_state_fen=prev_board_state
    )
    # --- Write Outputs --- #
    # BUGFIX: compare strings with `==`, not `is` (identity comparison only
    # happened to work through CPython string interning).
    if move_obj == 'checkmate':
        new_state = game_state
        content = "Checkmate, You Lost! Fucking Loser!"
    elif move_obj == "stalemate":
        new_state = game_state
        content = "Stalemate! Game Over Asshole!"
    else:
        new_state = game_state
        from_ = int_to_board_position[move_obj.from_square]
        to_ = int_to_board_position[move_obj.to_square]
        san = san_move
    # --- output body --- #
    return {
        "new_state": new_state,
        "from": from_,
        "to": to_,
        "content": content,
        "san": san
    }
| true |
2bf385c4dbb5e1b74a10c530706518d92ee68a53 | Python | a35931chi/AIND-Sudoku-Solver | /solution.py | UTF-8 | 7,447 | 3.28125 | 3 | [] | no_license | #this is written with Python 3.5 IDLE
#shell code was taken from github: https://github.com/udacity/aind-sudoku
#majority of structure/functions are taken from udacity sudoku exercises
# History of board snapshots recorded by assign_value() (consumed by the visualizer).
assignments = []
rows = 'ABCDEFGHI'  # row labels of the 9x9 grid
cols = '123456789'  # column labels of the 9x9 grid
def cross(A, B):
    "Cross product of elements in A and elements in B."
    combos = []
    for a in A:
        for b in B:
            combos.append(a + b)
    return combos
boxes = cross(rows, cols) #produce all the boxes
row_unit = [cross(r, cols) for r in rows] #produce a list containing all the row units
col_unit = [cross(rows, c) for c in cols] #produce a list containing all the column units
square_unit = [cross(rs, cs) for rs in ['ABC','DEF','GHI'] for cs in ['123','456','789']] #produce a list containing all the square units
diag_unit = [[rows[i] + cols[i] for i in range(len(rows))] , [rows[-i-1] + cols[i] for i in range(len(rows))]] #produce a list containing the two diagonal units
if True: #taking into account diagonal units (flip to False for a plain sudoku)
    unitlist = row_unit + col_unit + square_unit + diag_unit
else:
    unitlist = row_unit + col_unit + square_unit
# units: box -> list of the units containing it.
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
# peers: box -> set of all other boxes that share at least one unit with it.
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
def assign_value(values, box, value):
    """
    Please use this function to update your values dictionary!
    Assigns a value to a given box. If it updates the board record it.
    """
    # Only mutate (and snapshot) when the candidate string actually changes,
    # so no memory is wasted on no-op assignments.
    if values[box] != value:
        values[box] = value
        if len(value) == 1:
            assignments.append(values.copy())
    return values
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.
    Args:
        grid(string) - A grid in string form.
    Returns:
        A grid in dictionary form
        Keys: The boxes, e.g., 'A1'
        Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
    """
    digits = '123456789'
    # '.' means unconstrained (all nine candidates); any other non-digit is skipped.
    chars = [digits if ch == '.' else ch for ch in grid if ch == '.' or ch in digits]
    assert len(grid) == 81
    return dict(zip(boxes, chars))
def display(values):
    """
    Display the values as a 2-D grid.
    Args:
        values(dict): The sudoku in dictionary form
    """
    # Column width: one space more than the widest candidate string.
    width = 1 + max(len(values[box]) for box in boxes)
    line = '+'.join(['-' * (width * 3)] * 3)
    for r in rows:
        row_text = ''.join(values[r + c].center(width) + ('|' if c in '36' else '')
                           for c in cols)
        print(row_text)
        if r in 'CF':
            print(line)
    return
def eliminate(values):
    """Remove every solved box's digit from all of that box's peers."""
    for solved_box in [box for box in values.keys() if len(values[box]) == 1]:
        for peer in peers[solved_box]:
            assign_value(values, peer, values[peer].replace(values[solved_box], ''))
    return values
def only_choice(values):
    """Assign a digit to a box when it is the only box in some unit that can hold it."""
    for unit in unitlist:
        for digit in '123456789':
            candidates = [box for box in unit if digit in values[box]]
            if len(candidates) == 1:
                assign_value(values, candidates[0], digit)
    return values
def naked_twins(values):
    """Eliminate values using the naked twins strategy.
    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    # identify cells with exactly two candidates
    len2_boxes = [box for box in values.keys() if len(values[box]) == 2]
    # (BUGFIX: removed `len2_values`, which was computed here but never used.)
    for box in len2_boxes:
        # Re-check the length: earlier eliminations in this pass may have
        # shrunk this box's candidates already.
        if len(values[box]) == 2:
            # identify the units that it belongs to
            for unit in units[box]:
                # look for another box in the unit holding the same pair
                for another_box in unit:
                    if box != another_box and values[box] == values[another_box]:
                        digits = values[box]
                        # twins found: strip both digits from every other box in the unit
                        for yet_another_box in unit:
                            if yet_another_box != another_box and yet_another_box != box and (digits[0] in values[yet_another_box] or digits[1] in values[yet_another_box]):
                                assign_value(values, yet_another_box, values[yet_another_box].replace(digits[0], ''))
                                assign_value(values, yet_another_box, values[yet_another_box].replace(digits[1], ''))
    return values
def reduce_puzzle(values):
    """Apply eliminate / only_choice / naked_twins repeatedly until no box
    changes anymore (a fixed point).

    Returns the reduced values dict, or False if some box ends up with zero
    candidates (i.e. the puzzle state is contradictory).

    (BUGFIX: removed the leftover debug print and its otherwise-unused
    `solved_values` computation.)
    """
    stalled = False
    while not stalled:
        solved_values_before = [box for box in values.keys() if len(values[box]) == 1]
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_values_after = [box for box in values.keys() if len(values[box]) == 1]
        # Stalled when a full pass solved no additional box.
        stalled = (solved_values_before == solved_values_after)
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False  # contradiction: a box has no remaining candidate
    return values
def search(values):
    """Depth-first search over candidate assignments.

    Reduce the puzzle with constraint propagation, then branch on the unsolved
    box with the fewest candidates.  Returns the solved values dict, False on
    contradiction, or None (falsy) when every branch fails.

    (BUGFIX: removed the per-call debug prints, which fired on every node of
    the search tree.)
    """
    values = reduce_puzzle(values)
    if values is False:
        return False  # this branch reached a contradiction
    if all(len(values[s]) == 1 for s in boxes):
        return values  # every box solved
    # Choose the unsolved box with the fewest remaining candidates (min-remaining-values).
    n, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
    for value in values[s]:
        new_sudoku = values.copy()
        new_sudoku[s] = value
        attempt = search(new_sudoku)
        if attempt:
            return attempt
    # Implicit None when all candidates for `s` fail (treated as falsy by callers).
def solve(grid):
    """
    Find the solution to a Sudoku grid.
    Args:
        grid(string): a string representing a sudoku grid.
        Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    # Initialise the candidate dictionary, then run the constraint search.
    return search(grid_values(grid))
if __name__ == '__main__':
    # Sample puzzles; diag_sudoku_grid is kept for reference but unused below.
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    try_this = '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................'
    display(solve(try_this))
    # Optional pygame visualisation; failures here are non-fatal.
    try:
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    except:
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| true |
7764286b8fa58c2abad0bafc7131bc102f2e3bfd | Python | lamatehu/study-c-python | /python/chapter/wendu.py | UTF-8 | 200 | 2.640625 | 3 | [] | no_license | wendu = input()
# Convert a reading such as "F100" or "c37.5" to the other temperature scale.
# NOTE(review): eval() on raw user input is unsafe; kept only to preserve the
# original behaviour of accepting arbitrary numeric expressions after the prefix.
if wendu[0] in 'Ff':
    C = (eval(wendu[1:]) - 32) / 1.8
    print("C{:.2f}".format(C))
elif wendu[0] in 'Cc':
    F = eval(wendu[1:]) * 1.8 + 32
    print("F{:.2f}".format(F))
| true |
ca8fe2f423b867ad813bb8571d26d80316aa0201 | Python | strazdas/Cpp_global_opt | /results/show_stats.py | UTF-8 | 1,937 | 2.890625 | 3 | [] | no_license | # should construct simplexes and call show potential function
from numpy import array as a, matrix as m, arange, sqrt, isnan, pi, cos, sin, mean
from itertools import permutations
from os import listdir
# List directories (exclude bin)
# iterate through classes filenames and summarise them
# Entries skipped when scanning the results directory (non-result files/dirs).
ignore = ['bin', '.ropeproject', 'show_stats.py']
def show_stats(root_path="."):
for path in listdir(root_path):
if path not in ignore:
print('===== ' + path + ' =====')
for cls in range(1, 9):
stats = {'alg': '', 'cls': '', 'calls': [], 'subregions': [], 'duration': []}
for fid in range(1, 101):
filename = str(cls) + "_" + str(fid)
try:
f = open(path + '/' + filename)
except: # Temporarily
continue
file_content = f.read().strip()
for o in file_content.split(','):
if ':' in o:
key, value = [e.strip() for e in o.strip().split(':')]
if key == 'calls' or key == 'subregions':
stats[key].append(int(value))
if key == 'duration':
stats[key].append(float(value))
print " ", cls,
if stats['calls']:
l = len(stats['calls'])
stats['calls'] = sorted(stats['calls'])
stats['subregions'] = sorted(stats['subregions'])
print "fc50: %5d fc100: %5d calls: %9.3f runs: %3d parts50: %6d parts100: %6d" % (stats['calls'][l/2], stats['calls'][-1], mean(stats['calls']), len(stats['calls']), stats['subregions'][l/2], stats['subregions'][-1])
else:
print
# Summarise results in the current directory when run as a script.
if __name__ == '__main__':
    show_stats()
| true |
48798d0b81b05de205c82c6f0482143bd6e7a819 | Python | doddydad/python-for-the-absolute-beginner-3rd-ed | /Chapter 10/order up.py | UTF-8 | 3,732 | 3.375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 16:34:58 2020
@author: Andrew
"""
from tkinter import *
class Application(Frame):
    """A GUI for menu ordering"""
    def __init__(self, master):
        """initialising the frame"""
        super(Application, self).__init__(master)
        self.grid()
        self.create_widgets()
    def create_widgets(self):
        """Build every widget: labels, table-number entry, starter checkboxes,
        main-meal radio buttons, the Order button and the output text box."""
        # welcome message
        Label(self,
              text = "Here's you menu I guess lol"
              ).grid(row = 0, column = 0, columnspan = 2, sticky = W)
        # Ask and receive table number
        Label(self,
              text = "Table Number: "
              ).grid(row = 1, column = 0, sticky = W)
        self.table_number = Entry(self)
        self.table_number.grid(row = 1, column = 1, sticky = W)
        # ordering starters
        Label(self,
              text = "What starter(s) would you like?"
              ).grid(row = 2, column = 0, columnspan = 2, sticky = W)
        # Choose starters -- one BooleanVar per independent checkbox
        self.snails = BooleanVar()
        Checkbutton(self,
                    text = "snails",
                    variable = self.snails
                    ).grid(row = 3, column = 0, sticky = W)
        self.soup = BooleanVar()
        Checkbutton(self,
                    text = "soup",
                    variable = self.soup
                    ).grid(row = 3, column = 1, sticky = W)
        self.shrimp_cocktail = BooleanVar()
        Checkbutton(self,
                    text = "shrimp cocktail",
                    variable = self.shrimp_cocktail
                    ).grid(row = 3, column = 2, columnspan = 2, sticky = W)
        # main meal, you can only have one -- radio buttons share one StringVar
        Label(self,
              text = "What main would you like?: "
              ).grid(row = 4, column = 0, sticky = W)
        self.main_meal = StringVar()
        self.main_meal.set(None)
        main_meals = ["Lasagne", "Risotto", "Curry"]
        column = 0
        for meal in main_meals:
            Radiobutton(self,
                        text = meal,
                        variable = self.main_meal,
                        value = meal
                        ).grid(row= 5, column = column, sticky = W)
            column += 1
        # Button to change the text box response
        Button(self,
               text = "Order",
               command = self.order_meal
               ).grid(row = 6, column = 0, sticky = W)
        # Output text box
        self.meal_order_txt = Text(self, width = 50, height = 2, wrap = WORD)
        self.meal_order_txt.grid(row = 7, column = 0, columnspan = 4)
    # def order_meal(self):
    #     self.table_number.grid_forget()
    # this hides a widget, but only one at a time. not viable menu system.
    def order_meal(self):
        """Repeat the order back"""
        # Beginning of string
        meal_order = "Ok, we'll be back to you table "
        meal_order += self.table_number.get()
        meal_order += " soon with: "
        # Data from user: append each ticked starter
        if self.snails.get():
            meal_order += "snails, "
        if self.soup.get():
            meal_order += "soup, "
        if self.shrimp_cocktail.get():
            meal_order += "shrimp cocktail, "
        meal_order += "and your "
        meal_order += self.main_meal.get()
        # Display this: clear the text box, then show the new order summary
        self.meal_order_txt.delete(0.0, END)
        self.meal_order_txt.insert(0.0, meal_order)
#main
# Build the root window, attach the Application frame, and start the Tk event loop.
root = Tk()
root.title("Order Up!")
app = Application(root)
root.mainloop()
d92d0eaae792a0c8a8751b421cf90d14625f3252 | Python | ProgramFan/bentoo | /bentoo/tools/merge.py | UTF-8 | 4,255 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
bentoo-merger.py - merge performance data from different sources
This tool merges collected performance data from different data sources. It
understands the internal structure of performance database and selects merge
keys automatically. Currently, it only supports data generated by Likwid
parser.
'''
from builtins import map
from builtins import zip
import argparse
import sqlite3
import fnmatch
import pandas
import re
def glob_strings(source, patterns):
    """Return every string in `source` matching any shell-style pattern.

    An item is appended once per pattern it matches, so duplicates are
    possible; either argument being empty/None yields [].
    """
    if not source or not patterns:
        return []
    matches = []
    for item in source:
        for pattern in patterns:
            if fnmatch.fnmatch(item, pattern):
                matches.append(item)
    return matches
def quote(string):
    """Wrap `string` in double quotes (for SQL identifier quoting)."""
    return '"%s"' % string
def find_first_of(contents, candidates):
    """Return (candidate, index) for the first candidate present in contents,
    or (None, -1) when none of them occurs."""
    for candidate in candidates:
        if candidate in contents:
            return (candidate, contents.index(candidate))
    return (None, -1)
def split_columns(columns):
    '''split 'columns' into (index_columns, data_columns)'''
    # Everything up to and including "TimerName" indexes a row; the rest is data.
    cut = columns.index("TimerName") + 1
    return (columns[:cut], columns[cut:])
def extract_column_names(conn, table="result"):
orig_row_factory = conn.row_factory
conn.row_factory = sqlite3.Row
r = conn.execute("SELECT * FROM %s LIMIT 1" % table).fetchone()
names = list(r.keys())
conn.row_factory = orig_row_factory
return names
def merge_db(main_db,
             ref_db,
             out_db,
             replace=None,
             append=None,
             replace_with=None):
    """Merge performance columns from ref_db into main_db and write out_db.

    main_db/ref_db/out_db: paths of sqlite databases holding a `result` table.
    replace: shell-glob patterns of main columns to overwrite from ref_db.
    append: shell-glob patterns of ref columns to add to the main table.
    replace_with: list of "main_col=ref_col" renaming pairs (mutually
        exclusive with `replace` at the CLI level).
    Rows are aligned on the index columns (everything up to "TimerName").
    """
    conn0 = sqlite3.connect(main_db)
    conn1 = sqlite3.connect(ref_db)
    main_cols = extract_column_names(conn0)
    ref_cols = extract_column_names(conn1)
    if replace_with:
        # "x=y" pairs: take column y from ref_db, store it under name x.
        replace_cols = [x.split("=")[0] for x in replace_with]
        replace_refs = [x.split("=")[1] for x in replace_with]
    else:
        replace_cols = glob_strings(main_cols, replace)
    append_cols = glob_strings(ref_cols, append)
    index_cols, _ = split_columns(ref_cols)
    # Build the SELECT that pulls index + replacement + appended columns
    # from the reference database (with AS-renaming in the replace_with case).
    if replace_with:
        index_sql = ", ".join(map(quote, index_cols))
        replace_sql = ", ".join("\"{0}\" AS \"{1}\"".format(x, y)
                                for x, y in zip(replace_refs, replace_cols))
        append_sql = ", ".join(map(quote, append_cols))
        sql = [index_sql, replace_sql, append_sql]
        sql = [x for x in sql if x]
        sql = "SELECT %s FROM result" % ", ".join(sql)
    else:
        sql = index_cols + replace_cols + append_cols
        sql = list(map(quote, sql))
        sql = "SELECT %s FROM result" % ", ".join(sql)
    ref_data = pandas.read_sql_query(sql, conn1)
    ref_data = ref_data.set_index(index_cols)
    main_data = pandas.read_sql_query("SELECT * FROM result", conn0)
    main_data = main_data.set_index(index_cols)
    # Appended columns must not already exist; create them so update() can fill them.
    for x in append_cols:
        assert (x not in main_data)
        main_data[x] = 0
    # DataFrame.update overwrites matching cells in place, aligned on the index.
    main_data.update(ref_data)
    conn2 = sqlite3.connect(out_db)
    # IMPORTANT: use flattern index so index=False in to_sql works properly,
    # i.e, dataframe index is ignored.
    main_data = main_data.reset_index()
    main_data.to_sql("result", conn2, if_exists="replace", index=False)
    conn2.commit()
    conn2.close()
    conn1.close()
    conn0.close()
def main():
    """Parse command-line arguments and run merge_db with them."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("main_db", help="Database to be updated")
    parser.add_argument("ref_db", help="Database to get update data")
    parser.add_argument("out_db", help="Database to store output")
    parser.add_argument(
        "-a",
        "--append",
        nargs="+",
        default=None,
        help="Columns to append, supports shell wildcards")
    # --replace and --replace-with are alternative ways to pick the same
    # columns, so they are mutually exclusive.
    grp = parser.add_mutually_exclusive_group()
    grp.add_argument(
        "-r",
        "--replace",
        nargs="+",
        default=None,
        help="Columns to replace, supports shell wildcards ")
    grp.add_argument(
        "-w",
        "--replace-with",
        action="append",
        default=[],
        help="Replace column x with y (format: x=y)")
    args = parser.parse_args()
    # Argument names match merge_db's keyword parameters exactly.
    merge_db(**vars(args))
if __name__ == "__main__":
main()
| true |
8a042148ef88c08c5027c70fbb6cef25ab77e5b8 | Python | Zyb3rWolfi/chillyold | /cogs/basic.py | UTF-8 | 957 | 2.59375 | 3 | [] | no_license | import discord
from discord.ext import commands
class basicCommands(commands.Cog):
    """Cog providing the bot's custom help command."""
    def __init__(self,bot):
        # Keep a reference to the bot for use by future commands.
        self.bot = bot
    # NOTE(review): aliasing to 'help' clashes with discord.py's built-in help
    # command unless the default one was removed at bot setup -- confirm.
    @commands.command(aliases=['help'])
    async def helpP(self, ctx):
        # Single embed listing each moderation command and how to invoke it.
        embed = discord.Embed(title = "Chilly Bot Commands", description = "The Bot Prefix Is `c`", colour = discord.Color.blurple())
        embed.add_field(name="Purge Messages", value="`purge`", inline=True)
        embed.add_field(name="Ban User", value="`ban`", inline=True)
        embed.add_field(name="Unban User", value="`unban`", inline=True)
        embed.add_field(name="Kick User", value="`kick`", inline=True)
        embed.add_field(name="Lock Channels", value="`lock`", inline=True)
        embed.add_field(name="Unlock Channels", value="`unlock`", inline=True)
        embed.add_field(name="Add Log Channels", value="`log create`", inline=True)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension hook: called by bot.load_extension() to register the cog.
    bot.add_cog(basicCommands(bot))
caa5a46785772dcf20cdd849e7bfa8d9b76bf874 | Python | cofinoa/cfdm | /cfdm/core/domainancillary.py | UTF-8 | 1,186 | 2.859375 | 3 | [
"MIT"
] | permissive | from . import abstract
class DomainAncillary(abstract.PropertiesDataBounds):
    '''A domain ancillary construct of the CF data model.
    A domain ancillary construct provides information which is needed for
    computing the location of cells in an alternative coordinate
    system. It is referenced by a term of a coordinate conversion formula
    of a coordinate reference construct. It contains a data array which
    depends on zero or more of the domain axes.
    It also contains an optional array of cell bounds, stored in a
    `Bounds` object, recording the extents of each cell (only applicable
    if the array contains coordinate data), and properties to describe the
    data.
    An array of cell bounds spans the same domain axes as the data array,
    with the addition of an extra dimension whose size is that of the
    number of vertices of each cell.
    .. versionadded:: 1.7.0
    '''
    @property
    def construct_type(self):
        '''Return a description of the construct type.
        .. versionadded:: 1.7.0
        :Returns:
            `str`
                The construct type.
        **Examples:**
        >>> f.construct_type
        'domain_ancillary'
        '''
        # Fixed identifier used to dispatch on the construct's kind.
        return 'domain_ancillary'
    #--- End: def
#--- End: class
| true |
4822527f185eaf6604de2438de6ea1a6c8c6ed72 | Python | afcarl/Coding_Interview_Problems | /Python/APL/ch15/binomialCoef.py | UTF-8 | 888 | 3.515625 | 4 | [] | no_license | '''
Created on Jan 24, 2015
@author: Ben Athiwaratkun (pa338)
'''
#from __future__ import division
import numpy as np
def binomialCoef(n, k):
    """Binomial coefficient C(n, k), computed top-down with memoisation."""
    # A full (n+1)x(n+1) table is more than strictly necessary, but keeps the
    # memoisation layout simple: index (row, col) == (n, k).
    table = np.zeros((n + 1, n + 1))
    table[:, 0] = 1                 # C(i, 0) == 1 for all i
    for i in range(n + 1):
        table[i, i] = 1             # C(i, i) == 1 for all i

    def fill(row, col):
        # Pascal's rule on demand; a zero entry means "not yet computed".
        if table[row, col] == 0:
            table[row, col] = fill(row - 1, col - 1) + fill(row - 1, col)
        return table[row, col]

    return int(fill(n, k))
def aux2(ar, n, k):
    """Memoised Pascal's rule: fill and return ar[n, k] == C(n, k).

    A zero entry in `ar` means "not computed yet"; base cases must be
    pre-seeded by the caller (first column and diagonal set to 1).
    """
    cached = ar[n, k]
    if cached != 0:
        return cached
    result = aux2(ar, n - 1, k - 1) + aux2(ar, n - 1, k)
    ar[n, k] = result
    return result
def aux(n, k):
    """Plain (un-memoized) recursive binomial coefficient -- exponential time."""
    # Edges of Pascal's triangle are 1; interior cells sum their two parents.
    return 1 if k in (0, n) else aux(n - 1, k - 1) + aux(n - 1, k)
def main():
    """Demo: compare the memoized and naive binomial implementations.

    Bug fix: the original used Python-2 ``print x`` statements, which are a
    SyntaxError on Python 3; ``print(x)`` calls behave identically on both.
    """
    print(binomialCoef(50, 23))  # fast: each table entry computed once
    print(aux(50, 23))           # very slow: plain exponential recursion
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
a386a4da8ea27391facbb2312d9dce57bcdef3e2 | Python | grue0000/vc_project | /edge detection_0719.py | UTF-8 | 729 | 2.546875 | 3 | [] | no_license | import numpy as np
import cv2  # fix: the original 'import cv2 as cv2' alias was redundant
import matplotlib.pyplot as plt

# Load the test image directly as a single-channel grayscale array.
img = cv2.imread('Images/charge.jpg', cv2.IMREAD_GRAYSCALE)

# Canny edge maps with increasingly strict lower thresholds (upper fixed at 200).
edge1 = cv2.Canny(img, 100, 200)
edge2 = cv2.Canny(img, 140, 200)
edge3 = cv2.Canny(img, 170, 200)

# NOTE(review): the 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
# findContours returns only (contours, hierarchy), so this line must be
# adjusted if cv2 is upgraded -- confirm the installed version.
_, contours, hierarchy = cv2.findContours(edge3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (255, 0, 0), 2)

# Draw a bounding box around every contour larger than 30x30 pixels.
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w > 30 and h > 30:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

# NOTE(review): plt.imshow() without plt.show() renders nothing in script mode.
plt.imshow(img)
cv2.imshow('original', img)
cv2.imshow('Canny Edge1', edge1)
cv2.imshow('Canny Edge2', edge2)
cv2.imshow('Canny Edge3', edge3)
cv2.waitKey()
cv2.destroyAllWindows()
| true |
3bc938a218054ee76cadd093b3bf4f0d8d05eb20 | Python | huangjenny/wallbreakers | /week4/subsets.py | UTF-8 | 642 | 3.265625 | 3 | [] | no_license | # Jenny Huang
# Wallbreakers Cohort #3
# Week 4
# Subsets
# https://leetcode.com/problems/subsets/
class Solution(object):
    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Accumulators shared with (and mutated by) the recursive helper.
        powerset = []
        subset = []
        # generateSubsets is a module-level helper, not a method of this
        # class, so `self` is threaded through explicitly.
        generateSubsets(self, 0, nums, subset, powerset)
        return powerset
def generateSubsets(self, index, nums, subset, powerset):
    """Backtracking helper: append every subset of ``nums[index:]`` (combined
    with the current *subset* prefix) to *powerset*.

    Bug fix: the original appended ``set(subset)``, which (a) does not match
    the ``List[List[int]]`` contract declared by ``Solution.subsets`` and
    (b) collapses duplicate values inside a single subset.  A list copy
    preserves order and multiplicity.
    """
    powerset.append(subset[:])  # snapshot -- *subset* is mutated below
    for i in range(index, len(nums)):
        subset.append(nums[i])
        generateSubsets(self, i + 1, nums, subset, powerset)
        del subset[-1]  # backtrack
a0d9b1ca3bae66fd08a8d53765c4f77007275218 | Python | miloszlakomy/algutils | /primes/cached_primes_test.py | UTF-8 | 757 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from algutils.primes import cached_primes
import unittest
class TestIsPrime(unittest.TestCase):
    """Checks is_prime() against the known primes below 23."""

    def test_is_prime(self):
        primes_below_23 = {2, 3, 5, 7, 11, 13, 17, 19}
        for candidate in range(23):
            expected = candidate in primes_below_23
            self.assertEqual(cached_primes.is_prime(candidate), expected)
class TestGetPrimesList(unittest.TestCase):
    """Checks that get_primes_list() starts with the primes below 23."""

    def test_get_primes_list(self):
        expected_prefix = [2, 3, 5, 7, 11, 13, 17, 19]
        primes = cached_primes.get_primes_list(min_lim=23)
        self.assertEqual(expected_prefix, primes[:len(expected_prefix)])
class TestGetPrimesSet(unittest.TestCase):
    """Checks that get_primes_set() contains every prime below 23."""

    def test_get_primes_set(self):
        expected = {2, 3, 5, 7, 11, 13, 17, 19}
        primes = cached_primes.get_primes_set(min_lim=23)
        self.assertTrue(expected.issubset(primes))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| true |
a97128d0514472b195da23a77dffc39bf339af3f | Python | JaySon-Huang/SecertPhotos | /cipher/JPEGImageCipher.py | UTF-8 | 18,841 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#encoding=utf-8
'''
定义一个类似`接口`的存在
'''
import math
from pyjpegtbx.constants import DCTSIZE2
from .utils import exgcd, multiplicative_inver, pow_mod
class Base_JPEGImageCipher(object):
    """Interface-like base class for JPEG image ciphers.

    Subclasses implement encryption/decryption of a JPEGImage plus
    optional embedding/extraction of hidden data.
    """
    def encrypt(self, image):
        '''Return an encrypted JPEGImage object.
        '''
        raise NotImplementedError

    def decrypt(self, image):
        '''Return a decrypted JPEGImage object.
        '''
        raise NotImplementedError

    # NOTE(review): "encrtpt" is a typo, but the name is part of the public
    # API used by subclasses/callers, so it is kept as-is.
    def encrtptAndEmbData(self, image, data=b'Attack at dawn!'):
        '''Return a JPEGImage object with *data* embedded and then encrypted.
        '''
        raise NotImplementedError

    def embData(self, image, data):
        '''Return a JPEGImage object with *data* embedded.
        '''
        raise NotImplementedError

    def extractData(self, image):
        '''Return the hidden data extracted from *image*.
        '''
        raise NotImplementedError

    def decryptAndExtractData(self, image):
        '''Return the decrypted image together with the extracted data.
        '''
        raise NotImplementedError
class FixedLogisticShuffeler(object):
    """Deterministic in-place shuffler driven by a logistic-map chaotic sequence."""

    def __init__(self, seed):
        # seed: a float in (0, 1), excluding 0.5 (a fixed point of the map).
        self.seed = seed

    def next(self):
        """Advance the logistic map once and return a value in [0, 1]."""
        self.seed = 4 * self.seed * (1 - self.seed)
        return 2 / math.pi * math.asin(math.sqrt(self.seed))

    def shuffle(self, lst):
        """Shuffle *lst* in place (Fisher-Yates-style swaps, chaotic indices)."""
        for k in range(len(lst) - 1, 0, -1):
            j = int(k * self.next())
            lst[k], lst[j] = lst[j], lst[k]
class JPEGImageCipher0(object):
    '''Generalized Arnold-transform cipher; defaults to the classic Arnold
    transform (cat map):

        (a, b)   (1, 1)
        (c, d) = (1, 2)

    Each 8x8 DCT block is scrambled with the Arnold map and the blocks of
    each component are permuted with a chaotic (logistic-map) shuffle.
    Data hiding uses histogram shifting on the AC coefficients.
    '''
    # The first MAX_NBITS_MESSAGE_LENGTH embedded bits store the payload
    # length, so at most MAX_MESSAGE_LENGTH bits of payload fit.
    MAX_NBITS_MESSAGE_LENGTH = 16
    MAX_MESSAGE_LENGTH = (1 << MAX_NBITS_MESSAGE_LENGTH)-1

    def __init__(self, seed=0.362, abcd=(1, 1, 1, 2), sqrtN=8):
        '''
        seed:  initial value for the chaotic shuffler, in (0, 1) \\ {0.5}.
        abcd:  4-int-tuple, the Arnold matrix entries.
        sqrtN: side length of a DCT block (N = sqrtN**2).
        '''
        super().__init__()
        self.a, self.b, self.c, self.d = abcd
        self.sqrtN = sqrtN
        # The map is a bijection on the block only if gcd(ad-bc, N) == 1.
        gcd, _, _ = exgcd(self.a*self.d - self.b*self.c, sqrtN*sqrtN)
        if gcd != 1:
            raise ValueError("Must satisfy gcd(ad-bc, N)=1")
        self.shuffler = FixedLogisticShuffeler(seed)

    def encrypt(self, image):
        '''Return an encrypted copy of *image* (a JPEGImage).
        '''
        ec_image = image.copy()
        for com in range(3):
            # Scramble coefficients inside each block, then permute the
            # blocks of the component.
            for i, block in enumerate(ec_image.data[com]):
                block = self.scrambledBlock(block)
                ec_image.data[com][i] = block
            ec_image.data[com] = self.shuffledComponemt(ec_image.data[com])
        return ec_image

    def scrambledBlock(self, block):
        """Apply the Arnold map (x, y) -> (ax+by, cx+dy) mod sqrtN to one block."""
        res = [0] * (self.sqrtN * self.sqrtN)
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = (self.a * x + self.b * y) % self.sqrtN
                yy = (self.c * x + self.d * y) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    # NOTE(review): "Componemt" is a typo kept for API compatibility.
    def shuffledComponemt(self, comp):
        """Permute the blocks of a component with the chaotic shuffle."""
        length = len(comp)
        ptrlst = [_ for _ in range(length)]
        self.shuffler.shuffle(ptrlst)
        ncomp = [None] * length
        for i, block in enumerate(comp):
            ncomp[i] = comp[ptrlst[i]]
        return ncomp

    def decrypt(self, image):
        '''Return a decrypted copy of *image* (a JPEGImage).
        '''
        dc_image = image.copy()
        for com in range(3):
            # Per-block unscrambling and block unshuffling commute, so the
            # order here need not mirror encrypt() exactly.
            for i, block in enumerate(dc_image.data[com]):
                block = self.unscrambledBlock(block)
                dc_image.data[com][i] = block
            dc_image.data[com] = self.unshuffledComponemt(dc_image.data[com])
        return dc_image

    def unscrambledBlock(self, block):
        """Invert the Arnold map using the modular inverse of its determinant."""
        res = [0] * (self.sqrtN * self.sqrtN)
        inver = multiplicative_inver(
            self.a*self.d-self.b*self.c, self.sqrtN
        )
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = inver*(self.d * x - self.b * y) % self.sqrtN
                yy = inver*(-self.c * x + self.a * y) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    def unshuffledComponemt(self, comp):
        """Invert shuffledComponemt() by replaying the same chaotic permutation."""
        length = len(comp)
        ptrlst = [_ for _ in range(length)]
        self.shuffler.shuffle(ptrlst)
        ncomp = [None] * length
        for i, block in enumerate(comp):
            ncomp[ptrlst[i]] = comp[i]
        return ncomp

    def encrtptAndEmbData(self, image, data=b'Attack at dawn!'):
        """Encrypt *image*, then embed *data* in the encrypted result."""
        image = self.encrypt(image)
        self.embData(image, data)
        return image

    def embData(self, image, data):
        """Embed *data* (bytes) into *image* via histogram shifting."""
        length = len(data) * 8
        # MAX_NBITS_MESSAGE_LENGTH bits are reserved for the length header,
        # so at most MAX_MESSAGE_LENGTH payload bits can be stored.
        assert length < self.MAX_MESSAGE_LENGTH, \
            "嵌入数据量太大: %d(MAX: %d)" % (length, self.MAX_MESSAGE_LENGTH)
        # _id = image.comp_infos[0]['component_id']
        # _index = image.comp_infos[0]['component_index']
        # hist0 = ColorSpaceHistorgram(_id, image.data[_index])
        # print('embData before', hist0.at(1))
        pos_infos = self.__shiftData(0, image, length)
        # _id = image.comp_infos[0]['component_id']
        # _index = image.comp_infos[0]['component_index']
        # hist1 = ColorSpaceHistorgram(_id, image.data[_index])
        bs = BitInputStream(data)
        pos_infos_index = 0
        # Write the 16-bit length header, least-significant bit first:
        # peak value - 1 encodes a 0 bit, peak value + 1 encodes a 1 bit.
        for i in range(self.MAX_NBITS_MESSAGE_LENGTH):
            bit = 1 if (length & (0x01 << i)) > 0 else 0
            _index, coef_index, val_index, ori = pos_infos[pos_infos_index]
            if bit == 0:
                image.data[_index][coef_index][val_index] = ori - 1
            elif bit == 1:
                image.data[_index][coef_index][val_index] = ori + 1
            pos_infos_index += 1
        # Then the payload bits themselves.
        for bit in bs.read():
            _index, coef_index, val_index, ori = pos_infos[pos_infos_index]
            if bit == 0:
                image.data[_index][coef_index][val_index] = ori-1
            elif bit == 1:
                image.data[_index][coef_index][val_index] = ori+1
            pos_infos_index += 1
        # _id = image.comp_infos[0]['component_id']
        # _index = image.comp_infos[0]['component_index']
        # hist2 = ColorSpaceHistorgram(_id, image.data[_index])
        # print('embData after ', hist2.at(1))

    def __shiftData(self, cindex, image, need):
        """Open a gap next to each histogram peak and collect embeddable slots.

        Returns a list of (component_index, block_index, coef_index, peak)
        tuples where payload bits can be written.
        """
        _id = image.comp_infos[cindex]['component_id']
        _index = image.comp_infos[cindex]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        pos_infos = []
        nalloc = 0
        for val_index in range(1, DCTSIZE2):
            for coef_index, coef_block in enumerate(image.data[_index]):
                topVal, topNum = hist.top(val_index)
                # TODO: embedding across multiple coefficient slots is not
                # implemented, which limits capacity; the 50% margin below
                # is also debatable.
                # Bug fix: the original formatted this message with
                # "% (need, )" -- one argument for two %d placeholders --
                # so the assert itself raised TypeError when it fired.
                assert int(topNum*0.5) > need, \
                    "嵌入数据量太大: %d(MAX: %d)" % (need, int(topNum*0.5))
                val = coef_block[val_index]
                if val < topVal:
                    coef_block[val_index] -= 1
                elif val > topVal:
                    coef_block[val_index] += 1
                else: # histogram peak: record a position we can embed into
                    pos_infos.append((_index, coef_index, val_index, topVal))
                    nalloc += 1
        return pos_infos

    def extractData(self, image):
        """Read back the embedded payload; inverse of embData()."""
        _id = image.comp_infos[0]['component_id']
        _index = image.comp_infos[0]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        bout = BitOutputStream()
        isGettingMsg = False
        try:
            for val_index in range(1, DCTSIZE2):
                for coef_index, coef_block in enumerate(image.data[_index]):
                    topVal, _ = hist.top(val_index)
                    val = coef_block[val_index]
                    if val == topVal - 1:
                        bout.write(0)
                    elif val == topVal + 1:
                        bout.write(1)
                    if not isGettingMsg:
                        if len(bout) == 16:
                            # The first MAX_NBITS_MESSAGE_LENGTH bits hold
                            # the payload length.
                            emb_message_length = bout.getInt(
                                nbit=self.MAX_NBITS_MESSAGE_LENGTH
                            )
                            isGettingMsg = True
                    elif len(bout) == emb_message_length:
                        # All payload bits collected: abort both loops.
                        raise Exception
        except Exception:
            # Used purely as a multi-level loop break.
            pass
        msg = bytearray(bout._bytes)
        return msg

    def clearData(self, image):
        """Undo the histogram shift so decrypt() sees the original coefficients."""
        _id = image.comp_infos[0]['component_id']
        _index = image.comp_infos[0]['component_index']
        hist = ColorSpaceHistorgram(_id, image.data[_index])
        # print('clearData before', hist.at(1))
        hasGetLength = False
        bout = BitOutputStream()
        for val_index in range(1, DCTSIZE2):
            for coef_block in image.data[_index]:
                topVal, _ = hist.top(val_index)
                val = coef_block[val_index]
                if val == topVal - 1:
                    bout.write(0)
                elif val == topVal + 1:
                    bout.write(1)
                # Collapse the shifted values back towards the peak.
                if val < topVal:
                    coef_block[val_index] += 1
                elif val > topVal:
                    coef_block[val_index] -= 1
                if not hasGetLength:
                    if len(bout) == 16: # first 16 bits hold the payload length
                        emb_message_length = bout.getInt(
                            nbit=self.MAX_NBITS_MESSAGE_LENGTH
                        )
                        hasGetLength = True
                # TODO: multi-slot embedding is not handled here either
                # if hasGetLength and emb_message_length < :
            # Only the first coefficient slot is ever used (see TODO above).
            break
        # hist1 = ColorSpaceHistorgram(_id, image.data[_index])
        # print('clearData after ', hist1.at(1))

    def decryptAndExtractData(self, image):
        """Return (decrypted image, extracted payload bytes)."""
        bdata = self.extractData(image)
        self.clearData(image)
        image = self.decrypt(image)
        return image, bdata
class JPEGImageCipher1(JPEGImageCipher0):
    """Variant of the generalized-Arnold cipher whose column mapping is
    perturbed by a fixed polynomial f(x) = k*x**p + r, making the block
    scrambling non-affine."""

    def __init__(self, seed=0.362):
        super().__init__(seed)
        # Polynomial parameters for the perturbation term f().
        self.k = 24
        self.p = 2
        self.r = 300

    def f(self, x):
        # Perturbation polynomial, applied to the already-mapped row index.
        return self.k * (x ** self.p) + self.r

    def scrambledBlock(self, block):
        # Same Arnold-style mapping as the base class, but the new column
        # index is shifted by f(new_row) before reduction mod sqrtN.
        res = [0] * (self.sqrtN * self.sqrtN)
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = (self.a * x + self.b * y) % self.sqrtN
                yy = (self.c * x + self.d * y + self.f(xx)) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    def unscrambledBlock(self, block):
        # Inverse mapping: undo the f() shift first, then apply the inverse
        # of the 2x2 Arnold matrix via the modular inverse of its determinant.
        res = [0] * (self.sqrtN * self.sqrtN)
        inver = int(multiplicative_inver(self.a*self.d-self.b*self.c, self.sqrtN))
        for x in range(self.sqrtN):
            fx = self.f(x)  # hoisted: f depends only on the row index here
            for y in range(self.sqrtN):
                xx = inver*(self.d * x - self.b * (y - fx)) % self.sqrtN
                yy = inver*(-self.c * x + self.a * (y - fx)) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res
class JPEGImageCipher2(JPEGImageCipher1):
    """Same scheme as JPEGImageCipher1, but f() reduces the power term
    modulo *mod* early to keep the intermediate integers small."""

    def f(self, x, mod):
        # return (self.k * pow_mod(x, self.p, mod) + self.r) % mod
        return (self.k * ((x**self.p) % mod) + self.r) % mod

    def scrambledBlock(self, block):
        # Identical to JPEGImageCipher1.scrambledBlock except that f()
        # takes the modulus explicitly.
        res = [0] * (self.sqrtN * self.sqrtN)
        for x in range(self.sqrtN):
            for y in range(self.sqrtN):
                xx = (self.a * x + self.b * y) % self.sqrtN
                yy = (self.c * x + self.d * y + self.f(xx, self.sqrtN)) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res

    def unscrambledBlock(self, block):
        # Inverse of scrambledBlock(): subtract f(x), then apply the inverse
        # Arnold matrix (modular inverse of the determinant).
        res = [0] * (self.sqrtN * self.sqrtN)
        inver = int(multiplicative_inver(self.a*self.d-self.b*self.c, self.sqrtN))
        for x in range(self.sqrtN):
            fx = self.f(x, self.sqrtN)
            for y in range(self.sqrtN):
                xx = inver*(self.d * x - self.b * (y - fx)) % self.sqrtN
                yy = inver*(-self.c * x + self.a * (y - fx)) % self.sqrtN
                res[xx*self.sqrtN+yy] = block[x*self.sqrtN+y]
        return res
# NOTE(review): "Historgram" is a typo, but the class name is referenced
# throughout this file, so it is kept.
class ColorSpaceHistorgram(object):
    """Per-coefficient value histograms over all DCT blocks of one component.

    ``slots[i]`` maps coefficient value -> occurrence count for DCT
    coefficient position ``i`` (0..DCTSIZE2-1).
    """

    def __init__(self, cid, component_datas):
        # NOTE(review): *cid* is accepted but never used.
        self.slots = [None] * DCTSIZE2
        for coef_block in component_datas:
            for i, val in enumerate(coef_block):
                if not self.slots[i]:
                    self.slots[i] = {}
                self.slots[i][val] = self.slots[i].get(val, 0) + 1

    def min_max(self, slot_index=None):
        '''With slot_index=None, return the (min, max) value pair of every
        slot; otherwise return the (min, max) pair of the given slot.

        Results are computed lazily on first call and cached.
        '''
        # NOTE(review): returning from `finally` suppresses any exception
        # raised while building the cache -- a latent hazard.
        try:
            _ = self._min_max_vals
        except AttributeError:
            ret = []
            for slot in self.slots:
                keys = slot.keys()
                ret.append((min(keys), max(keys)))
            self._min_max_vals = tuple(ret)
        finally:
            if slot_index is None:
                return self._min_max_vals
            else:
                return self._min_max_vals[slot_index]

    def top(self, slot_index=None):
        """Return the (value, count) pair(s) with the highest count, for all
        slots or for the given slot.  Lazily computed and cached."""
        try:
            _ = self._topVals
        except AttributeError:
            ret = []
            for slot in self.slots:
                # Sort ascending by count; the last item is the peak.
                items = sorted(slot.items(), key=lambda pair: pair[1])
                ret.append(items[-1])
            self._topVals = tuple(ret)
        finally:
            if slot_index is None:
                return self._topVals
            else:
                return self._topVals[slot_index]

    def at(self, slot_index):
        """Return the (value, count) pairs of one slot, sorted by value."""
        return sorted(self.slots[slot_index].items())

    def __str__(self):
        # One summary line per coefficient slot: peak value/count and range.
        lst = []
        for i, slot in enumerate(self.slots):
            keys = slot.keys()
            maxVal, minVal = max(keys), min(keys)
            items = sorted(slot.items(), key=lambda pair: pair[1])
            topPos, topVal = items[-1]
            lst.append(
                '''`%2d`: { [top @%2d:%5d] range: %4d ~ %4d }'''
                % (i, topPos, topVal, minVal, maxVal)
            )
        return '\n'.join(lst)

    def __repr__(self):
        return self.__str__()
class BitInputStream(object):
    """Reads a bytes object as a stream of bits, least-significant bit first."""

    def __init__(self, _bytes):
        self._bytes = _bytes

    def read(self):
        """Yield each bit (0 or 1) of the underlying bytes, LSB-first per byte."""
        for byte in self._bytes:
            for bit_pos in range(8):
                yield (byte >> bit_pos) & 1

    def __len__(self):
        # Length in bits.
        return len(self._bytes) * 8
class BitOutputStream(object):
    """Accumulates bits LSB-first into a growing list of byte values.

    Attribute names (_bytes, _curByte, _curShift) are preserved because
    other code in this file reads ``bout._bytes`` directly.
    """

    def __init__(self):
        self._bytes = []       # completed bytes, in write order
        self._curByte = 0      # partially-filled byte
        self._curShift = 0     # number of bits already in _curByte

    def write(self, bit):
        """Append one bit (0 or 1) to the stream."""
        self._curByte |= bit << self._curShift
        self._curShift += 1
        if self._curShift == 8:
            self._bytes.append(self._curByte)
            self._curByte = 0
            self._curShift = 0

    def hexdump(self):
        """Return the completed bytes as a lowercase hex string."""
        return ''.join('%02x' % b for b in self._bytes)

    def __len__(self):
        # Length in bits, counting the partially-filled byte.
        return 8 * len(self._bytes) + self._curShift

    def getInt(self, nbit=32):
        """Consume the first nbit//8 completed bytes and return them as a
        little-endian integer (earlier bytes are less significant)."""
        nbytes = nbit // 8
        value = 0
        for byte in reversed(self._bytes[:nbytes]):
            value = (value << 8) | byte
        self._bytes = self._bytes[nbytes:]
        return value
def encdec(img, cls):
    """Timing helper: build a fresh *cls* cipher and encrypt *img* once.

    (The decryption half of the round trip is currently disabled.)
    """
    cipher = cls()
    cipher.encrypt(img)
    # cipher = cls()
    # decImg = cipher.decrypt(encImg)
def main():
    """Manual experiment driver; only the steganography demo at the bottom
    is active -- the earlier sections are kept as reference snippets."""
    ## Basic image encrypt/decrypt case
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage.open('../sos.jpg')
    # cipher = JPEGImageCipher2()
    # encImg = cipher.encrypt(img)
    # encImg.save('lfs_enc.jpg')
    # cipher = JPEGImageCipher2()
    # decImg = cipher.decrypt(encImg)
    # decImg.save('lfs_dec.jpg')
    # rg = FixedLogisticShuffeler(0.500001)
    ## Output of the chaotic sequence
    # for _ in range(100):
    #     print(rg.next())
    ## Scrambling and restoring with the chaotic sequence
    # length = 100
    # target = [_ for _ in range(length)]
    # enc = [0] * length
    # dec = [0] * length
    # ptrlst = [_ for _ in range(length)]
    # print('ori', target)
    # rg.shuffle(ptrlst)
    # print('ptr', ptrlst)
    # for x in range(length):
    #     enc[x] = target[ptrlst[x]]
    # print('enc', enc)
    # for x in range(length):
    #     dec[ptrlst[x]] = enc[x]
    # print('dec', dec)
    ## Timing comparison of the three image ciphers
    # import time
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage('sos.jpg')
    # clses = [JPEGImageCipher0, JPEGImageCipher1, JPEGImageCipher2]
    # for cls in clses:
    #     beg = time.time()
    #     encdec(img, cls)
    #     end = time.time()
    #     print("Time for %s:%f" % (cls, end - beg))
    ## Timing comparison of fast modular exponentiation vs `**`
    # import time
    # run_round = 100000
    # for i in range(2):
    #     beg = time.time()
    #     if i == 0:
    #         for x in range(run_round):
    #             p = (x**20) % 1007
    #             print(p)
    #     elif i == 1:
    #         for x in range(run_round):
    #             p = pow_mod(x, 20, 1007)
    #             print(p)
    #     end = time.time()
    #     print("Time :%f" % (end - beg))
    ## Histogram helper-function tests
    # from pyjpegtbx import JPEGImage
    # img = JPEGImage.open('../lfs.jpg')
    # historgrams = []
    # for comp_info in img.comp_infos:
    #     _id = comp_info['component_id']
    #     _index = comp_info['component_index']
    #     historgrams.append(
    #         ColorSpaceHistorgram(
    #             _id, img.data[_index]
    #         )
    #     )
    # import IPython
    # IPython.embed()
    # print(historgrams[0].top())
    # print(historgrams[0].min_max())
    # print(str(historgrams[0]))
    # print(str(historgrams[0].at(0)))
    ## Bit-stream tests
    # bs = BitInputStream(b'\xff\x01\x30')
    # for i, bit in enumerate(bs.read()):
    #     print(bit, end='')
    #     if i % 8 == 7:
    #         print()
    ## Image steganography demo (active)
    from pyjpegtbx import JPEGImage
    img = JPEGImage.open('../sos.jpg')
    cipher = JPEGImageCipher0()
    encImg = cipher.encrtptAndEmbData(img, '冰菓如茶'.encode('utf-8'))
    encImg.save('lfs_enc.jpg')
    # A fresh cipher replays the same chaotic sequence for decryption.
    cipher = JPEGImageCipher0()
    decImg, data = cipher.decryptAndExtractData(encImg)
    decImg.save('lfs_dec.jpg')
    print(data.decode('utf-8'))
# Run the experiment driver only when executed as a script.
if __name__ == '__main__':
    main()
| true |
ac646266cc3bd733dc56f30cb55642c36ad17810 | Python | suprodigy/Algorithm | /BOJ_python/10798.py | UTF-8 | 311 | 3.03125 | 3 | [] | no_license | import sys
# BOJ 10798 ("vertical word reading"): read the 5 input words column by
# column, skipping words shorter than the current column.
# Redirect stdin to a local file so the script can be tested offline.
sys.stdin = open('input.txt', 'r')
readlines = sys.stdin.readlines

# One word per line; [:-1] drops the trailing newline.
# NOTE(review): if the last line has no trailing newline, [:-1] chops its
# final character -- rstrip('\n') would be safer, but changes behavior.
input_data = [line[:-1] for line in readlines()]

max_len = max(len(word) for word in input_data)

# Collect characters column-first and join once: avoids the quadratic
# string += of the original.
chars = []
for i in range(max_len):
    for j in range(5):
        if i < len(input_data[j]):
            chars.append(input_data[j][i])
ans = ''.join(chars)
print(ans)
| true |
30e916edfbdf7574d8fcb9f215c03f86afbaa0c6 | Python | belledon/blockworld | /blockworld/builders/simple_builder.py | UTF-8 | 4,573 | 3.0625 | 3 | [
"MIT"
] | permissive | import copy
import pprint
import functools
import numpy as np
from shapely import geometry, affinity
# from shapely.prepared import prep
from blockworld.utils import math_2d, geotools
from blockworld.builders.builder import Builder
class SimpleBuilder(Builder):
    """
    Interface for tower builder.

    Given a tower (may be empty), iteratively stacks blocks until the given
    criteria for completion has been met.

    Attributes:
        max_height (int): The maximum height to be added.

    NOTE(review): the original docstring also advertised a ``max_blocks``
    attribute, but no such attribute is implemented -- confirm intent.
    """

    def __init__(self, max_height = 100):
        # Goes through the property setter below, which validates the value.
        self.max_height = max_height

    # Properties #
    @property
    def max_height(self):
        return self._max_height

    @max_height.setter
    def max_height(self, v):
        # Coerce to int and reject non-positive heights.
        v = int(v)
        if v <= 0 :
            msg = '`max_height` must be greater than 0'
            raise ValueError(msg)
        self._max_height = v

    # Methods #
    def find_placements(self, tower, block):
        """
        Enumerates the geometrically valid positions for
        a block surface on a tower surface.

        This is done in three stages:

        1) Enumeration:
        First, a grid is defined over the tower base.
        This grid represents all possible points in an
        xy plane.
        Next, the z-normal xy planes of the tower are aggregated into
        polygon collections (tower levels), where any xy planes residing on
        the same z-axis belong to the same collection.
        Finally, find the intersect between the xy grid and each tower level.

        2) Proposition:
        For each tower level-grid intersect, determine if placing the given
        block at the point causes a collision.

        3) Stability Evaluation:
        For each non-colliding point on the grid, determine if the placement
        would be locally stable (algorithm described in `geotools` module).
        The "parents" of a block are defined as any block that supports the
        stability of the proposed placement.

        Arguments:
            tower (`Tower`): Base to build on
            block: The new block to place.

        Returns:
            An iterator of tuples of the form [(parents, position)..]

        The tower surfaces are expected to be sorted along the
        z-axis.

        NOTE(review): the original docstring documented ``block_dims`` and a
        ``stability`` flag that are not in the signature -- verify.
        """
        positions = []
        parents = []
        # The base of the tower
        base_grid = geotools.make_grid(tower.base)
        all_blocks, levels = tower.levels()
        # Each z-normal surface currently available on the tower
        for (level_z, level_blocks) in levels:
            # NOTE(review): block_ids is unpacked but never used.
            block_ids, blocks = zip(*level_blocks)
            # defining the layer
            block_z_surfaces = [b.surface for b in blocks]
            # Create a collection of polygons describing this z-slice
            layer = geometry.MultiPolygon(block_z_surfaces)
            # Find the intersect between the grid of possible points and z-layer
            grid = base_grid.intersection(layer.envelope)
            # Find all points on grid where the new block would not collide
            proposals = geotools.propose_placements(block, grid, level_z)
            locally_stable_f = lambda p : geotools.local_stability(p, layer)
            locally_stable = list(filter(locally_stable_f, proposals))
            collision_f = lambda p : all(
                map(lambda b : not p.collides(b), all_blocks))
            no_collision = list(filter(collision_f, locally_stable))
            # For each surviving placement, collect the ids of the blocks on
            # this level that would support ("parent") it.
            level_parents = [[i for i,b in level_blocks if pot.isparent(b)]
                             for pot in no_collision]
            positions.extend(no_collision)
            parents.extend(level_parents)
        return zip(parents, positions)

    def __call__(self, base_tower, blocks, stability = True):
        """
        Builds a tower on top of the given base by repeatedly choosing a
        random valid placement for each block, until `max_height` is
        reached, the blocks run out, or no placement is possible.

        NOTE(review): the ``stability`` argument is currently unused.
        """
        # Work on a deep copy so the caller's tower is never mutated.
        t_tower = copy.deepcopy(base_tower)
        for ib, block in enumerate(blocks):
            if t_tower.height >= self.max_height:
                break
            valids = list(self.find_placements(t_tower, block))
            if len(valids) == 0:
                print('Could not place any more blocks')
                break
            # Pick one valid placement uniformly at random.
            parents, b = valids[np.random.choice(len(valids))]
            t_tower = t_tower.place_block(b, parents)
        return t_tower
| true |
53ebe5c73128a41091bd6717b28555511498883f | Python | anonymousr007/Deep-Learning-for-Human-Activity-Recognition | /models/sdae.py | UTF-8 | 9,183 | 2.734375 | 3 | [
"MIT"
] | permissive | """Functions for training Stacked Denoising AutoEncoder (SDAE)"""
from logging import getLogger
import os
import random
from typing import Any, Dict, List, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Dropout, Conv2D
from tensorflow.keras import optimizers
from tensorflow.keras import backend as K
from src.utils import plot_learning_history, plot_model
from src.keras_callback import create_callback
tf.random.set_seed(0)
logger = getLogger(__name__)
class SDAE:
    """Two-level Stacked Denoising AutoEncoder: greedy layer-wise pretraining
    of two DAEs, followed by stacking the encoders and fine-tuning with a
    softmax head for classification.

    Artifacts (model checkpoints, learning-history plots, model diagrams)
    are written under ``LOG_DIR``, suffixed with the fold id.
    """

    def __init__(self, LOG_DIR: str, fold_id: int = 0, **params: Dict[str, Any]):
        self.LOG_DIR = LOG_DIR
        self.fold_id = fold_id
        # Pretraining hyper-parameters (both DAE levels).
        self.pretrain_lr = params["pretrain_lr"]
        self.pretrain_epochs = params["pretrain_epochs"]
        self.pretrain_batch_size = params["pretrain_batch_size"]
        # Fine-tuning hyper-parameters (classification stage).
        self.finetune_lr = params["finetune_lr"]
        self.finetune_epochs = params["finetune_epochs"]
        self.finetune_batch_size = params["finetune_batch_size"]
        self.verbose = params["verbose"]
        # Names of layers to freeze during fine-tuning.
        self.freeze_layers = params["freeze_layers"]

    def add_noise(
        self, signal: np.ndarray, noise_type: str = "mask", noise_factor: float = 0.4, seed: int = 0
    ) -> np.ndarray:
        """Add noise to create corrupted signals

        Args:
            signal (np.ndarray): input signal
            noise_type (str): chosen from "mask" (masking noise) or "noise" (additive Gaussian noise)
            noise_factor (float): strength of corruption
            seed (int): seed of normal distribution of noise
        Returns:
            signal (np.ndarray): corrupted signal
        Note:
            noise_factor is preferred to be set 0.4 for mask noise and 0.5 for Gaussian noise.
            This method is not used this time because I used dropout layer instead.
        """
        if noise_type == "mask":
            # Zero out a random noise_factor fraction of the samples.
            random.seed(seed)
            corrupt_idx = random.sample(range(len(signal)), int(len(signal) * noise_factor))
            signal[corrupt_idx] = 0
        elif noise_type == "noise":
            # Additive Gaussian noise, clipped back into [0, 1].
            np.random.seed(seed=seed)
            noise = np.random.normal(loc=0.0, scale=1.0, size=signal.shape)
            signal = signal + noise_factor * noise
            signal = np.clip(signal, 0.0, 1.0)
        return signal

    def train_1st_level(
        self, X_train: np.ndarray, X_valid: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Train the 1st-level DAE and return its encoded representations.

        Note that input must be scaled between [0, 1] because the output
        layer uses a sigmoid activation.

        Args:
            X_train (np.ndarray): shape (num_samples, window_size * num_channel)
            X_valid (np.ndarray): shape (num_samples, window_size * num_channel)
        Returns:
            (encoded X_train, encoded X_valid), each (num_samples, 100)
        """
        # Build base model: dropout acts as the "denoising" corruption.
        model = Sequential()
        model.add(Dropout(0.4, seed=10, name="1st_level_dropout"))
        model.add(
            Dense(100, input_dim=X_train.shape[1], activation="sigmoid", name="1st_level_fc")
        )  # encoder
        model.add(Dense(X_train.shape[1], activation="sigmoid"))  # decoder
        model.compile(
            loss="mean_squared_error",
            optimizer=optimizers.Adam(lr=self.pretrain_lr),
            metrics=["mse"],
        )
        callbacks = create_callback(
            model=model,
            path_chpt=f"{self.LOG_DIR}/trained_model_1st_level_fold{self.fold_id}.h5",
            metric="mse",
            verbose=50,
            epochs=self.pretrain_epochs,
        )
        # Autoencoder: the target equals the (clean) input.
        fit = model.fit(
            x=X_train,
            y=X_train,
            batch_size=self.pretrain_batch_size,
            epochs=self.pretrain_epochs,
            verbose=self.verbose,
            validation_data=(X_valid, X_valid),
            callbacks=callbacks,
        )
        plot_learning_history(
            fit=fit, metric="mse", path=f"{self.LOG_DIR}/history_1st_level_fold{self.fold_id}.png"
        )
        plot_model(model, path=f"{self.LOG_DIR}/model_1st_level.png")
        # Load best model (the checkpoint saved by the callback).
        model = keras.models.load_model(
            f"{self.LOG_DIR}/trained_model_1st_level_fold{self.fold_id}.h5"
        )
        self.model_1st_level = model
        # Encoder-only sub-model; dropout is inactive at predict time, so
        # these are predictions on the clean signal.
        encoder = Model(inputs=model.input, outputs=model.get_layer("1st_level_fc").output)
        pred_train = encoder.predict(X_train)  # predict with clean signal
        pred_valid = encoder.predict(X_valid)  # predict with clean signal
        return pred_train, pred_valid

    def train_2nd_level(
        self, X_train: np.ndarray, X_valid: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Train the 2nd-level DAE on the 1st level's encodings.

        Note that input must be scaled between [0, 1] because the output
        layer uses a sigmoid activation.

        Args:
            X_train (np.ndarray): shape (num_samples, output dim of 1st level)
            X_valid (np.ndarray): shape (num_samples, output dim of 1st level)
        Returns:
            (encoded X_train, encoded X_valid), each (num_samples, 30)
        """
        # Build base model (same structure as level 1, narrower bottleneck).
        model = Sequential()
        model.add(Dropout(0.4, seed=20, name="2nd_level_dropout"))
        model.add(
            Dense(30, input_dim=X_train.shape[1], activation="sigmoid", name="2nd_level_fc")
        )  # encoder
        model.add(Dense(X_train.shape[1], activation="sigmoid", name="2nd_level_output"))  # decoder
        model.compile(
            loss="mean_squared_error",
            optimizer=optimizers.Adam(lr=self.pretrain_lr),
            metrics=["mse"],
        )
        # Create callback
        callbacks = create_callback(
            model=model,
            path_chpt=f"{self.LOG_DIR}/trained_model_2nd_level_fold{self.fold_id}.h5",
            metric="mse",
            verbose=50,
            epochs=self.pretrain_epochs,
        )
        fit = model.fit(
            x=X_train,
            y=X_train,
            batch_size=self.pretrain_batch_size,
            epochs=self.pretrain_epochs,
            verbose=self.verbose,
            validation_data=(X_valid, X_valid),
            callbacks=callbacks,
        )
        plot_learning_history(
            fit=fit, metric="mse", path=f"{self.LOG_DIR}/history_2nd_level_fold{self.fold_id}.png"
        )
        plot_model(model, path=f"{self.LOG_DIR}/model_2nd_level.png")
        # Load best model
        model = keras.models.load_model(
            f"{self.LOG_DIR}/trained_model_2nd_level_fold{self.fold_id}.h5"
        )
        self.model_2nd_level = model
        encoder = Model(inputs=model.input, outputs=model.get_layer("2nd_level_fc").output)
        pred_train = encoder.predict(X_train)  # predict with clean signal
        pred_valid = encoder.predict(X_valid)  # predict with clean signal
        return pred_train, pred_valid

    def stack_encoders(
        self,
    ) -> Model:
        """Stack the pretrained encoders of the 1st and 2nd levels.

        Returns:
            Model: the stacked (encoder-only) model.
        """
        model_1st_level = self.model_1st_level
        model_2nd_level = self.model_2nd_level
        model = Sequential()
        model.add(model_1st_level.get_layer("1st_level_fc"))
        model.add(model_2nd_level.get_layer("2nd_level_fc"))
        return model

    def finetune(
        self,
        X_train: np.ndarray,
        X_valid: np.ndarray,
        X_test: np.ndarray,
        y_train: np.ndarray,
        y_valid: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Model]:
        """Fine-tune the stacked model for classification.

        Returns:
            (train predictions, valid predictions, test predictions, model)
        """
        model = self.stack_encoders()
        model.add(
            Dense(y_train.shape[1], activation="softmax", name="output")
        )  # Add output layer for classification
        for layer in model.layers:
            # Bug fix: the original compared Layer objects against the
            # configured layer *names* (always False) and, had it matched,
            # would have called model.get_layer([layer]) with a list, which
            # raises in Keras.  Match on layer.name and set the flag on the
            # layer object directly.
            if layer.name in self.freeze_layers:
                layer.trainable = False
                logger.debug(f"Freezed {layer=}")
        # Recompile model so the trainable flags take effect.
        model.compile(
            loss="categorical_crossentropy",
            optimizer=optimizers.Adam(lr=self.finetune_lr),
            metrics=["accuracy"],
        )
        callbacks = create_callback(
            model=model,
            path_chpt=f"{self.LOG_DIR}/trained_model_finetune_fold{self.fold_id}.h5",
            verbose=50,
            epochs=self.finetune_epochs,
        )
        fit = model.fit(
            X_train,
            y_train,
            batch_size=self.finetune_batch_size,
            epochs=self.finetune_epochs,
            verbose=self.verbose,
            validation_data=(X_valid, y_valid),
            callbacks=callbacks,
        )
        plot_learning_history(
            fit=fit, path=f"{self.LOG_DIR}/history_finetune_fold{self.fold_id}.png"
        )
        plot_model(model, path=f"{self.LOG_DIR}/model_finetune.png")
        # Reload the best checkpoint before producing the final predictions.
        model = keras.models.load_model(
            f"{self.LOG_DIR}/trained_model_finetune_fold{self.fold_id}.h5"
        )
        pred_train = model.predict(X_train)
        pred_valid = model.predict(X_valid)
        pred_test = model.predict(X_test)
        K.clear_session()
        return pred_train, pred_valid, pred_test, model
| true |
19e6c93c5cb2b76038da2aa772df9f8630d03e41 | Python | djbn65/python-parser | /testFiles/multFuncs.py | UTF-8 | 491 | 3.3125 | 3 | [] | no_license | def factorial(N):
answer = N
N = N - 1
while N != 0:
answer = answer * N
N = N - 1
end
answer
end
def C(n,r):
origN = n
facN = n
n = n - 1
while n != 0:
facN = facN * n
n = n - 1
end
origR = r
facR = r
r = r - 1
while r != 0:
facR = facR * r
r = r - 1
end
nr = origN - origR
facNR = nr
nr = nr - 1
while nr != 0:
facNR = facNR * nr
nr = nr - 1
end
answer = (facN) / (facR * facNR)
answer
end
# Exercise both fixture functions when the file is parsed/executed.
C(10,5)
factorial(10)
7efb5acc574440c19107b85309679b0092ef194f | Python | DragonWarrior15/dog-breed-classifier | /app_server.py | UTF-8 | 2,369 | 2.53125 | 3 | [] | no_license | # python app_server.py
import os
import json
from flask import Flask, request, render_template, jsonify
from utils import dogBreedDataset, dogBreedTransforms, dogBreedClassifier, logger_setup
import torch
from torch.utils.data import DataLoader
from torch import nn
from torchvision.io import read_image
# global definitions of image transforms and the model
input_size = 224   # side length the input image is resized to
output_size = 133  # number of dog-breed classes
transforms = dogBreedTransforms(resize_size=input_size)
# model is picked based on the highest test set accuracy from the logs
model = torch.load(os.path.join('models', '202106150749', '202106150749_00008'))
model.eval()  # inference mode: disables dropout/batch-norm updates
# softmax over the class dimension, to turn logits into probabilities
sm = nn.Softmax(dim=1)
# class-index (as string) -> breed-name mapping, loaded once at startup
with open('label_map', 'r') as f:
    label_map = json.load(f)

app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def start_page():
    """Serve the app page where the user can upload an image."""
    return render_template('app_ui.html')
@app.route('/inference', methods=['POST'])
def inference():
    """Classify an uploaded image and return the top-10 breed predictions
    as JSON: ``{'preds': [[breed_name, percent], ...]}``.

    Uploaded files live in ``request.files`` (other form entries are in
    ``request.form``); see
    https://stackoverflow.com/questions/51765498/flask-file-upload-unable-to-get-form-data
    and, for reading the file stream,
    https://stackoverflow.com/questions/20015550/read-file-data-without-saving-it-in-flask
    """
    # No file uploaded: return every breed with a zero score.
    if 'img' not in request.files:
        return jsonify({'preds': [[label_map[i], 0] for i in label_map]})
    # first save the image to disk to make a file for pytorch to read
    # NOTE(review): the fixed 'tmp' filename is a race under concurrent
    # requests -- consider tempfile.NamedTemporaryFile.
    img = request.files['img'].read()
    with open('tmp', 'wb') as f:
        f.write(img)
    #read the image
    img = read_image('tmp').float()
    # add the batch dimension
    img = torch.unsqueeze(img, dim=0)
    # convert the image as required
    img = transforms.img_transform_test(img)
    # run the inference, apply softmax to get probabilities
    preds = sm(model(img)).tolist()[0]
    # get the top 10 probabilities in sorted order
    preds = [[i, x] for i, x in enumerate(preds)]
    preds = sorted(preds, key=lambda x: x[1], reverse=True)
    preds = preds[:10]
    # map class indices to breed names; probabilities become whole percents
    preds = [[label_map[str(x[0])], int(100*x[1])] for x in preds]
    # return the predictions as JSON
    return jsonify({'preds': preds})
# Start the Flask development server (debug mode) when run directly.
if __name__ == '__main__':
    app.run(debug = True)
| true |
cd4c9d73bcffb5dfe4f12918b41d9271860befaa | Python | bpare/wikihistory-mirror | /visualization/processHTML.py | UTF-8 | 2,476 | 3.25 | 3 | [] | no_license | #!/usr/bin/python
from bs4 import BeautifulSoup
import argparse
import re
import os.path
import styleHTML
import scoreHTML
def process(title, directory):
    """
    Finds all articles with title to create csv with
    section titles, dates, and scores. Also adds HTML styling

    *directory* is expected to end with a path separator -- file names are
    concatenated onto it, not joined.
    """
    fileNameList = []
    for fileName in os.listdir(directory):
        if title in fileName and fileName.endswith('.html'):
            fileNameList.append(fileName)
    #need to make sure files are in chronological order
    # (file names embed an ISO-like YYYY-MM-DD timestamp, so a plain
    # lexicographic sort is chronological)
    fileNameList.sort()
    print (fileNameList)
    # The last file is intentionally skipped: each article's end date is
    # taken from the *next* file's timestamp, which the last one lacks.
    for i in range(len(fileNameList)-1):
        with open(directory + fileNameList[i]) as fp:
            soup = BeautifulSoup(fp,"html.parser")
        #find dates using time stamp in title
        print(fileNameList[i])
        startDate = re.search(r'\d+\-\d+\-\d+', fileNameList[i]).group()
        endDate = re.search(r'\d+\-\d+\-\d+', fileNameList[i+1]).group()
        sectionScores, totalScore = scoreHTML.findSectionScore(soup)
        scoreHTML.writeCSV(sectionScores, totalScore, startDate, endDate, title, directory)
        soup = styleHTML.addID(soup, title)
        #rewrite HTML to how we want it to look...
        # NOTE(review): `file` shadows the builtin of the same name.
        with open(directory + fileNameList[i],'w+') as file:
            file.write(str(soup.prettify()))
def fileSetUp(title, directory):
    """Create the per-article javascript/html files from the templates,
    substituting the TITLE placeholder with the article title."""
    # (template path relative to directory, output path) pairs.
    template_map = [
        ('/../temp.js', directory + '/' + title + '.js'),
        ('/../temp_mat1.js', directory + title + '_mat.js'),
        ('/../temp_mat2.html', directory + title + '_mat.html'),
    ]
    for template_suffix, output_path in template_map:
        with open(directory + template_suffix) as template_file:
            contents = template_file.read().replace("TITLE", title)
        with open(output_path, 'w') as output_file:
            output_file.write(contents)
def parse_args():
    """Parse the title/directory CLI arguments, then run the full pipeline
    (process the HTML snapshots and generate the per-article JS files)."""
    parser = argparse.ArgumentParser(usage='processHTML.py [article title] [article path]')
    parser.add_argument('title', help='title of article')
    parser.add_argument('directory', help='path to html of articles')
    n=parser.parse_args()
    process(n.title, n.directory)
    fileSetUp(n.title, n.directory)

# Script entry point.
if __name__ == '__main__':
    parse_args()
| true |
2b6158495c365fc36de016cffdfe0e1163e33b58 | Python | topicgit/learngit | /py/sys/pickling.py | UTF-8 | 260 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python
#Filename:pickling.py
# NOTE: Python 2 script -- uses cPickle, the removed file() builtin and a
# print statement; it will not run under Python 3.
import cPickle as p
# Persist a shopping list to disk with pickle, then load it back and print it.
shoplistfile='shoplist.data'
shoplist=['apple','mango','carrot']
f=file(shoplistfile,'w')
p.dump(shoplist,f)
f.close()
# Drop the in-memory copy to prove the reload really comes from disk.
del shoplist
f=file(shoplistfile)
storedlist=p.load(f)
print storedlist
| true |
08ec2239bd150db98aa6085a9f8ec4f7b680abaa | Python | outcastofmusic/jikken | /src/jikken/database/database.py | UTF-8 | 6,321 | 2.71875 | 3 | [
"MIT"
] | permissive | import os
from contextlib import contextmanager
from .query import ExperimentQuery, MultiStageExperimentQuery
from jikken.experiment import Experiment
from jikken.multistage import MultiStageExperiment
from .config import get_config, JikkenConfig
class Singleton(type):
    """Metaclass caching a single instance per class.

    The cached instance is reused unless a later call requests a different
    database backend (``config.db_type``), in which case a fresh instance
    replaces the cached one.
    """
    def __init__(self, *args, **kwargs):
        self._instance = None
        super(Singleton, self).__init__(*args, **kwargs)
    def __call__(self, *args, **kwargs):
        # The config may be passed positionally (DataBase(config)) or as a
        # keyword (DataBase(config=...)). The original kwargs['config'] lookup
        # raised KeyError on positional calls once an instance was cached --
        # exactly how setup_database() invokes DataBase.
        config = kwargs.get('config', args[0] if args else None)
        if self._instance is None:
            self._instance = super(Singleton, self).__call__(*args, **kwargs)
        elif config is not None and self._instance.db != config.db_type:
            # Backend changed: rebuild the cached instance.
            self._instance = super(Singleton, self).__call__(*args, **kwargs)
        return self._instance
class DataBase(metaclass=Singleton):
    """Facade over the concrete experiment stores (TinyDB / Mongo / ES).

    The Singleton metaclass keeps one live instance per configured backend;
    all public methods delegate to the backend adapter in ``self._database``.
    """
    def __init__(self, config: JikkenConfig):
        self.db = config.db_type
        # Backend adapters are imported lazily so only the selected backend's
        # driver needs to be installed.
        if config.db_type == 'tiny':
            os.makedirs(config.db_path, exist_ok=True)
            from .db_tinydb import TinyDB
            self._database = TinyDB(config.db_path, config.db_name)
        elif config.db_type == 'mongo':
            from .db_mongo import MongoDB
            self._database = MongoDB(config.db_path, config.db_name)
        elif config.db_type == 'es':
            from .db_es import ElasticSearchDB
            self._database = ElasticSearchDB(config.db_path, config.db_name)
        else:
            raise ValueError("db_type must be a 'tiny' or 'mongo'")
        if self._database is None:
            raise ConnectionError("could not connect to database")
    def add(self, data_object: (Experiment, MultiStageExperiment)) -> int:
        """Persist an Experiment or MultiStageExperiment; return its doc id.

        For a multistage experiment each stage is stored (or reused when it
        already has a doc_id present in the db) and the stage list is
        rewritten to (step, experiment_id) pairs before the parent is saved.
        """
        if isinstance(data_object, Experiment):
            """Add an experiment dict to db."""
            return self._database.add(data_object.to_dict())
        elif isinstance(data_object, MultiStageExperiment):
            multistage_dict = data_object.to_dict()
            for step, exp in data_object:
                exp_dict = None
                if exp.doc_id is not None:
                    exp_dict = self._database.get(exp.doc_id, "experiment")
                # Only insert the stage when it is not already stored.
                if exp_dict is None:
                    _id = self._database.add(exp.to_dict())
                else:
                    _id = exp.doc_id
                step_index = data_object.step_index(step)
                multistage_dict['experiments'][step_index] = (step, _id)
            return self._database.add(multistage_dict)
        else:
            raise TypeError("experiment {} was not Experiment|multistage".format(type(data_object)))
    def get(self, doc_id: int, doc_type: str) -> dict: # type (int) -> dict
        """Return a experiment dict with matching id."""
        assert doc_type in self._database.collections, "doc_type {} not in db"
        doc = self._database.get(doc_id, doc_type)
        # Multistage documents store bare experiment ids; inline the full
        # experiment dicts before returning.
        if doc["type"] == "multistage":
            for index, (step, exp_id) in enumerate(doc["experiments"]):
                exp = self._database.get(exp_id, "experiment")
                doc["experiments"][index] = (step, exp)
        return doc
    def list_experiments(self, query: ExperimentQuery = None) -> list: # type (str) -> list[dict]
        """Return list of experiments."""
        query = ExperimentQuery() if query is None else query
        return self._database.list_experiments(query=query)
    def list_ms_experiments(self, query: MultiStageExperimentQuery = None) -> list:
        """Return multistage experiments with their stage experiments inlined."""
        query = MultiStageExperimentQuery() if query is None else query
        results = self._database.list_ms_experiments(query=query)
        for doc in results:
            for index, (step, exp_id) in enumerate(doc['experiments']):
                exp = self._database.get(exp_id, "experiment")
                doc["experiments"][index] = (step, exp)
        return results
    def count(self) -> int: # type () -> int
        """Return number of experiments in db."""
        # TODO add ability to return count of experiments, multistage experiments or everything
        return self._database.count()
    def update(self, experiment_id: int, experiment: Experiment) -> None:
        """Modify experiment in db with given experiment_id."""
        return self._database.update(experiment_id, experiment.to_dict())
    def update_std(self, experiment_id, string, std_type):
        """Append captured stdout/stderr text to the experiment document."""
        if std_type in ['stdout', 'stderr']:
            self._database.update_key(experiment_id, string, std_type, mode='add')
        else:
            raise ValueError("std_type was not stdout or stderr")
    def update_status(self, experiment_id: int, status: str):
        """Update the status of the experiment (must be one of the fixed states)."""
        if status in ['created', 'running', 'completed', 'error', 'interrupted']:
            self._database.update_key(experiment_id, status, "status", mode='set')
        else:
            raise ValueError("status: {} not correct".format(status))
    def update_monitored(self, experiment_id, key, value):
        """Append a value to the monitored metric `key`, creating it on first use."""
        exp = self._database.get(experiment_id, collection="experiment")
        if key not in exp['monitored']:
            self._database.update_key(experiment_id, value=[value], key=['monitored', key], mode='set')
        else:
            self._database.update_key(experiment_id, value=[value], key=['monitored', key], mode='add')
    def delete(self, experiment_id, doc_type="experiment"): # type (int) -> ()
        """Remove a experiment from db with given experiment_id."""
        if doc_type == "experiment":
            self._database.delete(experiment_id)
        elif doc_type == "multistage":
            self._database.delete_mse(experiment_id)
        else:
            raise ValueError("doc_type {} not supported".format(doc_type))
    def delete_all(self):
        """Remove all experiments from db."""
        self._database.delete_all()
    def stop_db(self):
        """Disconnect from DB."""
        self._database.stop_db()
@contextmanager
def setup_database():
    """Yield a DataBase configured from ./.jikken/config, closing it on exit."""
    config_path = os.path.join(os.getcwd(), ".jikken", "config")
    _database = None
    try:
        config = get_config(config_path)
        print(config)
        _database = DataBase(config)
        yield _database
    finally:
        # _database is still None when get_config()/DataBase() raised; calling
        # stop_db() on None would mask the original error with AttributeError.
        if _database is not None:
            _database.stop_db()
            _database = None
| true |
7c0159ab1fd725907243b67ae5965f3ba4721bc6 | Python | leafsummer/keeplearning | /test/gevent_test/gevent_test2.py | UTF-8 | 787 | 3.21875 | 3 | [] | no_license | import gevent
from gevent import Greenlet
# class MyGreenlet(Greenlet):
# def __init__(self, message, n):
# Greenlet.__init__(self)
# self.message = message
# self.n = n
# def _run(self):
# print self.message
# gevent.sleep(self.n)
# g = MyGreenlet("Hi there!", 3)
# g.start()
# g.join()
# NOTE: Python 2 syntax (print statements) throughout this script.
def win():
    # Greenlet body that completes successfully.
    return 'You win!'
def fail():
    # Greenlet body that raises, to demonstrate exception capture.
    raise Exception('You fail at failing.')
winner = gevent.spawn(win)
loser = gevent.spawn(fail)
print winner.started
print loser.started
# joinall waits for both greenlets; an exception raised inside a greenlet is
# captured on the greenlet itself rather than propagating to the joiner, so
# this except clause is never entered.
try:
    gevent.joinall([winner, loser])
except Exception as e:
    print 'This will never be reached'
print winner.value
print loser.value
print winner.ready()
print loser.ready()
print winner.successful()
print loser.successful()
print loser.exception | true |
4c40d540de7f95e1c58abf58efa01c2c0025fd1d | Python | Nanguage/simucaller | /simucaller/gui/widget/control_panel.py | UTF-8 | 3,128 | 2.59375 | 3 | [] | no_license | from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QCheckBox, QLabel, QSlider, QVBoxLayout, QHBoxLayout
from simucaller.helpers import get_logger
log = get_logger(__name__)
class ControlPanel(object):
    """
    Mixin providing the slider/checkbox control panel for a viewer window.

    Layout built by create_control_hbox() (QHBoxLayout):
      * checkboxes (QVBoxLayout)
          - grid_cb ("Show Grid")
      * sliders (QVBoxLayout)
          - label_slider_t / slider_t  (time axis)
          - label_slider_z / slider_z  (z axis)

    NOTE(review): the mixing class is expected to provide self.position,
    self.on_draw(), and optionally self.series / self.heatmap state --
    confirm against the concrete viewer class.
    """
    def on_slide(self):
        """
        Handler for slider value change events: record the new t/z position,
        refresh the slider labels and trigger a redraw.
        """
        t = self.slider_t.value()
        z = self.slider_z.value()
        self.position['t'] = t
        self.position['z'] = z
        # Seconds-per-step is only known once a series is loaded; fall back
        # to 0 so the label math still works.
        if hasattr(self, 'series'):
            tiv = self.series.time_interval
        else:
            tiv = 0
        if hasattr(self, 'heatmap'):
            self.load_heatmap2d()
        self.label_slider_t.setText("T Axis: %d (%.2fs)"%(t, t*tiv))
        self.label_slider_z.setText("Z Axis: %d"%z)
        #
        # draw heatmap when heatmap checked
        if hasattr(self, 'heatmap') and self.heatmap_cb.isChecked():
            try:
                # The cutoff field may hold a non-numeric value; in that case
                # keep the previous cutoff and just log the parse error.
                pvalue = float(self.heatmap_cutoff_input.text())
                self.heatmap_cutoff = pvalue
                self.on_draw()
            except ValueError as e:
                log.error(e)
        else:
            self.on_draw()
    def create_slider(self, axis, max_value, init_value=0):
        """
        Generate slider widget with the label.
        Returns the (slider, label) pair for the given axis name.
        """
        label = axis.upper() + "Axis"
        slider_label = QLabel(label + ": " + str(init_value))
        slider = QSlider(Qt.Horizontal)
        slider.setRange(0, max_value)
        slider.setValue(init_value)
        slider.setTracking(True)
        slider.setTickPosition(QSlider.TicksBothSides)
        slider.valueChanged.connect(self.on_slide) # connect to refresh
        return slider, slider_label
    def create_sliders(self):
        """
        Build the vertical slider stack:
        sliders(QVBoxLayout)
        * label_slider_t(QLabel)
        * slider_t
        * label_slider_z
        * slider_z
        """
        # Slider ranges come from the loaded series dimensions; default to a
        # 0..1 range when no series has been loaded yet.
        if hasattr(self, 'series'):
            t_max = self.series.shape[0] - 1
            z_max = self.series.shape[3] - 1
        else:
            t_max = 1
            z_max = 1
        self.slider_t, self.label_slider_t = self.create_slider("t", t_max)
        self.slider_z, self.label_slider_z = self.create_slider("z", z_max)
        sliders = QVBoxLayout()
        for w in [self.label_slider_t, self.slider_t, self.label_slider_z, self.slider_z]:
            sliders.addWidget(w)
        return sliders
    def create_control_hbox(self):
        """Assemble the full control panel (checkboxes + sliders)."""
        # checkboxes
        checkboxes = QVBoxLayout()
        # Grid check box
        #
        self.grid_cb = QCheckBox("Show &Grid")
        self.grid_cb.setChecked(False)
        self.grid_cb.stateChanged.connect(self.on_draw)
        checkboxes.addWidget(self.grid_cb)
        # sliders
        #
        sliders = self.create_sliders()
        #
        # Layout with box sizers
        #
        hbox = QHBoxLayout()
        hbox.addLayout(checkboxes)
        hbox.addLayout(sliders)
        return hbox
| true |
eddf17d109db04888604323b4340ecae81e73412 | Python | Leownhart/My_Course_of_python | /Exercicios/ex014.py | UTF-8 | 390 | 4.46875 | 4 | [] | no_license | #(54 °C × 9 / 5) + 32 = 129, 2 °F
Celsius = float(input('Informe a temperatura em ºC: '))
fahrenheit = (Celsius * 9 / 5) + 32
print('A temperatura em Fahrenheit e {:.1f}F'.format(fahrenheit))
# (32 °F − 32) × 5/9 = 0 °C
fahrenheit = float(input('Informe a temperatura em ºF: '))
Celsius = (fahrenheit - 32) * 5 / 9
print('A temperatura em Fahrenheit e {:.1f}ºC'.format(Celsius)) | true |
254c06d3ad85f2fc1dfcb29fc3845f84f0da4b08 | Python | Aasthaengg/IBMdataset | /Python_codes/p03456/s940157396.py | UTF-8 | 511 | 2.71875 | 3 | [] | no_license | import sys
from collections import deque
import numpy as np
import math
# Raise the recursion limit for deeply recursive solutions.
sys.setrecursionlimit(10**6)
# Competitive-programming stdin helpers: read one line as a string, as string
# tokens, as an int, or as int tokens (SL/IL return lazy map objects).
def S(): return sys.stdin.readline().rstrip()
def SL(): return map(str,sys.stdin.readline().rstrip().split())
def I(): return int(sys.stdin.readline().rstrip())
def IL(): return map(int,sys.stdin.readline().rstrip().split())
def solve():
    """Print "Yes" if concatenating the digit strings a and b (read in
    __main__) yields a perfect square, else "No"."""
    c = int(a+b)
    # BUG FIX: int(math.sqrt(c)) relies on float rounding and can be off by
    # one for larger values, misclassifying perfect squares; math.isqrt is
    # exact for integers.
    r = math.isqrt(c)
    if r*r == c:
        print("Yes")
    else:
        print('No')
    return
# Entry point: read the two whitespace-separated tokens as strings.
if __name__=='__main__':
    a,b = SL()
solve() | true |
3d94763bf74288b78775d75deb347bf565a79f18 | Python | vincentdavis/srtm.py | /srtm/utils.py | UTF-8 | 2,386 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
# Copyright 2013 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as mod_logging
import math as mod_math
import zipfile as mod_zipfile
from io import BytesIO as cStringIO # looks hacky but we are working with bytes
from typing import *
ONE_DEGREE = 1000. * 10000.8 / 90.  # meters per degree of latitude (~111.12 km)


class Color(NamedTuple):
    """RGBA color with integer channels."""
    red: int
    green: int
    blue: int
    alpha: int


def distance(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float) -> float:
    """Approximate distance in meters between two lat/lon points
    (equirectangular approximation scaled by the first point's latitude)."""
    lon_scale = mod_math.cos(latitude_1 / 180. * mod_math.pi)
    delta_lat = latitude_1 - latitude_2
    delta_lon = (longitude_1 - longitude_2) * lon_scale
    return mod_math.sqrt(delta_lat * delta_lat + delta_lon * delta_lon) * ONE_DEGREE
def get_color_between(color1: Color, color2: Color, i: float) -> Color:
    """Linear blend: i <= 0 yields color1, i >= 1 yields color2, values in
    between interpolate each channel independently."""
    if i <= 0:
        return color1
    if i >= 1:
        return color2
    blended = tuple(int(color1[channel] + (color2[channel] - color1[channel]) * i)
                    for channel in range(4))
    return Color(*blended)
def zip(contents: bytes, file_name: str) -> bytes:
    """Compress *contents* into an in-memory zip archive holding a single
    member named *file_name*; return the archive bytes.

    NOTE: shadows the builtin zip() within this module (kept for backward
    compatibility with existing callers).
    """
    mod_logging.debug('Zipping %s bytes' % len(contents))
    buffer = cStringIO()
    with mod_zipfile.ZipFile(buffer, 'w', mod_zipfile.ZIP_DEFLATED, False) as archive:
        archive.writestr(file_name, contents)
    buffer.seek(0)
    mod_logging.debug('Zipped')
    return buffer.read()
def unzip(contents: bytes) -> bytes:
    """Return the decompressed bytes of the FIRST member of an in-memory
    zip archive."""
    mod_logging.debug('Unzipping %s bytes' % len(contents))
    archive = mod_zipfile.ZipFile(cStringIO(contents))
    first_member = archive.infolist()[0]
    payload = archive.open(first_member).read()
    mod_logging.debug('Unzipped')
    return payload
| true |
7781791c4e2236ba274b3dbb52f2fdddb7acac51 | Python | putetrekk/maestro-net | /transformer_v2/predictor.py | UTF-8 | 2,508 | 2.84375 | 3 | [] | no_license | import pickle
import numpy
import tensorflow as tf
def get_note_id(predictions):
    """Sample one note id from the model output.

    Keeps the top-3 scores of the final timestep, renormalizes them into a
    probability distribution and samples one of the three candidate ids.

    NOTE(review): assumes `predictions` is a (batch=1, seq_len, vocab) eager
    tensor (uses .numpy()), and that the top-3 scores are non-negative (e.g.
    post-softmax) -- negative logits would break the normalization. Confirm
    against the caller.
    """
    # Keep only the last position along the sequence dimension.
    predictions = predictions[:, -1:, :]
    best_k = tf.math.top_k(predictions, k=3, sorted=False)
    # top_k returns (values, indices); index into batch 0, position 0.
    indexes = best_k[1][0][0]
    values = best_k[0][0][0]
    values = [v.numpy() for v in values]
    probabilities = [value / sum(values) for value in values]
    predicted_id = numpy.random.choice(indexes, p=probabilities)
    return predicted_id
class Predictor():
    """Greedy seq2seq decoder wrapper around a Transformer-style model.

    NOTE(review): expects a SubwordTextEncoder-like tokenizer exposing
    vocab_size / encode / decode -- confirm against the training code.
    """
    def __init__(self, tokenizer=False, max_length=80, tokenizer_pickle_path=''):
        # Use the supplied tokenizer, or unpickle one from disk.
        if tokenizer:
            self.tokenizer = tokenizer
        else:
            with open(tokenizer_pickle_path, 'rb') as handle:
                self.tokenizer = pickle.load(handle)
        # Two ids beyond the tokenizer vocabulary serve as start/end tokens.
        self.start_token = [self.tokenizer.vocab_size]
        self.end_token = [self.tokenizer.vocab_size + 1]
        self.vocab_size = self.tokenizer.vocab_size + 2
        self.max_length = max_length
    def evaluate(self, sentence, model):
        """Greedily decode up to max_length token ids conditioned on `sentence`."""
        sentence = tf.expand_dims(
            self.start_token + self.tokenizer.encode(sentence) + self.end_token, axis=0)
        output = tf.expand_dims(self.start_token, 0)
        for i in range(self.max_length):
            predictions = model(inputs=[sentence, output], training=False)
            # select the last word from the seq_len dimension
            predictions = predictions[:, -1:, :]
            predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
            # return the result if the predicted_id is equal to the end token
            if tf.equal(predicted_id, self.end_token[0]):
                break
            # concatenated the predicted_id to the output which is given to the decoder
            # as its input.
            output = tf.concat([output, predicted_id], axis=-1)
        return tf.squeeze(output, axis=0)
    def predict(self, sentence, model):
        """Decode `sentence` and return the detokenized continuation."""
        prediction = self.evaluate(sentence, model)
        # Drop the start/end markers (ids >= vocab_size) before detokenizing.
        predicted_sentence = self.tokenizer.decode(
            [i for i in prediction if i < self.tokenizer.vocab_size])
        return predicted_sentence.lstrip()
    def generate_work(self, model, length=10, initial_notes=''):
        """Iteratively extend a note sequence `length` times, reseeding the
        model from (at most) the last 3500 characters of generated music."""
        seed_notes_max_size = 3500
        # Initial Note(s)
        if not initial_notes:
            initial_notes = "wait50 wait50 wait29 v10 p55"
        seed_notes = initial_notes
        music = initial_notes
        for i in range(length):
            print(f'music length: {len(music)}')
            if i > 0:
                # Take seed notes from the generated music
                seed_notes = music[-seed_notes_max_size:]
                # Remove the first word, as it may be invalid
                seed_notes = seed_notes[seed_notes.index(" "):]
                print(len(seed_notes))
            notes = self.predict(seed_notes, model)
            music += notes + ' '
        print(f'final music: {music}')
        return music
| true |
4ea88afb1fbe95ecc90aa7669810154d00f155bd | Python | vmayil/python | /prime.py | UTF-8 | 143 | 3.4375 | 3 | [] | no_license | num=int(input(Enter a number:")
# NOTE(review): fixed the original, which had unterminated string literals
# (syntax errors) and a dangling else that reported "is a prime" for num <= 1.
if num > 1:
    for i in range(2, num):
        if num % i == 0:
            print("is not prime")
            break
    else:
        # No divisor found in [2, num): num is prime.
        print("is a prime")
else:
    # 0, 1 and negative numbers are not prime.
    print("is not prime")
| true |
3115df9c456ee673a330fa5b601dd1a2daf17158 | Python | jainendrak/python-training | /slicing.py | UTF-8 | 133 | 2.890625 | 3 | [] | no_license | data="hello python"
print(data[:-1])      # drop the last char -> "hello pytho"
print(data[0:-1])     # same slice with an explicit start -> "hello pytho"
print(data[4::-1])    # indices 4..0 reversed -> "olleh"
print(data[-1:-7:-1]) # last six chars reversed -> "nohtyp"
print(data[::-1])     # whole string reversed -> "nohtyp olleh"
print(data[:5:-1])    # indices 11..6 reversed -> "nohtyp"
| true |
f11050a42c61601c03a32d121a91a61139823f48 | Python | kdipippo/gdq-graph | /reduce_dataset.py | UTF-8 | 1,539 | 3.46875 | 3 | [] | no_license | def readfileintowords(filename):
    """Read *filename* and return its lines with surrounding whitespace stripped."""
    with open(filename) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    return content
def get_time_at_interval(hours_interval):
    """Return 90 x-axis labels: '0', str(interval), str(2*interval), ...

    Built by repeated addition so the string form matches the original's
    accumulation behavior for non-integer intervals.
    """
    labels = []
    elapsed = 0
    for _ in range(90):
        labels.append(str(elapsed))
        elapsed += hours_interval
    return labels
def get_data_at_interval(event, hours):
    """Sample the cumulative-donation column roughly every `hours` hours."""
    # Row format: seconds,utc_unixtimestamp,donation_id,donation_amount,cumulative_donation_amount
    threshold_seconds = 0
    sampled = []
    for line in event[1:]:  # event[0] is the CSV header row
        fields = line.split(",")
        # Whenever a row crosses the current threshold, keep its cumulative
        # amount and advance the threshold by `hours` hours.
        if int(fields[0]) > threshold_seconds:
            sampled.append(fields[4])
            threshold_seconds += hours * 60 * 60
    return sampled
if __name__ == "__main__":
    # STEP 1: Update the list with the new events to add to the graph
    events = [
        "agdq2018",
        "sgdq2017",
        "sgdq2018",
        "sgdq2020"
    ]
    f = open("reduced_data.txt", "w")
    # First line: the shared x-axis labels (3-hour intervals).
    time = get_time_at_interval(3)
    f.write("['" + "','".join(time) + "']\n\n\n")
    # One line per event with its sampled cumulative donation totals.
    for event in events:
        event_result = readfileintowords(f"data-sorted/{event}_result.csv")
        event_result_data = get_data_at_interval(event_result, 3)
        f.write("['" + "','".join(event_result_data) + "']\n\n\n")
    f.close()
    # STEP 2: Update reduce.js with the new datasets results.
    # Color hexes are starting from the last row of the "Bright Color Palettes" image:
    # https://wondernote.org/color-palettes-for-web-digital-blog-graphic-design-with-hexadecimal-codes/
| true |
f5e40f303bcc401ca3c377d80f099994a24274f7 | Python | jsit-cloud523/crawl_recruitment_info | /jobCollect/lagou/lagou.py | UTF-8 | 10,381 | 2.625 | 3 | [] | no_license | import random
import ua
from lxml import etree
import requests
import time
import global_var
import re
import logging
class Lagou:
    """Scraper for www.lagou.com: job categories from the home-page sidebar
    and paginated job listings per search URL."""
    def __init__(self):
        self.url = 'https://www.lagou.com'
        # Browser-like headers with a randomized User-Agent to lower the
        # chance of tripping the site's anti-crawler checks.
        self.headers = {
            'User-Agent': random.choice(ua.ua_pool),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'www.lagou.com',
            'Upgrade-Insecure-Requests': '1',
        }
    '''
    get_all_positions() returns a nested mapping of the sidebar categories:
    {top category: {2nd-level category: {job name: listing URL, ...}, ...}, ...}
    e.g. {'技术': {'后端开发': {'java': 'https://www.lagou.com/zhaopin/java', ...}, ...}, ...}
    '''
    def get_all_positions(self):
        """Scrape the home-page sidebar and return every job category.

        Returns a dict keyed by top-level category name; each value maps a
        second-level category name to a {job name: listing URL} dict.
        """
        s = requests.Session()
        response = s.get(self.url, headers=self.headers)
        selector = etree.HTML(response.text)
        top_cat_dict = {}
        for i in range(0, 7):
            '''
            XPath layout of the sidebar (i = top-level category index,
            j = 2nd-level block index; dt holds the 2nd-level title and
            dd holds its 3rd-level job links):
            2nd-level heading: //*[@id="sidebar"]/div/div[i]/div[2]/dl[j]/dt/span
            3rd-level jobs:    //*[@id="sidebar"]/div/div[i]/div[2]/dl[j]/dd/a[k]
            '''
            top_cat = selector.xpath('//*[@id="sidebar"]/div/div[' + str(i + 1) + ']/div[1]/div/h2/text()')[0]
            # The 7 top categories (in Chinese): technology, product, design,
            # operations, marketing & sales, functional roles, finance.
            top_cat_str = str(top_cat).strip()
            '''
            grade2_cat_list holds the 2nd-level category names under this top
            category, e.g. ['后端开发', '移动开发', '前端开发', ...] for tech.
            '''
            grade2_cat_list = selector.xpath('//*[@id="sidebar"]/div/div[' + str(i + 1) + ']/div[2]/dl/dt/span/text()')
            categories_dict_list = []
            # grade2_cat_dict maps a 2nd-level category name to its
            # {job name: listing URL} dict.
            grade2_cat_dict = {}
            for j in range(0, len(grade2_cat_list)):
                # Job names of this 2nd-level block, e.g. ['Java', 'C++', ...]
                jobname_list = selector.xpath(
                    '//*[@id="sidebar"]/div/div[' + str(i + 1) + ']/div[2]/dl[' + str(j + 1) + ']/dd/a/text()')
                link_list = selector.xpath(
                    '//*[@id="sidebar"]/div/div[' + str(i + 1) + ']/div[2]/dl[' + str(j + 1) + ']/dd/a/@href')
                '''
                Pair each job name with its listing URL, e.g.
                key: '后端开发', value: {'Java': 'https://www.lagou.com/zhaopin/Java/', ...}
                '''
                grade2_cat_dict[grade2_cat_list[j]] = dict(zip(jobname_list, link_list))
            top_cat_dict[top_cat_str] = grade2_cat_dict
        return top_cat_dict
    def transtime(self, str):
        """Normalize a relative posting time like '10:11发布' ("published at
        10:11") to 'YYYY-MM-DD 10:11' using today's date; any other string is
        returned unchanged."""
        p = re.match(r'(\d{1,2}:\d{1,2})发布', str)
        if p:
            return time.strftime('%Y-%m-%d',time.localtime(time.time())) + " " + p.group(1)
        else:
            return str
    def getJobListPerPage(self, url, s):
        """Generator yielding one list of job rows per result page.

        Follows the "next page" link recursively until the last page is
        reached or the anti-crawler check blocks us; page progress is
        tracked via the global_var module.
        """
        # s = requests.Session()
        headers = {
            'User-Agent': random.choice(ua.ua_pool),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'www.lagou.com',
            'Upgrade-Insecure-Requests': '1',
        }
        response = s.get(url, headers=headers)
        print(response.cookies)
        print(response.request.headers)
        selector = etree.HTML(response.text)
        position_list = selector.xpath('//*[@id="s_position_list"]/ul/li')
        '''
        Relative XPaths inside each result <li>:
        position_name: div[1]/div[1]/div[1]/a/h3
        address:       div[1]/div[1]/div[1]/a/span
        format_time:   div[1]/div[1]/div[1]/span
        money:         div[1]/div[1]/div[2]/div/span
        requirement:   div[1]/div[1]/div[2]/div/text()
        company_name:  div[1]/div[2]/div[1]/a
        industry:      div[1]/div[2]/div[2]
        label:         div[2]/div[1]
        strengs:       div[2]/div[2]
        '''
        listPerPage = []
        for node in position_list:
            listPerJob = []
            position_name = node.xpath('div[1]/div[1]/div[1]/a/h3/text()')[0]
            # Unique identifier of the position.
            position_id = node.xpath('./@data-positionid')[0]
            address = node.xpath('string(div[1]/div[1]/div[1]/a/span)')
            format_time = node.xpath('div[1]/div[1]/div[1]/span/text()')[0]
            money = node.xpath('div[1]/div[1]/div[2]/div/span/text()')[0]
            requirement = node.xpath('div[1]/div[1]/div[2]/div/text()')[2].strip()
            company_name = node.xpath('div[1]/div[2]/div[1]/a/text()')[0]
            industry = node.xpath('div[1]/div[2]/div[2]/text()')[0].strip()
            label = node.xpath('div[2]/div[1]/span/text()')
            strengs = node.xpath('div[2]/div[2]/text()')[0]
            listPerJob.append(position_name)
            listPerJob.append(position_id)
            listPerJob.append(address)
            # Normalize "10:11发布" ("published 10:11") to "2017-11-27 10:11".
            listPerJob.append(self.transtime(format_time))
            listPerJob.append(money)
            listPerJob.append(requirement)
            listPerJob.append(company_name)
            listPerJob.append(industry)
            listPerJob.append(str(label))
            listPerJob.append(strengs)
            listPerJob.append(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
            listPerPage.append(listPerJob)
        # Look for the "next page" (下一页) link.
        next_url = selector.xpath('//a[text()="下一页"]/@href')
        headers['Referer'] = url
        if len(next_url) != 0:
            next_url = next_url[0]
            page_num = global_var.get_value("PAGE_NUM_PROCESSING")
            print("当前第" + str(page_num) + "页爬取成功")
            yield listPerPage
            # 'javascript:;' is the site's marker for "no further pages".
            if next_url == 'javascript:;':
                global_var.set_value("isLastPage", True)
                print("最后一页了。。。。")
                return
            else:
                global_var.set_value("PAGE_NUM_PROCESSING", page_num + 1)
                print("下一页链接:" + next_url)
                # self.getJobListPerPage(next_url, s)
                # return listPerPage
                # Recurse into the next page and re-yield its page lists.
                joblistgen = self.getJobListPerPage(next_url, s)
                for joblist in joblistgen:
                    yield joblist
        else:
            # No pager at all: either the search legitimately has no results
            # or the crawler has been detected and blocked.
            no_position = selector.xpath('//div[text()="暂时没有符合该搜索条件的职位"]')
            if no_position:
                logging.info("no position: " + url)
                global_var.set_value("isLastPage", True)
                return
            else:
                # print(response.text)
                print("被检测出来了。。。。")
                return
| true |
7b8301e5215b40c6fbc00a169c7399d2687f6280 | Python | tostrander-matc/TOstrander | /midterm-jpark.py | UTF-8 | 1,357 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python3
# Jurassic Park themed login + command console exercise.
print(f"\nName: Timothy Ostrander\n")
#DICTIONARIES=======================
# Credentials the user input is checked against below.
password_database = {"Username":"dnedry"}
password_database["Password"] = "please"
#DICT.N02
# Maps each accepted console command to its response text.
command_database = {"reboot":"Ok. I will reboot all park systems."}
command_database["shutdown"] = "Ok. I will shut down all park systems."
command_database["done"] = "I hate all this hacker stuff."
#TWO OBJECTS
# white_rabbit_object flags a successful login; counter counts failures.
white_rabbit_object = 0
counter = 0
# Allow up to three login attempts.
while (white_rabbit_object == 0) and (counter < 3):
    input_user=input("Username: ")
    input_pass=input("Password: ")
    if input_user.lower() == password_database["Username"] and input_pass.lower() == password_database["Password"]:
        white_rabbit_object = 1
        print(f"\nHi, Dennis. You're still the best hacker in Jurassic Park.")
        print(command_database.keys())
        input_command=input("Enter one of the above commands: ")
        if input_command.lower() == "reboot":
            print(f"\n")
            print(command_database["reboot"])
        elif input_command.lower() == "shutdown":
            print(f"\n")
            print(command_database["shutdown"])
        elif input_command.lower() == "done":
            print(f"\n")
            print(command_database["done"])
        else:
            # Any unrecognized command triggers the failsafe message.
            print(f"\nThe Lysine Contingency has been put into effect.")
    else:
        counter += 1
        print(f"You didn't say the magic word! {counter}")
        if counter == 3:
            # Spam the famous message on the final failed attempt.
            print((f"You didn't say the magic word!\n") * 25)
| true |
c6b8ef6e8f962817daa122c692befd620ade5215 | Python | alexwang19930101/PythonBase | /函数/可变长参数.py | UTF-8 | 402 | 2.9375 | 3 | [] | no_license | #-*- coding:utf-8 -*-
from test.test_array import ArraySubclassWithKwargs
# *args must come after all positional parameters (it can only be placed last)
def sum2nums(a, *args):
    """Return `a` combined (via +=, so numbers add and strings concatenate)
    with every extra positional argument, in order."""
    total = a
    for extra in args:
        total += extra
    return total
# NOTE: Python 2 print statements -- exercise the helper above.
print sum2nums(1,2,3)
print sum2nums(1)
# Show how positional, *args and **kwargs arguments are distributed.
def testKW(a,*args,**kwargs):
    print(a)
    print(args)
    print(kwargs)
testKW(1,2,3,key='value')
A=(11,)
B={'key':'value'}
# Passed without unpacking, A and B arrive inside args as plain objects.
testKW(11,A,B)
testKW(11,*A,**B) | true |
ec9557a68174a81127c06795dd8cb7bf31c7e3cc | Python | Remyaaadwik171017/myproject2 | /advancedpython/exam.py | UTF-8 | 165 | 3.3125 | 3 | [] | no_license | import re
n=input("Enter a word to validate:")
# Pattern: one or more uppercase letters followed by one or more lowercase
# letters (the trailing $ is redundant with fullmatch, which already anchors).
x="[A-Z]+[a-z]+$"
match=re.fullmatch(x,n)
if match is not None:
    print("valid:",n)
else:
    print("Not valid:",n)
| true |
99005954ca59afee4f050efe4be4d93f2848965b | Python | deejes/python_coding_challenge | /part1/basic.py | UTF-8 | 4,621 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
NOTE:
There are many ways of solving these problems, but what we
are looking for from you is, specifically, idiomatic Python.
Please answer each of these problems below using Python 2.7.
If you enjoy a challenge, you can provide multiple solutions to
these basic questions. :)
"""
"""
**Exercise 1:**
Transform these lists (left side is INPUT, right side is OUTPUT):
[1,2,3,1,5] → [15,11,13,12,11]
[‘a’,’b’,’c’,’d’,’e’] → [‘e’,’d’,’c’,’b’,’a’]
[‘a’,’b’,’c’,’d’,’e’] → [‘a’,’c’,’e’]
[‘a’,’b’,’c’,’d’,’e’] → [‘b’,’d’]
[11,6,10] → [11,10,6,[27]]
"""
# Exercise 1 solution
numeric_arr1 = [1,2,3,1,5]
output_arr1 = [15,11,13,12,11]
def reverse_add_ten(input):
    """Add 10 to every element and return the results in reverse order."""
    return [element + 10 for element in input[::-1]]
assert reverse_add_ten(numeric_arr1) == output_arr1
alpha_array = ['a','b','c','d','e']
output_arr2 = ['e','d','c','b','a']
output_arr3 = ['a','c','e']
output_arr4 = ['b','d']
def reverse_array(input):
    """Return a reversed copy of the sequence (type-preserving slice)."""
    backwards = slice(None, None, -1)
    return input[backwards]
assert reverse_array(alpha_array) == output_arr2
def odd_elements_array(input):
    """Return the elements at even indices (1st, 3rd, 5th item, ...)."""
    return input[slice(0, None, 2)]
assert odd_elements_array(alpha_array) == output_arr3
def even_elements_array(input):
    """Return the elements at odd indices (2nd, 4th item, ...)."""
    return input[slice(1, None, 2)]
assert even_elements_array(alpha_array) == output_arr4
numeric_arr2 = [11,6,10]
output_arr5 = [11,10,6,[27]]
def sort_append_sum_aslist(input):
    """Sort descending, then append the total wrapped in its own list."""
    ordered = sorted(input)[::-1]
    ordered.append([sum(ordered)])
    return ordered
assert sort_append_sum_aslist(numeric_arr2) == output_arr5
"""
**Exercise 2:**
We have a function `complex_function` to compute certain data, printing out
the result after the computation. This is great, but we want to add some
functionality. We want to push to a log:
- the time used by the function to run
- the name of the function
- the input values of the function.
Note: We cannot modify the body of the original `complex_function` function.
"""
# Exercise 2 solution
import time
def logger(input_function):
def wrapper_function(input):
start = time.time()
input_function(input)
print "This function took this many seconds to run -",time.time()-start
print "Its name is -",input_function.__name__
print "It had the following inputs -", input
return wrapper_function
@logger
def complex_function(num):
    """Pretend to do expensive work: sleep one second, then square num."""
    time.sleep(1)
    return num**2
# NOTE: runs at import time and blocks for one second.
complex_function(23)
"""
**Exercise 3:**
Define a custom `MyDict` class that allows the following operations:
- set/read values using both the dot notation (e.g. `mydict.name`) and
item access notation used for dictionaries (e.g. `mydict[name]`).
In case the mapped value is not present, returns `None`.
- A + B addition operation:
`MyDict` + `dict` = `MyDict`;
`MyDict` + `MyDict` = `MyDict`;
the result of this operation is a `MyDict` object, having all the fields
of both dictionaries. In case of common keys between the dictionaries,
their values need to be added/appended together (according to their type.
For the sake of the exercise, admissible types are only
`int` and `string`).
Example:
```
m = MyDict()
m.a = 10
m['b'] = 20
print m['c'] # prints `None`
n = {'a': 10, 'c': 15}
print m + n # prints `{'a': 20, 'b':20, 'c': 15}
```
"""
# Exercise 3 solution
class MyDict(dict):
    """dict subclass (Python 2) that stores items in the instance __dict__,
    so values are reachable both as attributes (m.a) and items (m['a']);
    missing keys and attributes read as None instead of raising.

    NOTE(review): because data lives in self.__dict__ rather than in the
    underlying dict storage, inherited dict methods (len, keys, ...) see an
    empty mapping.
    """
    def __setitem__(self, key, item):
        self.__dict__[key] = item
    def __getitem__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            return None
    # __getattr__ is invoked only when normal attribute lookup fails (the
    # name is in neither the instance __dict__ nor the class); returning
    # None here makes unknown attributes read as None instead of raising
    # AttributeError.
    def __getattr__(self,key):
        print "hello from __getattr__"
        return None
    def as_dict(self):
        return self.__dict__
    def iteritems(self):
        # Python 2 dict protocol: lazily iterate (key, value) pairs.
        return self.__dict__.iteritems()
    def __add__(self,addend):
        """MyDict + mapping -> plain dict merging both sides; values under
        common keys are combined with += (added/concatenated).

        NOTE(review): returns a plain dict, not a MyDict as the exercise
        statement requests.
        """
        result = self.__dict__.copy()
        for key, value in addend.iteritems():
            try:
                result[key] += value
            except KeyError:
                result[key] = value
        return result
first_dict = MyDict()
first_dict.a = 10
first_dict['b'] = 20
assert first_dict['c'] == None
second_dict = {'a': 10, 'c': 15}
assert (first_dict + second_dict) == {'a': 20, 'b':20, 'c': 15}
| true |
0daefdb7a5dca5b4062d6349c4c1daf2c33a4259 | Python | LyzV/python-learn | /archive.py | UTF-8 | 553 | 2.53125 | 3 | [] | no_license | #! /usr/bin/env python3
import os
import subprocess
import time
# directories of most important user data
sources = ['/home/lyzv/work/prj/']
# archive directory
archive_dir = '/home/lyzv/work/archive'
# target file of archive
target = archive_dir + os.sep + time.strftime('%Y%m%d%H%M%S') + '.zip'
# archiving cmd
cmd = ['zip', '-qr', target]
for src in sources:
cmd.append(src)
print(cmd)
result = subprocess.run(cmd)
if 0 != result.returncode:
print('I can not create archive copy!')
else:
print('Archive copy created successfully.')
| true |
8319c4dfb24dd01fc1ebcb586a32fe53f5d468a0 | Python | DrSkippy/lane-queues | /lane_queues/stores.py | UTF-8 | 2,641 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python3
# coding: utf-8
import numpy as np
import random
class Line():
    """A single checkout lane holding a FIFO queue of shoppers."""

    def __init__(self, label):
        self.label = label
        self.shoppers = []  # FIFO: index 0 is the next shopper served

    def __len__(self):
        return len(self.shoppers)

    def join_line(self, t, shopper):
        """Append *shopper* at time *t*, stamp its join_time, return self."""
        shopper.join_time = t
        self.shoppers.append(shopper)
        return self

    def checkout(self, t):
        """Pop and return the front shopper (stamped with *t*), or None if empty."""
        if not self.shoppers:
            return None
        front = self.shoppers.pop(0)
        front.checkout_time = t
        return front

    def __str__(self):
        queue_bar = "*" * len(self.shoppers)
        parts = [
            "line: {}".format(self.label),
            "shoppers: {:2}{}".format(len(self.shoppers), queue_bar),
            "",
        ]
        return "\n ".join(parts)
class Shopper():
    """A customer that joins the shortest line and tracks its waiting time."""

    def __init__(self):
        self.join_time = 0
        self.checkout_time = 0

    def decision(self, t, select_from_lines):
        """Return the line with the fewest shoppers (first one wins ties)."""
        best_pos = -1
        best_size = 1000000
        for pos, candidate in enumerate(select_from_lines):
            if len(candidate) < best_size:
                best_pos = pos
                best_size = len(candidate)
        return select_from_lines[best_pos]

    def wait(self):
        """Time spent in line: checkout_time minus join_time."""
        return self.checkout_time - self.join_time
class Store():
    """A store with several checkout lines plus per-tick metric collection."""

    def __init__(self, n=5):
        # n checkout lines labelled line_0 .. line_{n-1}
        self.lines = [Line("line_{}".format(idx)) for idx in range(n)]
        self.metrics = []
        self.customer_wait_metrics = []

    def random_line(self, k=1):
        """Return k distinct lines drawn uniformly at random."""
        return random.sample(self.lines, k)

    def index_line(self, indexes):
        """Return the lines at the given index positions."""
        return [self.lines[idx] for idx in indexes]

    def checkout(self, t, line_checkout_status):
        """Serve the front shopper of every line whose status flag is truthy.

        Returns one entry per line: the checked-out shopper, or None for
        skipped lines (a served-but-empty line also yields None).
        """
        served = []
        for idx, line in enumerate(self.lines):
            served.append(line.checkout(t) if line_checkout_status[idx] else None)
        return served

    def random_checkout(self, t, p):
        """Serve each line independently with probability p."""
        flags = np.random.random(len(self.lines)) < p
        return self.checkout(t, flags)

    def report(self, t, cos, output=False):
        """Record queue-length / waiting-time metrics for tick t.

        *cos* is the list returned by checkout(); None entries are skipped.
        """
        self.customer_wait_metrics = [shopper.wait() for shopper in cos
                                      if shopper is not None]
        avg_n = np.average([len(line) for line in self.lines])
        # Average time-in-line of shoppers still queued, per line
        # (np.average over an empty line yields nan with a RuntimeWarning).
        avg_t = [np.average([t - shopper.join_time for shopper in line.shoppers])
                 for line in self.lines]
        avg_wait = np.average(self.customer_wait_metrics)
        self.metrics.append([t, avg_n, avg_wait] + avg_t)
        if output:
            for line in self.lines:
                print(line)
| true |
f8c2291b44288481c032b140f32198efc02a877e | Python | Dew92/python | /interface_de_connexion/formulaire.py | UTF-8 | 3,519 | 2.875 | 3 | [] | no_license | # coding:utf-8
from tkinter import * # import les menu et autres widgets
from tkinter import messagebox # sous module pour les message
import tkinter # créer directement la fenetre
import json
class emptyFields(Exception):
    """Raised when one of the login form fields is left empty."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
def valid(user, pasw):
    """Check (user, pasw) against the credentials stored in connect.json.

    Returns True on a match, False otherwise, and reports the result to
    the user through message boxes.

    BUG FIX: when connect.json was missing, the original code showed the
    error box but then went on to subscript `contenu` (still None) and
    crashed with a TypeError; it now returns False instead.  The mismatch
    branch also returns an explicit False rather than falling through.
    """
    try:
        with open("connect.json", "r") as fich:
            # Parse the stored credentials {"id_user": ..., "passw": ...}.
            contenu = json.load(fich)
    except FileNotFoundError:
        print("Fichier introuvable")
        messagebox.showerror("Fichier d'indentification manquant",
                             "Contacter le créateur de l'application")
        return False  # no credentials available to compare against
    if contenu["id_user"] == user and contenu["passw"] == pasw:
        messagebox.showinfo("Connexion réussi",
                            "Bievenue " + contenu["id_user"].capitalize())
        return True
    messagebox.showerror("Erreur de connexion",
                         "Combinaison < Utilisateur - Mot de passe > incorrect")
    return False
def rien():
    """Placeholder callback for menu entries that are not implemented yet."""
    return None
def forgot():
    """Planned 'forgotten password' handler (not implemented yet)."""
    return None
def press(event):
    """<Return> key binding: run the same validation as the login button."""
    # messagebox.showinfo("Cliquer", "Bien reçu")  # hook into the app
    check()
def check():
    """Validate the login form: reject empty fields, then try the credentials."""
    # Values currently typed into the two entry widgets.
    username = zs_user.get()
    password = zs_pass.get()
    try:
        # An empty field makes the login attempt pointless.
        if username == "" or password == "":
            raise emptyFields("Champs vides")
        if valid(username, password):
            print("next frame =>")  # load the user profile
    except emptyFields:
        messagebox.showerror("Erreur de saisie",
                             "Un des champs n'est pas rempli : connexion impossible")
app = tkinter.Tk()
app.title("Gestionnaire d'utilisateur")
# determine the screen dimensions
screen_x = int(app.winfo_screenwidth())
screen_y = int(app.winfo_screenheight())
window_x = 640
window_y = 480
# centre the window on the screen
pos_X = (screen_x // 2) - (window_x // 2)
pos_Y = (screen_y // 2) - (window_y // 2)
# geometry string: "<width>x<height>+<x>+<y>"
geo = "{}x{}+{}+{}".format(window_x, window_y, pos_X, pos_Y)
# fix the window size (max == min disables resizing)
app.maxsize(window_x, window_y)
app.minsize(window_x, window_y)
# apply the geometry
app.geometry(geo)
# labelled frame holding the login widgets
cf = tkinter.LabelFrame(app, text="Connexion", width=400, height=300, bd=2, padx=20, pady=40)
# static menubar widgets (all entries disabled except Quit)
menubar = Menu(app)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Nouveau", command=rien, state="disabled")
filemenu.add_command(label="Charger", command=rien, state="disabled")
filemenu.add_command(label="Sauvegarde", command=rien, state="disabled")
filemenu.add_separator()
filemenu.add_command(label="Quitter", command=app.quit)
menubar.add_cascade(label="Fichier", menu=filemenu)
app.config(menu=menubar)
# login widgets: labels, entries (password masked), submit button
lb_user = tkinter.Label(cf, text="Identifiant :", justify="left", anchor="w")
zs_user = tkinter.Entry(cf)
lb_pass = tkinter.Label(cf, text="Mot de passe :", justify="left")
zs_pass = tkinter.Entry(cf, show="*")
bt_cnx = tkinter.Button(cf, text="Connexion", command=check)
# event bindings: the Return key submits the form (see press())
app.bind("<KeyPress-Return>", press)
# centre the Connexion frame inside the window
cfc_X = (window_x // 2) - (cf.winfo_width() // 2) - 10
cfc_Y = (window_y // 2) - (cf.winfo_height() // 2) - 30
# attach the frame to the window
cf.place(x=cfc_X, y=cfc_Y, anchor=CENTER)
lb_user.grid(row=0, column=0)
zs_user.grid(row=0, column=1)
lb_pass.grid(row=1, column=0)
zs_pass.grid(row=1, column=1)
bt_cnx.grid(row=2, column=1, padx=30, pady=15)
# keep the window open (blocking event loop)
app.mainloop()
| true |
121a5ba0ba218eb92730e05124413f6dfc5d6664 | Python | mjmoon/vis-covid-19-can | /py/helper.py | UTF-8 | 2,428 | 3.140625 | 3 | [] | no_license | """A set of helper functions."""
import fileinput
import re
from datetime import date
import pandas as pd
def update_ref_access(reference):
    """Update the access date of *reference* in README.md, in place.

    Rewrites every README line matching ``<reference> (<anything>)`` so the
    parenthesised part reads "(Retrieved on <today>)".  A ``README.md.bak``
    backup is kept by fileinput.

    FIX: the replacement pattern is now a raw string — ``' \\((.*)\\)'``
    written as a plain string relies on invalid escape sequences, which
    raise SyntaxWarning on modern Python.
    """
    today = date.today().strftime('%B %d, %Y')
    # Escape the reference text itself; the parentheses are regex syntax.
    reference_sub = re.escape(reference) + r' \((.*)\)'
    reference_new = reference + ' (Retrieved on {})'.format(today)
    # NOTE(review): reference_new is used as a re.sub replacement string, so
    # backslashes in it would be interpreted — confirm references never
    # contain them.
    print(reference_new)
    with fileinput.FileInput('README.md', inplace=True, backup='.bak') as file:
        for line in file:
            # fileinput with inplace=True redirects print() into the file.
            print(re.sub(reference_sub, reference_new, line), end='')
def get_population_country():
    """
    Retrieve world population by country from Wikipedia.

    Returns a DataFrame indexed by country name with a single
    'population' column.

    Source:
        https://en.wikipedia.org/wiki/
        List_of_countries_by_population_(United_Nations)
        https://en.wikipedia.org/wiki/MS_Zaandam
        https://www.princess.com/news/notices_and_advisories/
        notices/diamond-princess-update.html
        https://en.wikipedia.org/wiki/
        Demographics_of_the_Palestinian_territories
    """
    url = "https://en.wikipedia.org/wiki/"\
        + "List_of_countries_by_population_(United_Nations)"
    html = pd.read_html(url, attrs={'id': 'main'})
    data = html[0].iloc[:, [0, 4]].copy()
    data.columns = ['country', 'population']
    # Strip footnote markers like "[a]" from the country names.
    data['country'] = data['country'].apply(
        lambda x: re.sub(r'\[.+\]', '', x).strip())
    # FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat to add
    # the manually curated rows (cruise ships and disputed territories).
    extra = pd.DataFrame([
        {'country': 'Diamond Princess', 'population': 3711},
        {'country': 'MS Zaandam', 'population': 1829},
        {'country': 'West Bank and Gaza', 'population': 4543126},
        {'country': 'Kosovo', 'population': 1797086},
    ])
    data = pd.concat([data, extra], ignore_index=True)
    data = data.set_index('country')
    # The Wikipedia figure for Serbia includes Kosovo; split them apart.
    data.loc['Serbia', 'population'] =\
        data.loc['Serbia', 'population'] - data.loc['Kosovo', 'population']
    return data
def get_population_province():
    """
    Retrieve Canada's population by province from Wikipedia.

    Returns a DataFrame indexed by province name with a single
    'population' column.

    Source:
        https://en.wikipedia.org/wiki/
        Population_of_Canada_by_province_and_territory
    """
    url = "https://en.wikipedia.org/wiki/"\
        + "Population_of_Canada_by_province_and_territory"
    html = pd.read_html(url, attrs={'class': 'wikitable sortable'})
    # FIX: .copy() avoids pandas' SettingWithCopyWarning when renaming the
    # columns below (and matches get_population_country's handling).
    data = html[0].iloc[:, [1, -3]].copy()
    data.columns = ['province', 'population']
    data = data.set_index('province')
    return data
| true |
a326609e1d9a3fdd7054f94b7b4762d2da12cbb8 | Python | kylapurcell/A01088856_1510_assignments | /A3/test_location_normal.py | UTF-8 | 2,810 | 3.09375 | 3 | [] | no_license | from unittest import TestCase
from unittest.mock import patch
import io
from A3 import sud
class TestLocationNormal(TestCase):
    """Unit tests for sud.location_normal.

    Each test builds a character dict whose 'Location' [row, column] pair
    selects one of the four map regions, captures stdout via
    @patch('sys.stdout', ...), and compares the printed description
    against the exact expected text (note: the expected strings below are
    compared verbatim, including line breaks).
    """

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_location_normal(self, mock_stdout):
        # Tests printed output for character location with rows < 4 and columns < 4
        character = {'Name': 'Kyla', 'Class': 'Hello Kitty', 'Health': 10,
                     'Dexterity': 0, 'Location': [2, 1], 'Inventory': [], 'Cursed': False}
        expected_output = """You are in the valley, a barren location, that likely used to be a suburb before the war
In the distance you see a large building with sturdy looking pillars\n"""
        sud.location_normal(character)
        self.assertEqual(mock_stdout.getvalue(), expected_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_location_normal2(self, mock_stdout):
        # Tests printed output for character location with rows >= 4 and columns >=4
        character = {'Name': 'Kyla', 'Class': 'Hello Kitty', 'Health': 10,
                     'Dexterity': 0, 'Location': [4, 4], 'Inventory': [], 'Cursed': False}
        expected_output = """You are in the city ruins, a charred location filled with decaying buildings
In the distance you see the run down remains of a grocery store\n"""
        sud.location_normal(character)
        self.assertEqual(mock_stdout.getvalue(), expected_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_location_normal3(self, mock_stdout):
        # Tests printed output for character location with rows >= 4 and columns <= 4
        character = {'Name': 'Kyla', 'Class': 'Hello Kitty', 'Health': 10,
                     'Dexterity': 0, 'Location': [4, 3], 'Inventory': [], 'Cursed': False}
        expected_output = """You are in the lake region, green radioactive particles shimmer in the brown \
coloured waters
You feel something in the humid air as if this region is the location of an Easter Egg
Easter Egg? You have no idea what that is or why you thought of it\n"""
        sud.location_normal(character)
        self.assertEqual(mock_stdout.getvalue(), expected_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_location_normal4(self, mock_stdout):
        # Tests printed output for character location with rows < 4 and columns >= 4
        character = {'Name': 'Kyla', 'Class': 'Hello Kitty', 'Health': 10,
                     'Dexterity': 0, 'Location': [3, 4], 'Inventory': [], 'Cursed': False}
        expected_output = """You are crossing a bridge leading to the outskirts of the city ruins
In the distance you spot a large building in good condition surrounded by stacks of burnt books\n"""
        sud.location_normal(character)
        self.assertEqual(mock_stdout.getvalue(), expected_output)
| true |
8cfc1d655541b7f7d13ba9e90eb9f49bdf2a2960 | Python | ganrod/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-in-Python | /quiz/quiz palindrome.py | UTF-8 | 298 | 4 | 4 | [] | no_license | def isPalindrome(aString):
'''
aString: a string
'''
# Your code here
b=len(aString)
c=0
for i in range(b):
if aString[i]==aString[-(i+1)]:
c=c+1
if c==b:
return True
else:
return False
print(isPalindrome('a')) | true |
743ce641b1ec5afad564a71aa729b82fc0a1296d | Python | Charch1t/spectrum-inteeernship-drive-2021 | /prgm2.py | UTF-8 | 263 | 4.15625 | 4 | [] | no_license | #To find the trailing zeroes of the factorial of a number
n = int(input("Enter a number"))
i = 5
count = 0
while((n / i) >= 1):
count = count + int((n / i))
i = i*5
print("The no. of trailing zeroes in the factorial of a number is",count)
| true |
7db5adb2583c3dde6c6a4fc11c4b96795d4968d8 | Python | shumpei-noda/getTips | /get_tips.py | UTF-8 | 3,402 | 2.609375 | 3 | [] | no_license | import os
import sys
import json
import random
import requests
VERSION = '20170801'
def get_venues_tips(venue_id):
    """Fetch up to 500 tips for *venue_id* from the Foursquare venues/tips API.

    Requires FOURSQUARE_CLIENT_ID / FOURSQUARE_CLIENT_SECRET in the
    environment.  Raises Exception with the API errorType on API errors.
    Returns a tuple ``(tips, raw_response_text)``.

    NOTE(review): ``venue_tips_data_dict`` is keyed by the literal string
    '/tips' (not by the venue id), so the returned ``tips`` dict has that
    single key, and the loop variable shadows the ``venue_id`` parameter —
    this looks like leftover from a multi-venue version; confirm intent.
    """
    # Split the URL so the venue id can be spliced in between.
    first_get_venues_tips_url = 'https://api.foursquare.com/v2/venues/'
    second_get_venues_tips_url = '/tips'
    # Build the per-venue endpoint URL and call the tips API,
    # keeping the returned payload.
    get_venues_tips_url = ( first_get_venues_tips_url
                            + venue_id
                            + second_get_venues_tips_url)
    get_venues_tips_params = {'v': VERSION,
                              "client_id": os.environ['FOURSQUARE_CLIENT_ID'],
                              "client_secret": os.environ['FOURSQUARE_CLIENT_SECRET'],
                              'limit': 500}
    get_venues_tips_response = requests.get(url=get_venues_tips_url, params=get_venues_tips_params)
    venue_tips_data = json.loads(get_venues_tips_response.text)
    if 'errorType' in venue_tips_data['meta']:
        raise Exception(venue_tips_data['meta']['errorType'])
    venue_tips_data_dict = {}
    venue_tips_data_dict[second_get_venues_tips_url] = venue_tips_data['response']
    # Collect each entry's tip texts and tip count into one dict.
    tips = {}
    for venue_id in venue_tips_data_dict:
        one_venue_tips = []
        one_venue_info = {}
        for tips_items_data in venue_tips_data_dict[venue_id]['tips']['items']:
            one_venue_tips += [tips_items_data['text']]
        one_venue_info['tips'] = one_venue_tips
        one_venue_info['count'] = venue_tips_data_dict[venue_id]['tips']['count']
        tips[venue_id] = one_venue_info
    return tips, get_venues_tips_response.text
def save_tips_json(tips, path):
    """Serialize *tips* to *path* as pretty-printed, non-ASCII-preserving JSON."""
    serialized = json.dumps(tips, sort_keys=True, ensure_ascii=False, indent=2)
    with open(path, "w") as out_file:
        out_file.write(serialized)
def fill_missing_value(search_parameters):
    """Ensure every Foursquare venue-search parameter key exists.

    Missing keys are added with value None; existing values (even falsy
    ones) are left untouched.  The dict is modified in place and also
    returned for convenience.
    """
    parameter_keys = ['ll', 'near', 'intent', 'radius', 'sw',
                      'ne', 'query', 'limit', 'categoryId',
                      'llAcc', 'alt', 'altAcc', 'url', 'providerId', 'linkedId']
    for parameter_key in parameter_keys:
        # setdefault is the idiomatic "add only if absent" operation.
        search_parameters.setdefault(parameter_key, None)
    return search_parameters
"""
def main():
# venue検索条件ファイルの取得
if len(sys.argv) != 2:
print("検索パラメータくれ")
return
search_parameters_file_name = sys.argv[1]
with open(search_parameters_file_name, 'r') as f:
search_parameters = json.load(f)
with open("id.json", 'r') as f:
token = json.load(f)
for search_name in search_parameters:
search_parameters[search_name] = fill_missing_value(search_parameters[search_name])
# parameterにclientIdとsecretIdを追加
for key in token:
search_parameters[search_name][key] = token[key]
# venue_idの取得
tips_num_lower_limit = 10
venue_ids = get_venue_id(search_parameters[search_name],tips_num_lower_limit)
if not venue_ids:
continue
# 取得したvenue_idからtipsを取得
tips = get_venues_tips(venue_ids, token)
# 取得してきたtipsの保存先
path = 'tips/tips_ja/' + search_name + '_tips.json'
save_tips_json(tips, path)
"""
if __name__ == "__main__":
main()
| true |
ac1e89fe27675cd56fd65ecdcd6141406043ce7d | Python | xiaonanln/myleetcode-python | /src/315. Count of Smaller Numbers After Self.py | UTF-8 | 995 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | class Solution(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
N = len(nums)
nums = [(n, i) for i, n in enumerate(nums)]
# print nums
aux = [None] * N
self.moveRight = [0] * N
self.mergeSort(nums, 0, N-1, aux)
# print nums
return self.moveRight
def mergeSort(self, nums, i, j, aux):
if i>=j:
return
m = (i+j) // 2
self.mergeSort(nums, i, m, aux)
self.mergeSort(nums, m+1, j, aux)
# print 'merge', i, m, m+1, j
# merge i:m and m+1:j
aux[i:j+1] = nums[i:j+1]
wi, r1, r2 = i, i, m+1
while r1 <= m and r2 <= j:
if aux[r1] <= aux[r2]:
nums[wi] = aux[r1]
if r1 < wi:
self.moveRight[aux[r1][1]] += (wi-r1)
r1 += 1
else:
nums[wi] = aux[r2]
if r2 < wi:
self.moveRight[aux[r2][1]] += (wi-r2)
r2 += 1
wi += 1
while r1 <= m:
nums[wi] = aux[r1]
if r1 < wi:
self.moveRight[aux[r1][1]] += (wi-r1)
r1 += 1
wi += 1
print(Solution().countSmaller([5, 2, 6, 1]))  # FIX: py3 print(); expected [2, 1, 1, 0]