blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
da1b8e64ada846bd53f7da574d45a5fcef8d1d37 | Python | danry25/CSC-110 | /Lab-10-Parameterized-Gui.py | UTF-8 | 3,733 | 4.21875 | 4 | [] | no_license | # Dan Ryan
# Lab 10: Parameterized Gui
# Plus
# Draw shapes on the screen, of different sizes and shapes depending on user input
# Pull in Gui3 so we can use it...
import Gui3
import math
# define win as the gui subcomponent of gui3
# Handles gathering and checking user input
def userInput():
    """Prompt for the window parameters and validate them.

    Returns a dict with keys:
      'title'   -- non-empty string
      'width'   -- numeric string (rectangle width in pixels)
      'height'  -- numeric string (rectangle height in pixels)
      'corners' -- numeric string (corner count for the plus polygon)
    Each field is re-prompted until it passes validation.
    """
    # Gather the actual user input and store it
    stuff = {
        "title": input('Enter a title: '),
        "width": input('Width for the rectangle: '),
        "height": input('Height for the rectangle: '),
        "corners": input('Enter the number of corners for the plus: ')
    }
    # Authenticate user input so as to avoid program crashes
    while stuff['title'] == '':
        print('Error in input. Title cannot be blank.')
        stuff['title'] = input('Try again. Enter a title: ')
    while not stuff['width'].isdigit():
        print('Error in input. Width cannot be blank or non-numeric.')
        stuff['width'] = input('Try again. Enter the width: ')
    while not stuff['height'].isdigit():
        print('Error in input. height cannot be blank or non-numeric.')
        stuff['height'] = input('Try again. Enter the height: ')
    while not stuff['corners'].isdigit():
        print('Error in input. Corners cannot be blank or non-numeric.')
        # Bug fix: this retry previously assigned to stuff['width'], so
        # 'corners' never changed and the loop could never terminate.
        stuff['corners'] = input('Try again. Enter the Corners: ')
    return stuff
# Creates 2 different types of windows, dependent on input
def showWindow(stuff, type):
    """Display one of two demo windows built from the validated input.

    type=True  -> window sized from the user's width/height entries.
    type=False -> fixed 250x250 window with an n-cornered polygon.
    Blocks inside the Gui mainloop until the window is closed.
    """
    if type:
        # First window: everything scales with the user's dimensions.
        win = Gui3.Gui()
        win.title(stuff['title'])
        width = int(stuff['width'])
        height = int(stuff['height'])
        canvas = win.ca(width + 50, height + 50)
        half_w = width / 2
        half_h = height / 2
        # Base shapes: rectangle, inscribed oval, and a diamond on top.
        canvas.rectangle([[-half_w, -half_h], [half_w, half_h]], fill='yellow')
        canvas.oval([[-half_w, -half_h], [half_w, half_h]], fill='#00ff00')
        canvas.polygon([[-half_w, 0], [0, half_h], [half_w, 0], [0, -half_h]],
                       outline='black', fill='white')
        win.mainloop()
    else:
        # Second window: fixed geometry, polygon corner count from input.
        win = Gui3.Gui()
        win.title('Second Window')
        width = 200
        height = 200
        corners = int(stuff['corners'])
        coords = []
        # Place the requested number of corners evenly on a radius-100
        # circle, starting from the top (pi/2).
        for step in range(corners):
            r = 100
            angle = math.pi / 2 + (step * (2 * math.pi) / corners)
            coords.append([int(r * math.cos(angle)), int(r * math.sin(angle))])
        canvas = win.ca(250, 250)
        canvas.rectangle([[-width / 2, -height / 2], [width / 2, height / 2]], fill='yellow')
        canvas.oval([[-width / 2, -height / 2], [width / 2, height / 2]], fill='#00ff00')
        canvas.polygon(coords, outline='black', fill='white')
        win.mainloop()
# Main funtion, where the magic happens!
def main():
    """Entry point: gather validated input, then show both demo windows."""
    params = userInput()
    # Window 1: shapes sized from the user's input.
    showWindow(params, True)
    # Window 2: fixed-size canvas with the n-cornered polygon.
    showWindow(params, False)

main()
| true |
f1d9c64ee0082dfd504eb3af18009e7d70d6c3e3 | Python | xingrui/algorithm | /ball_box/split_dp.py | UTF-8 | 1,181 | 3.203125 | 3 | [] | no_license | import sys
def init_num(n, data):
    """Fill data[i][j] with the number of partitions of i whose largest
    part is at most j, for 1 <= i < n (row 0 / column 0 unused)."""
    if n < 1:
        return 0
    for total in range(1, n):
        # Exactly one partition uses only 1s.
        data[total][1] = 1
        for largest in range(2, total):
            # Either use a part of size `largest` or cap parts below it.
            data[total][largest] = data[total - largest][largest] + data[total][largest - 1]
        # Allowing the whole number as one part adds a single partition.
        data[total][total] = data[total][total - 1] + 1
        # Caps larger than the number itself change nothing.
        for largest in range(total, n):
            data[total][largest] = data[total][total]
def get_num(n, m):
    """Recursively count partitions of n with largest part <= m,
    memoized in the module-level `data` table."""
    if n < 1 or m < 1:
        return 0
    if data[n][m] != 0:
        return data[n][m]
    if n == 1 or m == 1:
        res = 1
    elif n < m:
        # Parts cannot exceed the number itself.
        res = get_num(n, n)
    elif n == m:
        # One extra partition: the number as a single part.
        res = 1 + get_num(n, n - 1)
    else:
        # Use a part of size m, or restrict parts to < m.
        res = get_num(n - m, m) + get_num(n, m - 1)
    data[n][m] = res
    return res
def get_array(n, m):
    """Return an m-row by n-column matrix of zeros (rows are distinct lists)."""
    return [[0] * n for _ in range(m)]
def print_array(dat, print_len):
    """Print the (print_len-1) x (print_len-1) top-left sub-matrix of dat,
    tab-separated, skipping row/column 0.

    Bug fix: the body previously read the module-level global ``data``
    instead of the ``dat`` parameter, so the argument was ignored.
    The Python 2 print statement was parenthesized (single argument, so
    output is identical under Python 2 as well).
    """
    for i in range(1, print_len):
        data_list = []
        for j in range(1, print_len):
            data_list.append("%d" % dat[i][j])
        print('\t'.join(data_list))
if __name__ == "__main__":
    # Build a 201x201 memo table and print p(i) (the number of integer
    # partitions of i) for every i up to 200.
    n = 200
    n += 1
    data = get_array(n, n)
    init_num(n, data)
    for i in range(1, n):
        print("%d : %d" % (i, get_num(i, i)))
    # Also dump the top-left 19x19 corner of the DP table.
    print_len = 20
    print_array(data, print_len)
| true |
1ade8f0b4276ff1b24528d66bcbecf4e476eab45 | Python | markgalup/topcoder | /Solved/GuessTheNumber (SRM 157 Div. 2 (250 pts)).py | UTF-8 | 768 | 3.921875 | 4 | [] | no_license | class GuessTheNumber(object):
def noGuesses(self, upper, answer):
lower = 1
guesses = 0
x = (int(round((upper - lower) / 2.0)))
while True:
guesses += 1
if x == answer:
return guesses
elif x > answer:
upper = x
x = (int(round((upper-lower) /2.0) + lower))
else:
lower = x
x = (int(round((upper-lower) /2.0) + lower))
# Demo calls. Converted from Python 2 print statements to single-argument
# print() calls: output is identical under Python 2, and the file now also
# parses under Python 3.
# NOTE: the "Returns:" values below assume Python 2's round-half-up;
# Python 3's banker's rounding can yield different guess counts.
print(GuessTheNumber().noGuesses(9, 6)) #Returns: 3
print(GuessTheNumber().noGuesses(1000, 750)) #Returns: 2
print(GuessTheNumber().noGuesses(643, 327)) #Returns: 7
print(GuessTheNumber().noGuesses(157, 157)) #Returns: 8
print(GuessTheNumber().noGuesses(128, 64)) #Returns: 1
| true |
9d42c9d054bfaf1b864a12ca48785aaa6c937256 | Python | Aasthaengg/IBMdataset | /Python_codes/p03658/s309552291.py | UTF-8 | 145 | 2.578125 | 3 | [] | no_license | n, k = map(int, input().split())
# Read the scores, sort descending, and print the sum of the k largest
# (k comes from the first input line parsed above).
l = [int(tok) for tok in input().split()]
L = sorted(l, reverse=True)
a = sum(L[:k])
print(a)
1c7740c4e297fdcc523292bd24fee5e3391dc4ef | Python | buukhanh243/Python100Days | /Dictionary/Dictionary.py | UTF-8 | 137 | 3.109375 | 3 | [] | no_license | my_dict = {'name': 'Khanh ong buu', 'sex': 'Male', 'age': '20'}
# Show the literal-built dict, then build a second one (same insertion
# order as the original dict(...) constructor call) and show it too.
print(my_dict)
my_dict2 = {'name': 'codeXplore@gmail.com', 'city': 'HCM'}
print(my_dict2)
6d490bbd45af4495a0f95769f8c9460212de40b7 | Python | TShi/voice | /fixed_pitch/record.py | UTF-8 | 4,169 | 2.734375 | 3 | [] | no_license | import os
from utils import *
def is_silent(snd_data):
    """Return True when the mean absolute level is below THRESHOLD."""
    mean_level = np.mean([abs(sample) for sample in snd_data])
    return mean_level < THRESHOLD
def normalize(snd_data):
    """Scale the samples so the loudest one reaches MAXIMUM (16384)."""
    MAXIMUM = 16384
    peak = max(abs(sample) for sample in snd_data)
    scale = float(MAXIMUM) / peak
    scaled = array('h')
    scaled.extend(int(sample * scale) for sample in snd_data)
    return scaled
def record():
    """
    Record a word or words from the microphone and
    return (sample_width, samples) where samples is an array of
    signed shorts.

    NOTE(review): despite the original docstring's claims, this code
    does not pad with 0.5 s of silence; it trims only the final
    CHUNK_SIZE samples at the end. It also discards and restarts any
    take shorter than ~10 chunks.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        frames_per_buffer=CHUNK_SIZE)

    num_silent = 0
    snd_started = False

    # Accumulated normalized samples.
    r = array('h')
    print "Go!"
    num_periods = 0
    while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(CHUNK_SIZE))
        if byteorder == 'big':
            snd_data.byteswap()
        # print np.mean(map(abs,snd_data)), is_silent(snd_data)
        silent = is_silent(snd_data)

        if silent and snd_started:
            # Silence after sound: either the take was too short
            # (restart) or we are done (break).
            if num_periods <= 10:
                print "Too short, resampling"
                snd_started = False
                r = array('h')
                num_periods = 0
                continue
            else:
                break
        elif silent and not snd_started: # hasn't started yet
            continue
        elif not silent and snd_started: # okay
            r.extend(normalize(snd_data))
            num_periods += 1
            print num_periods,len(r)
        else: # sound just started
            print "Start recording"
            snd_started = True

    print "Finish"
    # Drop the trailing (silent) chunk.
    r = r[:-CHUNK_SIZE]
    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()
    return sample_width, r
def findmax(label):
    """Return the highest sequence number already saved for `label`
    under DATA_DIR/fixed_pitch, or -1 when no sample exists yet."""
    largest = -1
    pattern = DATA_DIR+"fixed_pitch/%s_(\d+).wav" % label
    for filename in glob.glob(DATA_DIR+"fixed_pitch/%s_*.wav" % label):
        seq = int(re.findall(pattern, filename)[0])
        if seq > largest:
            largest = seq
    return largest
def record_to_file_full(label):
    """Record one take and save it as the next <label>_<seq>.wav file.

    Returns the sequence number used for the new file.
    """
    sample_width, samples = record()
    packed = pack('<' + ('h' * len(samples)), *samples)
    seq = findmax(label) + 1
    out = wave.open(DATA_DIR+"fixed_pitch/%s_%d.wav" % (label, seq), 'wb')
    out.setnchannels(1)
    out.setsampwidth(sample_width)
    out.setframerate(RATE)
    out.writeframes(packed)
    out.close()
    return seq
def record_to_file(label):
    """Record one take and save it split into fixed-size chunks, each as
    its own sequentially-numbered <label>_<seq>.wav file."""
    sample_width, samples = record()
    packed = pack('<' + ('h' * len(samples)), *samples)
    seq = findmax(label) + 1
    for data_chunk in chunks(packed, RATE * 1): # 1s chunks
        out = wave.open(DATA_DIR+"fixed_pitch/%s_%d.wav" % (label, seq), 'wb')
        out.setnchannels(1)
        out.setsampwidth(sample_width)
        out.setframerate(RATE)
        out.writeframes(data_chunk)
        out.close()
        seq += 1
if __name__ == '__main__':
    # Pitch-matching exercise: play back the reference pitch for a label
    # and keep the new recording only if its pitch is within 3 Hz.
    # (Python 2 print statements were parenthesized; all were single
    # argument, so the output is unchanged.)
    label = sys.argv[1]
    print("label: %s" % label)
    if (os.path.isfile(DATA_DIR+"fixed_pitch/%s_0.wav" % label)):
        fund_freq, fs = getFundFreq(label, 0)
        print("Listen, here's your pitch (%.1f Hz)" % fund_freq)
        playNote(fund_freq, fs)
        print("Now duplicate it!")
        seq = record_to_file_full(label)
        new_fund_freq, _ = getFundFreq(label, seq)
        print("Original is %f, new is %f" % (fund_freq, new_fund_freq))
        # Bug fix: the original test used `or`, which is true for every
        # pair of frequencies, so a take could never fail. The intended
        # check is "within +/- 3 Hz", i.e. a conjunction.
        if (fund_freq < new_fund_freq + 3) and (fund_freq > new_fund_freq - 3):
            print("Success, sample %s_%d saved" % (label, seq))
        else:
            print("Failure, sample not saved")
            os.remove(DATA_DIR+"fixed_pitch/%s_%d.wav" % (label, seq))
    else:
        print("New label! Please make a first recording.")
        record_to_file_full(label)
        print("done - result written to %s" % label)
| true |
94c76946b9f17fc46a973cc4e64d36cec2009193 | Python | HyeonwooNoh/Gumbel-Softmax-VAE-in-tensorflow | /util/preprocess.py | UTF-8 | 3,215 | 2.796875 | 3 | [] | no_license | """
Image preprocessing modules.
This code is brought from https://github.com/CuriousAI/ladder/blob/master/nn.py
"""
import numpy as np
import scipy
import scipy.linalg
import scipy.sparse
class ZCA(object):
    """ZCA whitening transform.

    fit() estimates a whitening matrix P (and its inverse) from data;
    apply() decorrelates inputs and inv() maps whitened data back.
    """

    def __init__(self, n_components=None, data=None, filter_bias=0.1):
        # filter_bias is added to the covariance diagonal so the
        # eigen-decomposition stays well conditioned.
        self.filter_bias = np.float32(filter_bias)
        self.P = None       # whitening matrix
        self.P_inv = None   # de-whitening matrix
        self.n_components = 0
        self.is_fit = False
        # Bug fix: the original tested `if n_components and data:`, which
        # raises ValueError for a numpy array (ambiguous truth value).
        if n_components is not None and data is not None:
            self.fit(n_components, data)

    def fit(self, n_components, data):
        """Estimate the whitening transform from `data`.

        2-D input is used as-is; higher-rank (convolutional) input is
        flattened per sample and its shape restored by apply()/inv().
        """
        if len(data.shape) == 2:
            self.reshape = None
        else:
            # np.product was removed in NumPy 2.0; np.prod is equivalent.
            assert n_components == np.prod(data.shape[1:]), \
                'ZCA whitening components should be %d for convolutional data'\
                % np.prod(data.shape[1:])
            self.reshape = data.shape[1:]

        data = self._flatten_data(data)
        assert len(data.shape) == 2
        n, m = data.shape
        self.mean = np.mean(data, axis=0)

        bias = self.filter_bias * scipy.sparse.identity(m, 'float32')
        cov = np.cov(data, rowvar=0, bias=1) + bias
        eigs, eigv = scipy.linalg.eigh(cov)
        assert not np.isnan(eigs).any()
        assert not np.isnan(eigv).any()
        assert eigs.min() > 0
        # Bug fix: the requested component count was never stored, so the
        # truncation branch below could never trigger.
        self.n_components = n_components
        if self.n_components:
            eigs = eigs[-self.n_components:]
            eigv = eigv[:, -self.n_components:]
        sqrt_eigs = np.sqrt(eigs)
        self.P = np.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
        assert not np.isnan(self.P).any()
        self.P_inv = np.dot(eigv * sqrt_eigs, eigv.T)
        self.P = np.float32(self.P)
        self.P_inv = np.float32(self.P_inv)
        self.is_fit = True

    def apply(self, data, remove_mean=True):
        """Whiten `data` (optionally without centering first)."""
        data = self._flatten_data(data)
        d = data - self.mean if remove_mean else data
        return self._reshape_data(np.dot(d, self.P))

    def inv(self, data, add_mean=True):
        """Invert the whitening transform (optionally re-adding the mean)."""
        d = np.dot(self._flatten_data(data), self.P_inv)
        d += self.mean if add_mean else 0.
        return self._reshape_data(d)

    def _flatten_data(self, data):
        # Collapse per-sample dimensions for convolutional input.
        if self.reshape is None:
            return data
        assert data.shape[1:] == self.reshape
        return data.reshape(data.shape[0], np.prod(data.shape[1:]))

    def _reshape_data(self, data):
        # Restore the original per-sample shape for convolutional input.
        assert len(data.shape) == 2
        if self.reshape is None:
            return data
        return np.reshape(data, (data.shape[0],) + self.reshape)
class ContrastNorm(object):
    """Per-sample (global) contrast normalization."""

    def __init__(self, scale=55, epsilon=1e-8):
        self.scale = np.float32(scale)
        self.epsilon = np.float32(epsilon)

    def apply(self, data, copy=False):
        """Center each sample and rescale it to norm `scale`.

        Mutates `data` in place unless copy=True. Higher-rank input is
        flattened per sample and its shape restored on return.
        """
        if copy:
            data = np.copy(data)
        original_shape = data.shape
        if len(data.shape) > 2:
            data = data.reshape(data.shape[0], np.product(data.shape[1:]))
        assert len(data.shape) == 2, 'Contrast norm on flattened data'

        # Remove each sample's mean (in place on the flattened view).
        data -= data.mean(axis=1)[:, np.newaxis]

        row_norms = np.sqrt(np.sum(data ** 2, axis=1)) / self.scale
        # Near-zero rows are left unscaled to avoid division blow-up.
        row_norms[row_norms < self.epsilon] = np.float32(1.)
        data /= row_norms[:, np.newaxis]

        if original_shape != data.shape:
            data = data.reshape(original_shape)
        return data
| true |
5c764084ba3eb3d4ad98279898d50479b7ee4167 | Python | piglaker/PTA_ZJU_mooc | /src19.py | UTF-8 | 1,954 | 2.890625 | 3 | [] | no_license | def read():
return list(map(int, input().split()))
# d is the jump distance used by all the search helpers below.
global d  # no-op at module level; kept as in the original
# n = number of crocodile points, d = James Bond's jump distance.
n, d = read()
# G holds the n point coordinates, one [x, y] pair per line of input.
G = []
for i in range(int(n)):
    G.append(read())
from math import sqrt
def get_distance(a, b): return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def is_safe(vector):
    """True when a jump of length d from `vector` reaches the 100x100
    bank, i.e. any edge at +/-50 is within range (uses module-level d)."""
    x = vector[0]
    y = vector[1]
    return x + d >= 50 or x - d <= -50 or y + d >= 50 or y - d <= -50
def BFS(layer, his, path, G):
    """Expand the search frontier by one breadth-first step.

    layer: node indices on the current frontier
    his:   indices already visited (mutated in place via +=)
    path:  dict str(node index) -> list of indices from a start node
    G:     list of [x, y] coordinates
    Returns (next frontier, updated history, next path dict).
    Callers must not pass an empty layer (the bare `return` yields None).
    """
    if not layer: return
    his += layer
    layer_ = []
    path_ = {}
    for node in layer:
        for p in range(len(G)):
            if not p in his:
                # p is reachable from `node` within one jump of length d.
                if get_distance(G[node], G[p]) <= d:
                    layer_.append(p)
                    if not str(p) in path_.keys():
                        path_[str(p)] = path[str(node)] + [p]
                    else:
                        # Tie-break between competing predecessors.
                        # NOTE(review): this compares the FIRST node of the
                        # already-stored path against the CURRENT node's
                        # distance from the origin -- presumably a heuristic
                        # preferring paths starting nearer the center;
                        # confirm the intended comparison.
                        pre = G[path_[str(p)][0]]
                        post = G[node]
                        if get_distance([0,0], post) < get_distance([0,0], pre):
                            path_[str(p)] = path[str(node)] + [p]
    return layer_, his, path_
def get_ans(G):
    """Indices of points from which the bank is reachable in one jump."""
    return [i for i in range(len(G)) if is_safe(G[i])]
def get_layer(G):
    """Indices reachable directly from the central platform
    (radius 7.5 around the origin, plus jump distance d)."""
    return [i for i in range(len(G)) if get_distance([0, 0], G[i]) <= d + 7.5]
# ans: points that can reach the bank in one jump;
# layer: points reachable directly from the central platform.
ans, layer = get_ans(G), get_layer(G)
# If a first-hop point is already an escape point, one jump suffices.
for p in layer:
    if p in ans:print(1);exit(0)
if not ans or not layer:print(0);exit(0)
his, path = [], {str(i):[i] for i in layer}
cycle = True
# Breadth-first search outward until a frontier contains an escape point.
while layer and cycle:
    layer, his, path = BFS(layer, his, path, G)
    result = []
    for p in layer:
        if p in ans:
            # Collect the coordinates along this escape path.
            tmp = [G[i] for i in path[str(p)]]
            result.append(tmp)
            cycle = False
if not result:print(0);exit(0)
# Number of jumps = stepping stones + the final jump to the bank.
print(len(result[0]) + 1)
# Among shortest escapes, prefer the path whose first hop is closest
# to the origin.
result.sort(key = lambda x:get_distance([0,0],x[0]))
for i in result[0]:
    print(' '.join(list(map(str, i))))
bc9cb6beb6ecd845278babcf6a86d79896fca9e4 | Python | romannort/Zivs | /scripts/zivs4.py | UTF-8 | 2,805 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import math
import json
import base64
import numpy as np
# First command-line argument: the feedback polynomial as a bit string,
# e.g. "1001" -- taps are the positions holding "1".
poly = sys.argv[1]
poly_len = len(poly)
def printAll(matrix):
    """Print a square (numpy) matrix row by row with a leading row index.

    Uses Python 2 print statements with trailing commas for same-line
    output. NOTE(review): both branches of the i < 10 check print the
    same thing -- presumably one was meant to pad single-digit indices.
    """
    for i in range(len(matrix)):
        if(i < 10):
            print i, ' ',
        else:
            print i, ' ',
        for j in range(len(matrix)):
            print matrix.item(i, j),
        print ""
def generateFirstState(poly_len):
    """Initial LFSR state: a 1 followed by poly_len - 1 zeros."""
    firstState = [0] * poly_len
    firstState[0] = 1
    return firstState
firstState = generateFirstState(poly_len)
def generateStates(poly, state):
    """Recursively collect the LFSR state cycle starting after `state`.

    The feedback bit XORs the state positions selected by `poly`;
    recursion stops once the module-level firstState reappears.
    Returns the successor states in generation order.
    """
    feedback = 0
    for i in range(len(state)):
        if poly[i] == "1":
            feedback ^= state[i]
    # Shift right, feeding the new bit in at the front.
    newState = [feedback] + state[:-1]
    global firstState
    if firstState == newState:
        return []
    states = generateStates(poly, newState)
    states.append(newState)
    return states
def getDecimal(state):
    """Interpret a bit list (MSB first) as an integer."""
    res = 0
    for bit in state:
        res = res * 2 + bit
    return res
def generateTMatrix(states, size):
    """Build the size x size state-transition matrix: T[next][current] = 1
    for consecutive states, closing the cycle from the last back to the first."""
    T = [[0] * size for _ in range(size)]
    for i in range(len(states) - 1):
        T[getDecimal(states[i + 1])][getDecimal(states[i])] = 1
    T[getDecimal(states[0])][getDecimal(states[-1])] = 1
    return np.matrix(T)
def getFactorial(n):
    """Return n! (1 for n < 2).

    Rewritten iteratively: the original recursed once per unit of n and
    could hit the recursion limit for large inputs; behavior is otherwise
    identical (negative n still yields 1).
    """
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def generateCMatrix(size):
    """Lower-triangular matrix of binomial coefficients C[i][j] = i choose j.

    Division is left as `/` to match the original's Python 2 integer
    semantics.
    """
    C = [[0] * size for _ in range(size)]
    for row in range(size):
        for col in range(row + 1):
            C[row][col] = (getFactorial(row) / (getFactorial(col) * getFactorial(row - col)))
    return np.matrix(C)
def checkLinearity(L, poly_len):
    """Check the row of L at index 2**(poly_len-1) at the columns selected
    by the taps of the module-level polynomial `poly`.

    Returns False as soon as one inspected entry is 0, else True.
    Uses a Python 2 print statement for each inspected entry.
    """
    rowIndex = 2**(poly_len - 1)
    global poly
    # Column positions derived from the tap indices (2**(i+1)).
    twos = []
    for i in range(len(poly)):
        if(poly[i] == "1"):
            twos.append(2 ** (i+1))
    for i in twos:
        print i, L.item(rowIndex, i-1)
        if (L.item(rowIndex, i-1) == 0):
            return False
    return True
def cellMul(size, a, b, x, y):
    """Dot product of row x of a and column y of b, reduced mod 2 (GF(2))."""
    total = 0
    for k in range(size):
        total += a.item(x, k) * b.item(k, y)
    return total % 2
def mul(a, b):
    """Matrix product of a and b over GF(2), returned as np.matrix."""
    size = len(a)
    res = [[cellMul(size, a, b, i, j) for j in range(size)] for i in range(size)]
    return np.matrix(res)
# Enumerate the full LFSR state cycle (seed first, in generation order).
states = generateStates(poly, firstState)
states.append(firstState)
states = states[::-1]
print "\nStates:"
print np.matrix(states)

# Transition matrix over all 2**poly_len possible states.
T = generateTMatrix(states, 2**poly_len)
# print "\nMatrix T:"
# printAll(T)

# Binomial-coefficient matrix reduced mod 2 (Sierpinski pattern).
notBinC = generateCMatrix(2**poly_len)
C = notBinC % 2
transposedC = C.transpose()
# print "\nMatrix C:"
# printAll(C)
# print "\nMatrix C Transposed:"
# print transposedC
# L = C^T * T * C^T over GF(2).
tempL = mul(transposedC, T)
# print "\nMatrix L temp:"
# printAll(tempL)
L = mul(tempL, transposedC)
print "\nMatrix L:"
printAll(L)
# isLinear = checkLinearity(L, poly_len)
# if isLinear:
# print "Linear"
# else:
# print "Not Linear"
256d13c8bdac41f458484a4fcbce1559bc925ed8 | Python | kotexxx/1 | /RandomForestRegressor.py | UTF-8 | 6,471 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import pydotplus
import csv
import matplotlib.pyplot as plt
import os
import pydot
#import six
from sklearn.model_selection import train_test_split
#from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing
from IPython.display import Image
from graphviz import Digraph
from sklearn.externals.six import StringIO
from sklearn.metrics import (roc_curve, auc, accuracy_score)
from sklearn.tree import export_graphviz
from sklearn import tree
#from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
# Load the training CSV (no header row): columns 0-15 are features,
# column 16 is the target weight.
df = pd.read_csv("Traindatafor05072019.csv",header=None)
#a = df[df.iloc[:,16] <120]
#b = a[a.iloc[:,16] >= 80]
#c = b[b.iloc[:,13] >= 15]
# Explanatory variables (X) and target variable (y).
X = df.iloc[:,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]]#.values
y = df.iloc[:,[16]]#.values
# Test data: keep only rows whose target (column 16) lies in [80, 120).
do= pd.read_csv("070719data test.csv",header=None)
aa = do[do.iloc[:,16] <120]
bb = aa[aa.iloc[:,16] >= 80]
ds = bb.iloc[:,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]]#.values
dd = bb.iloc[:,[16]]#.values,7,8,9,10,11,12,13,14,15
# Train the model.
clf = RandomForestRegressor()
#clf = RandomForestClassifier()
clf = clf.fit(X, y)
"""
・n_estimators →デフォルト値10。決定木の数。
・max_features →デフォルト値は特徴量数の平方根。決定木の特徴量の数。大きいと似たような決定木が増える、小さいと決定木がばらけるがデータに適合できにくくなる。
・random_state →デフォルト値なし。乱数ジェネレータをどの数値から始めるか。前回と異なるモデルを得たい場合は数値を変更すると良いです。
max_depth →デフォルト値なし。決定木の最大の深さ。
min_samples_split →デフォルト値2。木を分割する際のサンプル数の最小数
"""
# Evaluate on the held-out rows: print predicted vs. actual values and a
# ratio-based accuracy score, then the RMSE.
predict = clf.predict(ds)
rate_sum = 0
print("予測値 " "正解値")
for i in range(len(dd)):
    p = int(predict[i])
    t = int(dd.iloc[i])
    # min/max ratio * 100 -> 100 when prediction equals truth, lower
    # the further apart they are (choosing min/max keeps it <= 1).
    rate_sum += int(min(t, p) / max(t, p) * 100)
    print(p , t)
print("精度 ")
# Mean of the per-sample ratio scores.
print(rate_sum / len(dd))
rms = np.sqrt(mean_squared_error(predict,dd))
print(rms)
# Feature importances from the fitted forest.
feature = clf.feature_importances_
# Print the features from most to least important.
f = pd.DataFrame({'number': range(0, len(feature)),
                  'feature': feature[:]})
f2 = f.sort_values('feature',ascending=False)
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas
# (use f2.loc[:, 'number']); f3 is never used below.
f3 = f2.ix[:, 'number']
# Feature names (column labels of the training frame).
label = df.columns[0:]
# Importance order, descending.
indices = np.argsort(feature)[::-1]
for i in range(len(feature)):
    print(str(i + 1) + " " + str(label[indices[i]]) + " " + str(feature[indices[i]]))
plt.title('Feature Importance')
plt.bar(range(len(feature)),feature[indices], color='lightblue', align='center')
plt.xticks(range(len(feature)), label[indices], rotation=90)
plt.xlim([-1, len(feature)])
plt.tight_layout()
plt.show()

# Visualize every tree in the forest as a PNG (depth capped at 3).
dot_data = StringIO()
i_tree=0
col=['weight','K length','K Perimeter length','K Radius','Total pixels','curvature','Tilt','K area','3D K length','3D weight','3D K length','3D K Perimeter length','3D K Radius','Height in 3D','Average height in 3D','Luminance value']
for tree_in_forest in clf.estimators_:
    export_graphviz(tree_in_forest, out_file='tree dot',feature_names=col, max_depth=3)#max_depth=5
    (graph,)=pydot.graph_from_dot_file('tree dot')
    name= 'tree'+ str(i_tree)
    graph.write_png(name+'.png')
    # NOTE(review): '-Tpg' looks like a typo for '-Tpng', and the file is
    # written as 'tree dot' (with a space) but referenced here as
    # 'tree.dot' -- this os.system call presumably does nothing useful.
    os.system('dot -Tpg tree.dot -o tree.png')
    i_tree+=1
# In[1]:
#cross_validation 交差検定
#def get_score(clf,train_data, train_label):
# train_data, test_data,train_label,test_label = cross_validation.train_test_split(train_data, train_label,test_size=0.2,random_state=0) #random_state=0
# clf.fit(train_data, train_label)
# print (clf.score(test_data, test_label) ) #cross_validation.train_test_splitは一定の割合が検証用データとなる
#def get_accuracy(clf,train_data, train_label):
# scores = cross_validation.cross_val_score(clf,train_data, train_label, cv=10)
# print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
#精度検証
#pred = clf.predict(test_data)
#fpr, tpr, thresholds = roc_curve(test_label, pred, pos_label=1)
#auc(fpr, tpr)
#accuracy_score(pred, test_label)
#graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
#graph.write_pdf("decisiontree.pdf")
#Image(graph.create_png())
#from sklearn import tree
#for i,val in enumerate(clf.estimators_):
# tree.export_graphviz(clf.estimators_[i], out_file='tree_%d.dot'%i)
K = 5
kf = KFold(n_splits=K, shuffle=True, random_state=17)
score_train_tmp = 0
score_test_tmp = 0
for train_index, test_index in kf.split(data):
train_data, train_label= data[train_index], data[test_index]
train_label, test_label = label[train_index], label[test_index]
# 構築データでモデル構築
clf.fit( train_data, train_label)
# 構築データの予測値
pred_train = clf.predict(train_data)
# 構築データのaccuracy
auccuracy = accuracy_score(pred_train, train_label)
#構築データのaccuracyを足していく
score_train_tmp+=auccuracy
#検証データの予測値
pred_test = clf.predict(test_label)
#検証データのaccuracy
auccuracy = accuracy_score(pred_test, y_test)
#検証データのaccuracyを足していく
score_test_tmp+=auccuracy
#決定木の可視化
dot_data = StringIO()
i_tree=0
col=['weight','K length','K Perimeter length','K Radius','Total pixels','K area','3D K length','3D weight','3D K length','3D K Perimeter length','3D K Radius','Height in 3D','Average height in 3D','Luminance value']
for tree_in_forest in clf.estimators_:
export_graphviz(tree_in_forest, out_file='tree dot',feature_names=col, max_depth=5)#max_depth=5
(graph,)=pydot.graph_from_dot_file('tree dot')
name= 'tree'+ str(i_tree)
graph.write_png(name+'.png')
os.system('dot -Tpg tree.dot -o tree.png')
i_tree+=1
# In[ ]:
| true |
cd7f4265bf85547db86c266d55973155292c0c4a | Python | hrishikeshv/Project | /v2/lenet5/lenet_poly.py | UTF-8 | 4,015 | 2.65625 | 3 | [] | no_license | '''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
import argparse
import sys
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import PolyDense, Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import EarlyStopping
from keras.initializations import normal
from keras.regularizers import l2
batch_size = 512
nb_classes = 10
nb_epoch = 50
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
kernel_size = (3, 3)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
parser = argparse.ArgumentParser()
parser.add_argument("--l1", default= 'normal', help="use dense or polydense")
parser.add_argument("--l2", default= 'normal', help="use dense or polydense")
parser.add_argument("--deg1", default= 10, help="use dense or polydense", type=int)
parser.add_argument("--deg2", default= 10, help="Input polynomial degree", type=int)
parser.add_argument("--epoch",default= 100, help="Number of epochs", type=int)
parser.add_argument("--activ",default= "sigmoid", help="Number of hidden layers")
parser.add_argument("--reg", help="Regularization weight")
args = parser.parse_args(sys.argv[1:])
model = Sequential()
model.add(Convolution2D(6,5,5,border_mode="same",input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool),strides=(2,2)))
model.add(Convolution2D(16, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool),strides=(2,2)))
model.add(Flatten())
W = normal((model.output_shape[-1], 120)).eval()
reg = None
if args.reg:
reg = l2(float(args.reg))
if args.l1 == 'normal':
model.add(Dense(120, bias=False, weights=[W]))
else:
coeff = np.polynomial.polynomial.polyfit(np.arange(model.output_shape[-1]) + 1.0, W, deg=args.deg1 + 1)
model.add(PolyDense(120, deg = args.deg1,weights=[coeff]))
model.add(Activation(args.activ))
W = normal((model.output_shape[-1], 84)).eval()
if args.l2 == 'normal':
model.add(Dense(84, bias=False, weights=[W]))
else:
coeff = np.polynomial.polynomial.polyfit(np.arange(model.output_shape[-1]) + 1.0, W, deg=args.deg2 + 1)
model.add(PolyDense(84, deg = args.deg2,weights=[coeff], W_regularizer = reg))
model.add(Activation(args.activ))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
earlystopping = EarlyStopping(monitor = 'val_loss', patience=10, mode = 'min', verbose = 0)
model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=args.epoch,callbacks=[earlystopping],
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
if args.l2 == "poly":
model.save_weights('lenet5comp.h5')
else:
model.save_weights('lenet5uncomp.h5')
#with open('testscoreLenet5.txt','a') as f:
# f.write(str(score[0]) + ' ' + str(score[1])+'\n')
| true |
519e198eb7a2294fc9fd80e0912d7f8977b549d9 | Python | polika78/zendesk-coding-challenge | /tests/searchapp/repository/test_ticket_repo.py | UTF-8 | 7,207 | 2.8125 | 3 | [] | no_license | import pytest
from searchapp.models.ticket import Ticket
from searchapp.repository.ticket_repo import TicketRepo
from searchapp.errors.unknown_search_term_error import UnknownSearchTermError
class TestTicketRepo:
    """Tests for TicketRepo: loading records builds the tickets map and the
    per-field inverted index; search_by_term resolves id, scalar, and tag
    terms (including missing fields indexed under "") and raises
    UnknownSearchTermError for unsupported terms."""

    @pytest.fixture
    def ticket_records(self):
        # Three raw ticket dicts; the third omits 'type' and 'assignee_id'
        # to exercise indexing of missing fields as the empty string.
        return [
            {
                "_id": "436bf9b0-1147-4c0a-8439-6f79833bff5b",
                "created_at": "2016-04-28T11:19:34-10:00",
                "type": "incident",
                "subject": "A Catastrophe in Korea (North)",
                "assignee_id": 24,
                "tags": [
                    "Ohio",
                    "Pennsylvania",
                    "American Samoa",
                    "Northern Mariana Islands"
                ]
            },
            {
                "_id": "6aac0369-a7e5-4417-8b50-92528ef485d3",
                "created_at": "2016-06-15T12:03:55-10:00",
                "type": "question",
                "subject": "A Nuisance in Latvia",
                "assignee_id": 29,
                "tags": [
                    "Washington",
                    "Wyoming",
                    "Ohio",
                    "Pennsylvania"
                ]
            },
            {
                "_id": "8629d5fa-89c4-4e9b-9d9f-221b68b079f4",
                "created_at": "2016-02-03T03:44:33-11:00",
                "subject": "A Drama in Indonesia",
                "tags": [
                    "Ohio",
                    "Pennsylvania",
                    "American Samoa",
                    "Northern Mariana Islands"
                ]
            }
        ]

    def test_given_json_load_sets_tickets_and_indexing(self, ticket_records):
        # load() should key tickets by _id and build a lowercased inverted
        # index per searchable field (missing scalars indexed under "").
        ticket_repo = TicketRepo()
        expected_tickets = dict([(str(record["_id"]), Ticket(**record)) for record in ticket_records])

        ticket_repo.load(ticket_records)

        assert ticket_repo.tickets == expected_tickets
        assert ticket_repo.indexing == {
            'created_at': {
                "2016-04-28t11:19:34-10:00": ["436bf9b0-1147-4c0a-8439-6f79833bff5b"],
                "2016-06-15t12:03:55-10:00": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
                "2016-02-03t03:44:33-11:00": ["8629d5fa-89c4-4e9b-9d9f-221b68b079f4"]
            },
            'type': {
                "incident": ["436bf9b0-1147-4c0a-8439-6f79833bff5b"],
                "question": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
                "": ["8629d5fa-89c4-4e9b-9d9f-221b68b079f4"]
            },
            'subject': {
                "a catastrophe in korea (north)": ["436bf9b0-1147-4c0a-8439-6f79833bff5b"],
                "a nuisance in latvia": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
                "a drama in indonesia": ["8629d5fa-89c4-4e9b-9d9f-221b68b079f4"]
            },
            'assignee_id': {
                "24": ["436bf9b0-1147-4c0a-8439-6f79833bff5b"],
                "29": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
                "": ["8629d5fa-89c4-4e9b-9d9f-221b68b079f4"]
            },
            'tags': {
                "american samoa": ["436bf9b0-1147-4c0a-8439-6f79833bff5b", "8629d5fa-89c4-4e9b-9d9f-221b68b079f4"],
                "northern mariana islands": ["436bf9b0-1147-4c0a-8439-6f79833bff5b", "8629d5fa-89c4-4e9b-9d9f-221b68b079f4"],
                "ohio": ["436bf9b0-1147-4c0a-8439-6f79833bff5b", "6aac0369-a7e5-4417-8b50-92528ef485d3", "8629d5fa-89c4-4e9b-9d9f-221b68b079f4"],
                "pennsylvania": ["436bf9b0-1147-4c0a-8439-6f79833bff5b", "6aac0369-a7e5-4417-8b50-92528ef485d3", "8629d5fa-89c4-4e9b-9d9f-221b68b079f4"],
                "washington": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
                "wyoming": ["6aac0369-a7e5-4417-8b50-92528ef485d3"],
            }
        }

    def test_after_loaded_given_id_term_search_by_term_returns_matched_tickets(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("_id", "436bf9b0-1147-4c0a-8439-6f79833bff5b")

        assert tickets == [Ticket(**ticket_records[0])]

    def test_after_loaded_given_created_at_term_search_by_term_returns_matched_tickets(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("created_at", "2016-06-15T12:03:55-10:00")

        assert tickets == [Ticket(**ticket_records[1])]

    def test_after_loaded_given_type_term_search_by_term_returns_matched_tickets(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("type", "incident")

        assert tickets == [Ticket(**ticket_records[0])]

    def test_after_loaded_given_type_term_with_empty_string_search_by_term_returns_matched_tickets(self, ticket_records):
        # Empty string matches the ticket that omitted the field.
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("type", "")

        assert tickets == [Ticket(**ticket_records[2])]

    def test_after_loaded_given_subject_term_search_by_term_returns_matched_tickets(self, ticket_records):
        # Lookup is case-insensitive (index keys are lowercased).
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("subject", "A Nuisance in Latvia")

        assert tickets == [Ticket(**ticket_records[1])]

    def test_after_loaded_given_assigned_id_term_search_by_term_returns_matched_tickets(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("assignee_id", "24")

        assert tickets == [Ticket(**ticket_records[0])]

    def test_after_loaded_given_assignee_id_term_with_empty_string_search_by_term_returns_matched_tickets(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("assignee_id", "")

        assert tickets == [Ticket(**ticket_records[2])]

    def test_after_loaded_given_tags_term_search_by_term_returns_matched_tickets(self, ticket_records):
        # A tag shared by all three tickets returns them all, in load order.
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("tags", "ohio")

        assert tickets == [Ticket(**ticket_records[0]), Ticket(**ticket_records[1]), Ticket(**ticket_records[2])]

    def test_after_loaded_given_not_found_term_search_by_term_returns_empty(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("tags", "foo")

        assert tickets == []

    def test_after_loaded_given_not_found_id_search_by_term_returns_empty(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        tickets = ticket_repo.search_by_term("_id", "400")

        assert tickets == []

    def test_after_loaded_given_unknown_search_term_search_by_term_raise_unknown_search_term_error(self, ticket_records):
        ticket_repo = TicketRepo()
        ticket_repo.load(ticket_records)

        with pytest.raises(UnknownSearchTermError) as e:
            ticket_repo.search_by_term("unknown", "900")
| true |
016b4f280b4b26677890b403faedf0f14d2f3e4b | Python | JhonSmith0x7b/reclannad | /app/tiebasearch/static/db/db_converter.py | UTF-8 | 1,297 | 2.640625 | 3 | [] | no_license | #-*- coding:utf-8 -*-
import sqlite3
def main():
old_db = sqlite3.connect('/Users/jhonsmith/develop/workspace/python_workspace/reclannad/app/tiebasearch/static/db/tiebadata_db.db')
old_cursor = old_db.cursor()
old_db.text_factory = bytes
old_data = old_cursor.execute('select * from tiebadata_table')
new_db = sqlite3.connect('/Users/jhonsmith/develop/workspace/python_workspace/reclannad/app/tiebasearch/static/db/new_tiebadata_db.db')
create_table_sql = """
CREATE TABLE tiebadata_table (id INTEGER,author TEXT, date TEXT, href TEXT, times INTEGER, title TEXT)
"""
new_cursor = new_db.cursor()
new_cursor.execute(create_table_sql)
new_db.commit()
for row in old_data:
try:
new_cursor.execute('insert into tiebadata_table(id, author, date, href, times, title) values(?, ?, ?, ?, ?, ? )',
(row[0],
row[1].decode('gbk'),
row[2].decode('gbk'),
row[3].decode('gbk'),
row[4],
row[5].decode('gbk'),))
new_db.commit()
except Exception as e:
print(str(e))
print(row)
if __name__ == '__main__':
main() | true |
8992847e47921888c6c6d9f588910f307c0e89ac | Python | AndrewSukhobok95/tracking_autorobot_bsf_project | /utils/plotting.py | UTF-8 | 1,020 | 2.5625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import plotly
import plotly.express as px
import plotly.graph_objects as go
def plot_interactive_lines(df: pd.DataFrame,
xcol:str,
ycol:list,
title:str,
xaxis_name:str=None,
labels:list=None,
html_name:str="plot.html"):
fig = go.Figure()
for i in range(len(ycol)):
col_name = ycol[i]
if col_name!=xcol:
label = ycol[i]
if labels is not None:
label = labels[i]
scatter = go.Scatter(x=df[xcol], y=df[col_name], name=label, mode='lines+markers')
fig.add_trace(scatter)
xaxis_title = xaxis_name if xaxis_name else xcol
fig.update_layout(
title=title,
xaxis_title=xaxis_title,
yaxis_title='Value')
plotly.offline.plot(fig, filename=html_name)
# fig.show()
| true |
c21fc1681501bde472aad0bfc3f2e816ac9f4138 | Python | kaustubh-pandey/Facebook_Hackercup_2018 | /Round_1/letitflow.py | UTF-8 | 887 | 2.8125 | 3 | [] | no_license | def impossible(a,n):
if(a[0][0]=='#' or a[2][n-1]=='#'):
return True
for i in range(n):
if(a[1][i]=='#'):
return True
return False
with open('let_it_flow.txt','r') as f:
for line in f:
t=int(line)
break
#t=int(input())
for z in range(t):
for line in f:
n=int(line)
break
#n=int(input())
a=[]
for i in range(3):
for line in f:
a.append(list(line.strip()))
break
#print(a)
with open('output.txt','a') as fp:
fp.write('Case #%d: '%(z+1))
if(n%2):
fp.write("0\n")
elif(impossible(a,n)):
fp.write("0\n")
else:
arr=[2]*((n-2)//2)
pos=0
for i in range(1,n-2,2):
if(a[0][i]=='#' or a[0][i+1]=='#'):
arr[pos]-=1
if(a[2][i]=='#' or a[2][i+1]=='#'):
arr[pos]-=1
pos+=1
pro=1
for i in range(len(arr)):
pro=(pro*arr[i])%(10**9+7)
fp.write(str(pro%(10**9+7)))
fp.write("\n")
| true |
297d8fedc782e55f18b17bdd13794dbfe4154aa4 | Python | HeDefine/LeetCodePractice | /Q1848.到目标元素的最小距离.py | UTF-8 | 1,655 | 4.1875 | 4 | [] | no_license | # 给你一个整数数组 nums (下标 从 0 开始 计数)以及两个整数 target 和 start ,
# 请你找出一个下标 i ,满足 nums[i] == target 且 abs(i - start) 最小化 。
# 注意:abs(x) 表示 x 的绝对值。
# 返回 abs(i - start) 。
# 题目数据保证 target 存在于 nums 中。
#
# 示例 1:
# 输入:nums = [1,2,3,4,5], target = 5, start = 3
# 输出:1
# 解释:nums[4] = 5 是唯一一个等于 target 的值,所以答案是 abs(4 - 3) = 1 。
#
# 示例 2:
# 输入:nums = [1], target = 1, start = 0
# 输出:0
# 解释:nums[0] = 1 是唯一一个等于 target 的值,所以答案是 abs(0 - 0) = 0 。
#
# 示例 3:
# 输入:nums = [1,1,1,1,1,1,1,1,1,1], target = 1, start = 0
# 输出:0
# 解释:nums 中的每个值都是 1 ,但 nums[0] 使 abs(i - start) 的结果得以最小化,所以答案是 abs(0 - 0) = 0 。
#
# 提示:
# 1 <= nums.length <= 1000
# 1 <= nums[i] <= 104
# 0 <= start < nums.length
# target 存在于 nums 中
class Solution:
def getMinDistance(self, nums: list, target: int, start: int) -> int:
for i in range(max(start+1, len(nums) - start)):
if start - i >= 0 and nums[start - i] == target:
return i
if start + i < len(nums) and nums[start + i] == target:
return i
return 0
print(Solution().getMinDistance(nums = [1,2,3,4,5], target = 5, start = 3)) # 1
print(Solution().getMinDistance(nums = [1], target = 1, start = 0)) # 0
print(Solution().getMinDistance(nums = [1,1,1,1,1,1,1,1,1,1], target = 1, start = 0)) # 0
print(Solution().getMinDistance([5,3,6], 5, 2)) | true |
1722fe3c2c864f59007dfcf232a8d217c29121d8 | Python | SadBattlecruiser/Godunov-Orthogonalization | /prog/Графики.py | UTF-8 | 1,324 | 2.609375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data0_link = r'out\harmonic0.csv'
data0 = pd.read_csv(data0_link)
data1_link = r'out\harmonic1.csv'
data1 = pd.read_csv(data1_link)
data2_link = r'out\harmonic2.csv'
data2 = pd.read_csv(data2_link)
#data3_link = r'out\harmonic3.csv'
#data3 = pd.read_csv(data3_link)
data4_link = r'out\harmonic4.csv'
data4 = pd.read_csv(data4_link)
plt.figure(figsize=(16,10), dpi= 80)
plt.plot(data0['t'], data0['y2'])
#plt.plot(data1['t'], data1['y0'])
#plt.plot(data2['t'], data2['y0'])
#plt.plot(data3['t'], data3['y0'])
#plt.plot(data4['t'], data4['y0'])
plt.figure(figsize=(16,10), dpi= 80)
plt.plot(data1['t'], data1['y2'])
#plt.plot(data0['t'], data0['y1'])
#plt.plot(data1['t'], data1['y1'])
#plt.plot(data2['t'], data2['y1'])
#plt.plot(data3['t'], data3['y1'])
#plt.plot(data4['t'], data4['y1'])
plt.figure(figsize=(16,10), dpi= 80)
plt.plot(data2['t'], data2['y2'])
#plt.plot(data0['t'], data0['y2'])
#plt.plot(data1['t'], data1['y2'])
#plt.plot(data2['t'], data2['y2'])
#plt.plot(data3['t'], data3['y2'])
#plt.plot(data4['t'], data4['y2'])
plt.figure(figsize=(16,10), dpi= 80)
plt.plot(data2['t'], data4['y2'])
print(data0)
sum = data0['y2']+data1['y2']+data2['y2']+data4['y2']
plt.figure(figsize=(16,10), dpi= 80)
plt.plot(data2['t'], sum)
plt.show()
| true |
538df4431d91c8e5f25388380c519542b4c8887d | Python | btoztas/ASint | /Lab4/Book.py | UTF-8 | 423 | 3.21875 | 3 | [] | no_license | class Book:
def __init__(self, identifier, title, author, publication_date):
self.author = author
self.title = title
self.publication_date = publication_date
self.identifier = identifier
def __str__(self):
return "Title: " + self.title + "\nAuthor: " + self.author + "\nPublication Date: " + self.publication_date \
+ "\nIdentifier: " + str(self.identifier)
| true |
3ede7fc80f09d5949dd90ed0602bb576eb5e1f40 | Python | bmcdonnel/transmission-simulator | /components/engine.py | UTF-8 | 2,484 | 2.890625 | 3 | [] | no_license | import logging
import utilities.map_loader
class Engine(object):
def __init__(self):
self._torque_converter = None
self._torque_map = dict()
self._engine_max_speed = 0
self._engine_speed = 0
self._engine_torque = 0
self._engine_impeller_moment = 0.02 # 10 inch torque converter
self._engine_speed_steps = []
self._torque_steps = []
def Initialize(self, torque_map_filename, torque_converter):
logging.info("Inititalizing Engine from " + torque_map_filename)
self._LoadTorqueMapFromFile(torque_map_filename)
self._torque_converter = torque_converter
def Start(self):
self._engine_speed = 800
self._engine_torque = self._GetEngineTorque(0, self._engine_speed)
self._torque_converter.StepOnce()
logging.info("Engine started; idling at " + str(self._engine_speed) + " RPM")
def StepOnce(self, throttle_position):
self._engine_torque = self._GetEngineTorque(throttle_position, self._engine_speed)
impeller_torque = self._torque_converter.GetImpellerTorque()
logging.info("engine (speed, torque) = ({}, {}), impeller torque {}".format(self._engine_speed, self._engine_torque, impeller_torque))
# TODO integrate this?
self._engine_speed += int((self._engine_torque - impeller_torque) * self._engine_impeller_moment)
logging.info("new engine speed {}".format(self._engine_speed))
if self._engine_speed > self._engine_max_speed:
logging.info("rev limiting engine to {}".format(self._engine_max_speed))
self._engine_speed = self._engine_max_speed
self._engine_speed_steps.append(self._engine_speed)
self._torque_steps.append(self._engine_torque)
self._torque_converter.StepOnce()
def GetEngineSpeed(self):
return self._engine_speed
def GetEngineSpeedSteps(self):
return self._engine_speed_steps
def GetEngineTorque(self):
return self._engine_torque
def GetTorqueSteps(self):
return self._torque_steps
def GetTorqueMap(self):
return self._torque_map
def _GetEngineTorque(self, throttle_position, rpm):
return self._torque_map[throttle_position][rpm]
def _LoadTorqueMapFromFile(self, filename):
mapLoader = utilities.map_loader.MapLoader(filename)
self._torque_map, value_count = mapLoader.LinearlyInterpolate()
self._engine_max_speed = self._torque_map[0].keys()[-1]
logging.info("Loaded " + str(value_count) + " torque values")
logging.info("Max engine RPM " + str(self._engine_max_speed))
| true |
0a8885f061339782cca6dec6300f4af2ca3cb56a | Python | flowerlake/stupidSpider | /crawler.py | UTF-8 | 2,188 | 2.71875 | 3 | [] | no_license | """
time: 2019.06.03 15:34
author: gao yang
"""
import requests
from Config import ExtractRules as Rule
from Config import stupidSpiderConfig as Config
from lxml import etree
from bs4 import BeautifulSoup
from piplines import OriginalWebContent, UrlListPipline, process_data, StupidSpiderPipline, article2file
SOGOU_URL = "http://www.sogou.com/web?query=" + Config.keyword + "+site%3A" + Config.url_list[0]
BAIDU_URL_1 = "http://www.baidu.com/s?wd=" + Config.keyword + " site%3A" + Config.url_list[0]
BAIDU_URL_2 = "http://www.baidu.com/s?wd=" + Config.keyword + " site%3A" + Config.url_list[1]
GOOGLE_URL = "https://www.google.com/search?q=" + Config.keyword + "+site%3A" + Config.url_list[0]
def get_news_list(url):
print("crawl website:{}".format(url))
response = requests.get(url, headers=Config.User_headers)
response.encoding = 'utf-8'
html = etree.HTML(response.text, etree.HTMLParser())
title = html.xpath('//title')[0].text
OriginalWebContent({"title": title, "content": response.text})
element_links = html.xpath(Rule.baidu_news_xpath['link'])
print(element_links)
links = [get_real_url(link) for link in element_links]
# 保存到url_list.txt文件中
UrlListPipline(links)
def get_real_url(url):
response = requests.get(url, allow_redirects=False)
real_url = response.headers.get('Location')
return real_url
def get_content(url):
response = requests.get(url, headers=Config.User_headers)
response.encoding = 'utf-8'
soup = BeautifulSoup(response.text, 'lxml')
title = soup.title.string.strip()
html = etree.HTML(response.text, etree.HTMLParser())
time = html.xpath(Rule.thepaper_cn_xpath['time'])[0].strip()
print(title,"-",time)
tags_content = html.xpath(Rule.thepaper_cn_xpath['content'])[0]
content = tags_content.xpath("string(.)").strip()
data = {
"title": title,
"time": time,
"content": content
}
StupidSpiderPipline(data)
article2file(data)
if __name__ == "__main__":
get_news_list(BAIDU_URL_2)
links = process_data()
for link in links:
print("crawl link: {}".format(link))
get_content(link)
| true |
901489d22103e55dd1ffc89afc58e9053d3efa10 | Python | oshal7/Covid-Analytics | /main.py | UTF-8 | 1,490 | 2.515625 | 3 | [] | no_license | import urllib
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import requests
import datetime
import pygsheets
def url_ok(url):
try:
r = requests.head(url)
if r:
return True
except Exception as ex:
return False
# define the scope
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
gc = pygsheets.authorize(service_file='Covid SOS-05c234b81737.json')
ss = gc.open("Sheets")
sheet_instance = ss[0]
x = datetime.datetime.now()
time_ = str(x).split('.')[0]
# url = "http://covidhelpnagpur.in/"
url = "https://nsscdcl.org/covidbeds/"
website_is_up = url_ok(url)
if website_is_up:
html = urlopen(url).read()
soup = BeautifulSoup(html)
for script in soup(["script", "style"]):
script.decompose()
strips = list(soup.stripped_strings)
if 'Asst Commissioner' in strips:
idx = strips.index('Asst Commissioner')
O2_Beds = strips[idx:idx + 5:2]
Non_O2_Beds = strips[idx + 5:idx + 10:2]
ICU_Beds = strips[idx + 10:idx + 15:2]
Ventilators = strips[idx + 15:idx + 20:2]
data = [O2_Beds, Non_O2_Beds, ICU_Beds, Ventilators]
last_updated = [time_, time_, time_, time_]
df = pd.DataFrame(data, columns=['Type', 'Available', 'Occupied'])
df['Last Updated'] = last_updated
print(df)
sheet_instance.set_dataframe(df, (1, 1))
else:
print('No data retrieved')
| true |
363b25809a328b9be0fd4f145f83f4b64f04636f | Python | gbaghdasaryan94/Kapan | /Harut/workspace/pset6/cash/cash.py | UTF-8 | 186 | 2.828125 | 3 | [] | no_license | from cs50 import get_float
while True:
d = get_float("$ = ")
if (d > 0):
break
c = d * 100
q = c // 25 + c % 25 // 10 + c % 25 % 10 // 5 + c % 25 % 10 % 5 // 1
print(q) | true |
3322eef28c50a6c4e68b20c89c9431cacd4b2beb | Python | michaelStettler/BVS | /tests/CNN/test_conv_tf.py | UTF-8 | 1,552 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | import tensorflow as tf
import numpy as np
input = np.zeros((1, 5, 5, 3))
input[0, 1:4, 2, 0] = 1
print("---------------------------------------------------------")
print("input")
print(input[0, :, :, 0])
print(input[0, :, :, 1])
print(input[0, :, :, 2])
print()
# kernel = np.zeros((5, 5, 3, 3))
kernel = np.zeros((1, 1, 3, 3))
kernel[0, 0, 0, 1] = 1
kernel[0, 0, 1, 1] = 2
print("shape kernel", np.shape(kernel))
# kernel = np.repeat(kernel, 3, axis=4)
# print("shape kernel", np.shape(kernel))
print("---------------------------------------------------------")
print("kernel")
for i in range(3):
print(kernel[:, :, 0, i])
print(kernel[:, :, 1, i])
print(kernel[:, :, 2, i])
print()
outputs2 = tf.nn.conv2d(input, kernel, strides=1, padding='SAME')
print("shape outputs2", np.shape(outputs2))
# outputs2 = tf.nn.depthwise_conv2d(input, kernel, strides=[1, 1, 1, 1], padding='SAME')
outputs2 = np.squeeze(outputs2)
print(outputs2[:, :, 0])
print(outputs2[:, :, 1])
print(outputs2[:, :, 2])
kernel = np.moveaxis(kernel, 3, 0)
print("shape kernel", np.shape(kernel))
kernel = np.expand_dims(kernel, axis=4)
print("shape kernel", np.shape(kernel))
outputs = []
for i in range(3):
output = tf.nn.conv2d(input, kernel[i], strides=1, padding='SAME')
outputs.append(output)
print("---------------------------------------------------------")
print("shape outputs", np.shape(outputs))
outputs = np.squeeze(outputs)
print("shape outputs", np.shape(outputs))
print(outputs[0, :, :])
print(outputs[1, :, :])
print(outputs[2, :, :])
| true |
4274dd4a66ee5afda400ccedc4ca1f3ecd6d7228 | Python | m-star18/atcoder | /submissions/joi2007yo/b.py | UTF-8 | 260 | 2.765625 | 3 | [
"Unlicense"
] | permissive | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
s = [int(readline()) for _ in range(28)]
for check in range(1, 31):
if check not in s:
print(check)
| true |
b9e8d586f082442c9802608132670505e3d6873c | Python | mions1/Fog | /simulations/analysis/nolb.py | UTF-8 | 3,401 | 2.53125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import plot as p
from scipy.stats import norm
from scipy.stats import lognorm
from scipy.stats import gamma
from scipy.optimize import curve_fit
import analysis_sca as ansca
import vectorParse as vp
import scaParse as sp
import sys, glob, os
from os import path
#-----------------Analisi FogNoLB----------------------------
def makeFilesNoLB(dir, dir2, dictRun, valuesName):
#Creo files.sp per valuesName
#cioè un file che ha due colonne, la prima con rho e la seconda con i valori delle medie di valuesName
ansca.makeFile(dir, dir2, dictRun, valuesName)
#recupero i file appena creati
files = sp.getFiles(dir2, valuesName+".sp")
#disegno il singolo plot (es. per 1serv20cap)
#drawSingle(dir2, files, "rho",valuesName)
#disegno i plot uniti (stesso mul, per 1serv e 20serv)
#drawDouble(dir2, together(files,"mul_001"),"_001;"+valuesName,x_label="rho",y_label=valuesName,start="MG",end=";")
#drawDouble(dir2, together(files,"mul_1"),"_1;"+valuesName,x_label="rho",y_label=valuesName,start="MG",end=";")
return files
def makeDroppedJobs(dir, dir2, dictRun, preset=1):
#Sostanzialmente fa quello che fa makeFilesNoLB ma in più deve normalizzare i valori di droppedJobs
values1=sp.getValue(dictRun, dir, "droppedJobsTotal")
values2=sp.getValue(dictRun, dir, "totalJobs")
values3 = dict()
for key in values1:
if key not in values3:
values3[key] = list()
for i in range(len(values1[key])):
if values2[key][i] == 0:
values3[key].append(0)
else:
values3[key].append(values1[key][i]/values2[key][i])
means = sp.getMeans(values3)
sp.makeFile(means, path.join(dir2, split[-2]), "droppedJobsTotalfrattoTotalJobs", preset=preset)
files = sp.getFiles(dir2, "frattoTotalJobs.sp")
#drawSingle(dir2, files, "rho", "droppedJobs")
#drawDouble(dir2, together(files,"mul_001"),"_001;droppedJobs",x_label="rho",y_label="droppedJobs",start="MG",end=";")
#drawDouble(dir2, together(files,"mul_1"),"_1;droppedJobs",x_label="rho",y_label="droppedJobs",start="MG",end=";")
return files
#---------------------------------------------
"""
Prime analisi - FogNoLB
"""
dir = sys.argv[1] #Cartella dei files.sca
dir2 = dir if len(sys.argv) <= 2 else sys.argv[2] #Se non passata, dir2 = dir. Cartella output
#Parse dei file .sca, quindi creazione file .sp (dati in colonna)
scaFiles = sp.getFiles(dir,"sca") #Recupero files.sca da dir
dictRun = sp.divideByRun(scaFiles) #Divido i file per le run, utile per fare media
split = dir.split("/") #splitto per prendere l'ultima parte della directory per fare il nome "1serv20cap" etc, sarà la radice dei file.sp che creerò dopo
filesResponseTime = makeFilesNoLB(dir, dir2, dictRun, "responseTime")
filesRho = makeFilesNoLB(dir, dir2, dictRun, "rho")
filesDroppedJobs = makeDroppedJobs(dir, dir2, dictRun)
filesBalancerTime = makeFilesNoLB(dir, dir2, dictRun, "avg_balancerTime_1")
makeFiles1VSFiles2(filesResponseTime, filesRho, dir2, "responseTime", "rho", True)
ansca.makeFiles1VSFiles2(filesDroppedJobs, filesResponseTime, dir2, "droppedJobs", "responseTime", True)
makeFiles1VSFiles2(filesDroppedJobs, filesRho, dir2, "droppedJobs", "rho", True)
#Elimino file temporanei
#cleanDir(path.join(dir2, "*joined*.sp"))
| true |
eef9af8c7c23099745aaa0cb5018acd1fb3f1aab | Python | seongto/code-kata | /20190513-py-09.py | UTF-8 | 1,308 | 3.953125 | 4 | [] | no_license | # * 오늘의 코드 카타
# 오름차순인 숫자 nums 배열과 찾아야할 target을 인자로 주면,
# target이 몇 번째 index인지 return 해주세요.
#
# Input: nums = [-1,0,3,5,9,12], target = 9
# Output: 4
#
# Input: nums = [-1,0,3,5,9,12], target = 2
# Output: -1
# 설명: 찾지 못하면 -1로 return 해주세요.
#
# * nums 배열에 있는 요소는 서로 중복된 값이 없습니다.
# * 이진탐색으로 찾기
def search(nums, target) :
idx = 0
check = True
while check == True :
if len(nums) <= 2:
if nums[0] == target:
return idx
elif nums[1] == target:
return idx+1
else :
return -1
loc = len(nums)//2
print("현재 nums[roc]은 ",nums[loc])
if nums[loc] == target:
check = False
return loc
elif target < nums[loc]:
nums = nums[0:loc]
print("타겟이 작음. 현재 nums : ",nums)
else :
nums = nums[loc+1:]
print("타겟이 큼. 현재 nums : ",nums)
idx = idx + loc+1
# ---------------- 모델 솔루션 ----------------
def search(nums, target):
l, r = 0, len(nums) - 1
while l <= r:
mid = (l + r) // 2
if nums[mid] < target:
l = mid + 1
elif nums[mid] > target:
r = mid - 1
else:
return mid
return -1
| true |
b70b3cca6c638ca42434cda380fb1b6970b5f182 | Python | ipcoo43/algorithm | /lesson131.py | UTF-8 | 161 | 3.125 | 3 | [] | no_license | import pprint
matrix=[[0]*5 for i in range(5)]
i=0
for row in range(0,5):
for col in range(0,row+1):
i=i+1
matrix[row][col]=i
pprint.pprint(matrix)
print() | true |
7f47262bab33b08713ade72f4d77adf5f72b21cd | Python | rgen3/matrix | /__main__.py | UTF-8 | 2,364 | 3.453125 | 3 | [] | no_license | import random, string, time, os
class matrix:
"""
current matrix array
"""
area = []
"""
Size of the matrix
"""
dim = {
'x' : 56,
'y' : 35
}
"""
element index to change sign
"""
current_index = {
'x' : 1,
'y' : 2
}
"""
Max possible letter changes in matrix
"""
max_random = 999
"""
Min possible letter changes in matrix
"""
min_random = 0
"""
Timeout for matrix scrolling
"""
timeout = 1
"""
Max quantity spaces in matrix raw
"""
spaces = 40
def __init__(self):
for i in range(0, self.dim['x']):
self.area.append([])
for j in range(0, self.dim['y']):
self.area[i].append(self.randomChoice())
def __get__(self, instance, owner):
pass
"""
Run the matrix
"""
def run(self):
while True:
self.draw()
time.sleep(self.timeout)
for i in range(1000, 1490):
self.getRandomIndex()
self.changeRandomIndex()
self.moveMatrix()
self.clear()
"""
Move the matrix row
"""
def moveMatrix(self):
self.area.pop()
list = []
for i in range(0, self.dim['x']):
list.append(self.randomChoice())
self.area.insert(0, list)
"""
Draw the matrix
"""
def draw(self):
for i in range(0, self.dim['x']):
line = ''
for j in range(0, self.dim['y']):
line += '{0:4s}'.format(self.area[i][j])
print line
return True
"""
Clear screen
"""
def clear(self):
os.system('clear')
"""
Get random element in matrix to change
"""
def getRandomIndex(self):
self.current_index['x'] = random.randint(0, self.dim['x'] - 1)
self.current_index['y'] = random.randint(0, self.dim['y'] - 1)
"""
Changes random matrix element
"""
def changeRandomIndex(self):
x = self.current_index['x']
y = self.current_index['y']
self.area[x][y] = self.randomChoice()
"""
Choose random element
"""
def randomChoice(self):
return random.choice(string.letters + string.digits + (" " * self.spaces))
m = matrix()
m.run()
| true |
2deeb8361eeb49a1f2d86123cd5c1452b0045b78 | Python | JuneJoshua/Python-Original- | /Mario(Improved).py | UTF-8 | 835 | 3.59375 | 4 | [] | no_license | print("\n")
name = input("Enter a name: ")
print("___________________________________")
print(name.upper(), " WORLD TIME")
print("004250 coins X 01 1-1 283")
print("\n")
print(" |?| ")
def doublePyramid(mario):
for i in range(mario):
for k in range(mario - i):
print(" ", end = "")
for k in range(i):
print("#", end = "")
for i in range(i, 0):
print(i, end = "")
for k in range(4):
print("", end = " ")
for k in range(i):
print("#", end = "")
for i in range(i, 0):
print(i, end = "")
print("\n")
doublePyramid(5)
print("_____ ______________________")
print(" | |")
print("___________________________________")
print("\n")
| true |
e875a3d56d7e49656681a9721686dac04b0c0e75 | Python | goutkannan/HackerRank | /Data Structure and Algorithms/lowerbound.py | UTF-8 | 400 | 3.53125 | 4 | [] | no_license | array = [1,2,3,5,7,8,9,10]
def lower(n,s):
if array[n]==s or (array[n-1]<s and array[n+1]>s):
return n
elif array[n]>s:
return lower(n-1,s)
else:
if len(array)>n:
return lower(n+1,s)
else:
return n
def higher(n,s):
return lower(n,s)-1
i = int(input())
print(higher(int(len(array)/2),i))
print(lower(int(len(array)/2),i))
| true |
a8bc6d6d0e48180e6ff92b695c78f739399837bf | Python | Kelaxon/opinedb_public | /extractor/code/preprocess.py | UTF-8 | 3,664 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | import json
import jsonlines
import csv
import os
import sys
import spacy
import re
common_words = open('data/google-10000-english-no-swears.txt').read().splitlines()
common_words = set(common_words)
nlp = spacy.load('en_core_web_sm')
def handle_punct(text):
text = text.replace("''", "'").replace("\n", ' ').replace("\\n", ' ').replace("\r", ' ')
new_text = ''
i = 0
N = len(text)
while i < len(text):
curr_chr = text[i]
new_text += curr_chr
if i > 0 and i < N - 1:
next_chr = text[i + 1]
prev_chr = text[i - 1]
if next_chr.isalnum() and prev_chr.isalnum() and curr_chr in '!?.,();:':
new_text += ' '
i += 1
return new_text
def has_punct(text):
if re.match("^[a-zA-Z0-9_ ]*$", text):
return False
else:
return True
def sent_tokenizer(text):
punct_flag = has_punct(text)
text = handle_punct(text)
ori_sentences = []
for sent in nlp(text, disable=['tagger', 'ner']).sents:
if len(sent) >= 3:
ori_sentences.append(sent.text)
if punct_flag:
return ori_sentences
else:
# for the booking.com datasets
result = []
for ori_sentence in ori_sentences:
sentences = [[]]
for token in ori_sentence.split(' '):
if len(token) > 0 and token[0].isupper() and token.lower() in common_words and (not len(sentences[-1]) <= 1):
sentences.append([])
sentences[-1].append(token)
result += [' '.join(line) for line in sentences if len(line) > 0]
return result
def preprocess_tagging(input_path, output_path, review_path):
# reviews = json.load(open(input_path))
reviews = []
with open(input_path, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
reviews.append(row)
all_sentences = []
for review in reviews:
if 'text' in review:
text = review['text']
else:
text = review['review']
sentences = sent_tokenizer(text)
sentence_ids = []
for sent in sentences:
sentence_ids.append(len(all_sentences))
all_sentences.append(sent)
review['sentence_ids'] = sentence_ids
if 'extractions' in review:
review.pop('extractions')
# convert sentences into tokens and labels
tokens = []
labels = []
for sent in all_sentences:
# token_list = nltk.word_tokenize(sent)
token_list = []
for token in nlp(sent, disable=['parser', 'ner', 'tagger']):
token_list.append(token.text)
tokens.append(token_list)
labels.append(['O' for _ in token_list])
# print to files
if not os.path.exists(output_path):
os.makedirs(output_path)
output_path = os.path.join(output_path, 'test.txt')
with open(output_path, 'w') as f:
for tlist, llist in zip(tokens, labels):
for i in range(len(tlist)):
f.write('%s %s\n' % (tlist[i], llist[i]))
f.write('\n')
# print reviews
with jsonlines.open(review_path, mode='w') as writer:
for obj in reviews:
writer.write(obj)
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: python preprocess.py reviews_csv output_path output_reviews_jsonl")
exit()
#schema_json_file = sys.argv[1]
#review_file = sys.argv[2]
review_file = sys.argv[1]
output_path = sys.argv[2]
output_reviews_path = sys.argv[3]
preprocess_tagging(review_file, output_path, output_reviews_path)
| true |
ca440f61e5f84c07185cb6f70981f6f5074f3bb5 | Python | MRTANKO/CleanOK | /cleanok/promo/tests/test_views.py | UTF-8 | 1,885 | 2.78125 | 3 | [] | no_license | """Тесты views приложения promo."""
import json
from datetime import date
from django.test import TestCase
from promo.models import Promo
class PromoListViewTest(TestCase):
"""Класс для тестирования views."""
@classmethod
def setUpTestData(cls):
"""Создание 11 акций для тестирования."""
number_of_promo = 11
for promo_num in range(number_of_promo):
Promo.objects.create(
title='title_{}'.format(promo_num),
preview='preview_{}'.format(promo_num),
date=date(year=2019, month=2, day=1 + promo_num))
def test_view_url_exists_at_desired_location(self):
"""Тестирование url."""
resp = self.client.get('/promos/')
self.assertEqual(resp.status_code, 200)
def test_pagination_2_page(self):
"""Тестирование пагинации 2-ой страницы."""
resp = self.client.get('/promos/?page=2')
self.assertEqual(resp.status_code, 200)
self.assertTrue(resp.data['count'] == 11)
self.assertTrue(len(resp.data['results']) == 1)
def test_page_1(self):
"""Тестирование выводимой информации на 1-ой странице."""
number_of_promo = 10
testlist = []
for promo_num in range(number_of_promo):
testlist.append({
'id': promo_num + 1,
'date': str(date(year=2019, month=2, day=1 + promo_num)),
'title': 'title_{}'.format(promo_num),
'preview': 'preview_{}'.format(promo_num)})
resp = self.client.get('/promos/')
self.assertEqual(resp.status_code, 200)
self.assertTrue(resp.data['count'] == 11)
self.assertEqual(json.loads(resp.content)['results'], testlist)
| true |
41a356d503374dc3fe72f3d49a59d824466edcdc | Python | chenmin700606/reaper-native | /dotnet/reaper-sdk/wrapper_header_generator.py | UTF-8 | 7,034 | 2.625 | 3 | [] | no_license | import re
from collections import Counter
from dataclasses import dataclass
@dataclass
class FnPtrDefinition:
    """One function-pointer declaration parsed out of reaper_plugin_functions.h.

    Holds the pieces of a line such as ``int (*Name)(int a, const char* b);``
    and knows how to render the exported C++ wrapper declaration/definition
    for it.
    """

    name: str                  # function name, e.g. "TrackFX_GetCount"
    args: str                  # raw argument-list text, e.g. "int a, const char* b"
    return_type: str           # raw C return-type text, e.g. "const char*"
    needs_manual_fixing: bool  # True when an argument is itself a function pointer

    # Prefixes the names in the header/implementation files so
    # that they don't collide with reaper_plugin_function.h names
    @staticmethod
    def name_prefix():
        return "_"

    @dataclass
    class Argument:
        """A single (C type, parameter name) pair from the argument list."""
        name: str
        type: str

    @property
    def argument_list(self) -> list["FnPtrDefinition.Argument"]:
        """Parsed view of ``args`` as a list of Argument records."""
        return self.process_args()

    def process_args(self) -> list["FnPtrDefinition.Argument"]:
        """Split the raw ``args`` string into Argument(type, name) records.

        Each comma-separated entry is tokenized on spaces; the last token is
        the parameter name and everything before it forms the (possibly
        multi-word) C type, so "const char* buf" and "unsigned long long n"
        are both handled.

        Raises:
            ValueError: if an entry cannot be split into a type and a name.
        """
        if self.args == "":
            return []
        results: list[FnPtrDefinition.Argument] = []
        for entry in self.args.split(", "):
            tokens = entry.split(" ")
            if len(tokens) < 2:
                raise ValueError(
                    f"Cannot split argument entry into type and name: {entry!r}"
                )
            results.append(
                FnPtrDefinition.Argument(name=tokens[-1], type=" ".join(tokens[:-1]))
            )
        return results

    def to_wrapper_function_impl(self) -> str:
        """Render the exported C++ wrapper definition that forwards to the API."""
        arg_names = map(lambda it: it.name, self.process_args())
        return f"""
REAPER_PLUGIN_DLL_EXPORT {self.return_type} {self.name_prefix()}{self.name}({self.args}) {{
    return {self.name}({", ".join(arg_names)});
}}
"""

    def to_wrapper_function_header(self) -> str:
        """Render the exported C++ wrapper declaration (one line, newline-terminated)."""
        return f"REAPER_PLUGIN_DLL_EXPORT {self.return_type} {self.name_prefix()}{self.name}({self.args});\n"
def is_fn_ptr_definition_line(line: str) -> bool:
    """Heuristic check: does this header line declare a function pointer?

    A declaration line must contain all of '(', '*', ')' and ';' and must
    not be a '//' comment line.
    """
    if "//" in line:
        return False
    required_tokens = ("(", "*", ")", ";")
    return all(token in line for token in required_tokens)
def process_fn_ptr_definition_line(line: str) -> FnPtrDefinition:
    """Parse one ``ret (*Name)(args);`` declaration line into a FnPtrDefinition.

    Whitespace in every extracted piece is collapsed to single spaces so that
    later text processing never trips over double spaces or tabs.
    """
    lparen = line.index("(")
    rparen = line.index(")")
    raw_return = line[:lparen]
    raw_name = line[lparen:rparen].removeprefix("(*")
    raw_args = line[rparen:].removeprefix(")(").removesuffix(");")

    def collapse_ws(text: str) -> str:
        return " ".join(text.split())

    # More than two '(' means at least one argument is itself a function
    # pointer. Those are far too complicated to parse programmatically
    # (currently only "__mergesort" has this), so flag them for manual work.
    manual = line.count("(") > 2

    return FnPtrDefinition(
        name=collapse_ws(raw_name),
        args=collapse_ws(raw_args),
        return_type=collapse_ws(raw_return),
        needs_manual_fixing=manual,
    )
#########################################################################################################################
def parse_header_to_fn_ptr_definitions(header_path: str) -> list[FnPtrDefinition]:
    """Read reaper_plugin_functions.h and return its function-pointer declarations.

    The interesting region runs from the first ``#if defined(REAPERAPI_WANT``
    guard up to (just before) the ``REAPERAPI_IMPLEMENT`` section. Preprocessor
    noise is stripped with regexes before line-by-line parsing.
    """
    with open(header_path) as header_file:
        header_text = header_file.read()

    start = header_text.index("#if defined(REAPERAPI_WANT")
    end = header_text.index(
        "REAPERAPI_IMPLEMENT", start) - (len("REAPERAPI_IMPLEMENT") - 2)
    section = header_text[start:end]

    noise_patterns = (
        r"#if defined\(REAPERAPI_WANT_\w+\) \|\| !defined\(REAPERAPI_MINIMAL\)",
        r"REAPERAPI_DEF ",
        r"#ifndef REAPERAPI_NO_LICE",
        r"#endif",
    )
    for pattern in noise_patterns:
        section = re.sub(pattern, '', section)
    # Collapse the blank lines left behind by the stripped guards.
    section = re.sub(r"\n\n", r"\n", section)

    return [
        process_fn_ptr_definition_line(line)
        for line in section.splitlines()
        if is_fn_ptr_definition_line(line)
    ]
def generate_wrapper_header(
    header_path: str = "C:\\Users\\rayga\\Projects\\tmp\\ReaperDNNE\\reaper-sdk\\sdk\\reaper_plugin_functions.h",
    header_out_path: str = "./wrapper/reaper_plugin_functions_wrapper.hpp",
    impl_out_path: str = "./wrapper/reaper_plugin_functions_wrapper.cpp",
) -> None:
    """Generate the C++ wrapper header/implementation pair for the REAPER API.

    Parses every function-pointer declaration from ``header_path`` and writes
    one exported ``_``-prefixed forwarding declaration/definition per function.

    Args:
        header_path: Path to reaper_plugin_functions.h (defaults kept for
            backward compatibility with the original hard-coded location).
        header_out_path: Output path for the generated .hpp file.
        impl_out_path: Output path for the generated .cpp file.
    """
    fn_ptrs = parse_header_to_fn_ptr_definitions(header_path)

    header_output = """
// reaper_plugin_functions_wrapper.hpp
#include "reaper_plugin_functions.h"
REAPER_PLUGIN_DLL_EXPORT int REAPERAPI_LoadAPIWrapper(void *(*getAPI)(const char*));
"""
    implementation_output = """
// reaper_plugin_functions_wrapper.cpp
#define REAPERAPI_IMPLEMENT
#include "reaper_plugin_functions_wrapper.hpp"
REAPER_PLUGIN_DLL_EXPORT int REAPERAPI_LoadAPIWrapper(void *(*getAPI)(const char*)) {
    return REAPERAPI_LoadAPI(getAPI);
}
"""
    # Join once instead of growing the strings with += in a loop.
    header_output += "".join(fp.to_wrapper_function_header() for fp in fn_ptrs)
    implementation_output += "".join(fp.to_wrapper_function_impl() for fp in fn_ptrs)

    with open(header_out_path, "w+") as f:
        f.write(header_output)
    with open(impl_out_path, "w+") as f:
        f.write(implementation_output)
def generate_csharp_dllimport_wrapper():
    """Print C# [GeneratedDllImport] declarations for every REAPER API
    function whose argument and return types map cleanly onto C# types.

    Functions with any argument type outside the substitution table, or a
    non-basic return type, are skipped entirely.
    """
    fn_ptrs = parse_header_to_fn_ptr_definitions(
        "C:\\Users\\rayga\\Projects\\tmp\\ReaperDNNE\\reaper-sdk\\sdk\\reaper_plugin_functions.h")
    basic_c_types = ["void", "char", "int", "double", "long", "float", "short"]
    # Ordered (C type, C# type) pairs; order matters because e.g.
    # "const char*" must be replaced before "char*".
    type_substitutions = (
        ("const char*", "string"),
        ("char*", "string"),
        ("void*", "IntPtr"),
        ("unsigned char", "byte"),
        ("unsigned short", "ushort"),
        ("unsigned int", "uint"),
        # Per Tanner: "C long is not C# long, its actually int on Windows and nint on Unix"
        ("long", "int"),
        # uint on Windows and nuint on Unix
        ("unsigned long", "uint"),
    )
    c_types = [c_type for (c_type, csharp_type) in type_substitutions]
    csharp_types = [csharp_type for (
        c_type, csharp_type) in type_substitutions]
    for fn_ptr in fn_ptrs:
        # Skip any function taking an argument type we cannot translate.
        fails_type_substitution = False
        for arg in fn_ptr.argument_list:
            if not arg.type in c_types:
                fails_type_substitution = True
        if fails_type_substitution:
            continue
        else:
            # Textually rewrite C types to their C# equivalents.
            args = fn_ptr.args
            return_type = fn_ptr.return_type
            for (c_type, csharp_type) in type_substitutions:
                args = args.replace(c_type, csharp_type)
                return_type = return_type.replace(c_type, csharp_type)
            # Only emit functions returning a plain scalar / void.
            if return_type in basic_c_types:
                print(
                    f'[GeneratedDllImport(NativeExportsNE_Binary, EntryPoint = "_{fn_ptr.name}", CharSet = CharSet.Auto)]')
                print("public static extern", return_type, fn_ptr.name, "(", args, ");")
                print()
generate_csharp_dllimport_wrapper()
| true |
063d353d5fdd4ed0895a23fe1e175f023106ca5d | Python | plokamar1/ServerSystemStats | /server/networkFunctions.py | UTF-8 | 1,393 | 2.96875 | 3 | [] | no_license | import socket
import time
import json
class SocketObj:
    """Small wrapper around a TCP socket shared by the stats client/server."""
    def __init__(self, sock=None):
        """Wrap *sock*, or create a fresh TCP socket when none is given.

        Fixes the original bug where a supplied socket was silently ignored
        and ``self.s`` was left undefined.
        """
        if sock is None:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.s = sock
    def connect_to_server(self, SE_host, SE_port):
        """Connect to the server at (SE_host, SE_port); report refusals."""
        try:
            self.s.connect((SE_host, SE_port))
            print('connected to server\n')
        except ConnectionRefusedError:
            print('The server refused connection\n')
    def server_bind(self, host, port):
        """Bind the listening socket to (host, port)."""
        self.s.bind((host, port))
        print('Binded to port: ' + str(port))
    def server_listen(self):
        """Start listening with a backlog of one pending connection."""
        self.s.listen(1)
        print('Listening...')
    def server_receive(self, buffer_size):
        """Accept one connection, read a single JSON message and return it.

        Returns None when the peer closes without sending data.  The accepted
        connection is always closed now (the original's ``conn.close()`` was
        unreachable: it sat after an unconditional ``return`` in the loop).
        """
        conn, addr = self.s.accept()
        print(addr)
        try:
            data = conn.recv(buffer_size)
            if not data:
                return None
            print('data received at '+time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
            return json.loads(data.decode(encoding='UTF-8', errors='strict'))
        finally:
            conn.close()
    def send_to_server(self, msg):
        """Send *msg* (a str) in full; raise RuntimeError on a dead socket.

        Encodes once up front so the byte-offset bookkeeping is correct: the
        original sliced the *str* by the number of *bytes* sent, which
        corrupts multi-byte UTF-8 payloads.
        """
        payload = msg.encode(encoding='UTF-8', errors='strict')
        totalsent = 0
        while totalsent < len(payload):
            sent = self.s.send(payload[totalsent:])
            if sent == 0:
                raise RuntimeError("Socket disconnected")
            totalsent += sent
        print('Message sent\n')
| true |
fc4f5c9ee03f686683e1785f452554882fb4c33a | Python | shubham18121993/algorithms-specilaization | /3_greedy_algo_and_dp/maximum_weight.py | UTF-8 | 800 | 3.171875 | 3 | [] | no_license |
def get_max_weight(n, lst):
    """Maximum-weight independent set on a path of n weighted vertices.

    Returns the chosen 1-based vertex indices in descending order,
    reconstructed from the usual two-term DP recurrence.
    """
    # best[i] = optimal value using the first i vertices.
    best = [0]
    two_back, one_back = 0, 0
    for weight in lst:
        best.append(max(two_back + weight, one_back))
        two_back, one_back = one_back, best[-1]
    # Walk backwards: vertex i is in the solution exactly when the optimal
    # value at i still matches what remains AND including i changed the DP.
    chosen = []
    remaining = best[-1]
    for i in range(n, 0, -1):
        if remaining == best[i] and best[i] != best[i - 1]:
            remaining -= lst[i - 1]
            chosen.append(i)
    return chosen
# Driver: read the MWIS input (first line = vertex count, then one weight
# per line) and print the chosen 1-based vertex indices.
with open("../../dataset/course3/mwis.txt", 'r') as f0:
    lines = f0.readlines()
n = int(lines[0].strip())
lst = []
for line in lines[1:]:
    lst.append(int(line.strip()))
sol = get_max_weight(n, lst)
print(sol)
2f36dbd5381650c3793a87269e8a882e68130d48 | Python | guoea/sample-code | /python/projecteuler/p205.py | UTF-8 | 953 | 3.171875 | 3 | [] | no_license | bucket4 = [0 for _ in range(37)]
# Project Euler 205: Pete rolls nine 4-sided dice, Colin rolls six 6-sided
# dice.  Count how often each total occurs for nine d4s.
for a in range(1, 5):
    for b in range(1,5):
        for c in range(1,5):
            for d in range(1,5):
                for e in range(1, 5):
                    for f in range(1, 5):
                        for g in range(1, 5):
                            for h in range(1, 5):
                                for i in range(1, 5):
                                    bucket4[a+b+c+d+e+f+g+h+i] += 1
# Same histogram for six d6s.
bucket6 = [0 for _ in range(37)]
for a in range(1,7):
    for b in range(1, 7):
        for c in range(1, 7):
            for d in range(1, 7):
                for e in range(1, 7):
                    for f in range(1, 7):
                        bucket6[a + b + c + d + e + f] += 1
print(bucket6)
# Count outcome pairs where Pete's total strictly beats Colin's.
total_win = 0
for i4 in range(9, 37):
    v4 = bucket4[i4]
    for i6 in range(6, 37):
        v6 = bucket6[i6]
        if i4 > i6:
            total_win += v4 * v6
print(total_win)
# NOTE(review): this probability is computed but discarded — presumably it
# was meant to be printed (or rounded to 7 decimal places for the answer).
total_win/(4 ** 9 * 6 ** 6)
ecd6177e43de352c83942c8aae4025bbc1bac100 | Python | RandyCalderon/Intro-Python | /src/days-2-4-adv/intro.py | UTF-8 | 490 | 3.203125 | 3 | [] | no_license | class Intro:
    @staticmethod
    def start():
        # Opening banner shown when the game begins.
        print("""
        Welcome to the land of your dreams.
        Live your life out as you see fit!
        Explore with no restrictions on your job/class and become the badass you always wanted to be!
        """)
    @staticmethod
    def characterCreated(name):
        # Confirmation message once the player has chosen a character name.
        print(f"""
        {name}, this is your chance to be immortalized through your exploits not bound by the mortal realm. Explore and sate your desires!
        """)
6069ac6cbf5d5887d6d1a9218a65fc5ce0a46ae9 | Python | C-CCM-TC1028-111-2113/programs-that-require-calculations-a01658393 | /assignments/03Promedio/src/exercise.py | UTF-8 | 337 | 3.78125 | 4 | [] | no_license | def main():
#escribe tu código abajo de esta línea
pass
grade1=float(input("Give me grade 1"))
grade2=float(input("Give me grade 2"))
grade3=float(input("Give me grade 3"))
grade4=float(input("Give me grade 4"))
average=((grade1+grade2+grade3+grade4)/4)
print("Your average is", average)
if __name__ == '__main__':
main()
| true |
0a053d4d55979a63e4ae9f7fe1d0c36489f23163 | Python | mychristopher/test | /pyfirstweek/redis和python交互/redis和python交互.py | UTF-8 | 488 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
import redis
# Connect to the local Redis server.
# NOTE(review): the password is hard-coded — move it to an environment
# variable or a config file before sharing or deploying this script.
r = redis.StrictRedis(host="localhost",port=6379,password="7022544qx")
# Approach 1: call the method matching the data type directly.
# write
#r.set("p1","good")
# read
#print(r.get("p1"))
# Approach 2: pipeline.
# Buffer several commands and execute them in one round trip, reducing the
# number of TCP packets exchanged between client and server.
pipe = r.pipeline()
pipe.set("p2","nice")
pipe.set("p3","handsome")
pipe.set("p4","cool")
pipe.execute()
print(r.get('p2'))
4dc4131eb8a96bcbd665ddd6564eed21e997f537 | Python | lkuper/icfp2016 | /download_problems.py | UTF-8 | 3,212 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
## Requires pycurl. Downloads problems.
import json
import pycurl
import time
import os
from io import BytesIO
APIKEY = None
## First, get the list of snapshots. Then find the latest snapshot.
def load_api_key():
    """Return the API key stored on the first line of POSTMORTEM_APIKEY.

    Reads only the first line instead of slurping the whole file, and drops
    the original's pointless ``out = None`` preamble.  Raises OSError if the
    file is missing.
    """
    with open("POSTMORTEM_APIKEY") as infile:
        return infile.readline().strip()
URLPREFIX = "http://130.211.240.134/api/"
def get_curl(apicall):
    """Build a Curl with all the common options set.

    Requires the module-global APIKEY to be initialized (see load_api_key /
    main); it is sent in the X-API-Key header on every request.
    """
    c = pycurl.Curl()
    c.setopt(pycurl.ENCODING, 'gzip,deflate')
    c.setopt(pycurl.FOLLOWLOCATION, True)
    # "Expect:" suppresses curl's automatic Expect: 100-continue header.
    headers = ["Expect:", "X-API-Key: " + APIKEY]
    c.setopt(pycurl.HTTPHEADER, headers)
    c.setopt(pycurl.URL, URLPREFIX + apicall)
    return c
def get_string_response(c):
    """Do the HTTP call, return the raw resulting string.
    Also sleep for a second, so we don't go over the rate limit.

    Note: the curl handle is closed here, so it cannot be reused.
    """
    buffer = BytesIO()
    c.setopt(c.WRITEDATA, buffer)
    c.perform()
    c.close()
    body = buffer.getvalue()
    time.sleep(1)
    return body.decode("utf-8")
def get_json_response(c):
    """Do the HTTP call, turn the returned json into a dict, and return it.
    Also sleep for a second, so we don't go over the rate limit."""
    # Delegates the request (and the rate-limit sleep) to get_string_response.
    return json.loads(get_string_response(c))
# 'http://2016sv.icfpcontest.org/api/snapshot/list'
def download_snapshots():
    """Fetch the contest snapshot list via the 'snapshot/list' API call."""
    c = get_curl("snapshot/list")
    d = get_json_response(c)
    return d
def raw_blob_lookup(thehash):
    """Fetch the blob with the given hash and return its raw string body."""
    c = get_curl("blob/" + thehash)
    s = get_string_response(c)
    return s
def blob_lookup(thehash):
    """Fetch the blob with the given hash and parse it as JSON."""
    c = get_curl("blob/" + thehash)
    d = get_json_response(c)
    return d
def latest_snapshot_hash(snapshots_d):
    """Given the response from snapshot/list, return the hash of the most
    recent snapshot, or None when no snapshot has a positive timestamp."""
    recent = [s for s in snapshots_d['snapshots'] if s["snapshot_time"] > 0]
    if not recent:
        return None
    newest = max(recent, key=lambda s: s["snapshot_time"])
    return newest["snapshot_hash"]
def list_all_problems(snapshot_hash):
    """Download latest contest snapshot and extract the list of problems.
    Returns a list of (problem_id, problem_spec_hash) tuples.
    """
    # The snapshot blob is a JSON document with a "problems" array.
    snapshot = blob_lookup(snapshot_hash)
    out = []
    for problem_d in snapshot["problems"]:
        out.append((problem_d["problem_id"], problem_d["problem_spec_hash"]))
    return out
def download_save_problem(problem_id, spec_hash):
    """Fetch the spec blob for *problem_id* and save it under problems/.

    Skips the download when the file already exists.
    NOTE(review): ``assert`` is used as a runtime precondition here; it
    disappears under ``python -O`` — consider raising explicitly instead.
    """
    output_fn = "problems/problem_{:04d}".format(problem_id)
    assert os.path.exists("problems/")
    if os.path.exists(output_fn):
        print("already got that one.")
        return
    blob = raw_blob_lookup(spec_hash)
    # end="" because the blob already carries its own trailing newline (if any).
    with open(output_fn, "w") as outfile:
        print(blob, file=outfile, end="")
def main():
    """Load the API key, find the newest snapshot and download every problem."""
    global APIKEY
    APIKEY = load_api_key()
    print("loaded API key:", APIKEY)
    snapshots_d = download_snapshots()
    snapshot_hash = latest_snapshot_hash(snapshots_d)
    problem_pairs = list_all_problems(snapshot_hash)
    for problem_id, spec_hash in problem_pairs:
        print("downloading problem", problem_id)
        download_save_problem(problem_id, spec_hash)
| true |
f6e98a55dabf81b20d65fcb12df161241c3c1be8 | Python | calebsimmons/hungry_monsters | /alon/mod2sbml.py | UTF-8 | 18,305 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# mod2sbml.py
# Updated: 1/2/10
import libsbml,sys,re,cStringIO,traceback
__doc__="""mod2sbml version 2.4.1.2
Copyright (C) 2005-2010, Darren J Wilkinson
d.j.wilkinson@ncl.ac.uk
http://www.staff.ncl.ac.uk/d.j.wilkinson/
Includes modifications by:
Jeremy Purvis (jep@thefoldingproblem.com)
Carole Proctor (c.j.proctor@ncl.ac.uk)
Mark Muldoon (m.muldoon@man.ac.uk)
This is GNU Free Software (General Public License)
Module for parsing SBML-shorthand model files, version 2.4.1,
and all previous versions
Typical usage:
>>> from mod2sbml import Parser
>>> p=Parser()
>>> p.parseStream(sys.stdin)
Raises error "ParseError" on a fatal parsing error.
"""
ParseError="Parsing error"
class Parser(object):
"""Parser class
Has constructor:
Parser()
and the following public methods:
parseStream(inStream)
parse(inString)
"""
# context
SBML=1
MODEL=2
UNITS=3
COMPARTMENTS=4
SPECIES=5
PARAMETERS=6
RULES=7
REAC1=8
REAC2=9
REAC3=10
REAC4=11
EVENTS=12
    def __init__(self):
        # Fresh parser: outermost context, line counter at 1, empty document.
        self.context=self.SBML
        self.count=1
        self.d=libsbml.SBMLDocument()
    def parse(self,inString):
        """parse(inString)
        parses SBML-shorthand model in inString and returns a libSBML SBMLDocument
        object"""
        # Wrap the string in a file-like object and reuse the stream parser.
        inS=cStringIO.StringIO(inString)
        return self.parseStream(inS)
    def parseStream(self,inS):
        """parseStream(inStream)
        parses SBML-shorthand model on inStream and returns a libSBML SBMLDocument
        object"""
        self.inS=inS
        line=self.inS.readline()
        while (line):
            line=line.strip() # trim newline
            line=line.split("#")[0] # strip comments
            bits=line.split('"') # split off string names
            line=bits[0]
            if (len(bits)>1):
                name=bits[1]
            else:
                name=""
            line=re.sub("\s","",line) # strip whitespace
            if (line==""):
                line=self.inS.readline()
                self.count+=1
                continue # skip blank lines
            # now hand off the line to an appropriate handler,
            # dispatching on the current parsing context
            if (self.context==self.SBML):
                self.handleSbml(line,name)
            elif (self.context==self.MODEL):
                self.handleModel(line,name)
            elif (self.context==self.UNITS):
                self.handleUnits(line,name)
            elif (self.context==self.COMPARTMENTS):
                self.handleCompartments(line,name)
            elif (self.context==self.SPECIES):
                self.handleSpecies(line,name)
            elif (self.context==self.PARAMETERS):
                self.handleParameters(line,name)
            elif (self.context==self.RULES):
                self.handleRules(line,name)
            elif (self.context==self.REAC1):
                self.handleReac1(line,name)
            elif (self.context==self.REAC2):
                self.handleReac2(line,name)
            elif (self.context==self.REAC3):
                self.handleReac3(line,name)
            elif (self.context==self.REAC4):
                # NOTE(review): handleReac4 is never defined and the REAC4
                # context is never entered, so this branch is dead; it would
                # raise AttributeError if ever reached.
                self.handleReac4(line,name)
            elif (self.context==self.EVENTS):
                self.handleEvents(line,name)
            line=self.inS.readline()
            self.count+=1
        self.context=self.SBML
        return self.d
    def handleSbml(self,line,name):
        """Handle the '@model:L.V.R=id' header line that must open a model."""
        # in this context, only expecting a model
        bits=line.split("=")
        morebits=bits[0].split(":")
        if ((morebits[0]!="@model")):
            sys.stderr.write('Error: expected "@model:" ')
            sys.stderr.write('at line'+str(self.count)+'\n')
            raise ParseError
        # level.version.revision of the shorthand notation
        yetmorebits=morebits[1].split(".")
        level=int(yetmorebits[0])
        version=int(yetmorebits[1])
        revision=int(yetmorebits[2])
        # Encode the version triple as a single comparable integer, e.g. 241.
        self.mangle=100*level+10*version+revision
        if (self.mangle>241):
            sys.stderr.write('Error: shorthand version > 2.4.1 - UPGRADE CODE ')
            sys.stderr.write('at line'+str(self.count)+'\n')
            raise ParseError
        # sys.stderr.write('lev: '+str(level)+'\n') # debug
        self.d.setLevelAndVersion(level,version)
        # sys.stderr.write('Leve: '+str(self.d.getLevel())+'\n') # debug
        if (len(bits)!=2):
            sys.stderr.write('Error: expected "=" at line ')
            sys.stderr.write(str(self.count)+'\n')
            raise ParseError
        id=bits[1]
        self.m=self.d.createModel(id)
        if (name!=""):
            self.m.setName(name)
        self.context=self.MODEL
    def handleModel(self,line,name):
        """After '@model', the only legal input is a new '@section' header."""
        # in this context, expect any new context
        if (line[0]=='@'):
            self.handleNewContext(line,name)
        else:
            sys.stderr.write('Error: expected new "@section" ')
            sys.stderr.write('at line '+str(self.count)+'\n')
            raise ParseError
def handleNewContext(self,line,name):
# sys.stderr.write('handling new context '+line[:4]+'\n')
if (line[:4]=="@com"):
self.context=self.COMPARTMENTS
elif (line[:4]=="@uni"):
self.context=self.UNITS
elif (line[:4]=="@spe"):
self.context=self.SPECIES
elif (line[:4]=="@par"):
self.context=self.PARAMETERS
elif (line[:4]=="@rul"):
self.context=self.RULES
elif (line[:4]=="@rea"):
self.context=self.REAC1
elif (line[:4]=="@eve"):
self.context=self.EVENTS
else:
sys.stderr.write('Error: unknown new "@section": '+line)
sys.stderr.write(' at line '+str(self.count)+'\n')
raise ParseError
    def handleUnits(self,line,name):
        """Parse one unit-definition line 'id=kind:mods;kind:mods;...'."""
        # expect a unit or a new context
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            if (len(bits)<2):
                sys.stderr.write('Error: expected a "=" in: '+line)
                sys.stderr.write(' at line '+str(self.count)+'\n')
                raise ParseError
            id=bits[0]
            # re-join in case the modifier expressions themselves contain '='
            units="=".join(bits[1:])
            ud=self.m.createUnitDefinition()
            ud.setId(id)
            units=units.split(";")
            for unit in units:
                bits=unit.split(":")
                if (len(bits)!=2):
                    id=bits[0]
                    mods=""
                else:
                    (id,mods)=bits
                u=self.m.createUnit()
                u.setKind(libsbml.UnitKind_forName(id))
                # modifiers: e=exponent, m=multiplier, s=scale, o=offset
                # NOTE(review): eval() runs arbitrary expressions from the
                # model text — only feed this parser trusted input.
                mods=mods.split(",")
                for mod in mods:
                    if (mod[:2]=="e="):
                        u.setExponent(eval(mod[2:]))
                    elif (mod[:2]=="m="):
                        u.setMultiplier(eval(mod[2:]))
                    elif (mod[:2]=="s="):
                        u.setScale(eval(mod[2:]))
                    elif (mod[:2]=="o="):
                        u.setOffset(eval(mod[2:]))
            if (name!=""):
                ud.setName(name)
    def handleCompartments(self,line,name):
        """Parse one compartment line 'id[<outside][=size]'."""
        # expect a compartment or a new context
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            c=bits[0]
            if (len(bits)>1):
                v=bits[1]
            else:
                v=""
            # 'com<out' declares 'out' as the enclosing compartment
            bits=c.split("<")
            com=bits[0]
            if (len(bits)>1):
                out=bits[1]
            else:
                out=""
            c=self.m.createCompartment()
            c.setId(com)
            if (out!=""):
                c.setOutside(out)
            if (v!=""):
                c.setSize(eval(v))
            if (name!=""):
                c.setName(name)
            # print self.m.toSBML()
    def handleSpecies(self,line,name):
        """Parse one species line 'compartment:id=amount[opts]'.

        '[id]' means the amount is a concentration; option letters after the
        number are b=boundary, c=constant, s=hasOnlySubstanceUnits.
        """
        # expect either a species or a new section
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            if (len(bits)!=2):
                sys.stderr.write('Error: expected "=" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            (bit,amount)=bits
            bits=bit.split(":")
            if (len(bits)!=2):
                sys.stderr.write('Error: expected ":" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            (comp,id)=bits
            if (id[0]=="[" and id[-1]=="]"):
                conc=True
                id=id[1:-1]
            else:
                conc=False
            s=self.m.createSpecies()
            s.setId(id)
            s.setCompartment(comp)
            # First letter that isn't 'e' starts the option flags — skipping
            # 'e' keeps scientific notation like 1e-3 inside the number.
            split=re.search('[a-df-z]',amount)
            if (split!=None):
                split=split.start()
                opts=amount[split:]
                amount=amount[:split]
            else:
                opts=""
            while (opts!=""):
                if (opts[0]=="b"):
                    s.setBoundaryCondition(True)
                elif (opts[0]=="c"):
                    s.setConstant(True)
                elif (opts[0]=="s"):
                    s.setHasOnlySubstanceUnits(True)
                opts=opts[1:]
            if (conc):
                s.setInitialConcentration(eval(amount))
            else:
                s.setInitialAmount(eval(amount))
            if (name!=""):
                s.setName(name)
            #print self.d.toSBML()
    def handleParameters(self,line,name):
        """Parse one parameter line 'id=value[v]' ('v' marks it variable)."""
        # expect either a parameter or a new section
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            p=self.m.createParameter()
            p.setId(bits[0])
            if (len(bits)!=2):
                sys.stderr.write('Error: expected "=" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            (bit,value)=bits
            # split numeric value from trailing option letters (skips 'e'
            # so scientific notation stays part of the number)
            split=re.search('[a-df-z]',value)
            if (split!=None):
                split=split.start()
                opts=value[split:]
                value=value[:split]
            else:
                opts=""
            while (opts!=""):
                if (opts[0]=="v"):
                    p.setConstant(False)
                opts=opts[1:]
            p.setValue(eval(value))
            if (name!=""):
                p.setName(name)
            #print self.d.toSBML()
    def handleRules(self,line,name):
        """Parse one rule line 'variable=formula' into an AssignmentRule."""
        # expect either a rule or a new section
        # rules are fixed as type AssignmentRule
        # this requires the assigned species to have attribute
        # constant set to "False"
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            if (len(bits)!=2):
                sys.stderr.write('Error: expected "=" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            (lhs,rhs)=bits
            value=libsbml.parseFormula(rhs)
            # rewrite bare 't'/'time' names into the SBML time csymbol
            self.replaceTime(value)
            ar=self.m.createAssignmentRule()
            ar.setVariable(lhs)
            ar.setMath(value)
            # print self.d.toSBML()
    def handleReac1(self,line,name):
        """Parse a reaction header '@r=id' (irreversible) or '@rr=id'."""
        # expect a reaction or a new context
        if (line[:3]!="@r=" and line[:4]!="@rr="):
            self.handleNewContext(line,name)
        else:
            bits=line.split("=")
            if (len(bits)!=2):
                sys.stderr.write('Error: expected "=" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            (tag,id)=bits
            if (tag!="@r" and tag!="@rr"):
                sys.stderr.write('Error: expected "@r=" on line ')
                sys.stderr.write(str(self.count)+'\n')
                raise ParseError
            self.r=self.m.createReaction()
            self.r.setId(id)
            if (tag=="@r"):
                self.r.setReversible(False)
            else:
                self.r.setReversible(True)
            if (name!=""):
                self.r.setName(name)
            # next line must be the reaction equation
            self.context=self.REAC2
    def handleReac2(self,line,name):
        # expect a reaction equation and possibly modifiers
        # of form:
        # A + B -> C : M1, M2, M3
        chks=line.split(":")
        if (len(chks)>1):
            # everything after ':' is a comma-separated modifier list
            pars=chks[1].split(",")
            for par in pars:
                mdf = self.r.createModifier()
                mdf.setSpecies(par)
        bits=chks[0].split("->")
        if (len(bits)!=2):
            sys.stderr.write('Error: expected "->" on line ')
            sys.stderr.write(str(self.count)+'\n')
            raise ParseError
        (lhs,rhs)=bits
        # either side may be empty (creation / degradation reactions)
        if (lhs):
            self.handleTerms(lhs,True)
        if (rhs):
            self.handleTerms(rhs,False)
        self.context=self.REAC3
    def handleTerms(self,side,left):
        """Add the '+'-separated terms of one side of a reaction equation as
        reactants (left=True) or products (left=False), with optional
        leading numeric stoichiometries (e.g. '2X').
        """
        terms=side.split("+")
        for term in terms:
            # NOTE(review): assumes every term contains a non-digit species
            # id; a purely numeric term would make re.search return None and
            # raise AttributeError here.
            split=re.search('\D',term).start()
            if (split==0):
                sto=1.0
            else:
                sto=eval(term[:split])
            id=term[split:]
            if (left):
                sr=self.r.createReactant()
            else:
                sr=self.r.createProduct()
            sr.setSpecies(id)
            sr.setStoichiometry(sto)
    def handleReac3(self,line,name):
        """Parse an optional kinetic law 'formula[:p1=v1,p2=v2,...]'."""
        # expect a kinetic law, a new reaction or a new context
        if (line[:3]=="@r=" or line[:4]=="@rr="):
            self.handleReac1(line,name)
        elif (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split(":")
            form=bits[0]
            kl=self.r.createKineticLaw()
            kl.setFormula(form)
            if (len(bits)>1):
                # local (reaction-scoped) parameters for the kinetic law
                pars=bits[1].split(",")
                for par in pars:
                    bits=par.split("=")
                    if (len(bits)!=2):
                        sys.stderr.write('Error: expected "=" on ')
                        sys.stderr.write('line '+str(self.count)+'\n')
                        raise ParseError
                    (id,value)=bits
                    parm=kl.createParameter()
                    parm.setId(id)
                    parm.setValue(eval(value))
            self.context=self.REAC1
    def handleEvents(self,line,name):
        """Parse one event line 'id=trigger[;delay]: var=expr; var=expr ...'."""
        # expect an event, or a new context
        if (line[0]=="@"):
            self.handleNewContext(line,name)
        else:
            bits=line.split(":")
            if (len(bits)!=2):
                sys.stderr.write('Error: expected exactly one ":" on ')
                sys.stderr.write('line '+str(self.count)+'\n')
                raise ParseError
            (event,assignments)=bits
            # 'id=trigger' optionally followed by ';delay'
            bits=event.split(";")
            trigbits=bits[0].split("=")
            if (len(trigbits)<2):
                sys.stderr.write('Error: expected a "=" before ":" on ')
                sys.stderr.write('line '+str(self.count)+'\n')
                raise ParseError
            id=trigbits[0]
            # re-join in case the trigger expression itself contains '='
            trig="=".join(trigbits[1:])
            e=self.m.createEvent()
            e.setId(id)
            # rewrite infix comparison into prefix form libsbml can parse
            trig=self.trigMangle(trig)
            triggerMath=libsbml.parseFormula(trig)
            self.replaceTime(triggerMath)
            trigger=e.createTrigger()
            trigger.setMath(triggerMath)
            if (len(bits)==2):
                delay=e.createDelay()
                delayMath=libsbml.parseFormula(bits[1])
                delay.setMath(delayMath)
            # SPLIT: assignment separator changed from ',' to ';' in
            # shorthand version 2.3.0
            if (self.mangle>=230):
                asslist=assignments.split(";")
            else:
                asslist=assignments.split(",")
            for ass in asslist:
                bits=ass.split("=")
                if (len(bits)!=2):
                    sys.stderr.write('Error: expected exactly one "=" in assignment on')
                    sys.stderr.write('line '+str(self.count)+'\n')
                    raise ParseError
                (var,math)=bits
                ea=self.m.createEventAssignment()
                ea.setVariable(var)
                ea.setMath(libsbml.parseFormula(math))
            if (name!=""):
                e.setName(name)
def trigMangle(self,trig):
bits=trig.split(">=")
if (len(bits)==2):
return self.binaryOp("geq",bits)
bits=trig.split("<=")
if (len(bits)==2):
return self.binaryOp("leq",bits)
bits=trig.split(">")
if (len(bits)==2):
return self.binaryOp("gt",bits)
bits=trig.split("<")
if (len(bits)==2):
return self.binaryOp("lt",bits)
bits=trig.split("=")
if (len(bits)==2):
return self.binaryOp("eq",bits)
return trig
def binaryOp(self,op,bits):
return(op+"("+bits[0]+","+bits[1]+")")
    def replaceTime(self,ast):
        # Swap bare names 't'/'time' for the SBML time csymbol node,
        # recursing through the whole formula tree in place.
        if (ast.getType()==libsbml.AST_NAME):
            if ((ast.getName()=='t') or (ast.getName()=='time')):
                ast.setType(libsbml.AST_NAME_TIME)
        for node in range(ast.getNumChildren()):
            self.replaceTime(ast.getChild(node))
# if run as a script: parse shorthand from stdin (or a file argument) and
# print the resulting SBML on stdout.
# NOTE(review): this section uses Python 2 print statements; the module as a
# whole (cStringIO import, prints) predates Python 3.
if __name__=='__main__':
    p=Parser()
    argc=len(sys.argv)
    try:
        if (argc==1):
            d=p.parseStream(sys.stdin)
        else:
            try:
                s=open(sys.argv[1],"r")
            except:
                sys.stderr.write('Error: failed to open file: ')
                sys.stderr.write(sys.argv[1]+'\n')
                sys.exit(1)
            d=p.parseStream(s)
        print '<?xml version="1.0" encoding="UTF-8"?>'
        s = d.toSBML().split ('\n')
        # replace libsbml's first line with a level-2/version-4 sbml element
        s[0] = """<sbml xmlns="http://www.sbml.org/sbml/level2/version4" level="2" version="4">"""
        print '\n'.join (s)
    except:
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write('\n\n Unknown parsing error!\n')
        sys.exit(1)
def parse (string):
    """Parse SBML-shorthand *string* and return the serialized SBML text,
    with the first line rewritten to a level-2/version-4 <sbml> element."""
    SBML = Parser ().parse (string)
    s = SBML.toSBML().split ('\n')
    s[0] = """
<?xml version="1.0" encoding="UTF-8"?>
<sbml xmlns="http://www.sbml.org/sbml/level2/version4" level="2" version="4">
""".strip()
    return '\n'.join (s)
| true |
7cd4f7396ae6233bb4e9a6bc42b1793e3b884922 | Python | python20180319howmework/homework | /caohuan/20180403/h2.py | UTF-8 | 921 | 4.15625 | 4 | [
"Apache-2.0"
] | permissive | '''
2,定义一个北京欢乐谷门票类,应用你所定义的类,计算两个社会青年和一个学生平日比节假日门票能省多少钱
票价是:
除节假日票价100元/天
节假日为平日的1.2倍
学生半价
'''
class Ticket(object):
    """Beijing Happy Valley ticket prices.

    Stores the weekday adult and student prices; holiday prices are 1.2x
    the weekday prices.
    """
    def __init__(self,adultprice,stuprice):
        # weekday prices (adult / student)
        self.__adultprice = adultprice
        self.__stuprice = stuprice
    def pri_1(self):
        # Print the weekday (non-holiday) prices.
        print("平日成人票价为{}元,学生票价为{}元。".format(self.__adultprice,self.__stuprice))
    def pri_2(self):
        # Print the holiday prices (1.2x the weekday prices).
        print("假日成人票价为{}元,学生票价为{}元。".format(self.__adultprice * 1.2,self.__stuprice * 1.2))
    def panduan(self,a,b):
        # Weekday cost (x) vs holiday cost (y) for a adults + b students,
        # and how much a weekday visit saves.
        x = self.__adultprice*a+self.__stuprice*b
        y = (self.__adultprice*a+self.__stuprice*b)*1.2
        print("{}个成人{}个学生平日里要{}元。节假日要{}元。能省{}元".format(a,b,x,y,y - x))
s = Ticket(100,50)
s.pri_1()
s.pri_2()
s.panduan(2,1)
| true |
ddeae5953a366b66d2e7ca8c9cfb24cbb534ae7c | Python | khanjason/leetcode | /1941.py | UTF-8 | 329 | 3.140625 | 3 | [] | no_license | class Solution:
def areOccurrencesEqual(self, s: str) -> bool:
d=[]
a=[]
for i in s:
if i not in d:
a.append(s.count(i))
d.append(i)
start=a[0]
for t in a:
if t!=start:
return False
return True | true |
fb8f512b8824fb8c1886ebced942be4cdf599512 | Python | CHILDISHIMMORTAL/Animation | /texture.py | UTF-8 | 1,630 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | # https://imagemagick.org/index.php
import os
import math
from PIL import Image
def glue_images(path, line, aligment):
    """Glue every image in *path* into one sprite sheet laid out on *line*
    rows (1 or 2), appending each image's x/y/w/h metadata to texture.txt.

    *aligment* selects the vertical alignment within a row: 'center',
    'bottom' or 'top'.
    """
    images = [Image.open(path + os.sep + img) for img in os.listdir(path)]
    widths, heights = zip(*(i.size for i in images))
    list_x, list_y = [], []
    list_w = [*widths]
    new_height = max(heights)
    new_width = sum(widths)
    # With two rows, each row gets half the total width (rounded up).
    max_width = math.ceil(new_width / line)
    image = Image.new(
        'RGBA', (max_width if line == 2 else new_width, new_height * line))
    # block flips to True once we wrap onto the second row.
    x, y, block = 0, 0, False
    for j, im in enumerate(images):
        if aligment == 'center':
            y = round((new_height - heights[j]) / 2.)
        elif aligment == 'bottom':
            y = new_height - heights[j]
        elif aligment == 'top':
            y = 0
        if (x + widths[j] > max_width or block) and line == 2:
            y += new_height
            if not block:
                x, block = 0, True
        image.paste(im, (x, y))
        list_x.append(x)
        list_y.append(new_height if block else 0)
        x += widths[j]
    # Key is the folder name with the leading 'in' + separator stripped.
    dict_texture = {
        f'{path[3:]}': {'x': list_x, 'y': list_y, 'w': list_w, 'h': new_height}
    }
    with open('texture.txt', 'a') as fl:
        fl.write(f'{dict_texture}\n')
    # image.save(f'out/{path[3:]}.png')
    # image.show()
    # print(dict_texture)
if __name__ == "__main__":
    paths = 'in'
    aligments = ['center', 'bottom', 'top']
    lines = [1, 2]
    # Truncate texture.txt up front (glue_images appends to it);
    # the f.seek(0) is a no-op on a freshly truncated file.
    with open('texture.txt', 'w') as f:
        f.seek(0)
    # NOTE(review): lines/aligments are indexed by folder position — more
    # than two/three folders under 'in' would raise IndexError.
    for num, folder in enumerate(os.listdir(paths)):
        glue_images(paths + os.sep + folder, lines[num], aligments[num])
d2373c77aed090d31629b2e54ac5668e5cfa0aa9 | Python | gcali/drisc | /transl.py | UTF-8 | 2,223 | 3.484375 | 3 | [] | no_license | #! /usr/bin/env python3
from lex import Token
from unit import Unit
from misc import str_list
class LookupTable():
    """Bidirectional lookup table: add() registers a pair so that each
    entry can be translated into the other."""
    def __init__(self):
        self.table = dict()
    def add(self, entry_a, entry_b):
        """Register the pair: each entry maps to the other."""
        self.table[entry_a] = entry_b
        self.table[entry_b] = entry_a
    def remove(self, entry):
        """Drop both directions of the pair containing *entry*."""
        partner = self.table[entry]
        del self.table[partner]
        # guard needed when an entry is paired with itself
        if entry in self.table:
            del self.table[entry]
    def translate(self, entry):
        """Return the partner of *entry*.

        Fixes the original NameError: it indexed an undefined name 'key'
        instead of the 'entry' parameter.
        """
        return self.table[entry]
class Arg:
    """One operand of a statement: a register, an immediate, or a label.

    The original also defined an ``is_register()`` *method*, but the
    same-named instance attribute set in __init__ shadowed it, so it was
    never callable (calling it raised TypeError); it has been dropped.
    """
    def __init__(self, value: str, is_register: bool = True, is_label: bool = False):
        self.value = value
        self.is_register = is_register
        self.is_label = is_label
    def get_value(self) -> str:
        """Return the textual value of the argument."""
        return self.value
    def __str__(self):
        # Label marker takes precedence over the register marker.
        if self.is_label:
            suffix = "(L)"
        elif self.is_register:
            suffix = "(R)"
        else:
            suffix = ""
        return "{}{}".format(self.value, suffix)
class Statement:
    """Class to represent an abstract statement

    Attributes
        op              Operation identifier
        args            Arguments of the statement
        label           Optional label of the statement
        line_number     Line number of the statement
    """
    def __init__(self, line_number=None, op=None, *args, label=None):
        """Constructor
        Sets the attributes of the class
        """
        self.op = op
        self.args = [a for a in args]
        self.label = label
        self.line_number = line_number
    def is_new(self) -> bool:
        # "New" means nothing has been filled in yet (all fields falsy).
        if self.op or self.args or self.label or self.line_number:
            return False
        else:
            return True
    def to_unit_value(self) -> Unit:
        # Subclasses must translate themselves into a machine Unit.
        raise NotImplementedError
    def __str__(self):
        # Format: "<line>: (L-<label>) <op> <args>" with optional parts.
        if self.label != None:
            l = "(L-{})".format(self.label)
        else:
            l = ""
        a = str_list(self.args)
        if self.line_number != None:
            n = "{}: ".format(str(self.line_number))
        else:
            n = ""
        return "{}{} {} {}".format(n,l,self.op,a)
if __name__ == '__main__':
Statement("a", "b", "c", "d")
| true |
71df2db28fa3f01102f7832edd18d1fbc3d43c89 | Python | super-rain/j2men-Calligraphy_recognition | /loadImage2.py | UTF-8 | 2,345 | 2.75 | 3 | [] | no_license | import os
import numpy
from PIL import Image #导入Image模块
from pylab import * #导入savetxt模块
#import glob
#以下代码看可以读取文件夹下所有文件
# def getAllImages(folder):
# assert os.path.exists(folder)
# assert os.path.isdir(folder)
# imageList = os.listdir(folder)
# imageList = [os.path.abspath(item) for item in imageList if os.path.isfile(os.path.join(folder, item))]
# return imageList
# print getAllImages(r"D:\\test")
def convertjpg(jpgfile,width=128,height=128):
    """Open *jpgfile* and return it resized to width x height (bilinear).

    NOTE(review): on a resize failure the exception is printed and the
    function implicitly returns None — callers do not check for that and
    will crash downstream when converting None to an array.
    """
    img=Image.open(jpgfile)
    try:
        new_img=img.resize((width,height),Image.BILINEAR)
        return new_img
    except Exception as e:
        print(e)
def get_imlist(path):
    """Return the paths of all '.jpg' files directly inside *path*."""
    jpg_names = [entry for entry in os.listdir(path) if entry.endswith('.jpg')]
    return [os.path.join(path, entry) for entry in jpg_names]
# Walk every class folder under images/, flatten each 128x128 grayscale-ish
# image into a row, and append "<label>,<pixels...>" rows to j2men.csv.
rootdir = "images"
rootdir = os.path.abspath(rootdir)
for parent, dirnames, filenames in os.walk(rootdir, topdown=False):
    for dirname in dirnames:
        print(dirname)
        c=get_imlist(r"images/"+dirname) # r"" prevents escape-sequence interpretation in the path
        print (c) # list of all jpg image paths (with folder prefix)
        d=len(c) # number of images in this folder
        data=numpy.empty((d,128*128)) # d x (128*128) matrix, one flattened image per row
        #for jpgfile in glob.glob("images\*.jpg"):
        #img=convertjpg(jpgfile)
        while d>0:
            img=convertjpg(c[d-1]) # open and resize the image
            #img_ndarray=numpy.asarray(img)
            img_ndarray=numpy.asarray(img,dtype='float64')/256 # to array with pixels scaled into [0,1)
            data[d-1]=numpy.ndarray.flatten(img_ndarray) # flatten the image matrix into one row
            d=d-1
        print (data)
        # NOTE(review): this handle is never closed; wrap in `with` ideally.
        f=open('j2men.csv','ab')
        for d in data:
            A=numpy.array(d).reshape(1,128*128) # reshape the flat row to a 1 x (128*128) matrix
            #A = np.concatenate((A,[p_])) # would append p_ as an extra column
            #A = np.append(A,1)
            #print A
            f.write(bytes(dirname+",", encoding = "utf8")) # prepend the class label
            savetxt(f,A,fmt="%.0f",delimiter=',') # append the row to the csv
b0ada9d261ce6bdeb24e7675a629d0146d6097b6 | Python | Pk13055/bomberman | /people.py | UTF-8 | 2,209 | 3.5 | 4 | [] | no_license |
'''
contains the structure of each person
'''
import config
import numpy as np
class Person:
    """Base class for all board actors (the bomber, enemies, ...)."""
    def __init__(self, x, y, ch=config._empty):
        '''# the x and y coords wrt top left of board'''
        self._x = x
        self._y = y
        # 2x4 character sprite; starts blank, subclasses paint it in.
        self.structure = np.chararray((2, 4))
        self.structure[:, :] = config._empty
        self._ch = ch
        self._type = config.types[self._ch]
        self.is_killable = True
    def get_type(self):
        '''# returns whether "Bomber", "Enemy", etc'''
        return self._type
    def get_size(self):
        '''# returns (height, width)'''
        return self.structure.shape
    def get_coords(self):
        '''# returns (x, y)'''
        return (self._x, self._y)
    def update_location(self, board, new_x, new_y, init=False):
        '''# update the location of the person'''
        # Draw at the destination first; only on success do we erase the old
        # sprite and commit the new coordinates.
        if board.draw_obj(type(self)(new_x, new_y)):
            # if initial update, will not clear original (nothing drawn yet)
            if not init:
                board.clear_obj(self)
            self._x, self._y = new_x, new_y
            return True
        return False
    def __repr__(self):
        return "<Person : %s | (%d, %d)>" % (self.get_type(), self._x, self._y)
class Bomber(Person):
    """# this is the class for the bomber
    # methods that the bomber can execute are written here"""
    def __init__(self, x, y, lives=config.lives[1], bombs=config.bombs[1]):
        super(Bomber, self).__init__(x, y, config._bomb_man)
        # Two-row sprite built around the bomber glyph.
        temp_skel = np.matrix([['[', self._ch, self._ch, ']'],
                               [config._empty, ']', '[', config._empty]])
        self.structure[:, :] = temp_skel
        self.lives = lives
        self.bombs = bombs
        self.score = 0
        del temp_skel
class Enemy(Person):
    """# this is the enemy class
    # enemy specific methods are added here"""
    def __init__(self, x, y):
        super(Enemy, self).__init__(x, y, config._enemy)
        # Same two-row sprite shape as the bomber, with the enemy glyph.
        temp_skel = np.matrix([['[', self._ch, self._ch, ']'],
                               [config._empty, ']', '[', config._empty]])
        self.structure[:, :] = temp_skel
        del temp_skel
| true |
206e65a8d7ba3460158762fc3fc505b3c067f064 | Python | niteshjha1/PythonWebScrapper | /webscrapping/webscrap.py | UTF-8 | 3,986 | 2.765625 | 3 | [] | no_license | #import all necessary libraries to use
from requests import get
import pandas as pd
from time import sleep
from time import time
from random import randint
from bs4 import BeautifulSoup
from IPython.core.display import clear_output
from warnings import warn
# Lists that accumulate one value per scraped phone listing.
i_name = []
i_price = []
i_ROM = []
i_size = []
i_camera = []
i_processor = []
i_rating = []

# Scrape Flipkart result pages 1..8 for iPhone listings.
pages = [str(i) for i in range(1,9)]
# Preparing the monitoring of the loop
start_time = time()
requests = 0
for page in pages:
    my_url = get("https://www.flipkart.com/search?q=iphone&otracker=AS_Query_HistoryAutoSuggest_2_0&otracker1=AS_Query_HistoryAutoSuggest_2_0&marketplace=FLIPKART&as-show=on&as=off&as-pos=2&as-type=HISTORY&page="+page)
    # Pause the loop so we stay well under the site's rate limit.
    sleep(randint(8,15))
    # Monitor the requests
    requests += 1
    elapsed_time = time() - start_time
    print('Request:{}; Frequency: {} requests/s'.format(requests, requests/elapsed_time))
    clear_output(wait = True)
    # Throw a warning for non-200 status codes.
    # (BUG FIX: the original referenced an undefined name `response` here.)
    if my_url.status_code != 200:
        warn('Request: {}; Status code: {}'.format(requests, my_url.status_code))
    # Break the loop if the number of requests is greater than expected
    if requests > 72:
        warn('Number of requests was greater than expected.')
        break
    soup = BeautifulSoup(my_url.text, 'html.parser')
    containers = soup.findAll("div", {"class":"_1UoZlX"})
    # Extract the fields of every listing container on this page.
    for container in containers:
        iphone_name = container.find("div", {"class":"_3wU53n"}).text
        # commas would break the CSV, swap them for pipes
        i_name.append(iphone_name.replace(",","|"))
        iphone_price = container.find("div", {"class":"_1vC4OE _2rQ-NK"}).text
        i_price.append(iphone_price.replace("₹",""))
        iphone_ROM = container.find("ul", {"class":"vFw0gD"}).contents[0].text
        i_ROM.append(iphone_ROM.replace("|",""))
        i_size.append(container.find("ul", {"class":"vFw0gD"}).contents[1].text)
        i_camera.append(container.find("ul", {"class":"vFw0gD"}).contents[2].text)
        i_processor.append(container.find("ul", {"class":"vFw0gD"}).contents[3].text)
        iphone_rating = container.find("div", {"class":"niH0FQ"}).text
        # first three characters hold the numeric rating, e.g. "4.5"
        i_rating.append(iphone_rating[:3])

iphone_ratings = pd.DataFrame({'name': i_name,
                               'camera': i_camera,
                               'display': i_size,
                               'price': i_price,
                               'processor': i_processor,
                               'rating': i_rating,
                               'rom': i_ROM
                               })
print(iphone_ratings.info())

# Save csv and json copies into folders.  os.makedirs(..., exist_ok=True)
# replaces the original's duplicated if/else branches that ran the same
# to_csv/to_json calls in both arms.
import os
os.makedirs('csv', exist_ok=True)
iphone_ratings.to_csv('csv\iphones_flipkart.csv', index=False, encoding='utf-8')
# save csv file for database CRUD
iphone_ratings.to_csv('..\Database\DB\my_db\iphones_flipkart.csv', index=False, encoding='utf-8')
os.makedirs('json', exist_ok=True)
iphone_ratings.to_json(r'json\iphones_flipkart.json')
377376ede16d791c6fc7438a1f14f284b796a674 | Python | vivardiniii/Twitter-Sentiment-Analysis- | /test_ext.py | UTF-8 | 959 | 2.59375 | 3 | [] | no_license | import csv
import fileinput
import os

# Twitter API credentials.
# SECURITY FIX: these tokens used to be hard-coded (and have been committed
# to version control, so they should be rotated).  They are now read from
# the environment; the literal values remain only as backward-compatible
# fallbacks until the environment variables are provided.
access_token = os.environ.get("TWITTER_ACCESS_TOKEN", "1059659088630472705-YgDGgPwcx4DxCkIksDCRJ35hREbKNp")
access_token_secret = os.environ.get("TWITTER_ACCESS_TOKEN_SECRET", "PHqFvPv8nvy5bM9gzXCRf2cVu1DwQgRzLO9FOYzSEVKLk")
consumer_key = os.environ.get("TWITTER_CONSUMER_KEY", "xbNCpYwzHJ8vQ7FTlnceHOPS4")
consumer_secret = os.environ.get("TWITTER_CONSUMER_SECRET", "Qr6euKSDCmZ0svNOKPAdtEF3l9u8tsyyb0eOtAl9JceKkxJsHf")

import twitter
api = twitter.Api(
    consumer_key=consumer_key,
    consumer_secret=consumer_secret,
    access_token_key=access_token,
    access_token_secret=access_token_secret)

# Stream only English tweets carrying these hashtags.
hashtags_to_track = [
    "#mood",
]
LANGUAGES = ['en']
stream = api.GetStreamFilter(track=hashtags_to_track, languages=LANGUAGES)

# Append (id, author, text) rows to the CSV as tweets arrive.
with open('test_tweets.csv', 'w') as csv_file:
    csv_writer = csv.writer(csv_file)
    for line in stream:
        # Signal that the line represents a tweet
        if 'in_reply_to_status_id' in line:
            tweet = twitter.Status.NewFromJsonDict(line)
            print(tweet.id)
            row = [tweet.id, tweet.user.screen_name, tweet.text]
            csv_writer.writerow(row)
            # Flush so rows survive when the endless stream is interrupted.
            csv_file.flush()
| true |
d9b67b6722af8093e0f3f6793ebbfae7e1cfa914 | Python | Indrateja25/Learning-Python | /break.py | UTF-8 | 290 | 3.625 | 4 | [] | no_license | magic_number = int(input('Enter your magic number:'))
# Numbers already claimed; skipped by the second loop below.
numbers_taken = [1,7,12,19,22]
# Count upwards until we reach the chosen number, then stop.
for x in range(20):
    # BUG FIX: was `x is magic_number` -- an identity comparison that only
    # happens to work for CPython's cached small ints; equality (`==`) is
    # the correct test for numeric values.
    if x == magic_number:
        print(x ,' is your magic number')
        break
    else:
        print(x)
# Print 0..24, skipping anything already taken.
for v in range(25):
    if v in numbers_taken:
        continue
    print(v)
print(v) | true |
3b4147827377c23894b2dc7cc56fa6055eeb7bcb | Python | mindis/python-2 | /ApplicationProject/download_files_test.py | UTF-8 | 5,387 | 2.765625 | 3 | [] | no_license | import unittest
import download_files as df
from mock import Mock, patch, mock_open, call
class DownloadFilesTest(unittest.TestCase):
    """Tests for the download_files module.

    All filesystem, HTTP and os interaction is mocked, so the tests run
    without network access and never touch the real disk.
    """

    def setUp(self):
        """Build shared fixtures: fake URLs, file contents and mocks."""
        self.source_file_name = "testSource.txt"
        self.download_folder = "images"
        self.filename1 = "image1.jpg"
        self.filename2 = "image2.jpg"
        self.filename3 = "image3.jpg"
        self.url1 = "http://testserver.com/"+self.filename1
        self.url2 = "http://testserver.com/"+self.filename2
        self.url3 = "http://testserver.com/"+self.filename3
        self.source_file_data = '\n'.join([self.url1, self.url2, self.url3])
        self.response_content_1 = "responseContent1"
        self.response_content_2 = "responseContent2"
        self.response_content_3 = 'responseContent3'
        self.downloaded_filename1 = self.download_folder + "\\" + self.filename1
        self.downloaded_filename2 = self.download_folder + "\\" + self.filename2
        self.downloaded_filename3 = self.download_folder + "\\" + self.filename3
        # create and setup mocks
        self.mock_file_open = mock_open()
        self.mock_os = Mock()
        self.mock_request = Mock()
        self.mock_response1 = Mock()
        self.mock_response2 = Mock()
        self.mock_response3 = Mock()
        # Iterating the mocked source file yields one URL per line.
        self.mock_file_open.return_value.__iter__.return_value = self.source_file_data.splitlines()
        self.mock_response1.content = self.response_content_1
        self.mock_response1.status_code = 200
        self.mock_response2.content = self.response_content_2
        self.mock_response2.status_code = 200
        self.mock_response3.content = self.response_content_3
        self.mock_response3.status_code = 200

    def test_get_filename(self):
        """URL + download folder maps to the expected local path."""
        self.assertEqual(self.downloaded_filename1, df.get_filename(self.url1, self.download_folder))

    def test_save_file_to_disc(self):
        """Content is written in binary mode to the given filename."""
        with patch("__builtin__.open", self.mock_file_open):
            df.save_file_to_disc(self.response_content_1, self.filename1)
        self.mock_file_open.assert_called_once_with(self.filename1, "wb")
        # BUG FIX: was misspelled `assert_calledn_once_with`, which never
        # actually asserted anything (a Mock absorbs unknown attributes).
        self.mock_file_open().write.assert_called_once_with(self.response_content_1)

    def test_create_download_folder_new(self):
        """A missing download folder gets created."""
        self.mock_os.path.exists.return_value = False
        with patch("download_files.os", self.mock_os):
            df.create_download_folder("images")
        self.mock_os.path.exists.assert_called_once_with("images")
        self.mock_os.makedirs.assert_called_once_with("images")

    def test_create_download_folder_exists(self):
        """An existing download folder is left alone."""
        self.mock_os.path.exists.return_value = True
        with patch("download_files.os", self.mock_os):
            df.create_download_folder("images")
        self.mock_os.path.exists.assert_called_once_with("images")
        # BUG FIX: was `self.mock_os.makedirs.get.call_count`, which counts
        # an auto-created child mock that is never invoked (always 0);
        # count calls to makedirs itself.
        self.assertEqual(0, self.mock_os.makedirs.call_count)

    def test_download_file(self):
        """A 200 response is fetched and written to the download folder."""
        self.mock_request.get.return_value = self.mock_response1
        with patch("download_files.os", self.mock_os):
            with patch("__builtin__.open", self.mock_file_open):
                with patch("download_files.requests", self.mock_request):
                    df.download_file(self.url1, self.download_folder)
        self.mock_request.get.assert_called_once_with(self.url1)
        self.mock_file_open.assert_called_once_with(self.downloaded_filename1, "wb")
        # BUG FIX: misspelled assert method, see test_save_file_to_disc.
        self.mock_file_open().write.assert_called_once_with(self.response_content_1)

    def test_download_file_bad_response(self):
        """A non-200 response must not be written to disk."""
        self.mock_response1.status_code = 404
        self.mock_request.get.return_value = self.mock_response1
        with patch("download_files.os", self.mock_os):
            with patch("__builtin__.open", self.mock_file_open):
                with patch("download_files.requests", self.mock_request):
                    df.download_file(self.url1, self.download_folder)
        self.mock_request.get.assert_called_once_with(self.url1)
        self.assertEqual(0, self.mock_file_open.call_count)
        self.assertEqual(0, self.mock_file_open().write.call_count)

    def test_download_files(self):
        """Every URL in the source file is fetched and saved."""
        return_values = [self.mock_response1, self.mock_response2, self.mock_response3]
        self.mock_request.get.side_effect = lambda x: return_values[self.mock_request.get.call_count - 1]
        with patch("download_files.os", self.mock_os):
            with patch("__builtin__.open", self.mock_file_open):
                with patch("download_files.requests", self.mock_request):
                    df.download_files(self.source_file_name, self.download_folder)
        open_file_calls = [call(self.source_file_name, "r"), call(self.downloaded_filename1, "wb"),
                           call(self.downloaded_filename2, "wb"), call(self.downloaded_filename3, "wb")]
        self.mock_file_open.assert_has_calls(open_file_calls, any_order=True)
        req_get_calls = [call(self.url1), call(self.url2), call(self.url3)]
        self.mock_request.get.assert_has_calls(req_get_calls, any_order=True)
        write_calls = [call(self.response_content_1), call(self.response_content_2), call(self.response_content_3)]
        self.mock_file_open().write.assert_has_calls(write_calls, any_order=True)
# Run the whole suite when executed directly: python download_files_test.py
if __name__ == "__main__":
    unittest.main()
6b1551168b26462029b99b02e10d0dbc55d9c960 | Python | yetime/DCCells | /python/setupenv.py | UTF-8 | 4,790 | 2.75 | 3 | [] | no_license | #!/usr/bin/python
import sys, os, random
import numpy as np
# Edge length (grid units) of each cell type's square footprint.
SIZE_OC=180
SIZE_OB=20
SIZE_DCC=20
# Top-left corners of placed cells, filled in by findplacement().
OC=[]
OB=[]
DCC=[]
# NOTE(review): sys.argv[2] is read here, before the argument-count check
# further down, so running the script with too few arguments raises an
# IndexError instead of printing the usage message.
worldsize=int(sys.argv[2])
# Occupancy grid: 0 = empty, otherwise the placed cell's type (1/2/3).
world=np.zeros((worldsize,worldsize), dtype=int)
#Tries to find unoccupied random placements for number cells of type celltype.
def findplacement(celltype, number):
    """Randomly place `number` cells of `celltype` on the global `world` grid.

    celltype selects the footprint size and corner list:
    1 -> SIZE_OC/OC, 2 -> SIZE_OB/OB, 3 -> SIZE_DCC/DCC.
    Occupied squares are marked with `celltype` in `world`, and each placed
    cell's top-left corner is appended to the matching global list.
    Colliding placements are retried with a bounded random walk; a warning
    is printed when no free spot could be verified.
    """
    size = SIZE_OC
    if celltype == 2:
        size = SIZE_OB
    if celltype == 3:
        size = SIZE_DCC

    def square_free(px, py):
        # True when the size x size square at (px, py) lies fully inside
        # the world and every field in it is still empty.
        # BUG FIX: the original indexed world[j+size, k] *before* checking
        # (j+size) < worldsize in the same `and` chain, raising IndexError
        # at the world boundary; here the bounds are checked first.
        if px < 0 or py < 0 or px + size > worldsize or py + size > worldsize:
            return False
        for a in range(px, px + size):
            for b in range(py, py + size):
                if world[a][b] != 0:
                    return False
        return True

    def occupy(px, py):
        # Mark the square as taken and record its corner.
        for a in range(px, px + size):
            for b in range(py, py + size):
                world[a][b] = celltype
        if celltype == 3:
            DCC.append([px, py])
        if celltype == 2:
            OB.append([px, py])
        if celltype == 1:
            OC.append([px, py])

    collisions = []
    upperlimit = worldsize - size
    for _ in range(number):
        positionx = random.randint(0, upperlimit)
        positiony = random.randint(0, upperlimit)
        if square_free(positionx, positiony):
            occupy(positionx, positiony)
        else:
            collisions.append([positionx, positiony])

    solved = collisions == []
    # Deal with the collisions: jitter each colliding corner randomly until
    # a free square appears.  The retry count is bounded so a crowded world
    # cannot hang the script forever (the original while-loop could).
    for positionx, positiony in collisions:
        j, k = positionx, positiony
        found = False
        for _ in range(worldsize * worldsize):
            if square_free(j, k):
                found = True
                break
            j = j + random.randint(-1, 1)
            k = k + random.randint(-1, 1)
            # ups, we ran over the boundaries of our world: wrap to 0
            if j + size > worldsize or j < 0:
                j = 0
            if k + size > worldsize or k < 0:
                k = 0
        if found:
            solved = True
        # Like the original, the cell is placed even when no free square was
        # found, so the requested number of cells is always produced.
        occupy(j, k)

    if not solved:
        print("Collisions couldn't be resolved, change size of world or number of cells")
# Validate the command line (script name + 6 arguments).
if (len(sys.argv) !=7):
    print ("Usage: setupenv.py outfile worldsize n_ob n_oc n_dcc clustered")
    quit()
# Open the output XML file and write the document header.
outfile=open(sys.argv[1],'w')
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?> \n")
outfile.write("<states>\n")
outfile.write("<itno>0</itno>\n\n")
# Cells are only placed for the non-clustered case (clustered == "0").
# NOTE(review): celltype 1 places OC cells but receives argv[3], which the
# usage string documents as n_ob (and celltype 2/OB receives argv[4]=n_oc).
# Either the usage text or the argument order looks swapped -- confirm.
if(sys.argv[6]=="0"):
    findplacement(1,int(sys.argv[3]))
    findplacement(2,int(sys.argv[4]))
    findplacement(3,int(sys.argv[5]))
# Global simulation parameters.
outfile.write("<environment>\n")
outfile.write(" <worldsize>"+str(worldsize)+"</worldsize>\n")
outfile.write(" <unitum>1</unitum>\n")
outfile.write(" <oc_speed>10</oc_speed>\n")
outfile.write("</environment>\n\n")
# One <xagent> element per placed cell, carrying its corner and size.
outfile.write("<agents>")
for i in range(len(OC)):
    outfile.write("<xagent>\n")
    outfile.write(" <name>oc</name>\n")
    outfile.write(" <id>OC"+str(i)+"</id>\n")
    outfile.write(" <geo>{"+str(OC[i][0])+","+str(OC[i][1])+","+str(SIZE_OC)+"}</geo>\n")
    outfile.write("</xagent>\n")
for i in range(len(OB)):
    outfile.write("<xagent>\n")
    outfile.write("<name>ob</name>\n")
    outfile.write(" <id>OB_"+str(i)+"</id>\n")
    outfile.write(" <geo>{"+str(OB[i][0])+","+str(OB[i][1])+","+str(SIZE_OB)+"}</geo>\n")
    outfile.write("</xagent>\n")
for i in range(len(DCC)):
    outfile.write("<xagent>\n")
    outfile.write(" <name>dcc</name>\n")
    outfile.write(" <id>DCC_"+str(i)+"</id>\n")
    outfile.write(" <geo>{"+str(DCC[i][0])+","+str(DCC[i][1])+","+str(SIZE_DCC)+"}</geo>\n")
    outfile.write("</xagent>\n")
# A single fixed point source agent with hard-coded parameters.
outfile.write("<xagent>\n")
outfile.write(" <name>pointsource</name>\n")
outfile.write(" <id>ps0</id>\n")
outfile.write(" <descrip>{1,1,10,100,100,0.01,3,0}</descrip>\n")
outfile.write(" <active>1</active>\n")
outfile.write(" <source_start>1</source_start>\n")
outfile.write(" </xagent>\n")
outfile.write("</agents>\n")
outfile.write("</states>")
outfile.close()
# Debug helper kept from development: ASCII dump of the occupancy grid.
#for i in range(worldsize):
#    for j in range(worldsize):
#        if world[i,j]==0 : print("0"),
#        else: print("1"),
#    print("")
| true |
8dd8412040aba7ff9bb643577779b8ac420a8b40 | Python | Adrian-Ng/Learning-Python | /BasicDataTypes/Tuples.py | UTF-8 | 426 | 3.828125 | 4 | [] | no_license | #https://www.hackerrank.com/challenges/python-tuples/problem
#Given an integer, , and space-separated integers as input, create a tuple, , of those integers. Then compute and print the result of hash(t).
#Sample Input
#2
#1 2
#Sample Output
#3713081631934410656
if __name__ == '__main__':
    # First line holds the element count (tuple() infers the length itself,
    # but we still parse it to consume the expected input format).
    count = int(input())
    # Second line: space-separated integers, hashed as a tuple.
    elements = tuple(int(token) for token in input().split())
    print(hash(elements))
0cef786a16cce1756f373bc9a089ebb0270339a2 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_4_neat/16_0_4_hotdogee_fractiles.py | UTF-8 | 1,965 | 3 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
def original_sequences(k):
    """Yield every length-k tile string over the alphabet {'G', 'L'},
    in itertools.product order ('G...G' first, 'L...L' last)."""
    return (''.join(tiles) for tiles in itertools.product('GL', repeat=k))
def produce_artwork(os, c):
    """Expand the original sequence `os` through `c` generations.

    Each generation substitutes every 'L' tile with the whole original
    sequence and every 'G' tile with an all-'G' run of the same length,
    so the result has length len(os)**c.  For c <= 1 the sequence is
    returned unchanged.  (The parameter name `os` is kept for interface
    compatibility even though it shadows the stdlib module name.)
    """
    a = os
    g = 'G' * len(os)
    # Portability fix: range() instead of the Python-2-only xrange();
    # iteration is identical and the function now also runs on Python 3.
    for _ in range(c - 1):
        a = ''.join(os if t == 'L' else g for t in a)
    return a
"""
6 6 1: 1866=6*311
5 1 5: 1 2 3 4 5
5 2 3: 2 14 15
5 3 3: 8 20
5 4 2: 39 40
5 5 1: 195=5*39 or 3125-194
5 6 1: 195
4 1 4: 1 2 3 4
4 2 3: 2 12
4 3 2: 2 12 or 7 8
4 4 1: 28=4*7 or 256-27
4 5 1: 28
4 6 1: 28
3 1 3: 1 2 3
3 2 2: 2 3
3 3 1: 6=3*2 or 27-5
3 4 1: 6
2 1 2: 1 2
2 2 1: 2
2 3 1: 2
"""
def print_table(k, c, s):
    """Return one text line per length-k original sequence, formatted as
    "<sequence>: <artwork after c generations>"."""
    rows = ['{0}: {1}\n'.format(seq, produce_artwork(seq, c))
            for seq in original_sequences(k)]
    return ''.join(rows)
def print_table_transpose(k, c, s):
    """List the artwork *columns* holding the most 'G' tiles.

    Builds the artwork for every length-k original sequence, transposes the
    set of artworks via zip(*...), and emits one line per column formatted
    as "(1-based column index) (G count) (column string)", sorted by G
    count descending.  Only the columns tied for the maximum count are
    printed: as soon as a smaller count appears, the loop breaks.
    """
    out = ''
    co = 0
    # Each t is (column number, number of 'G's in the column, the column).
    for t in sorted([(i+1, ta.count('G'), ''.join(ta)) for i, ta in enumerate(zip(*[produce_artwork(os, c) for os in original_sequences(k)]))], key=lambda x: x[1], reverse=True):
        if t[1] < co: break
        co = t[1]
        out += '{0:3} {1:2} {2}\n'.format(t[0], t[1], t[2])
    return out
def solve(k, c, s):
    # NOTE(review): this body is the solver for a *different* problem
    # (GCJ 2016 "Counting Sheep": accumulate multiples of n until every
    # digit has been seen).  It references an undefined name `n` and would
    # raise NameError if ever called; it also shadows the builtin `sum` and
    # rebinds its own parameter `s`.  It is dead code here -- the __main__
    # block only calls solve_small().  Confirm and remove or repair.
    if n == 0: return 'INSOMNIA'
    sum = n
    s = set(str(sum))
    while len(s) < 10:
        sum += n
        s.update(set(str(sum)))
    return sum
def solve_small(k, c, s):
    """Return the tile indices "1 2 ... k" as a space-separated string."""
    return ' '.join(map(str, range(1, k + 1)))
# Entry point -- Python 2 only syntax: print statements, input() for the
# case count and raw_input() for each "K C S" test line.
if __name__ == "__main__":
    for case in xrange(1, 1+input()):
        print "Case #{0}: {1}".format(case, solve_small(*[int(x) for x in raw_input().strip().split()]))
        # Alternative drivers kept from development:
        #print "Case #{0}: {1}".format(case, solve(*[int(x) for x in raw_input().strip().split()])),
        #print "Case #{0}\n{1}".format(case, print_table(*[int(x) for x in raw_input().strip().split()])),
        #print "Case #{0}\n{1}".format(case, print_table_transpose(*[int(x) for x in raw_input().strip().split()])),
2e7000ae21f256b82325708d17667402d90077b1 | Python | laurentluce/blog | /regression/unemployment_price/build_train.py | UTF-8 | 966 | 3.28125 | 3 | [] | no_license | import csv
towns = dict()
with open('population.csv', 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
try:
town_code, unemployed, active = row[0], float(row[22]), float(row[23])
unemployment = unemployed / active * 100
towns[town_code] = [unemployment]
except (ValueError, ZeroDivisionError):
pass
with open('price.csv', 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
try:
town_code, price = row[1], float(row[10])
if town_code in towns:
towns[town_code].append(price)
except ValueError:
pass
towns = {k:v for (k, v) in towns.items() if len(v) == 2}
with open('train.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
for k, v in towns.items():
writer.writerow([k, v[0], v[1]])
| true |
deb1c733e6cf84c9236677e1c1ebc8a9b7c1abec | Python | ermader/FontTableLister | /CmapTable.py | UTF-8 | 12,098 | 2.5625 | 3 | [] | no_license | '''
Created on Apr 4, 2020
@author: emader
'''
import struct
import utility
from PlatformAndEncoding import PLATFORM_ID_UNICODE, PLATFORM_ID_MACINTOSH, PLATFORM_ID_WINDOWS,\
UnicodePlatform, MacintoshPlatform, WindowsPlatform, getPLatformName, getEncodingName
import FontTable
class EncodingRecord:
ENCODING_SUBTABLE_FORMAT = ">H" # only the format
ENCODING_SUBTABLE_LENGTH = struct.calcsize(ENCODING_SUBTABLE_FORMAT)
formatNames = {
0: "Byte encoding table (0)",
2: "High-byte mapping through table (2)",
4: "Segment mapping to delta values (4)",
6: "Trimmed table mapping (6)",
8: "Mixed 16-bit and 32-bit coverage (8)",
10: "Trimmed mapping (10)",
12: "Segmented coverage (12)",
13: "Many-to-one range mappings (13)",
14: "Unicode variation sequences (14)"
}
def getFormatName(self):
if self.subtableFormat in self.formatNames:
return self.formatNames[self.subtableFormat]
return f"Format {self.subtableFormat}"
def readSubtableFormat0(self, rawTable, subtableStart):
ENCODING_SUBTABLE_0_FORMAT = ">HHH"
ENCODING_SUBTABLE_0_LENGTH = struct.calcsize(ENCODING_SUBTABLE_0_FORMAT)
subtableEnd = subtableStart + ENCODING_SUBTABLE_0_LENGTH
(_, subtableLength, subtableLanguage) = struct.unpack(ENCODING_SUBTABLE_0_FORMAT, rawTable[subtableStart:subtableEnd])
glyphIDArrayStart = subtableEnd
glyhCount = 256 # this table maps all single byte character codes
glyphIDArrayFormat = f">{glyhCount}B"
glyphIDArrayEnd = glyphIDArrayStart + struct.calcsize(glyphIDArrayFormat)
charCodes = [charCode for charCode in range(256)]
glyphCodes = struct.unpack(glyphIDArrayFormat, rawTable[glyphIDArrayStart:glyphIDArrayEnd])
return (charCodes, glyphCodes)
def readSubtableFormat4(self, rawTable, subtableStart):
ENCODING_SUBTABLE_4_FORMAT = ">HHHHHHH"
ENCODING_SUBTABLE_4_LENGTH = struct.calcsize(ENCODING_SUBTABLE_4_FORMAT)
subtableEnd = subtableStart + ENCODING_SUBTABLE_4_LENGTH
(_, subtableLength, subtableLanguage, segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(ENCODING_SUBTABLE_4_FORMAT, rawTable[subtableStart:subtableEnd])
charCodes = []
glyphCodes = []
segCount = segCountX2 // 2
segmentArrayUnsignedFormat = f">{segCount}H"
segmentArraySignedFormat = f">{segCount}h"
segmentArrayLength = struct.calcsize(segmentArrayUnsignedFormat)
segmentArrayStart = subtableEnd
segmentArrayEnd = segmentArrayStart + segmentArrayLength
endCodes = struct.unpack(segmentArrayUnsignedFormat, rawTable[segmentArrayStart:segmentArrayEnd])
segmentArrayStart = segmentArrayEnd + struct.calcsize(">H") # reservedPad
segmentArrayEnd = segmentArrayStart + segmentArrayLength
startCodes = struct.unpack(segmentArrayUnsignedFormat, rawTable[segmentArrayStart:segmentArrayEnd])
segmentArrayStart = segmentArrayEnd
segmentArrayEnd = segmentArrayStart + segmentArrayLength
idDeltas = struct.unpack(segmentArraySignedFormat, rawTable[segmentArrayStart:segmentArrayEnd])
segmentArrayStart = segmentArrayEnd
segmentArrayEnd = segmentArrayStart + segmentArrayLength
idRangeOffsets = struct.unpack(segmentArrayUnsignedFormat, rawTable[segmentArrayStart:segmentArrayEnd])
glyphIndexArrayStart = segmentArrayEnd
glyphIndexArrayEnd = subtableStart + subtableLength
glyphIndexArrayCount = (
glyphIndexArrayEnd - glyphIndexArrayStart) // 2 # should really use the size of an "H"...
glyphIndexArrayFormat = f">{glyphIndexArrayCount}H"
glyphIndexArray = struct.unpack(glyphIndexArrayFormat, rawTable[glyphIndexArrayStart:glyphIndexArrayEnd])
for segment in range(segCount - 1): # we skip the last segment, which is for glyph 0xFFFF
startCode = startCodes[segment]
endCode = endCodes[segment]
idDelta = idDeltas[segment]
idRangeOffset = idRangeOffsets[segment]
# idRangeOffset[i], if not zero, is the byte offset from idRangeOffset[i] to the
# corresponding entry into glyphIndexArray. The spec. gives this expression to
# retrieve that entry:
# glyphIndex = *( &idRangeOffset[i] + idRangeOffset[i] / 2 + (charCode - startCode[i]) )
# So: idRangeOffset // 2 is the number of words from idRangeOffset[i] to the entry
# in glyphIndexArray, so the index is idRangeOffset // 2 - segCount + i
glyphIndexArrayIndex = idRangeOffset // 2 - segCount + segment
charCodeRange = range(startCode, endCode + 1)
charCodes.extend(charCodeRange)
if idRangeOffset == 0:
glyphCodes.extend([(charCode + idDelta) & 0xFFFF for charCode in charCodeRange])
else:
for charCode in charCodeRange:
index = glyphIndexArrayIndex + charCode - startCode
glyphID = (glyphIndexArray[index] + idDelta) & 0xFFFF if glyphIndexArray[index] != 0 else 0
glyphCodes.append(glyphID)
return (charCodes, glyphCodes)
def readSubtableFormat6(self, rawTable, subtableStart):
ENCODING_SUBTABLE_6_FORMAT = ">HHHHH"
ENCODING_SUBTABLE_6_LENGTH = struct.calcsize(ENCODING_SUBTABLE_6_FORMAT)
subtableEnd = subtableStart + ENCODING_SUBTABLE_6_LENGTH
(_, subtableLength, subtableLanguage, firstCode, entryCount) = struct.unpack(ENCODING_SUBTABLE_6_FORMAT, rawTable[subtableStart:subtableEnd])
glyphIDArrayFormat = f">{entryCount}H"
glyphIDArrayLength = struct.calcsize(glyphIDArrayFormat)
glyphIDArrayStart = subtableEnd
glyphIDArrayEnd = glyphIDArrayStart + glyphIDArrayLength
charCodes = [charCode for charCode in range(firstCode, firstCode+entryCount)]
glyphCodes = struct.unpack(glyphIDArrayFormat, rawTable[glyphIDArrayStart:glyphIDArrayEnd])
return (charCodes, glyphCodes)
def readSubtableFormat12(self, rawTable, subtableStart):
ENCODING_SUBTABLE_12_FORMAT = ">HHIII"
ENCODING_SUBTABLE_12_LENGTH = struct.calcsize(ENCODING_SUBTABLE_12_FORMAT);
MAP_GROUP_RECORD_FORMAT = ">III"
MAP_GROUP_RECORD_LENGTH = struct.calcsize(MAP_GROUP_RECORD_FORMAT)
charCodes = []
glyphCodes = []
subtableEnd = subtableStart + ENCODING_SUBTABLE_12_LENGTH
(_, _, subtableLength, subtableLanguage, numGroups) = struct.unpack(ENCODING_SUBTABLE_12_FORMAT, rawTable[subtableStart:subtableEnd])
mapGroupStart = subtableEnd
mapGroupEnd = mapGroupStart + MAP_GROUP_RECORD_LENGTH
for _ in range(numGroups):
(startCharCode, endCharCode, startGlyphID) = struct.unpack(MAP_GROUP_RECORD_FORMAT, rawTable[mapGroupStart:mapGroupEnd])
charCodeRange = range(startCharCode, endCharCode + 1)
charCodes.extend(charCodeRange)
gids = [startGlyphID + char - startCharCode for char in charCodeRange]
glyphCodes.extend([startGlyphID + char - startCharCode for char in charCodeRange])
mapGroupStart = mapGroupEnd
mapGroupEnd += MAP_GROUP_RECORD_LENGTH
return (charCodes, glyphCodes)
def __init__(self, rawTable, platformID, encodingID, offset32, offsetToSubtableMap):
self.platformID = platformID
self.encodingID = encodingID
self.offset32 = offset32
encodingSubtableStart = offset32
encodingSubtableEnd = encodingSubtableStart + self.ENCODING_SUBTABLE_LENGTH
(self.subtableFormat, ) = struct.unpack(self.ENCODING_SUBTABLE_FORMAT, rawTable[encodingSubtableStart:encodingSubtableEnd])
if self.offset32 not in offsetToSubtableMap:
charCodes = []
glyphCodes = []
if self.subtableFormat == 0:
(charCodes, glyphCodes) = self.readSubtableFormat0(rawTable, encodingSubtableStart)
elif self.subtableFormat == 4: # want symbolic constants for these?
(charCodes, glyphCodes) = self.readSubtableFormat4(rawTable, encodingSubtableStart)
elif self.subtableFormat == 6:
(charCodes, glyphCodes) = self.readSubtableFormat6(rawTable, encodingSubtableStart)
elif self.subtableFormat == 12:
(charCodes, glyphCodes) = self.readSubtableFormat12(rawTable, encodingSubtableStart)
z = list(zip(charCodes, glyphCodes))
offsetToSubtableMap[offset32] = ({c: g for (c, g) in z}, {g: c for (c, g) in z})
class Table(FontTable.Table):
preferredMappings = [
(PLATFORM_ID_UNICODE, UnicodePlatform.ENCODING_ID_UNICODE_FULL),
(PLATFORM_ID_WINDOWS, WindowsPlatform.ENCODING_ID_UNICODE_UCS4),
(PLATFORM_ID_UNICODE, UnicodePlatform.ENCODING_ID_UNICODE_2_0_FULL),
(PLATFORM_ID_UNICODE, -1), # Any encoding will do...
(PLATFORM_ID_WINDOWS, WindowsPlatform.ENCODING_ID_UNICODE_BMP)
]
preferredMappingCount = len(preferredMappings)
bestMapping = preferredMappingCount
bestEncodingRecord = None
CMAP_HEADER_FORMAT = ">HH"
CMAP_HEADER_LENGTH = struct.calcsize(CMAP_HEADER_FORMAT)
ENCODING_RECORD_FORMAT = ">HHI"
ENCODING_RECORD_LENGTH = struct.calcsize(ENCODING_RECORD_FORMAT)
def rankMapping(self, encodingRecord):
platformID = encodingRecord.platformID
encodingID = encodingRecord.encodingID
for mapping in range(self.preferredMappingCount):
(preferredPlatformID, preferredEncodingID) = self.preferredMappings[mapping]
if preferredPlatformID == platformID and (preferredEncodingID == encodingID or preferredEncodingID == -1):
if mapping < self.bestMapping:
self.bestMapping = mapping
self.bestEncodingRecord = encodingRecord
def __init__(self, fontFile, tagBytes, checksum, offset, length):
FontTable.Table.__init__(self, fontFile, tagBytes, checksum, offset, length)
rawTable = self.rawData()
self.encodingRecords = []
self.offsetToSubtableMap = {}
(version, numTables) = struct.unpack(self.CMAP_HEADER_FORMAT, rawTable[:self.CMAP_HEADER_LENGTH])
encodingRecordStart = self.CMAP_HEADER_LENGTH
encodingRecordEnd = encodingRecordStart + self.ENCODING_RECORD_LENGTH
for _ in range(numTables):
(platformID, encodingID, offset32) = struct.unpack(self.ENCODING_RECORD_FORMAT, rawTable[encodingRecordStart:encodingRecordEnd])
encodingRecord = EncodingRecord(rawTable, platformID, encodingID, offset32, self.offsetToSubtableMap)
self.encodingRecords.append(encodingRecord)
self.rankMapping(encodingRecord)
encodingRecordStart = encodingRecordEnd
encodingRecordEnd += self.ENCODING_RECORD_LENGTH
if self.bestEncodingRecord is not None:
(self.charToGlyphMap, self.glyphToCharMap) = self.offsetToSubtableMap[self.bestEncodingRecord.offset32]
def hasUnicodeMapping(self):
return self.bestEncodingRecord is not None
def getCharCode(self, glyphID):
if glyphID in self.glyphToCharMap:
return self.glyphToCharMap[glyphID]
return None
def getGlyphID(self, charCode):
if charCode in self.charToGlyphMap:
return self.charToGlyphMap[charCode]
return None
def format(self, parentFont):
for encodingRecord in self.encodingRecords:
platformID = encodingRecord.platformID
encodingID = encodingRecord.encodingID
offset32 = encodingRecord.offset32
formatName = encodingRecord.getFormatName()
print(f" {getPLatformName(platformID):10} {getEncodingName(platformID, encodingID):15} {utility.formatHex32(offset32):12} {formatName}") | true |
272baf26cb813b7ed05a01fb719f1785c18672cb | Python | aravindanath/PythonAdvCourse | /Day2/SearchOnAmazon1.py | UTF-8 | 382 | 2.609375 | 3 | [] | no_license | from selenium.webdriver import Chrome
from webdriver_manager.chrome import ChromeDriverManager
class TestCase01():
def __init__(self):
global driver
driver = Chrome(ChromeDriverManager().install())
def test_01(self):
driver.get("https://www.google.com")
def teardown(self):
driver.quit()
t = TestCase01()
t.test_01()
t.teardown() | true |
8b405685f6119c9a3002f6ca7cccbb6f021e9225 | Python | BinYuOnCa/Algo-ETL | /Assignment1-ETL/zhiweili/lib/common.py | UTF-8 | 184 | 2.78125 | 3 | [
"MIT"
] | permissive | from datetime import date, datetime
class TimeUtils:
def date_to_time(self, convert_date: date):
return datetime(convert_date.year, convert_date.month, convert_date.day)
| true |
801e93f25af4bc9c25105d016f095bfb97c09250 | Python | ShengrongYang/Colors-of-Zhang-Yimou | /scripts/ColorEx_Script/plot_indie.py | UTF-8 | 2,104 | 2.671875 | 3 | [
"MIT"
] | permissive | from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
# 读取文件
file = '/Users/mac/Downloads/frametest/Red.Sorghum/results/dominant_array.csv'
# 保存路径,自动加上/results/
path = '/Users/mac/Downloads/frametest/Red.Sorghum/'
df = pd.read_csv(file, header=0, sep=',')
def plotting(path, df, t='cylinder', p='c', w='count', s=50):
"""在HSV柱坐标系中绘制散点图"""
if t == 'cone': # 锥
cos = df.v * df.s * np.cos(df.h * 2 * np.pi)
sin = df.v * df.s * np.sin(df.h * 2 * np.pi)
else: # 柱
cos = df.s * np.cos(df.h * 2 * np.pi)
sin = df.s * np.sin(df.h * 2 * np.pi)
if p == 's': # x对应sin
x = sin
y = cos
else: # x对应cos
x = cos
y = sin
if w == 'percentage':
weight = df.weight_percentage
else:
weight = df.weight_count
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(111, projection='3d')
# y = df.s * np.sin(df.h * 2 * np.pi)
# x = df.s * np.cos(df.h * 2 * np.pi)
z = df.v
c = df.hex
s = weight * s
ax.scatter(x, y, z, c=c, s=s, alpha=.3, edgecolor='k', lw=0)
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(0, 1)
# ax.set_xlabel('H', fontsize=14)
# ax.set_ylabel('S', fontsize=14)
# ax.set_zlabel('V', fontsize=14)
ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
ax.tick_params(
axis='y',
which='both',
bottom=False,
top=False,
right=False,
left=False,
labelbottom=False,
labelright=False,
labelleft=False)
plt.savefig(path + 'results/hsv-scatter-plot_' + "%s_%s" % (t, w) + '.png', bbox_inches='tight')
area = 10
plotting(path, d_df, t='cylinder', p='c', w='count', s=10)
plotting(path, d_df, t='cylinder', p='c', w='percentage', s=10)
plotting(path, d_df, t='cone', p='c', w='count', s=10)
plotting(path, d_df, t='cone', p='c', w='percentage', s=10)
| true |
18ff277302229c2371097e5d86a73558599421da | Python | pravsripad/jumeg | /examples/connectivity/plot_simulated_connectivity.py | UTF-8 | 5,597 | 2.765625 | 3 | [] | permissive | #!/usr/bin/env python
'''Simple implementations of connectivity measures.'''
# Authors : pravsripad@gmail.com
# daniel.vandevelden@yahoo.de
import sys
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.mlab as mlab
n_epochs = 120
sfreq, duration = 1000., 1000
times = np.arange(0, duration, 1 / sfreq)
amp , amp2 , nse_amp = 1., 1., 0.5
nfft = 512
nse1 = np.random.rand(times.size) * nse_amp
nse2 = np.random.rand(times.size) * nse_amp
x = amp * np.sin(2 * np.pi * 200 * times) + nse1
y = amp * np.sin(2 * np.pi * 200 * times + np.pi/5) + nse2
shift = 100 # integer
assert shift < sfreq * duration, 'Choose a smaller shift.'
#y = amp2 * np.roll(x, shift) + nse2
# coherence using mlab function
cohxy, freqs = mlab.cohere(x, y, Fs=sfreq, NFFT=nfft)
n_freqs = int(nfft/2 + 1)
def compute_mean_psd_csd(x, y, n_epochs, nfft, sfreq):
'''Computes mean of PSD and CSD for signals.'''
x2 = np.array_split(x, n_epochs)
y2 = np.array_split(y, n_epochs)
Rxy = np.zeros((n_epochs, n_freqs), dtype=np.complex)
Rxx = np.zeros((n_epochs, n_freqs), dtype=np.complex)
Ryy = np.zeros((n_epochs, n_freqs), dtype=np.complex)
for i in range(n_epochs):
Rxy[i], freqs = mlab.csd(x2[i], y2[i], NFFT=nfft, Fs=sfreq)
Rxx[i], _ = mlab.psd(x2[i], NFFT=nfft, Fs=sfreq)
Ryy[i], _ = mlab.psd(y2[i], NFFT=nfft, Fs=sfreq)
Rxy_mean = np.mean(Rxy, axis=0)
Rxx_mean = np.mean(Rxx, axis=0)
Ryy_mean = np.mean(Ryy, axis=0)
return freqs, Rxy, Rxy_mean, np.real(Rxx_mean), np.real(Ryy_mean)
def my_coherence(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean):
''' Computes coherence. '''
coh = np.zeros((n_freqs))
for i in range(0, n_freqs):
coh[i] = np.abs(Rxy_mean[i]) / np.sqrt(Rxx_mean[i] * Ryy_mean[i])
return coh
def my_imcoh(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean):
''' Computes imaginary coherence. '''
imcoh = np.zeros((n_freqs))
for i in range(0, n_freqs):
imcoh[i] = np.imag(Rxy_mean[i]) / np.sqrt(Rxx_mean[i] * Ryy_mean[i])
return imcoh
def my_cohy(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean):
''' Computes coherency. '''
cohy = np.zeros((n_freqs))
for i in range(0, n_freqs):
cohy[i] = np.real(Rxy_mean[i]) / np.sqrt(Rxx_mean[i] * Ryy_mean[i])
return cohy
def my_plv(n_freqs, Rxy, Rxy_mean):
''' Computes PLV. '''
Rxy_plv = np.zeros((n_epochs, n_freqs), dtype=np.complex)
for i in range(0, n_epochs):
Rxy_plv[i] = Rxy[i] / np.abs(Rxy[i])
plv = np.abs(np.mean(Rxy_plv, axis=0))
return plv
def my_pli(n_freqs, Rxy, Rxy_mean):
''' Computes PLI. '''
Rxy_pli = np.zeros((n_epochs, n_freqs), dtype=np.complex)
for i in range(0, n_epochs):
Rxy_pli[i] = np.sign(np.imag(Rxy[i]))
pli = np.abs(np.mean(Rxy_pli, axis=0))
return pli
def my_wpli(n_freqs, Rxy, Rxy_mean):
''' Computes WPLI. '''
Rxy_wpli_1 = np.zeros((n_epochs, n_freqs), dtype=np.complex)
Rxy_wpli_2 = np.zeros((n_epochs, n_freqs), dtype=np.complex)
for i in range(0, n_epochs):
Rxy_wpli_1[i] = np.imag(Rxy[i])
Rxy_wpli_2[i] = np.abs(np.imag(Rxy[i]))
# handle divide by zero
denom = np.mean(Rxy_wpli_2, axis=0)
idx_denom = np.where(denom == 0.)
denom[idx_denom] = 1.
wpli = np.abs(np.mean(Rxy_wpli_1, axis=0)) / denom
wpli[idx_denom] = 0.
return wpli
def my_con(x, y, n_epochs, nfft, sfreq, con_name='coh'):
'''Computes connectivity measure mentioned on provided signal pair and its surrogates.'''
freqs, Rxy, Rxy_mean, Rxx_mean, Ryy_mean = compute_mean_psd_csd(x, y, n_epochs, nfft, sfreq)
# compute surrogates
x_surr = x.copy()
y_surr = y.copy()
np.random.shuffle(x_surr)
np.random.shuffle(y_surr)
freqs_surro, Rxy_s, Rxy_s_mean, Rxx_s_mean, Ryy_s_mean = compute_mean_psd_csd(x_surr, y_surr, n_epochs, nfft, sfreq)
if con_name == 'coh':
coh = my_coherence(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean)
coh_surro = my_coherence(n_freqs, Rxy_s_mean, Rxx_s_mean, Ryy_s_mean)
return coh, coh_surro, freqs, freqs_surro
if con_name == 'imcoh':
imcoh = my_imcoh(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean)
imcoh_surro = my_imcoh(n_freqs, Rxy_s_mean, Rxx_s_mean, Ryy_s_mean)
return imcoh, imcoh_surro, freqs, freqs_surro
if con_name == 'cohy':
cohy = my_cohy(n_freqs, Rxy_mean, Rxx_mean, Ryy_mean)
cohy_surro = my_cohy(n_freqs, Rxy_s_mean, Rxx_s_mean, Ryy_s_mean)
return cohy, cohy_surro, freqs, freqs_surro
if con_name == 'plv':
plv = my_plv(n_freqs, Rxy, Rxy_mean)
plv_surro = my_plv(n_freqs, Rxy_s, Rxy_s_mean)
return plv, plv_surro, freqs, freqs_surro
if con_name == 'pli':
pli = my_pli(n_freqs, Rxy, Rxy_mean)
pli_surro = my_pli(n_freqs, Rxy_s, Rxy_s_mean)
return pli, pli_surro, freqs, freqs_surro
if con_name == 'wpli':
wpli = my_wpli(n_freqs, Rxy, Rxy_mean)
wpli_surro = my_wpli(n_freqs, Rxy_s, Rxy_s_mean)
return wpli, wpli_surro, freqs, freqs_surro
if con_name == '':
print('Please provide the connectivity method to use.')
sys.exit()
else:
print('Connectivity method unrecognized.')
sys.exit()
con_name = 'wpli'
con, con_surro, freqs, freqs_surro = my_con(x, y, n_epochs, nfft, sfreq, con_name)
# coherence using mlab function
#cohxy, freqs = mlab.cohere(x, y, Fs=sfreq, NFFT=nfft)
#pl.plot(freqs, cohxy)
# plot results
pl.figure('Connectivity')
pl.plot(freqs, con)
pl.plot(freqs_surro, con_surro)
pl.legend(['Con', 'Surrogates'])
pl.tight_layout()
pl.show()
| true |
6d92e20393ec03ce361581239578d1e1f719a750 | Python | wangyu33/LeetCode | /LeetCode399.py | UTF-8 | 1,997 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : LeetCode399.py
# Author: WangYu
# Date : 2021/1/20
from typing import List
from collections import defaultdict
class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
d = defaultdict(list)
key = []
for i in equations:
for j in i:
if j not in key:
key.append(j)
d[j].extend([j, 1.0])
# fa = {i:i for i in key}
def find(a):
if a != d[a][0]:
d[a][0] = find(d[a][0])
d[a][1] = d[a][1] * d[d[a][0]][1]
base = 1
fa = a
while d[fa][0] != a:
fa = d[a][0]
base *= d[fa][1]
while a != fa:
original_father = d[a][0]
##### 离根节点越远,放大的倍数越高
d[a][1] *= base
base /= d[original_father][1]
#####
d[a][0] = fa
a = original_father
return root
return d[a][0]
def union(a,b, value):
fa = find(a)
fb = find(b)
if fa != fb:
d[fa][0] = fb
d[fa][1] = d[b][1] * value / d[a][1]
for i in range(len(equations)):
if find(equations[i][0]) != find(equations[i][1]):
union(equations[i][0], equations[i][1], values[i])
ans = []
for a,b in queries:
if a not in d or b not in d:
ans.append(-1.0)
else:
find(a)
find(b)
ans.append(d[a][1]/d[b][1])
return ans
equations = [["a","b"],["e","f"],["b","e"]]
values = [3.4,1.4,2.3]
queries = [["b","a"],["a","f"],["f","f"],["e","e"],["c","c"],["a","c"],["f","e"]]
s = Solution()
print(s.calcEquation(equations,values,queries)) | true |
c02679a685f09e9aea878a8800bf8913120fe8a8 | Python | fjtello/python | /Kaprekar/Kaprekar.starter.py | UTF-8 | 1,171 | 3.53125 | 4 | [] | no_license | cifras = 4
def ordenar(n, s):
m = list(str(n))
m.sort(reverse=(1==s))
return int("".join(m))
def evolucion(n):
numero_az = ordenar(n, 0)
numero_za = ordenar(n, 1)
numero_dif = numero_za - numero_az
serie_actual.append(numero_dif)
# ¿Está 'numero' ya incluido en la lista?
if procesados.__contains__(numero_dif):
return 0
procesados.append(numero_dif)
procesados.sort()
return evolucion(numero_dif)
serie_actual = []
numero_inicial = int("1" + ("0" * (cifras - 1)))
numero_final = int("1" + "0" * cifras) - 1
print(" ==> Desde [ ",numero_inicial," ] hasta [ ", numero_final, " ]")
tablero = [[0,[0,0],0]]
procesados = []
for numero in range (numero_inicial, numero_final + 1):
fin_de_ciclo = False
numero_en_estudio = numero
while fin_de_ciclo == False:
serie_actual = []
serie_actual.append(numero_en_estudio)
numero_evolucion = evolucion(numero_en_estudio)
elemento = [numero_en_estudio, serie_actual, serie_actual.__len__()]
tablero.append(elemento)
fin_de_ciclo = True
print(serie_actual)
print(" ==> ", tablero)
| true |
a08ffcbad847713d9004f59892ed0e63892d7c45 | Python | cowlicks/Numerical_Methods_for_Applications_M368k | /hw9/ch11_1_2a.py | UTF-8 | 538 | 3.21875 | 3 | [] | no_license | import math as m
ya = -0.3
yb = -0.1
h = m.pi/4
t0 = (-0.1 + 0.3)/(m.pi/2.)
t1 = 1.1*t0
def dv(v,x,y):
return v + 2*y + m.cos(x)
def dy(v,x,y):
return v
def v_step(v,x,y):
return v + h*dv(v,x,y)
def y_step(v,x,y):
return y + h*dy(v,x,y)
def euler(t):
v1 = v_step( t, 0., ya)
y1 = y_step( t, 0., ya)
print "v1 = " + str(v1)
print "y1 = " + str(y1)
print ""
v2 = v_step(v1, m.pi/4, y1)
y2 = y_step(v1, m.pi/4, y1)
print "v2 = " + str(v2)
print "y2 = " + str(y2)
return y2
| true |
b66f6f66c19076ecc2f74e53c9c3d1a734327899 | Python | jyotihirak11/Gender-Identification-Using-Speech-Signals- | /MCA.py | UTF-8 | 4,384 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
from keras.models import Sequential
from keras.layers import Activation,Dense, Dropout, Flatten, Conv2D,MaxPooling2D
from glob import glob
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
#Librosa for audio
import librosa as lr
#And the display module for visualization
import librosa.display
# In[2]:
data=pd.read_csv('E:/AI/Gender Recognition of Speaker/training samples/voices/voicesample1.csv')
data.head(5)
#E:\AI\Gender Recognition of Speaker\training samples\voices
# In[3]:
#Display No. of rows and columns
data.shape
# In[4]:
#read data
data_dir = 'E:/AI/Gender Recognition of Speaker/training samples/voices'
audio_files = glob(data_dir + '/*.flac')
#files = librosa.util.find_files('E:/AI/Gender Recognition of Speaker/LibriSpeech/train-clean-100', ext='flac')
print(len(audio_files))
# In[5]:
# Load the audio as a waveform `y`
# Store the sampling rate as `sr`
y,sr=lr.load(audio_files[5], duration=2.97)
print(y)
print(sr)
plt.plot(y)
# Let's make and display a mel-scaled power (energy-squared) spectrogram
ps=librosa.feature.melspectrogram(y=y, sr=sr)
print(ps)
ps.shape
# In[6]:
# Display the spectrogram on a mel scale
librosa.display.specshow(ps, y_axis='mel', x_axis='time')
# In[7]:
y,sr=lr.load(audio_files[16], duration=2.97)
print(y)
print(sr)
plt.plot(y)
# Let's make and display a mel-scaled power (energy-squared) spectrogram
ps=librosa.feature.melspectrogram(y=y, sr=sr)
print(ps)
# In[8]:
librosa.display.specshow(ps, y_axis='mel', x_axis='time')
# In[9]:
D=[] #DataSet
y,sr=lr.load(audio_files[0], duration=2.97)
ps=librosa.feature.melspectrogram(y=y, sr=sr)
D.append((ps,audio_files[0]))
print(D)
# In[5]:
D=[] #DataSet
for row in data.itertuples():
print(row)
# In[6]:
D=[] #DataSet
for row in data.itertuples():
# print(row)
y,sr=lr.load('E:/AI/Gender Recognition of Speaker/training samples/voices/' + row.Filename, duration=2.97)
ps=librosa.feature.melspectrogram(y=y, sr=sr)
if ps.shape !=(128,128):
#print(file)
continue
D.append((ps,row.Class))
print(D)
# In[92]:
'''D=[] #DataSet
for file in range (0,len(audio_files), 1):
y,sr=lr.load(audio_files[file], duration=2.97)
ps=librosa.feature.melspectrogram(y=y, sr=sr)
if ps.shape !=(128,128):
#print(file)
continue
D.append((ps,audio_files[file]))
print(D)'''
# In[12]:
print("Number of samples:",len(D))
# In[7]:
dataset = D
random.shuffle(dataset)
train=dataset[:300]
print(train)
# In[8]:
test=dataset[300:]
print(test)
# In[9]:
X_train, Y_train = zip(*train)
print(X_train)
# In[10]:
print(Y_train)
# In[11]:
X_test, Y_test = zip(*test)
# In[12]:
#Reshape for CNN input
X_train = np.array([x.reshape((128,128,1)) for x in X_train])
X_test = np.array([x.reshape((128,128,1)) for x in X_test])
print(X_train)
# In[13]:
# One-Hot encoding for classes
Y_train = np.array(keras.utils.to_categorical(Y_train,2))
Y_test = np.array(keras.utils.to_categorical(Y_test,2))
print(Y_train)
# In[14]:
print(Y_test)
# In[15]:
model = Sequential()
input_shape=(128,128,1)
model.add(Conv2D(24,(5,5), strides=(1,1), input_shape=input_shape))
model.add(MaxPooling2D ((4,2), strides= (4,2)))
model.add(Activation('relu'))
model.add(Conv2D (48, (5,5), padding = 'valid'))
model.add(MaxPooling2D ((4,2), strides = (4,2)))
model.add(Activation('relu'))
model.add(Conv2D (48, (5,5), padding = 'valid'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout( rate = 0.5))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(rate= 0.5))
model.add(Dense(2))
model.add(Activation('softmax'))
# In[17]:
model.compile(
optimizer="Adam",
loss = "categorical_crossentropy",
metrics=['accuracy'])
# In[18]:
history=model.fit(
x = X_train,
y = Y_train,
epochs = 50,
batch_size = 40,
validation_data = (X_test, Y_test))
# In[19]:
score=model.evaluate(
x=X_test,
y=Y_test)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# In[27]:
model.summary()
# In[20]:
plt.plot(history.history['acc'], label='Train Accuracy')
plt.plot(history.history['val_acc'], label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# In[ ]:
| true |
7551eba59b50d0f95342540f816b06b96ac56d66 | Python | bitkeks/sharepy | /sharepy/filehandling/__init__.py | UTF-8 | 2,348 | 2.875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
All file handling related code:
* Checking file system permissions
* Lookup file information
* Moving files from upload to storage
* Removing files
"""
from collections import namedtuple
import os
from sharepy.config import FILES_UPLOADDIR, FILES_STORAGEDIR
from sharepy.database import get_userfiles
def check_permissions():
"""Check if folders exist and if they are read-writeable.
Should be used in the startup script to check the configured paths.
"""
for d in (FILES_UPLOADDIR, FILES_STORAGEDIR):
if not os.path.exists(d):
exit(u"Directory {} does not exist!".format(d))
if not (os.access(d, os.R_OK) and os.access(d, os.W_OK)):
exit(u"Cannot use directory {}. Wrong permissions!".format(d))
def check_storagefile_exist(filehash):
"""Check if a storage file identified by its hash still exists.
"""
return os.path.exists(os.path.join(FILES_STORAGEDIR, filehash))
def create_useruploaddir(username):
"""Create a users upload dir if it does not exist.
"""
userdir = os.path.join(FILES_UPLOADDIR, username)
if not os.path.exists(userdir):
os.mkdir(userdir, 0700)
def get_unregistered_files(username):
"""Get all files from a users upload dir.
"""
userdir = os.path.join(FILES_UPLOADDIR, username)
files = []
file_tuple = namedtuple('userfile', 'path name size')
for f in os.listdir(userdir):
file_path = os.path.join(userdir, f)
if os.path.isfile(file_path):
files.append(file_tuple(file_path, f, os.lstat(file_path).st_size))
return files
def get_filesize_byte(username, filename):
"""Get the size of an uploaded file in bytes.
"""
file_path = os.path.join(FILES_UPLOADDIR, username, filename)
return os.lstat(file_path).st_size
def get_registered_files(userid):
"""Get all registered files from database. Convenience method.
"""
return get_userfiles(userid)
def register_file(username, filename, filehash):
"""Register a file.
This means the uploaded file is moved into the storage and renamed
to its hashstring.
"""
old_filepath = os.path.join(FILES_UPLOADDIR, username, filename)
new_filepath = os.path.join(FILES_STORAGEDIR, filehash)
os.rename(old_filepath, new_filepath)
| true |
3c3259abb3fb0ef80a42d9077e9343702b923610 | Python | rahuldastidar/python-programming | /variable.py | UTF-8 | 295 | 3.78125 | 4 | [] | no_license | character_name = "Rahul"
character_age = "43"
print("There was a man nameed " + character_name + ",")
print("he was " + character_age + " years old. ")
character_name = "Lopa"
print("she raelly liked the name " + character_name +",")
print("but she did't like being " + character_age +".") | true |
10864b4c2721d78585c002ea37708e06ce002f84 | Python | BitsonFire/P2P-Shop | /network.py | UTF-8 | 3,526 | 2.625 | 3 | [] | no_license | import socket
import threading
from threading import Thread
import time
import select
import parser
import json
broadcastdata = ""
broadcastmode = False
# Barrier used for Broadcasting
barrier = None
event = threading.Event()
class IThread(Thread):
# threadtype -> inbound thread == 0 or outbound thread == 1
def __init__(self, threadtype, clientsocket, clientaddress):
Thread.__init__(self)
self.clientsocket = clientsocket
self.clientaddress = clientaddress
self.threadtype = threadtype
def run(self):
global broadcastmode
global broadcastdata
global barrier
individualsend = False
mdata = None
if(self.threadtype == 0):
print(f"Connection from {self.clientaddress} has been established!")
elif(self.threadtype == 1):
# Exchange Iplist and Itemlist and update
self.clientsocket.send(bytes(parser.createRequest('INITCN'),"utf-8"))
while True:
try:
readable, _, _ = select.select([self.clientsocket], [], [], 0)
except:
print("readable error occured")
if readable:
data = self.clientsocket.recv(1024)
print(data)
if(data == b''):
print("Error: {} Client might have closed the socket".format(self.clientaddress))
break
p = parser.parser(data.decode("utf-8"))
result, individualsend, mdata = p.parseHeader()
# Enables Rebroadcasting
if individualsend == False and mdata != None and result == True:
print("Enabling Rebroadcasting")
broadcastData(mdata)
# Update GUI
connectionManager.setevent()
if individualsend == True:
print("Sending Individually to this thread")
self.clientsocket.send(bytes(mdata, "utf-8"))
individualsend = False
mdata = None
# Update GUI
connectionManager.setevent()
if broadcastmode == True:
i = barrier.wait()
broadcastmode = False
self.clientsocket.send(bytes(broadcastdata,"utf-8"))
# Update GUI
connectionManager.setevent()
self.clientsocket.close()
class connectionManager:
def __init__(self):
self.bindip = socket.gethostname()
def setbindip(self, bindip):
self.bindip = bindip
def inbound(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.bindip, 1234))
s.listen(5)
while(True):
print(f"Listening for incoming connections on " + self.bindip)
clientsocket, address = s.accept()
crt = IThread(0, clientsocket, address)
crt.setName("connectionthread")
parser.addtoiplist(address[0])
crt.start()
def outbound(self, ip, port_number):
try:
z = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(f"Trying to establish connection")
z.connect((ip, port_number))
cst = IThread(1, z, ip)
cst.setName("connectionthread")
cst.start()
except:
print("Error: Cannot Establish connection with requested ip address")
@staticmethod
def getcountofconnectionthreads():
count = 0
allthreads = threading.enumerate()
for t in allthreads:
if t.getName() == "connectionthread":
count = count + 1
return count
@staticmethod
def setevent():
global event
event.set()
@staticmethod
def waitevent():
global event
event.wait()
event.clear()
def broadcastData(data):
global barrier
global broadcastdata
global broadcastmode
broadcastdata = data
broadcastmode = True
barrier = threading.Barrier(connectionManager.getcountofconnectionthreads(), timeout = None) | true |
c884a761f0933f34253163b96f2574d9fe02aa26 | Python | diitaz93/polypharm_predict | /data/ddi_bdm.py | UTF-8 | 3,509 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================================= #
# ddi_bdm.py #
# Author: Juan Sebastian Diaz Boada #
# Creation Date: 23/05/2020 #
# ============================================================================================= #
"""
Calculates the algoritmic complexity of the drug interaction network of the DECAGON dataset.
The dataset is given as a list of adjacency matrices, each of dimension 𝑁𝑑𝑟𝑢𝑔𝑠×𝑁𝑑𝑟𝑢𝑔𝑠,
corresponding to the connectivity per each joint side effect. The code uses the package pybdm
to calculate the complexity contribution of each node and its corresponding edges per side
effect. The result is a list of feature vectors, exported as a pickle readable format file
along with relevant data.
Parameters
----------
path : string
(Relative) path to the file of data structures.
"""
# ============================================================================================= #
import numpy as np
import time
import os
import sys
import psutil
import pickle
import warnings
from pybdm import BDM
from pybdm.partitions import PartitionRecursive
from algorithms import PerturbationExperiment, NodePerturbationExperiment
from getpass import getuser
# Settings and loading of the list of adj matrices
input_file = str(sys.argv[1])
start = time.time()
pid = os.getpid()
ps= psutil.Process(pid)
warnings.filterwarnings("ignore")
with open(input_file, 'rb') as f:
ddi_adj_list = pickle.load(f)['ddi_adj_list']
print('\nInput data loaded\n')
jobs = 16
bdm = BDM(ndim=2, partition=PartitionRecursive)
part = 'PartitionRecursive'
# ============================================================================================= #
# CALCULATION
nodebdm_ddi_list = []
add_edgebdm_ddi_list = []
rem_edgebdm_ddi_list = []
ddi_nodeper = NodePerturbationExperiment(bdm,metric='bdm',bipartite_network=False,
parallel=True,jobs=jobs)
ddi_edgeper = PerturbationExperiment(bdm, bipartite_network=False)
total = len(ddi_adj_list)
count=1
for i in ddi_adj_list:
ddi_nodeper.set_data(np.array(i.todense()))
ddi_edgeper.set_data(np.array(i.todense()))
print('set data')
nodebdm_ddi_list.append(ddi_nodeper.run())
rem_edgebdm_ddi_list.append(ddi_edgeper.run_removing_edges())
prog = count*100/total
count += 1
print(prog,'% completed')
print('Node and Edge BDM for DDI calculated\n')
# ============================================================================================= #
# EXPORTING
drugs = np.shape(ddi_adj_list[0])[0]
memUse = ps.memory_info()
total_time=time.time()-start
output_data = {}
output_data['nodebdm_ddi_list'] = nodebdm_ddi_list
output_data['rem_edgebdm_ddi_list'] = rem_edgebdm_ddi_list
output_data['vms_ddi'] = memUse.vms
output_data['rss_ddi'] = memUse.rss
output_data['time_ddi'] = total_time
output_data['jobs_ddi'] = jobs
output_data['partition_type'] = part
path = os.getcwd()
words = input_file.split('_')
output_file = path + '/data_structures/BDM/DDI_BDM_' + words[2] + '_se_' + str(total) +\
'_drugs_' + str(drugs)
with open(output_file, 'wb') as f:
pickle.dump(output_data, f, protocol=3)
print('Output data exported in ', output_file,'\n')
| true |
7540ca37e20d2873725274e65d5542aadd500b00 | Python | Wintus/MyPythonCodes | /re - phone pattern.py | UTF-8 | 533 | 2.71875 | 3 | [] | no_license | phonePattern = re.compile(r'''
# don't match beginning of string, number can start anywhere
(\d{3}) # area code is 3 digits (e.g. '800')
\D* # optional separator is any number of non-digits
(\d{3}) # trunk is 3 digits (e.g. '555')
\D* # optional separator
(\d{4}) # rest of number is 4 digits (e.g. '1212')
\D* # optional separator
(\d*) # extension is optional and can be any number of digits
$ # end of string
''', re.VERBOSE)
| true |
da8888f983ad172c050ce2fea35cae82aa8dde84 | Python | jabbalaci/NimCliHelper | /python.old/rod.py | UTF-8 | 8,936 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
Nim CLI Helper
Goal: facilitate Nim development in the command-line.
by Laszlo Szathmary (jabba.laci@gmail.com), 2018
"""
import json
from glob import glob
import os
import shlex
import shutil
import sys
from glob import glob
from pathlib import Path
from subprocess import PIPE, STDOUT, Popen
VERSION = "0.1.2"
EXIT_CODE_OK = 0
EDITOR = "vim"
CURRENT_DIR_NAME = Path(os.getcwd()).name
# pykot is my small Python / Kotlin library, see https://github.com/jabbalaci/nimpykot
PYKOT_LOCATION = "{home}/Dropbox/nim/NimPyKot/src/pykot.nim".format(home=os.path.expanduser("~"))
VSCODE_NIM_SNIPPET = "{home}/.config/Code/User/snippets/nim.json".format(home=os.path.expanduser("~"))
NIMBLE = """
# Package
version = "0.1.0"
author = "..."
description = "..."
license = "MIT"
# srcDir = "src"
# bin = @["alap"]
# Dependencies
requires "nim >= 0.19.0"
""".strip()
class MissingSourceFileException(Exception):
pass
class ExistingFileException(Exception):
pass
def usage():
print("""
Nim CLI Helper v{ver}
=====================
option what it does notes
------ ------------ -----
init bundles the indented 3 steps below initialize a project folder
alap create alap.nim create a skeleton source file
pykot copy pykot.nim . copy pykot.nim to the current dir.
nimble simplified nimble init create a simple .nimble file
ad edit .nimble add dependency
id nimble install -d install dependencies (and nothing else)
(like `pip install -r requirements.txt`)
c nim c compile (debug)
cr nim c -r compile and run
s compile, run, then delete the exe
i.e., run it as if it were a script
rel nim c -d:release compile (release)
small1 nim c -d:release --opt:size small EXE
small2 small1 + strip smaller EXE
small3 small2 + upx smallest EXE
ver nim --version version info
""".strip().format(ver=VERSION))
def execute_command(cmd, debug=True, sep=False):
"""
Execute a simple external command and return its exit status.
"""
if debug:
print('#', cmd)
if sep:
print("-" * 78)
args = shlex.split(cmd)
child = Popen(args)
child.communicate()
return child.returncode
def get_simple_cmd_output(cmd, stderr=STDOUT):
"""
Execute a simple external command and get its output.
The command contains no pipes. Error messages are
redirected to the standard output by default.
"""
args = shlex.split(cmd)
return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0].decode("utf8")
def get_version_info():
return get_simple_cmd_output("nim --version").splitlines()[0]
print(nim)
def version_info():
print(get_version_info())
def create_alap_file():
fname = "alap.nim"
if os.path.isfile(fname):
raise ExistingFileException("alap.nim exists")
# else
if not os.path.isfile(VSCODE_NIM_SNIPPET):
execute_command(f"touch {fname}")
print(f"# an empty {fname} was created")
else:
try:
with open(VSCODE_NIM_SNIPPET) as f:
doc = json.load(f)
body = doc['alap']['body']
with open(fname, "w") as to:
for line in body:
line = line.replace("$0", "")
print(line, file=to)
#
print(f"# {fname} was created using your VS Code Nim snippet")
except Exception as e:
print(f"# Warning: couldn't process the file {VSCODE_NIM_SNIPPET}", file=sys.stderr)
print("#", e, file=sys.stderr)
execute_command(f"touch {fname}")
print(f"# an empty {fname} was created")
def copy_pykot():
if not os.path.isfile(PYKOT_LOCATION):
print(f"# Warning: {PYKOT_LOCATION} was not found")
return
# else
fname = "pykot.nim"
if os.path.isfile(f"./{fname}"):
print(f"# {fname} exists in the current folder, deleting it")
os.remove(f"./{fname}")
shutil.copy(PYKOT_LOCATION, ".")
print(f"# {fname}'s latest version was copied to the current folder")
def nimble():
fname = "alap.nimble"
if os.path.isfile(f"{fname}"):
print(f"# Warning: {fname} already exists")
return
# else
with open(fname, "w") as f:
print(NIMBLE, file=f)
#
print(f"# {fname} was created")
def compile(args, output=True, release=False, small=False):
options = ""
if not output:
options = "--hints:off --verbosity:0"
try:
src = args[1]
except:
print("Error: provide the source file too!", file=sys.stderr)
print(f"Tip: rod c <input.nim>", file=sys.stderr)
return 1
# else
cmd = f'nim {options} c {src}'
if release:
cmd = f'nim {options} c -d:release {src}'
if small:
cmd = f'nim {options} c -d:release --opt:size {src}'
exit_code = execute_command(cmd)
return exit_code
def get_exe_name(p):
# under Linux
return str(Path(p.stem))
def run_exe(exe, params):
params = " ".join(params)
cmd = f"./{exe} {params}"
exit_code = execute_command(cmd, sep=True)
return exit_code
def strip_exe(exe):
return execute_command(f"strip -s {exe}")
def upx_exe(exe):
return execute_command(f"upx --best {exe}")
def delete_exe(exe):
p = Path(exe)
if p.exists() and p.is_file() and p.suffix != ".nim":
# print(f"# remove {str(p)}")
p.unlink()
return not p.exists()
def small1(args):
return compile(args, release=True, small=True)
def small2(args):
small1(args)
p = Path(args[1])
exe = get_exe_name(p)
strip_exe(exe)
def small3(args):
small2(args)
p = Path(args[1])
exe = get_exe_name(p)
upx_exe(exe)
def find_nimble_file():
found = glob("*.nimble")
if len(found) == 1:
return found[0]
# else
return None
def add_dependency():
nimble_file = find_nimble_file()
if nimble_file is None:
print("# Error: no .nimble file was found", file=sys.stderr)
return
# else
execute_command(f"{EDITOR} {nimble_file}")
def install_dependencies():
execute_command("nimble install -d")
def process(args):
param = args[0]
params = " ".join(args[1:])
exit_code = 0
#
if param == "init":
try:
create_alap_file()
copy_pykot()
nimble()
except Exception as e:
print("Error:", e)
elif param == 'alap':
try:
create_alap_file()
except Exception as e:
print("Error:", e)
elif param == 'pykot':
copy_pykot()
elif param == "nimble":
nimble()
elif param == "ad":
add_dependency()
elif param == "id":
install_dependencies()
elif param == 'c':
exit_code = compile(args)
elif param == 'rel':
exit_code = compile(args, release=True)
elif param == 'small1':
exit_code = small1(args)
elif param == 'small2':
exit_code = small2(args)
elif param == 'small3':
exit_code = small3(args)
elif param == 'cr':
exit_code = compile(args)
if exit_code != EXIT_CODE_OK:
return exit_code
# else
p = Path(args[1])
exe = get_exe_name(p)
exit_code = run_exe(exe, args[2:])
elif param == 's':
try:
p = Path(args[1])
if p.suffix != ".nim":
raise MissingSourceFileException
except:
print("Error: provide a source file!", file=sys.stderr)
print(f"Tip: rod s <input.nim>", file=sys.stderr)
return 1
exit_code = compile(args, output=False)
if exit_code != EXIT_CODE_OK:
return exit_code
# else
p = Path(args[1])
exe = get_exe_name(p)
try:
run_exe(exe, args[2:])
finally:
exit_code = delete_exe(exe)
elif param == 'ver':
version_info()
else:
print("Error: unknown parameter")
#
return exit_code
def main():
if len(sys.argv) == 1:
usage()
return 0
# else
return process(sys.argv[1:])
##############################################################################
if __name__ == "__main__":
exit(main())
| true |
c7e78946c279a1f51dac87b641cb7590e70fd81e | Python | dengl11/Leetcode | /problems/regular_expression_matching/solution.py | UTF-8 | 841 | 3.140625 | 3 | [] | no_license | class Solution:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
dp = [[False]*(n+1) for _ in range(m+1)]
dp[0][0] = True
for j in range(2, n + 1):
dp[0][j] = dp[0][j-2] and p[j-1] == "*"
for i in range(1, m+1):
for j in range(1, n+1):
if s[i-1] == p[j-1] or p[j-1] == ".":
dp[i][j] = dp[i-1][j-1]
elif p[j-1] == "*":
dp[i][j] = dp[i][j-1] # a* -> a
if j >= 2:
dp[i][j] = dp[i][j] or dp[i][j-2] # a* -> ""
if i >= 2:
# a* -> aaaa.a
dp[i][j] = dp[i][j] or (p[j-2] in [".", s[i-1]] and dp[i-1][j]) # multiple
return dp[-1][-1]
| true |
8aebcb057c5f6171dbdc209e0af1430470e4e8d7 | Python | Financial-Engineering-I/strategy-template | /strategy.py | UTF-8 | 1,052 | 2.796875 | 3 | [
"MIT"
] | permissive | from sklearn import linear_model
import numpy as np
def strategy(
trade_dt_var, response_var, features_and_responses, trading_date, N, n
):
training_indices = features_and_responses[trade_dt_var] < trading_date
training_X = features_and_responses[training_indices].tail(N)[
['a', 'b', 'R2', 'ivv_vol']
]
training_Y = features_and_responses[training_indices].tail(N)[response_var]
# Need at least two 1's to train a model
if sum(training_Y) < 2:
return 0
if sum(training_Y) < n:
logisticRegr = linear_model.LogisticRegression()
logisticRegr.fit(np.float64(training_X), np.float64(training_Y))
trade_decision = logisticRegr.predict(
np.float64(
features_and_responses[["a", "b", "R2", "ivv_vol"]][
features_and_responses['Date'] == trading_date
]
)
).item()
else: # If EVERYTHING is a 1, then just go ahead and implement again.
trade_decision = 1
return trade_decision
| true |
4cc2379f79507ff924ca12bb9e9779265be7bc19 | Python | DinurTuraev/python_darslari | /3-dars.py | UTF-8 | 775 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 20:38:22 2021
@author: sulta
"""
print('"Nexia", "Tico",\'Damas\' ko\'rganlar qilar havas')
#5 ning 4-darajasi
print("5 ning 4-darajasi", 5**4, "ga teng")
#22 ni 4 ga bo'lganda qancha qoldiq qoladi
print("22 ni 4 ga bo'lganda", 22%4, "qoldiq chiqadi")
#Tomonlari 125 ga teng kvadratning perimetri va yuzini toping
print('Tomonlari 125 ga teng kvadratning yuzi', 125*125, 'ga, perimetri', 125*4, 'ga teng')
#Diametri 12 ga teng bo'lgan doiraning yuzini toping
print('Diametri 12 ga teng bo\'lgan doiraning yuzi', 3.14*(12/2)**2, 'ga teng')
#Katetlari 6 va 7 bo'lgan to'gri burchakli uchburchakning gipotenuzasini toping
print("Katetlari 6 va 7 bo'lgan to'g'ri burchakli uchburchakning gipotenuzasi", (6**2+7**2)**(1/2))kjbb | true |
c7a8ef52423772349b49d9ef34848b507edbffb6 | Python | carlosms92/redmine-spreadsheet | /main.py | UTF-8 | 1,946 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse, getpass
from datetime import datetime
from redmine_api import RedmineApi
from sheets_api.sheets_service import SheetsService
class Password(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
if values is None:
values = getpass.getpass()
setattr(namespace, self.dest, values)
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--username", help="Redmine user")
parser.add_argument("-p", "--password", action=Password, nargs="?", dest="password", help='Redmine password')
args = parser.parse_args()
if args.username is None:
sys.exit("Es necesario pasar el nombre de usuario de Redmine (option -u)")
if args.password is None:
sys.exit("Es necesario pasar la contraseña de Redmine (option -p)")
#REDMINE
redmine = RedmineApi(args.username, args.password)
redmine.connect()
userId = redmine.getCurrentUserId()
dateYesterday = redmine.getYesterdayDate()
dateYesterday = '2020-11-03'
issues = redmine.getUserIssuesByDate(userId,dateYesterday)
# for issue in issues:
# print(list(issue))
# print(issue.id, " - ", issue.custom_fields[0].value, " - ", issue.project.name, " - ", issue.subject)
# sys.exit(0)
#SHEETS
updateDate = datetime.strptime(dateYesterday,"%Y-%m-%d")
sheetsService = SheetsService()
#spreadsheet = sheetsService.getSpreadsheet()
#sheets = spreadsheet.get('sheets')
#for sheet in sheets:
# print(sheet.get('properties'))
responseUpdateFields = sheetsService.dailyUpdateSheet(issues,updateDate)
updatedRange = responseUpdateFields['updates']['updatedRange']
print(updatedRange)
responseUpdateFormat = sheetsService.updateFormatRange(updatedRange)
print(responseUpdateFormat)
#responseUpdateFormatColumnToNumber = sheetsService.updateFormatColumnToNumber()
#print(responseUpdateFormatColumnToNumber)
#sheetsService.getSpreadsheet()
#sheetsService.getRow()
| true |
edb2f1decca15645fbe9719ec501bfa86a0ec74e | Python | cjsmithvet/python-snippets | /simpleconnect1.py | UTF-8 | 611 | 2.765625 | 3 | [] | no_license | import sys
import telnetlib
import time
print "Hello"
HOST = "10.0.0.120"
print "got this far"
tn = telnetlib.Telnet()
tn.set_debuglevel(3)
print "About to open the connection, timeout of 10 seconds"
tn.open(HOST, timeout=10)
print "wow I did a telnet"
tn.write("VBUS.VALUE" + "\n")
print "I wrote VBUS.VALUE and am about to sleep 10 seconds"
time.sleep(10)
print tn.read_some()
sys.exit(0)
print tn.read_very_lazy()
print tn.read_until("-->", timeout=10)
tn.write("VBUS.VALUE" + "\n")
print "I wrote VBUS.VALUE again"
# print tn.read_all(timeout=10)
print tn.read_until("-->", timeout=10)
tn.close()
| true |
2414966e2b973baf5f01d97ac5341f07daca6bbf | Python | Nicholasli1995/VisualizingNDF | /data/Nexperia/dataset.py | UTF-8 | 6,775 | 2.9375 | 3 | [
"MIT"
] | permissive | # Nexperia Pytorch dataloader
import numpy as np
import os
import torch
import torch.utils.data
import imageio
import logging
import csv
# File extension used both to discover image files and to strip names in
# NexperiaDataset / load_data.
image_extension = ".jpg"
class NexperiaDataset(torch.utils.data.Dataset):
    """Nexperia image-classification dataset.

    Wraps pre-loaded image/label arrays as a torch Dataset; images get a
    channel axis if missing and are mean/std normalized in place at
    construction time.
    """
    def __init__(self, root, paths, imgs, labels=None, split=None, mean=None,
                 std=None):
        self.root = root
        self.paths = paths
        # image names = file names with the image extension stripped
        self.names = [path.split(os.sep)[-1][:-len(image_extension)] for path in paths]
        self.imgs = imgs
        if len(self.imgs.shape) == 3:
            # add a singleton channel axis: (N, H, W) -> (N, 1, H, W)
            self.imgs = np.expand_dims(self.imgs, axis=1)
        self.labels = labels
        self.split = split
        self.name = 'Nexperia'
        logging.info('{:s} {:s} set contains {:d} images'.format(self.name,
            self.split, len(self.paths)))
        # use the provided statistics when both are given, otherwise compute them
        self.mean, self.std = self.get_stats(mean, std)
        self.normalize(self.mean, self.std)
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, idx):
        return torch.from_numpy(self.imgs[idx]), self.labels[idx]
    def get_stats(self, mean=None, std=None, verbose=True):
        """Return (mean, std) for normalization.

        If both arguments are supplied they are returned unchanged; otherwise
        both are (re)computed over self.imgs and cached on the instance.
        """
        if mean is not None and std is not None:
            return mean, std
        # get normalization statistics
        if verbose:
            logging.info("Calculating normalizing statistics...")
        self.mean = np.mean(self.imgs)
        self.std = np.std(self.imgs)
        if verbose:
            logging.info("Calculation done for {:s} {:s} set.".format(self.name,
                self.split))
        return self.mean, self.std
    def normalize(self, mean, std, verbose=True):
        """Normalize self.imgs in place with the given statistics."""
        if verbose:
            logging.info("Normalizing images...")
        # Bug fix: divide by the *std* argument. The original divided by
        # self.std, silently ignoring the std value passed by the caller.
        self.imgs = (self.imgs - mean) / std
        if verbose:
            logging.info("Normalization done for {:s} {:s} set.".format(self.name,
                self.split))
        return
    def visualize(self, count=3):
        """Show *count* random grids of images with their labels."""
        for idx in range(1, count+1):
            visualize_grid(imgs = self.imgs, labels=self.labels, title=self.split + str(idx))
        return
    def write_preds(self, preds):
        """Write predictions as a Kaggle-style csv plus a .npy dump in root."""
        input_file = os.path.join(self.root, "template.csv")
        assert os.path.exists(input_file), "Please download the submission template."
        output_file = os.path.join(self.root, "submission.csv")
        save_csv(input_file, output_file, self.names, preds)
        np.save(os.path.join(self.root, 'submission.npy'), {'path':self.names, 'pred':preds})
        return
def save_csv(input_file, output_file, test_list, test_labels):
    """
    Save a csv file for testing prediction which can be submitted to a Kaggle
    competition.

    input_file: path to the downloaded submission template (header row plus
        one row per image; first column is the image name).
    output_file: path where the filled-in submission is written.
    test_list: image names, aligned index-for-index with test_labels.
    test_labels: predicted label for each name in test_list.
    Raises AssertionError when a template row has no matching prediction.
    """
    assert len(test_list) == len(test_labels)
    # Build a name -> label lookup once (setdefault keeps the first occurrence,
    # matching list.index) instead of an O(n) list search per template row.
    label_by_name = {}
    for image_name, label in zip(test_list, test_labels):
        label_by_name.setdefault(image_name, label)
    with open(input_file) as csv_file:
        with open(output_file, mode='w') as out_csv:
            csv_reader = csv.reader(csv_file, delimiter=',')
            csv_writer = csv.writer(out_csv)
            line_count = 0
            for row in csv_reader:
                if line_count == 0:
                    # copy the header row through unchanged
                    csv_writer.writerow(row)
                    line_count += 1
                else:
                    image_name = row[0]
                    assert image_name in label_by_name, 'Missing prediction!'
                    label = label_by_name[image_name]
                    csv_writer.writerow([image_name, str(label)])
                    line_count += 1
    logging.info('Saved prediction. Processed {:d} lines.'.format(line_count))
    return
def visualize_grid(imgs, nrows=5, ncols=5, labels = None, title=""):
    """
    Show an nrows x ncols grid of randomly chosen images.

    imgs: collection of images that supports indexing.
    labels: optional per-image annotations; shown as each cell's title.
    title: figure-level title.
    """
    import matplotlib.pyplot as plt
    assert nrows*ncols <= len(imgs), 'Not enough images'
    # chosen indices (no repeats)
    cis = np.random.choice(len(imgs), nrows*ncols, replace=False)
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
    fig.suptitle(title)
    for row_idx in range(nrows):
        for col_idx in range(ncols):
            idx = row_idx*ncols + col_idx
            axes[row_idx][col_idx].imshow(imgs[cis[idx]])
            axes[row_idx][col_idx].set_axis_off()
            # Bug fix: titles must be set per cell *before* plt.show(); the
            # original set a title only on the last cell, after the window
            # was already displayed.
            if labels is not None:
                axes[row_idx][col_idx].set_title(str(labels[cis[idx]]))
    plt.show()
    return
def load_data(folders):
    """Load every image file (matching image_extension) under *folders*.

    The label comes from the folder name: contains 'good' -> 0, contains
    'bad' -> 1, anything else (test data) -> -1.
    Returns (paths, imgs, labels) as numpy arrays; pixels scaled to [0, 1].
    """
    lgood = 0
    lbad = 1
    ltest = -1
    paths = []
    imgs = []
    labels = []
    for folder in folders:
        if 'good' in folder:
            label = lgood
        elif 'bad' in folder:
            label = lbad
        else:
            label = ltest
        for filename in os.listdir(folder):
            filepath = os.path.join(folder, filename)
            if filename.endswith(image_extension):
                paths.append(filepath)
                img = imageio.imread(filepath)
                # scale 8-bit pixel values into [0, 1]
                img = img.astype('float32') / 255.
                imgs.append(img)
                labels.append(label)
    return np.array(paths), np.array(imgs), np.array(labels)
def get_datasets(opt, visualize=False):
    """Build train/eval/test NexperiaDataset objects from opt.nexperia_root.

    Labeled images are shuffled and split train/eval by opt.train_ratio;
    the eval and test sets reuse the training set's mean/std statistics.
    """
    root = opt.nexperia_root
    train_ratio = opt.train_ratio
    dirs = {}
    dirs['good'] = os.path.join(root, 'train/good_0')
    dirs['bad'] = os.path.join(root, 'train/bad_1')
    dirs['test'] = os.path.join(root, 'test/all_tests')
    train_paths, train_imgs, train_lbs = load_data([dirs['good'], dirs['bad']])
    test_paths, test_imgs, test_lbs = load_data([dirs['test']])
    # split the labeled data into training and evaluation set
    ntu = num_train_used = int(len(train_paths)*train_ratio)
    # random permutation of all labeled indices
    cis = chosen_indices = np.random.choice(len(train_paths), len(train_paths), replace=False)
    used_paths, used_imgs, used_lbs = train_paths[cis[:ntu]], train_imgs[cis[:ntu]], train_lbs[cis[:ntu]]
    eval_paths, eval_imgs, eval_lbs = train_paths[cis[ntu:]], train_imgs[cis[ntu:]], train_lbs[cis[ntu:]]
    if opt.train_all:
        # NOTE(review): with train_all the eval split overlaps the training data.
        train_set = NexperiaDataset(root, train_paths, train_imgs, train_lbs, 'train')
    else:
        train_set = NexperiaDataset(root, used_paths, used_imgs, used_lbs, 'train')
    eval_set = NexperiaDataset(root, eval_paths, eval_imgs, eval_lbs, 'eval',
                               mean=train_set.mean, std=train_set.std)
    test_set = NexperiaDataset(root, test_paths, test_imgs, test_lbs, 'test',
                               mean=train_set.mean, std=train_set.std)
    if visualize:
        # visualize the images with annotation
        train_set.visualize()
        eval_set.visualize()
return {'train':train_set, 'eval':eval_set, 'test':test_set} | true |
7ed64a6522450d6a5c550448a07ede73a424a94d | Python | amartshah/ImageRetrieval | /script.py | UTF-8 | 4,653 | 2.875 | 3 | [] | no_license | ###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
##the following function was taken from the above open source code
def QuadTree(tx, ty, level):
    """Convert TMS tile coordinates to a Microsoft (Bing) quadkey string.

    One digit per zoom level, most significant bit first: bit of tx
    contributes 1, bit of ty contributes 2.
    """
    digits = []
    for shift in range(level - 1, -1, -1):
        cell = 0
        if tx & (1 << shift):
            cell += 1
        if ty & (1 << shift):
            cell += 2
        digits.append(str(cell))
    return "".join(digits)
################## the following functions are no longer from the copyrighted project
import math
import sys
import urllib, cStringIO
#ensure latitude is in range of globe
def latBoundsCheck(latvalue):
    """Clamp a latitude into the Web-Mercator valid range."""
    lat_min, lat_max = -85.05112878, 85.05112878
    if latvalue < lat_min:
        return lat_min
    if latvalue > lat_max:
        return lat_max
    return latvalue
#ensure longitude is in range of globe
def lonBoundsCheck(lonvalue):
    """Clamp a longitude into [-180, 180]."""
    clamped = max(lonvalue, -180)
    return min(clamped, 180)
def boundsCheck(value, min_check, max_check):
    """Clamp *value* into the inclusive range [min_check, max_check]."""
    clamped = max(value, min_check)
    return min(clamped, max_check)
#Convert Lat and Lon to pixels
def LatLonToPixels(lat, lon, level):
    "Converts lat/lon to Web-Mercator (Bing tile system) pixel coordinates at the given zoom level"
    lat = latBoundsCheck(lat)
    lon = lonBoundsCheck(lon)
    sinlatitude = math.sin(lat * math.pi / 180)
    # x is linear in longitude; y follows the Mercator projection
    px = ((180 + lon) / 360)
    py = 0.5 - math.log((1+sinlatitude)/(1-sinlatitude)) / (4*math.pi)
    # world size in pixels at this zoom level
    map_scale = 256 * 2**level
    # scale the [0, 1] fractions up and clamp to the valid pixel range
    px_final = boundsCheck(px * map_scale + 0.5, 0, map_scale - 1)
    py_final = boundsCheck(py * map_scale + 0.5, 0, map_scale - 1)
    return px_final, py_final
#Convert Pixels to Tile Coordinates
def PixelsToTile(px, py):
    """Return (tx, ty) of the 256-pixel tile covering the given pixel point."""
    tile_size = 256.0
    col = int(math.floor(px / tile_size))
    row = int(math.floor(py / tile_size))
    return col, row
#Calculate the center of inputted points
def centers(lat, lon, lat1, lon1):
    """Return the midpoint (lat, lon) of two corner coordinates.

    Inputs may be strings or numbers; they are coerced to float first.
    """
    lats = (float(lat), float(lat1))
    lons = (float(lon), float(lon1))
    return sum(lats) / 2.0, sum(lons) / 2.0
# Read the two bounding-box corners from the command line:
#   script.py LAT LON LAT1 LON1
lat = float(sys.argv[1])
lon = float(sys.argv[2])
lat1 = float(sys.argv[3])
lon1 = float(sys.argv[4])
def BingImageRetriever(center_lat, center_lon, level):
    """Download the Bing aerial tile covering (lat, lon) at *level* into
    tile.jpg and return (request URL, quadkey)."""
    #converts to pixels
    pix_x, pix_y = LatLonToPixels(center_lat, center_lon, level)
    #converts to tile coords
    tile_x, tile_y = PixelsToTile(pix_x, pix_y)
    #query quadkey corresponding tile coords
    quadkey = QuadTree(tile_x, tile_y, level)
    #url for specific quadkey
    URL = "http://h0.ortho.tiles.virtualearth.net/tiles/h" + quadkey + ".jpeg?g=131"
    # Python 2 urllib: fetch the tile to a fixed local file name
    urllib.urlretrieve(URL, "tile.jpg")
    return URL, quadkey
#calculates center of bounding box
center_lat, center_lon = centers(lat, lon, lat1, lon1)
# Walk the zoom levels from finest (23) to coarsest; keep the first level
# whose downloaded tile differs from the known "error" placeholder image.
for level in xrange(23, 0, -1):
    URL, final_quadkey = BingImageRetriever(center_lat, center_lon, level)
    # NOTE(review): files opened without close(); acceptable for a one-shot
    # Python 2 script but leaks handles inside the loop.
    if open("error.jpg","rb").read() == open("tile.jpg","rb").read():
        pass
    else:
        print "Quadkey: " + str(final_quadkey)
        break
| true |
928b7ad5aba18ecd94f0fe8572c0a9d319069651 | Python | LogNice/lognice | /evaluator/app.py | UTF-8 | 1,826 | 2.546875 | 3 | [] | no_license | import os
import json
import timeit
import socketio
def get_error_response(message):
    """Build the payload reported to the server when evaluation fails."""
    return {'status': 'error', 'message': message}
def get_success_response(result):
    """Build the payload reported to the server on successful evaluation."""
    return {'status': 'success', 'result': result}
def notify(data):
    """Send an 'evaluated' event with *data* to the Socket.IO server named by
    the SOCKETIO_URL environment variable, then wait until acknowledged."""
    sio = socketio.Client()
    def on_done():
        # server acknowledged the emit -> tear the connection down
        sio.disconnect()
        sio.close()
    @sio.event
    def connect():
        # emit as soon as the connection is established
        sio.emit('evaluated', {
            'session_id': os.environ.get('SESSION_ID'),
            'username': os.environ.get('USERNAME'),
            'data': data
        }, callback=on_done)
        sio.connect(os.environ.get('SOCKETIO_URL'))
    sio.wait()
def execute():
    """Run the user's Solution against the Validator's tests, time it, and
    report the result via notify().

    Timing: the whole validation pass is executed 100 times by timeit, so the
    reported time is the mean per pass in seconds (converted to microseconds).
    """
    solution = Solution()
    validator = Validator()
    passed_count = 0
    blocker = None
    def to_measure():
        # nonlocal lets timeit's repeated calls update the outer results;
        # the report therefore reflects the final iteration.
        nonlocal passed_count
        nonlocal blocker
        passed_count = 0
        for test in validator.tests():
            # NOTE(review): 'input' shadows the builtin inside this loop.
            input = test.get('input', {})
            output = test.get('output', None)
            answer = solution.solve(**input)
            if answer != output:
                # record the first failing test, annotated with both values
                blocker = test
                blocker['output'] = answer
                blocker['expected'] = output
                break
            passed_count += 1
    iteration = 100
    time = timeit.timeit(to_measure, number=iteration) / iteration
    report = {
        'passed': passed_count,
        'blocker': blocker
    }
    if not blocker:
        # only report timing for fully passing submissions
        report['time'] = {
            'value': int(time * 1000000),
            'unit': 'us'
        }
    notify(get_success_response(report))
if __name__ == '__main__':
    try:
        # Import the submitted code lazily so that import-time errors are
        # also caught and reported below.
        from input.solution import Solution
        from input.validator import Validator
        execute()
    # BaseException is deliberately broad: even SystemExit/KeyboardInterrupt
    # in user code get reported back instead of silently ending the container.
    except BaseException as error:
        notify(get_error_response(str(error)))
| true |
659d35de3e52c42a47139d11fb38fd45395b98f7 | Python | Araknor99/Python-Rock-Paper-Scissor | /SSP_Oberfläche_fertig.py | UTF-8 | 2,867 | 3.25 | 3 | [
"MIT"
] | permissive | from random import*
# Move index (as a string) -> German display name: 0=scissors, 1=paper, 2=rock.
# (Redefined identically again after ssp() below.)
v={"1":"Papier",'2': 'Stein',"0":"schere\n"}
# Button callbacks: each plays one round with the player's fixed move
# (Stein = rock = 2, Schere = scissors = 0, Papier = paper = 1).
def Stein():
    Vg(2)
def Schere():
    Vg(0)
def Papier():
    Vg(1)
def ssp():
    # ssp = "Schere, Stein, Papier" (rock-paper-scissors): open the game
    # window with one button per move plus an Exit button.
    ssp=tkinter.Tk()
    ssp.frame =tkinter.Frame(ssp, relief=RIDGE, borderwidth=30)
    ssp.frame.pack(fill=BOTH,expand=1)
    ssp.label =tkinter.Label(ssp.frame, text = "Schere, Stein, Papier Drücke Etwas!")
    ssp.label.pack(fill=X, expand=1)
    # ssp.button is rebound for each button; only the last reference survives,
    # which is fine because the widgets stay alive via pack().
    ssp.button = tkinter.Button(ssp.frame,text="Schere",command=Schere)
    ssp.button.pack(side=BOTTOM)
    ssp.button = tkinter.Button(ssp.frame,text="Stein",command=Stein)
    ssp.button.pack(side=BOTTOM)
    ssp.button = tkinter.Button(ssp.frame,text="Papier",command=Papier)
    ssp.button.pack(side=BOTTOM)
    ssp.button = tkinter.Button(ssp.frame,text="Exit",command=ssp.destroy)
    ssp.button.pack(side=BOTTOM)
# Duplicate of the move-name mapping defined at module top.
v={"1":"Papier",'2': 'Stein',"0":"schere\n"}
def Vg(Spieler):
    # Vg = "Vergleichen" (compare): play one round. Spieler is the player's
    # move index (0 = scissors, 1 = paper, 2 = rock, see mapping v).
    Computer=randint(0,2)
    # c is the move that beats the computer's move: with this encoding each
    # move x beats (x+1)%3 (scissors>paper, paper>rock, rock>scissors),
    # so the player wins exactly when Spieler == (Computer-1)%3.
    c=(Computer-1)%3
    Vg =tkinter.Tk()
    Vg.frame = tkinter.Frame(Vg, relief=RIDGE, borderwidth=30)
    Vg.frame.pack(fill=BOTH,expand=1)
    if Spieler==c:
        # player wins ("DU gwinnst" = you win)
        Vg.label=tkinter.Label(Vg.frame, text="DU gwinnst")
        Vg.label.pack(fill=X,expand=1)
    elif Spieler==Computer:
        # draw ("KEINER gewinnt" = nobody wins)
        Vg.label=tkinter.Label(Vg.frame, text="KEINER gewinnt")
        Vg.label.pack(fill=X,expand=1)
    else:
        # computer wins ("GEGNER gewinnt" = opponent wins)
        Vg.label=tkinter.Label(Vg.frame, text="GEGNER gewinnt")
        Vg.label.pack(fill=X,expand=1)
    # show both chosen moves ("you took X, the opponent Y")
    Vg.label=tkinter.Label(Vg.frame, text="du hast {} genommen der gegner {} ".format(v[str(Spieler)],v[str(Computer)]))
    Vg.label.pack(fill=X,expand=1)
    Vg.button = tkinter.Button(Vg.frame,text="Zum Menü",command=Vg.destroy)
    Vg.button.pack(side=BOTTOM)
def Hel():
    # Hel = "Hilfe" (help): open a window with usage instructions (German).
    Hel = tkinter.Tk()
    frame = tkinter.Frame(Hel, relief=RIDGE, borderwidth=30)
    frame.pack(fill=BOTH, expand=1)
    label=tkinter.Label(frame, text='''Hallo! Dies ist das Hilfe Menü, hier können sie alles über das Menü
erfahren. Drücken sie Hilfe für Hilfe, drücken sie ssp um Stein,
Schere, Papier zu spielen und Exit um Das Menü zu schließen. Wenn
sie nicht lesen können fragen sie um Hilfe.:-)''')
    label.pack(fill=X, expand=1)
    # "zurück" = back: closes the help window
    button = tkinter.Button(frame,text="zurück",command=Hel.destroy)
    button.pack(side=BOTTOM)
# tkinter is imported here, after the callbacks above are defined; the
# functions only look these names up when they are actually called.
import tkinter
from tkinter.constants import *
# Main menu window: Exit, the game launcher ("ssp") and a help button.
tk = tkinter.Tk()
tk.frame = tkinter.Frame(tk, relief=RIDGE, borderwidth=30)
tk.frame.pack(fill=BOTH,expand=1)
tk.label = tkinter.Label(tk.frame, text="Menü(V0.5)")
tk.label.pack(fill=X, expand=1)
tk.button = tkinter.Button(tk.frame,text="Exit",command=tk.destroy)
tk.button.pack(side=RIGHT)
tk.button = tkinter.Button(tk.frame,text="ssp",command=ssp)
tk.button.pack(side=LEFT)
tk.button = tkinter.Button(tk.frame,text="Hilfe",command=Hel)
tk.button.pack(side=BOTTOM)
tk.mainloop()
| true |
67d4d0ea9e9b471cdb0b94bbb8ce02968a3260f1 | Python | mattwilliams06/RealPython | /DataAnalysis/CSVTutorial/linkedin.py | UTF-8 | 455 | 3.125 | 3 | [] | no_license | def save_dict(dict_to_save, path):
    # Save *dict_to_save* as <path>/test_dict.pickle; non-dicts are rejected
    # with a printed message (nothing is raised or written).
    import pickle
    import os
    # The output file name is fixed; *path* selects only the directory.
    name = 'test_dict.pickle'
    if not isinstance(dict_to_save, dict):
        print('Function takes dictionaries only.')
    else:
        with open(os.path.join(path,name), 'wb') as f:
            pickle.dump(dict_to_save,f)
        print(f'Pickle completed at {path}')
def load_dict(filepath):
    """Deserialize and return the object pickled at *filepath*."""
    import pickle
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
| true |
04b5e62f0f18b8528b9f44277c26655fd46861a3 | Python | ge-roy/NullProj | /BlackJackLite.py | UTF-8 | 5,788 | 3.546875 | 4 | [] | no_license | import random
class Card():
    """A single playing card: a suite, a display name and a blackjack rank."""
    def __init__(self, suite, name, rank):
        self.suite = suite  # e.g. 'hearts'
        self.name = name    # e.g. 'A', '10', 'K'
        self.rank = rank    # blackjack point value, e.g. 11 for an ace
    def __repr__(self):
        # Debug-friendly representation; not relied on by the game logic.
        return 'Card({!r}, {!r}, {!r})'.format(self.suite, self.name, self.rank)
class Deck():
    """A simple ordered container of Card objects."""
    def __init__(self):
        self.cards = []  # cards currently in the deck
    def addcard(self, card):
        """Append *card* to the deck."""
        self.cards.append(card)
    def delete_card(self, card):
        """Remove the first occurrence of *card*; raises ValueError if absent."""
        self.cards.remove(card)
    def __len__(self):
        # convenience: len(deck) == number of cards remaining
        return len(self.cards)
class Dealer():
    """Holds a hand of cards (a Deck-like object) and a money balance."""
    def __init__(self, deck, bank):
        self.deck_on_hand = deck
        self.bank = bank
    def change_bank(self, m_operator, value):
        """Add ('+') or subtract ('-') *value* from the bank.

        Any other operator leaves the balance untouched.
        """
        deltas = {'+': value, '-': -value}
        self.bank += deltas.get(m_operator, 0)
    def collected_points(self):
        """Return (total, total - 10): the raw hand value and the variant
        where one ace counts as 1 instead of 11."""
        total = sum(card.rank for card in self.deck_on_hand.cards)
        return (total, total - 10)
class Player(Dealer):
    """A human player: a Dealer plus a first-turn flag."""
    def __init__(self, deck, bank):
        super().__init__(deck, bank)
        self.first_turn = True
def put_cards_in_deck():
    """Rebuild the module-level game_deck as a fresh 52-card deck
    (4 suits x 13 values from the card_suits/card_values globals)."""
    game_deck.cards.clear()
    for card_t in card_suits:
        for card_v in card_values.items():
            # card_v is a (name, rank) pair
            c = Card(card_t, card_v[0], card_v[1])
            game_deck.addcard(c)
def get_card(player_cards, show=True):
    """Move one random card from the global game_deck into *player_cards*.

    When *show* is true, print the receiving hand afterwards.
    """
    random_card = random.choice(game_deck.cards)
    player_cards.append(random_card)
    game_deck.delete_card(random_card)
    if show:
        show_cards(player_cards)
def ask_user(message, answers):
    """Prompt with *message* until the user types one of *answers*
    (case-insensitive); return the reply upper-cased."""
    valid = [choice.upper() for choice in answers]
    while True:
        reply = input(message).upper()
        if reply in valid:
            return reply
def show_cards(cards):
    """Print one 'name <=> suite' line for every card in *cards*."""
    for card in cards:
        print('{0} <=> {1}'.format(card.name, card.suite))
def user_turn():
    """One decision step for the player; returns flags for gameplay().

    Flags: next_card (player drew a card), dealer_turn (player stands),
    play_again (round ended and the player wants another).
    """
    next_card = False
    play_again = False
    dealer_turn = False  # NOTE(review): shadows the dealer_turn() function name
    player_points = _player.collected_points()
    player_cards = _player.deck_on_hand.cards
    # win: the raw total or the ace-as-1 total is exactly 21
    if player_points[0] == 21 or player_points[1] == 21:
        pot_win = pot_size * 0.01
        _dealer.change_bank('-', pot_win)
        _player.change_bank('+', pot_win)
        print('P:{}$ D:{}$'.format(_player.bank, _dealer.bank))
        print("You've won the Game and earn {}$".format(_player.bank))
        answer = ask_user('Would you like to paly again? ', ('y', 'n'))
        if answer == 'Y':
            play_again = True
    # bust: either total exceeds 21
    elif player_points[0] > 21 or player_points[1] > 21:
        pot_win = pot_size * 0.01
        _dealer.change_bank('+', pot_win)
        _player.change_bank('-', pot_win)
        print('P:{}$ D:{}$'.format(_player.bank, _dealer.bank))
        print("You've lost the Game!")
        answer = ask_user('Would you like to paly again? ', ('y', 'n'))
        if answer == 'Y':
            play_again = True
    # NOTE(review): unreachable -- the branch above already fires whenever
    # player_points[0] > 21, so this ace-as-1 offer can never trigger.
    elif player_points[0] > 21 and player_points[1] < 21:
        answer = ask_user('Would you like to receive a card (your Ace now going to have a rank equal 1)? ', ('y', 'n'))
        if answer == 'Y':
            next_card = True
    else:
        answer = ask_user('Would you like to receive a card? ', ('y', 'n'))
        if answer == 'Y':
            next_card = True
        else:
            # player stands -> hand control to the dealer
            dealer_turn = True
    if next_card:
        get_card(player_cards)
    return {'next_card': next_card,
            'dealer_turn': dealer_turn,
            'play_again': play_again}
def dealer_turn():
    """Dealer draws until the hand is exactly 21 (dealer wins) or busts.

    Returns the same flag dict as user_turn(); only play_again can end up
    True here, so gameplay() either restarts a round or exits afterwards.
    """
    next_card = False
    play_again = False
    dealer_turn = False  # kept only for a uniform return dict; never set here
    dealer_cards = _dealer.deck_on_hand.cards
    while True:
        get_card(dealer_cards, False)  # draw without printing the hand
        dealer_points = _dealer.collected_points()
        # dealer wins on an exact 21 (raw or ace-as-1 total)
        if dealer_points[0] == 21 or dealer_points[1] == 21:
            pot_win = pot_size * 0.01
            _dealer.change_bank('+', pot_win)
            _player.change_bank('-', pot_win)
            print('P:{}$ D:{}$'.format(_player.bank, _dealer.bank))
            print("Dealer has won the Game and earn {}$".format(_dealer.bank))
            answer = ask_user('Would you like to paly again? ', ('y', 'n'))
            if answer == 'Y':
                play_again = True
            break
        # dealer busts on any total above 21
        elif dealer_points[0] > 21 or dealer_points[1] > 21:
            pot_win = pot_size * 0.01
            _dealer.change_bank('-', pot_win)
            _player.change_bank('+', pot_win)
            print('P:{}$ D:{}$'.format(_player.bank, _dealer.bank))
            print("Dealer has lost the Game and earn {}$".format(_dealer.bank))
            answer = ask_user('Would you like to paly again? ', ('y', 'n'))
            if answer == 'Y':
                play_again = True
            break
    return {'next_card': next_card,
            'dealer_turn': dealer_turn,
            'play_again': play_again}
def prepare_players():
    # Empty both hands (module-level _player/_dealer) before a new round.
    _player.deck_on_hand.cards.clear()
    _dealer.deck_on_hand.cards.clear()
def gameplay():
    """Main loop: alternates player/dealer turns based on the flag dict
    returned by user_turn()/dealer_turn(); exits when no flag is set
    (i.e. the player declined to continue)."""
    # User goes first
    result = user_turn()
    while True:
        # result = user_turn()
        if result['dealer_turn']:
            result = dealer_turn()
        elif result['next_card']:
            result = user_turn()
        elif result['play_again']:
            # reset both hands and rebuild the draw pile for a fresh round
            prepare_players()
            put_cards_in_deck()
            result = user_turn()
        else:
            break
# ### Main Program ### #
card_suits = ('clubs', 'diamonds', 'hearts', 'spades')
# display name -> blackjack rank for each of the 13 card values
card_values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
               '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10, 'A': 11}
pot_size = 100  # each finished round moves pot_size * 0.01 between the banks
game_deck = Deck()    # draw pile
user_deck = Deck()    # the player's hand
dealer_deck = Deck()  # the dealer's hand
put_cards_in_deck()
player_name = input('Type your name here ::: ')
print('Hi', player_name)
player_cash = int(input('How much money do you have? :) ::: '))
print('Ok, let\'s go')
_player = Player(user_deck, player_cash)
_dealer = Dealer(dealer_deck, pot_size)
gameplay()
| true |
014315a7033da25708fe3f50b77b36c36f8d8b76 | Python | jxx/uio-inf1100 | /SIR.py | UTF-8 | 2,451 | 3.28125 | 3 | [] | no_license | """ Runs and tests SIR model """
# Assume ODESolver.py in same folder
from ODESolver import ForwardEuler # Use FE just to be different...
import numpy as np
from scitools.std import plot
class RHS:
    """Right-hand side f(u, t) of the SIR system u' = f(u, t)."""
    def __init__(self, v, dt, T, beta):
        # v: recovery rate, beta: infection rate; dt and T are stored for
        # reference but not used in the derivative itself.
        self.v, self.dt, self.T, self.beta = v, dt, T, beta
    def __call__(self, u, t):
        S, I, R = u
        infections = self.beta*S*I  # new infections per unit time
        recoveries = self.v*I       # new recoveries per unit time
        # dS/dt, dI/dt, dR/dt (persons per dt)
        return [-infections,
                infections - recoveries,
                recoveries]
def test(b):
    """ Runs test on SIR model, with variying beta """
    beta = b #0.0005 or 0.0001 # Infection rate
    v = 0.1 # Prob. of recovery per dt
    S0 = 1500 # Init. No. of suspectibles
    I0 = 1 # Init. No. of infected
    R0 = 0. # Init. No. of resistant
    U0 = [S0, I0, R0] # Initial conditions
    T = 60 # Duration, days
    dt = 0.5 # Time step length in days
    n = T/dt # No. of solve steps
    # NOTE(review): n is a float (120.0); np.linspace(0, 60, n+1) relies on a
    # float count, which newer numpy versions reject -- consider int(n)+1.
    f = RHS(v, dt, T, beta) # Get right hand side of equation
    solver = ForwardEuler(f) # Select ODE solver method
    solver.set_initial_condition(U0)
    time_points = np.linspace(0, 60, n+1)
    u, t = solver.solve(time_points)
    S = u[:,0] # S is all data in array no 0
    I = u[:,1] # I is all data in array no 1
    R = u[:,2] # R is all data in array no 2
    # Plot all three curves in one figure (scitools plot).
    plot(t, S, t, I, t, R,
         xlabel='Days', ylabel='Persons',
         legend=['Suspectibles', 'Infected', 'Resistant'],
         hold=('on'))
if __name__ == '__main__':
    test(0.0005)
    # The string below is the author's observation note, kept verbatim
    # (it is a no-op expression statement, not a docstring).
    """
    Manual testing and looking at the graphs shows that over a 60
    day period, a reduction in infection rate from 0.0005 to 0.0001
    will kill the epedemic growth.
    Thresholds are sensitive, though: 0.0002 and the game is back on.
    """
| true |
596681a7d440310f95de2c5f44889ebc73b2075c | Python | dingdan539/healer | /src/api/father.py | UTF-8 | 377 | 2.546875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import json
class Father(object):
    """Base API helper that formats a standard JSON response envelope."""
    @staticmethod
    def format_out(code=1, title='', description=''):
        """Return a JSON string {code, title, description}.

        NOTE(review): the *title* and *description* arguments are always
        overwritten below, so callers cannot actually customize them.
        """
        if code == 1:
            title = 'succ'
            description = 'success'
        else:
            title = 'fail'
            description = 'failed'
return json.dumps({'code': code, 'title': title, 'description': description}) | true |
394352157a7b6f0a6701c8947623f7c22f25b66c | Python | dan-zheng/no-accent.github.io | /detect_color_test.py | UTF-8 | 653 | 2.953125 | 3 | [] | no_license | # USAGE
# python detect_color.py --image pokemon_games.png
# import the necessary packages
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
# (np is imported but unused in this script)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image")
args = vars(ap.parse_args())
# Haar cascade for mouth detection; mouth.xml must sit next to the script
mouth_cascade = cv2.CascadeClassifier('mouth.xml')
# load the image
img = cv2.imread(args["image"])
# NOTE(review): cv2.imread returns None for a bad path; cvtColor would then fail.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# run the cascade on the grayscale image (scaleFactor=1.3, minNeighbors=5)
faces = mouth_cascade.detectMultiScale(gray, 1.3, 5)
# draw a 2-px (255,0,0) rectangle (blue in OpenCV's BGR order) per detection
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# display until a key is pressed
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
fb30bc56f271c0f9ee31bd6a792c18c3d78b653a | Python | lindenhutchinson/card_generator | /data_gathering/tools/scrapers/trivia_scraper.py | UTF-8 | 1,553 | 2.921875 | 3 | [] | no_license | from .scraper import Scraper
import requests
import random
import numpy as np
import json
from urllib.parse import unquote
import os
class TriviaScraper(Scraper):
    """Scraper that pulls multiple-choice questions from the Open Trivia DB API.

    NOTE(review): write_to_json/print_progress presumably come from the
    Scraper base class (not shown here); __init__ does not call
    super().__init__() -- confirm the base class does not require it.
    """
    def __init__(self, output_file, url_list):
        self.output_file = output_file
        self.data = {
            'game_text': [],
            'answers': []
        }
        self.url_list = url_list
    def get_data_from_api(self, url):
        """Fetch one API page and return a list of {'answers', 'game_text'} dicts."""
        data = []
        trivia_data = json.loads(requests.get(url).content)
        for td in trivia_data['results']:
            # responses are URL-encoded (encode=url3986); decode them
            answer = unquote(td['correct_answer'])
            game_text = unquote(td['question'])
            # skip questions that refer to answer options in their wording
            if 'the following' in game_text:
                continue
            data.append({
                'answers': [answer],
                'game_text': game_text
            })
        return data
    def run(self):
        """Download every URL in url_list and write the combined data out."""
        data = []
        for i, url in enumerate(self.url_list):
            # self.print_progress(i, len(self.url_list))
            data = np.concatenate((data, self.get_data_from_api(url))).tolist()
        self.write_to_json(data)
def run_trivia_scraper():
    """Request an API session token, then scrape 50 pages of 50 easy
    multiple-choice questions into ../data/trivia_data.json."""
    token = json.loads(requests.get(
        'https://opentdb.com/api_token.php?command=request').content)
    # the same session token is reused for all 50 page requests
    urls = [
        f"https://opentdb.com/api.php?amount=50&difficulty=easy&type=multiple&encode=url3986&token={token['token']}" for _ in range(50)]
    scraper = TriviaScraper('../data/trivia_data.json', urls)
    scraper.run()
    print("finished trivia scraper")
if __name__ == "__main__":
    run_trivia_scraper()
| true |
831d6d6ccf336490e3e461295d9dceccb9491055 | Python | VP-Soup/Python-Projects-for-Dynamic-Programming-and-Other-Skills | /profit.py | UTF-8 | 327 | 3.546875 | 4 | [] | no_license | def profit(prices):
    # Best single buy/sell profit: track the cheapest price seen so far and
    # the best (price - minimum-so-far) difference. O(n) time, O(1) space.
    # Raises IndexError on an empty price list.
    max_profit = 0
    minimum = prices[0]
    for i in prices:
        if i < minimum:
            minimum = i
        max_profit = max(max_profit, i - minimum)
    return max_profit
# Quick manual checks: best profit is 4 for both sample series
# (pricetest: buy 1 sell 5; price: buy 5 sell 9 or buy 4 sell 8).
pricetest = [7,4,3,2,1,5]
price = [5, 9, 4, 8, 2]
print(profit(price))
# any finite number compares below float('inf'); printed on the next line
lower = float('inf')
print (1 < lower) | true |
3f92975157ae8acf86237a369cf5ee90b44bd3f6 | Python | MLFall2017/quiz1-npugsley | /Quiz1_corrected.py | UTF-8 | 4,752 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 13:55:54 2017
@author: pugsleno
"""
## Quiz #1 Correct
#______________________________________________________________________________
# Load Libraries/Packages
#______________________________________________________________________________
import numpy as np
import matplotlib.pyplot as plt #plots framework
from numpy import linalg as LA
import pandas as pd
#from sklearn.decomposition import PCA
from matplotlib.mlab import PCA
#PCA.switch_backend('pgf')
#Packages to support plot display in 3d
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
#______________________________________________________________________________
# Calculate the variance of every variable in the data file.
#______________________________________________________________________________
# 1. Load Raw Data
in_file_name = "C:\Users\pugsleno\Desktop\Pessoal Docs\UNC\MachineLearning\Quiz#1\dataset_1.csv"
dataIn = pd.read_csv(in_file_name) # Read the Raw Data
# 2. Define Variables:
x = dataIn['x']
y = dataIn['y']
z = dataIn['z']
# Variance of x, y and z
variance_x = np.var(x)
variance_y = np.var(y)
variance_z = np.var(z)
print 'Variance X: ', variance_x
print 'Variance Y: ', variance_y
print 'Variance Z: ', variance_z
#______________________________________________________________________________
# calculate the covariance between x and y, and between y and z
#______________________________________________________________________________
covariance_xy = np.cov(x,y, rowvar=False)
covariance_yz = np.cov(y,z, rowvar=False)
print 'Covariance XY: \n', covariance_xy
print 'Covariance YZ: \n', covariance_yz
#______________________________________________________________________________
# do PCA of all the data in the given data file using your own PCA module
#______________________________________________________________________________
# Step 1. Mean
mean_X = np.mean(x)
mean_Y = np.mean(y)
mean_Z = np.mean(z)
# Step 2. Mean Centered Data
std_X = x - mean_X
std_Y = y - mean_Y
std_Z = z - mean_Z
# Step 3. Covariance
covariance_xy = np.cov(x,y, rowvar=False)
covariance_yz = np.cov(y,z, rowvar=False)
# Step 4. Eigendecomposition of the covariance matrix
# Between XY
eigenValues_xy, eigenVectors_xy = np.linalg.eig(covariance_xy)
eigValSort= eigenValues_xy.argsort()[::-1]
eigenValues_xy = eigenValues_xy[eigValSort]
eigenVectors_xy = eigenVectors_xy[:,eigValSort]
# Between YZ
eigenValues_yz, eigenVectors_yz = LA.eig(covariance_yz)
eigValSort= eigenValues_yz.argsort()[::-1]
eigenValues_yz = eigenValues_yz[eigValSort]
eigenVectors_yz = eigenVectors_yz[:,eigValSort]
# Step 5. PCA scores
# For X and Y
MeanCentered_xy = np.column_stack((std_X, std_Y)) #stacking X and Y std side by side on a matrix
pcaScores_xy = np.matmul(MeanCentered_xy, eigenVectors_xy)
# For Y and Z
MeanCentered_yz = np.column_stack((std_Y, std_Z)) #stacking X and Y std side by side on a matrix
pcaScores_yz = np.matmul(MeanCentered_yz, eigenVectors_yz)
# Step 6: Collect PCA results
# Between X and Y
RawData_xy = np.column_stack((x,y)) #stacking X and Y std side by side on a matrix
pcaResults_xy = {'data': RawData_xy,
'mean_centered_data': MeanCentered_xy,
'PC_variance': eigenValues_xy,
'loadings': eigenVectors_xy,
'scores': pcaScores_xy}
# Between Y and Z
RawData_yz = np.column_stack((y,z)) #stacking X and Y std side by side on a matrix
pcaResults_yz = {'data': RawData_yz,
'mean_centered_data': MeanCentered_yz,
'PC_variance': eigenValues_yz,
'loadings': eigenVectors_yz,
'scores': pcaScores_yz}
print pcaResults_yz
VarianceExplained = 100 * pcaResults_xy['PC_variance'][0] / sum(pcaResults_xy['PC_variance'])
print "PC1 explains the Variance XY: " + str(round(VarianceExplained, 2,)) + '% variance\n'
VarianceExplained = 100 * pcaResults_yz['PC_variance'][1] / sum(pcaResults_yz['PC_variance'])
print "PC2 explains the Variance YZ: " + str(round(VarianceExplained, 2,)) + '% variance\n'
#____________________________________________________________________________
# 3.2 Use thelinalgmodule innumpyto find the eigenvalues and eigenvectors.
# Are theythe same as your manual solution?
#____________________________________________________________________________
a = np.array([[0,-1],[2,3]], int)
print a
np.linalg.det(a) # finds the determinant of matrix a
print np.linalg.det(a)
# eigenvalues and eigenvetors of a matrix
vals, vecs = np.linalg.eig(a)
print vals
print vecs
| true |
ac29dc56ac31ca98cc38dfbe3831a4484359e020 | Python | in-april/srcScan | /dbService/propertyService.py | UTF-8 | 2,009 | 2.671875 | 3 | [] | no_license | import os
import json
import datetime
from dataIO import dataAccess
from config import config
# Directory that holds intermediate scan-result files.
result_path = config.TMP_FILE_PATH
def insert_masscan_result(filename='result.json'):
    """
    Import the host/port list from a masscan JSON result file into the
    property_services collection.
    :param filename: result file name inside result_path
    :return: None
    """
    services = []
    abspath = os.path.join(result_path, filename)
    with open(abspath, 'r') as masscan_file:
        for item in json.loads(masscan_file.read()):
            # one record per item; only the first reported port is kept,
            # stamped with the import time
            service = {'ip': item['ip'], 'port': item['ports'][0]['port'], 'update': datetime.datetime.now()}
            services.append(service)
    dataAccess.insert_items(services, 'property_services')
def insert_dns_result(filename):
    """
    Import subdomain data (url -> resolved host list) from a JSON file into
    the property_domain collection.
    :param filename: path to the dns result file
    :return: None
    """
    with open(filename, 'r') as dns_file:
        for item in json.loads(dns_file.read()):
            dns = {'url': item['url'], 'hosts': item['hosts'], 'update': datetime.datetime.now()}
            print(dns)
            # de-duplicated on the 'url' field
            dataAccess.insert_item_no_repeat(dns, 'property_domain', 'url')
def generate_property_hosts():
    """
    Build/refresh the hosts table: one record per host with the list of
    domains that resolve to it, plus an update timestamp.
    """
    # Pass 1: make sure every host mentioned by any domain record exists
    # in property_hosts (de-duplicated on 'host').
    for result in dataAccess.get_items('property_domain'):
        hosts = result['hosts']
        for host in hosts:
            item = {'host': host, 'update': datetime.datetime.now()}
            dataAccess.insert_item_no_repeat(item, 'property_hosts', 'host')
    # Pass 2: merge each host's stored domain list with every domain that
    # currently references it (mongo-style $in query on the hosts field).
    for result in dataAccess.get_items('property_hosts'):
        host = result['host']
        domains = result.get('domains')
        if domains is None:
            domains = []
        oldset = set(domains)
        for item in dataAccess.get_items('property_domain', {'hosts': {'$in': [host]}}):
            oldset.add(item['url'])
        new_domains = list(oldset)
        result['domains'] = new_domains
        dataAccess.insert_or_update(result, 'property_hosts', 'host')
# Script entry point: rebuild the hosts table from the domain records.
if __name__ == '__main__':
    generate_property_hosts()
| true |
b414e28e3fa69e50d5299710d050f9e824a6673f | Python | HJMengx/Style_Transfer | /st_pytroch.py | UTF-8 | 6,997 | 2.828125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
class Content_loss(nn.Module):
    """Transparent layer that records the content loss.

    forward() stores the MSE between its input and a fixed content target
    on self.loss, then passes the input through unchanged.
    """
    def __init__(self,content_feature):
        super(Content_loss, self).__init__()
        # detach: the target is a constant; no gradients flow into it
        self.content_feature = content_feature.detach()
    def forward(self, input):
        self.loss = F.mse_loss(input, self.content_feature)
        return input
class Style_loss(nn.Module):
    """Transparent layer that records the style loss.

    The style target is the Gram matrix of the style feature map; forward()
    stores the MSE between the input's Gram matrix and that target on
    self.loss, then passes the input through unchanged.
    """
    def __init__(self,style_feature):
        super(Style_loss, self).__init__()
        # detached so no gradients flow back into the style target
        self.style_feature = self.matrix(style_feature).detach()
    def forward(self, input):
        gram = self.matrix(input)
        self.loss = F.mse_loss(gram, self.style_feature)
        return input
    def matrix(self,input=None):
        """Normalized Gram matrix of a (batch, channels, height, width) map.

        Returns None when called without an input.
        """
        if input is None:
            return None
        batch, channels, height, width = input.size()
        flat = input.view(batch * channels, height * width)
        gram = torch.mm(flat, flat.t())
        # normalize by the number of elements in the feature map
        return gram.div(batch * channels * height * width)
# Pre-processing: normalization layer using the ImageNet channel means and
# standard deviations that the pre-trained model expects.
class Pre_processing(nn.Module):
    """Normalize an image tensor with per-channel mean/std (ImageNet defaults)."""

    def __init__(self, mean=torch.tensor([0.485, 0.456, 0.406]),
                 std=torch.tensor([0.229, 0.224, 0.225])):
        super().__init__()
        # Reshape to (C, 1, 1) so the stats broadcast over [B x C x H x W].
        self.mean = mean.view(-1, 1, 1)
        self.std = std.view(-1, 1, 1)

    def forward(self, input):
        return (input - self.mean) / self.std
# Run on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Feature extractor: the convolutional part of a pre-trained VGG-19, frozen in eval mode.
cnn = models.vgg19(pretrained=True).features.to(device).eval()
# Layer names (assigned while rebuilding the network below) at which the
# content and style losses are attached.
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    """Rebuild `cnn` as an nn.Sequential with Content_loss/Style_loss modules
    spliced in after the named layers, trimmed after the last loss module.

    Returns (model, style_losses, content_losses); the loss lists give the
    optimizer loop direct access to the inserted modules.
    """
    # Work on a copy so the caller's network is left untouched.
    cnn = copy.deepcopy(cnn)
    # normalization module (ImageNet statistics) placed first in the pipeline
    normalization = Pre_processing(normalization_mean, normalization_std).to(device)
    # just in order to have an iterable access to or list of content/syle
    # losses
    content_losses = []
    style_losses = []
    # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
    # to put in modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)
    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
        model.add_module(name, layer)
        if name in content_layers:
            # add content loss: target = content image's features at this depth
            target = model(content_img).detach()
            content_loss = Content_loss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)
        if name in style_layers:
            # add style loss: target = style image's features at this depth
            target_feature = model(style_img).detach()
            style_loss = Style_loss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)
    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], Content_loss) or isinstance(model[i], Style_loss):
            break
    model = model[:(i + 1)]
    return model, style_losses, content_losses
def get_input_optimizer(input_img):
    """Build an LBFGS optimizer whose only trainable parameter is the image
    itself (requires_grad is switched on as a side effect)."""
    input_img.requires_grad_()
    return optim.LBFGS([input_img])
def run_style_transfer(cnn, normalization_mean, normalization_std,
                       content_img, style_img, input_img, num_steps=200,
                       style_weight=1000000, content_weight=1):
    """Optimize input_img with LBFGS so that it matches content_img's content
    and style_img's style; returns the image clamped to [0, 1]."""
    print('Building the style transfer model..')
    model, style_losses, content_losses = get_style_model_and_losses(
        cnn, normalization_mean, normalization_std, style_img, content_img)
    optimizer = get_input_optimizer(input_img)

    print('Optimizing..')
    run = [0]  # mutable cell so the closure can count LBFGS evaluations

    def closure():
        # Keep pixel values in a valid range before every evaluation.
        input_img.data.clamp_(0, 1)
        optimizer.zero_grad()
        # Forward pass populates .loss on every inserted loss module.
        model(input_img)
        style_score = 0
        content_score = 0
        style_score += sum(sl.loss for sl in style_losses)
        content_score += sum(cl.loss for cl in content_losses)
        style_score = style_score * style_weight
        content_score = content_score * content_weight
        (style_score + content_score).backward()
        run[0] += 1
        if run[0] % 50 == 0:
            print("run {}:".format(run))
            print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                style_score.item(), content_score.item()))
            print()
        return style_score + content_score

    while run[0] <= num_steps:
        optimizer.step(closure)

    # a last correction...
    input_img.data.clamp_(0, 1)
    return input_img
# desired size of the output image (smaller when no GPU is available)
imsize = 512 if torch.cuda.is_available() else 128  # use small size if no gpu
# Pipeline applied to every loaded image: resize then convert to a tensor.
loader = transforms.Compose([
    transforms.Resize(imsize),  # scale imported image
    transforms.ToTensor()])  # transform it into a torch tensor
def image_loader(image_name):
    """Load an image file, resize/tensorize it, and add a batch dimension."""
    pil_image = Image.open(image_name)
    # unsqueeze(0): the network expects a leading (fake) batch dimension
    batched = loader(pil_image).unsqueeze(0)
    return batched.to(device, torch.float)
# Load the style/content images and start from random noise of the same size.
style_img = image_loader("sandstone.jpg")
content_img = image_loader("cat.jpg")
input_image = torch.randn(content_img.data.size(), device=device)
# ImageNet channel means/stds passed explicitly to the transfer loop.
output = run_style_transfer(cnn,torch.tensor([0.485, 0.456, 0.406]),
                            torch.tensor([0.229, 0.224, 0.225]),content_img,style_img,input_image)
| true |
ae54137eed7cbadc11ac2710d058cfbf1663b8d2 | Python | lucool/project | /flask_practise/HelloFlask/url_param.py | UTF-8 | 576 | 2.90625 | 3 | [] | no_license | from flask import Flask
app = Flask(__name__)
@app.route("/hi/<name>/")
def hi_view(name):
    """Render a small green greeting for the name captured from the URL."""
    return "<h3>hi,<span style='color:green'>{}</span></h3>".format(name)
@app.route("/student/<int:age>/<float:score>/")
def student_view(age,score):
    """Show the age incremented by one year and the score with a 5-point bonus."""
    next_age = age + 1
    bonus_score = score + 5
    return "明年" + str(next_age) + "岁;添加5分后的得分:" + str(bonus_score)
@app.route("/greet/<path:info>/")
def greet_view(info):
    """Echo the raw path segment captured by the path converter."""
    return "path转换器作用后,接收到的info是:{}".format(info)
# Start the dev server on all interfaces with the debugger/auto-reload on.
if __name__ == '__main__':
    app.run(host='0.0.0.0',port=8888,debug=True)
4bb84e5cd1d1b03f3e599e86c2580dfd56156caa | Python | RybaSG/NTTC | /Zad2.3/Zad2.3O/receiver.py | UTF-8 | 1,291 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
import scipy.io as sci
import numpy as np
# Demultiplexer check: re-interleave the per-cell bit streams in `y` back
# into full codewords and verify they reproduce the original input `v`.
FRAMES = 100                     # number of frames in the capture
LDPC = 16200                     # bits per codeword
SUBSTREAMS = 8                   # parallel demux substreams
MOD = 8                          # bits per modulation cell
CELLS = int(LDPC / SUBSTREAMS)   # cells per codeword (2025)
matData = sci.loadmat("demux_256_16200_allCR.mat")
inputDataMat = np.array(matData["v"])[0][0]   # reference transmitter input
outputDataMat = np.array(matData["y"])[0][0]  # demuxed cells: (cell, bit, frame)
inputData = np.zeros((LDPC, FRAMES))
# 16200 bits x 100 FRAMES
for frame in range(FRAMES):
    tempLDPC = []
    for cell in range(CELLS):
        tempBits = outputDataMat[cell, :, frame]
        # Undo the bit permutation applied by the demultiplexer; the
        # comments give the mapping "source index : destination index".
        decode = np.array(
            [
                tempBits[7], # 7:0
                tempBits[3], # 3:1
                tempBits[1], # 1:2
                tempBits[5], # 5:3
                tempBits[2], # 2:4
                tempBits[6], # 6:5
                tempBits[4], # 4:6
                tempBits[0] # 0:7
            ]
        )
        for bit in range(MOD):
            tempLDPC.append(decode[bit])
    inputData[:, frame] = np.array(tempLDPC)
# Element-wise comparison against the reference; all() requires every bit
# of every frame to match before the result is saved.
if(inputDataMat == inputData).all():
    print("Data check passed")
    #save to mat
    dictionaryInput = {"v": inputData}
    sci.savemat("input2_3RX.mat", dictionaryInput)
else:
    print("Data check failed")
# TODO:
# * class?
# * improve concatenation
# * join project parts
| true |
b415d6b40285ae971c8e4b657423930b5ee5a85f | Python | shiyu3169/Internet_Protocol_Stack | /Shiyu_Project_Final/HTTP/MyCurl.py | UTF-8 | 2,755 | 2.953125 | 3 | [
"MIT"
] | permissive | from HTTP.ClientMessage import ClientMessage
from HTTP.ServerMessage import ServerMessage
from tcp.TCPSocket import TCPSocket
from HTTP.CookieJar import CookieJar
class MyCurl:
    """Minimal curl-like HTTP client on top of the project's raw TCPSocket.

    Keeps a visited-URL history and a cookie jar that is replayed on every
    request and refreshed from every response.
    """
    def __init__(self, dest):
        # dest: (host, port) tuple used for every connection.
        # NOTE(review): bare except clauses below discard the original error.
        try:
            self.socket = TCPSocket()
        except:
            raise Exception("Cannot initiate socket correctly")
        self.history=set()
        self.cookieJar=CookieJar()
        self.dest = dest
    def request(self,method, URL, headers=None, body=""):
        """Send one HTTP request and return the parsed ServerMessage.

        A fresh TCPSocket is created per request (no keep-alive); current
        cookies are attached via the Cookie header.
        """
        message=ClientMessage(method, URL, headers, body)
        message.headers['Cookie']=str(self.cookieJar)
        self.history.add(URL)
        # New socket for each request; the one from __init__ is replaced.
        try:
            self.socket = TCPSocket()
        except:
            raise Exception("Cannot initiate socket correctly")
        try:
            self.socket.connect(self.dest)
            data = str(message).encode()
            # sendall returns None on success; otherwise reconnect and retry.
            while True:
                sent = self.socket.sendall(data)
                if sent is None:
                    break
                else:
                    self.socket.connect(self.dest)
        except:
            raise Exception("connection failed")
        # Parse the response directly off the socket.
        try:
            response = ServerMessage(self.socket)
        except:
            raise Exception("empty socket")
        self.add_new_cookies(response)
        try:
            self.socket.close()
        except:
            raise Exception("Socket cannot close correctly")
        return response
    def get(self,URL,headers={}):
        """Send a GET request. NOTE(review): mutable default argument."""
        return self.request("GET", URL, headers)
    def post(self,URL,headers={}, body=""):
        """Send a POST request. NOTE(review): mutable default argument."""
        return self.request("POST", URL, headers, body)
    def add_new_cookies(self,message):
        """Merge every cookie from a response message into the jar."""
        jar = message.cookieJar.getAll()
        for key in jar:
            self.cookieJar.add_cookie(key, jar[key])
    def is_visited_or_not(self, link):
        """Return True if this URL was requested before."""
        return link in self.history
    def get_cookie(self, str):
        """Return the stored cookie for the given key.

        NOTE(review): the parameter shadows the builtin `str`.
        """
        return self.cookieJar.get_cookie(str)
# Used to test Curl and lower level HTTP Protocol
if __name__=="__main__":
    #test1: download a 2MB log file over the custom TCP stack
    Destination1=("david.choffnes.com",80)
    test1=MyCurl(Destination1)
    response=test1.get("http://david.choffnes.com/classes/cs4700sp17/2MB.log")
    # NOTE(review): the file handle is never closed.
    file=open("test.log", 'wb')
    file.write(response.body)
    # #test2
    # Destination2=("cs5700sp17.ccs.neu.edu",80)
    # test2=MyCurl(Destination2)
    # response2=test2.get("/accounts/login/?next=/fakebook/")
    # file2=open("test2.html", 'wb')
    # file2.write(response2.body)
f8efa0cd27254d614bac4d37141a9b927a7c2f22 | Python | Cookie-YY/cooshow | /utils/groupby_and_sum.py | UTF-8 | 263 | 3 | 3 | [
"MIT"
] | permissive | import pandas as pd
def groupby_and_sum(data, value):
    """Group `data` (records convertible to a DataFrame) on every column
    except `value`, and return the per-group sums of `value`."""
    frame = pd.DataFrame(data)
    key_columns = list(frame.columns)
    key_columns.remove(value)
    # as_index=False keeps the grouping keys as ordinary columns.
    return frame.groupby(key_columns, as_index=False)[value].sum()
# print(grouped) | true |
9910c1f185c11c5f11b5d31e8dcf6e2954236851 | Python | shivamabrol/Excel-webapp | /app.py | UTF-8 | 2,094 | 3.0625 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
# Page header shown in the main panel (markdown rendered by streamlit).
st.write("""
# Heart Disease Prediction App
On the basis of the given factors this app can predict if you
have heart disease
""")
# All feature widgets live in the sidebar.
st.sidebar.header('User Input Features')
def user_input_features():
    """Collect the 13 heart-disease features from the sidebar widgets and
    return them as a single-row DataFrame (column order matters to the
    pickled model)."""
    age = st.sidebar.slider('Age', 1, 99, 18)
    sex = st.sidebar.selectbox('Sex',('male','female'))
    # Encode sex numerically: male -> 0, female -> 1.
    # BUG FIX: the original assigned the *string* '1' for female while the
    # male branch assigned the int 0, producing a mixed-type column.
    sex = 0 if sex == 'male' else 1
    # NOTE(review): label says "Constrictive pericarditis" but `cp` in this
    # dataset is usually chest-pain type — confirm with the model's training data.
    cp = st.sidebar.selectbox('Constrictive pericarditis',('0', '1', '2', '3'))
    trtbps = st.sidebar.slider('TRTBPS', 90, 200,150)
    chol = st.sidebar.slider('Cholestrol', 120,600,250)
    fbs = st.sidebar.selectbox('FBS', ('0', '1'))
    rest_ecg = st.sidebar.selectbox('Rest ECG', ('0', '1', '2'))
    thalachh = st.sidebar.slider('Thal Acch', 50, 250, 100)
    exng = st.sidebar.selectbox('Exchange ', ('0', '1'))
    # Slider works in integer hundredths; convert to the 0.00-2.00 range.
    oldpeak = st.sidebar.slider('Old Peak', 0, 200, 100)
    oldpeak /= 100
    slp = st.sidebar.selectbox('SLP', ('0', '1', '2'))
    caa = st.sidebar.selectbox('CAA', ('0', '1', '2', '3', '4'))
    thall = st.sidebar.selectbox('Thall', ('0', '1', '2', '3'))
    data = {'age': age,
            'sex': sex,
            'cp': cp,
            'trtbps': trtbps,
            'chol': chol,
            'fbs': fbs,
            'restecg': rest_ecg,
            'thalachh': thalachh,
            'exng': exng,
            'oldpeak': oldpeak,
            'slp': slp,
            'caa': caa,
            'thall': thall
            }
    # index=[0] -> a one-row frame suitable for model.predict().
    features = pd.DataFrame(data, index=[0])
    return features
input_df = user_input_features()
# Reads in saved classification model.
# NOTE(review): pickle.load executes arbitrary code from the file — only safe
# for a trusted local artifact; the file handle is also never closed.
load_clf = pickle.load(open('heardisease.pkl', 'rb'))
# Apply model to make predictions (single-row DataFrame in, array out).
prediction = load_clf.predict(input_df)
st.subheader('Prediction')
if(prediction == 0):
    st.write('You do not have health disease')
else:
    st.write('You might have health disease')
| true |
571485bef085f2d83b9c9c91455f355903534501 | Python | BYOUINZAKA/MCM2020 | /code/Test/第一次训练/A/src/rebuild.py | UTF-8 | 865 | 2.859375 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pandas import read_csv
from scipy import interpolate
def setAx(ax):
    """Apply the shared viewing volume and axis labels used by both 3-D plots."""
    for setter, value in ((ax.set_xlim, (-256, 256)),
                          (ax.set_ylim, (-256, 256)),
                          (ax.set_zlim, (0, 100)),
                          (ax.set_xlabel, 'X'),
                          (ax.set_ylabel, 'Y'),
                          (ax.set_zlabel, 'Z')):
        setter(value)
# Plot 1: the raw waypoints from circles.csv, as a line plus large markers.
ax = Axes3D(plt.figure())
df = read_csv("code\\Test1\\第一次训练\\A\\circles.csv")
ax.plot3D(df['X'], df['Y'], df['Z'], 'gray')
ax.scatter3D(df['X'], df['Y'], df['Z'], cmap='b', s=900, marker='o')
setAx(ax)
# Plot 2: linear interpolation of x and z along a densified y axis.
# Columns 1..3 of the transposed data are taken as (y, x, z).
datas = read_csv("code\\Test1\\第一次训练\\A\\circles.csv").to_numpy().T
y, x, z = datas[1:4]
yy = np.linspace(y[0], y[-1], 2000)
ax = Axes3D(plt.figure())
fyz = interpolate.interp1d(y, z, kind='slinear')
fyx = interpolate.interp1d(y, x, kind='slinear')
ax.scatter3D(fyx(yy), yy, fyz(yy), s=900, c='gray')
setAx(ax)
plt.show()
| true |
34846090101bdccd24bc722bd7ce8b9b97de43b6 | Python | maximkaZZZ/algoritms | /l3.py | UTF-8 | 994 | 3.265625 | 3 | [] | no_license | """L. Лестница
Евлампия выбрала себе классный дом. Правда в нём нет лифтов, хотя этажей много.
Она решила, что ходить по лестницам долго и можно прыгать по ступенькам.
Дом старинный, ступеньки в нём разного размера.
Для каждой ступеньки известно, на какое максимальное количество ступенек вверх с неё можно допрыгнуть.
Нужно помочь Евлампии определить,
сможет ли она добраться с нижней ступеньки на верхнюю.
"""
# Read the number of steps and each step's maximum upward jump, then mark
# every step reachable by a jump from some lower step; print True when no
# step is left unmarked.
with open("input.txt") as f:
    n = int(f.readline())
    stairs = [int(token) for token in f.readline().split()]
way = set(range(1, n))  # steps not yet shown to be reachable
i = 0
while way and i < len(stairs):
    # Remove every step within jumping distance of step i.
    way.difference_update(i + offset for offset in range(1, stairs[i] + 1))
    i += 1
print(way == set())
| true |
085d086171164c8235cce5a87385b2c68ddf0015 | Python | ileanagheo/data-structures | /lab/10_treap/checker.py | UTF-8 | 4,182 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import os
import sys
import json
import subprocess
ROOT = './'

''' Checker's HOW TO
@labs - represents a config dictionary/JSON for every lab.
For each lab number, it can have the following entries:
MUST HAVE:
@name = directory name
@tasks = the number of tasks for the lab
@points = list containing how many points you can get per each task
AUXILIARY:
@taskX_run = Makefile's target to run the taskX program
For each LAB, create tests/in/task[1-X] and tests/ref/task[1-X], X = No. of Tasks
In in/ and ref/ create test[1-Z].in and test[1-Z].ref, Z = No. of Tests
'''
# NOTE(review): labs without a "points" entry will raise KeyError when
# checked — presumably those labs are not meant to be graded yet.
labs = {
    1 : { "name" : "01_recap_pc",
          "tasks" : 1,
          "points": [40] },
    2 : { "name" : "02_simple_linked_list",
          "tasks" : 1,
          "points": [50] },
    3 : { "name" : "03_double_linked_list",
          "tasks" : 1,
          "points": [70] },
    4 : { "name" : "04_hashmap",
          "tasks" : 1 },
    5 : { "name" : "05_stack_queue",
          "tasks" : 1 },
    6 : { "name" : "06_graph_1",
          "tasks" : 1 },
    7 : { "name" : "07_graph_2",
          "tasks" : 1 },
    8 : { "name" : "08_tree",
          "tasks" : 2,
          "points" : [40, 30],
          "task2_run" : "run_task2" },
    9 : { "name" : "09_bst_heap",
          "tasks" : 2,
          "points" : [35, 35],
          "task1_run" : "run_task1",
          "task2_run" : "run_task2" },
    10 : { "name" : "10_treap",
           "tasks" : 4,
           "points": [20, 15, 20, 15]},
    11 : { "name" : "11_avl_rbtree",
           "tasks" : 1 },
    12 : { "name" : "12_recap_sd",
           "tasks" : 1 },
    99 : { "name" : "99_test",
           "tasks" : 2,
           "points" : [30, 70],
           "task2_run" : "run_task2"}
}

if len(sys.argv) < 2:
    print('Usage: ./checker.py <lab_no>')
    sys.exit()

# You may remove the previous if and put below the wished lab number
lab_no = int(sys.argv[1])

current_lab = os.path.join(ROOT, labs[lab_no]['name'], 'skel/')
print(f'Checking {current_lab}...\n')

# Build the lab's skeleton first; abort on failure.
rc = subprocess.call(f'make -sC {current_lab}', shell=True)
if rc != 0:
    sys.stderr.write(f'make failed with status {rc}\n')
    sys.exit(rc)

# Run every task: each test feeds tests/in/... to the task binary and diffs
# the output against tests/ref/...; points are split evenly across tests.
total_score = 0
for task_no in range(1, labs[lab_no]['tasks'] + 1):
    task_score = 0
    # Makefile target: per-task override or the default "run".
    run = 'run'
    if f'task{task_no}_run' in labs[lab_no]:
        run = labs[lab_no][f'task{task_no}_run']

    tests_in = os.path.join(current_lab, f'tests/in/task{task_no}')
    tests_no = len(os.listdir(tests_in))
    tests_ref = os.path.join(current_lab, f'tests/ref/task{task_no}')
    tests_out = os.path.join(current_lab, f'tests/out/task{task_no}')
    rc = subprocess.call(f'mkdir -p {tests_out}', shell=True)
    if rc != 0:
        sys.stderr.write(f'mkdir failed with status {rc}\n')
        sys.exit(rc)

    task_total_score = labs[lab_no]['points'][task_no - 1]
    task_test_score = task_total_score / tests_no

    print('=' * 10 + f' Task {task_no}')
    for test_no in range(1, tests_no + 1):
        cmd = (f'make {run} -sC {current_lab}'
               f' < {os.path.join(tests_in, f"test{test_no}.in")}'
               f' > {os.path.join(tests_out, f"test{test_no}.out")}')
        proc = os.popen(cmd)
        proc.close()  # waits for the command to finish

        # Compare trimmed output with trimmed reference; close both files.
        with open(os.path.join(tests_out, f'test{test_no}.out')) as out_file:
            res_out = out_file.read().strip().strip('\n')
        with open(os.path.join(tests_ref, f'test{test_no}.ref')) as ref_file:
            res_ref = ref_file.read().strip().strip('\n')

        if res_out == res_ref:
            result = 'passed'
            task_score += task_test_score
        else:
            result = 'failed'
        print(f'Test {test_no}' + '.' * 10 + result)

    # Display the score as an int when it is (numerically) whole.
    # BUG FIX: the original tolerance was 1e3 (always true), which truncated
    # every fractional task score before it was added to the total.
    task_score = int(task_score) if abs(task_score - int(task_score)) < 1e-3 else task_score
    print('=' * 3 + f' Task Score: {task_score}/{task_total_score}\n')
    total_score += task_score
print('=' * 5 + f' Total Score: {total_score}/100')

# Clean the build artifacts afterwards.
rc = subprocess.call(f'make clean -sC {current_lab}', shell=True)
if rc != 0:
    sys.stderr.write(f'make clean failed with status {rc}\n')
    sys.exit(rc)
| true |
aeee36ba3d172aed494c696088b1015422ff50a3 | Python | cnorthwood/adventofcode | /2018/17/challenge.py | UTF-8 | 3,546 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env pypy3
import re
import sys
INPUT_RE = re.compile(r'(?P<d1>[xy])=(?P<v1>\d+), (?P<d2>[xy])=(?P<v2s>\d+)..(?P<v2e>\d+)')
def load_clay(filename):
    """Yield every (x, y) clay coordinate described by the scan file.

    Each line is either ``x=N, y=A..B`` (a vertical strip at column N) or
    ``y=N, x=A..B`` (a horizontal strip at row N); anything else raises
    ValueError.
    """
    pattern = re.compile(r'(?P<d1>[xy])=(?P<v1>\d+), (?P<d2>[xy])=(?P<v2s>\d+)..(?P<v2e>\d+)')
    with open(filename) as scan_file:
        scan_lines = scan_file.read().strip().splitlines()
    for scan_line in scan_lines:
        match = pattern.match(scan_line)
        if match.group('d1') == 'x' and match.group('d2') == 'y':
            column = int(match.group('v1'))
            for y in range(int(match.group('v2s')), int(match.group('v2e')) + 1):
                yield column, y
        elif match.group('d1') == 'y' and match.group('d2') == 'x':
            row = int(match.group('v1'))
            for x in range(int(match.group('v2s')), int(match.group('v2e')) + 1):
                yield x, row
        else:
            raise ValueError()
# Clay coordinate sets for the example and the real puzzle input.
TEST = set(load_clay('test.txt'))
BLOCKS = set(load_clay('input.txt'))
def visualise(flowing_water, resting_water, clay, stdout=False):
    """Debug helper: render the map (clay=█, resting=W, flowing=~) covering
    the bounding box of all known cells, to stdout or to debug.txt."""
    min_x = min(x for x, y in flowing_water | resting_water | clay)
    min_y = min(y for x, y in flowing_water | resting_water | clay)
    max_x = max(x for x, y in flowing_water | resting_water | clay)
    max_y = max(y for x, y in flowing_water | resting_water | clay)
    if stdout:
        output = sys.stdout
        # Separator between successive dumps on the same stream.
        output.write('\n\n~~~~~~~~~~~~~~~\n\n')
    else:
        output = open('debug.txt', 'w')
    for y in range(min_y, max_y + 1):
        for x in range(min_x, max_x + 1):
            if (x, y) in clay:
                output.write('█')
            elif (x, y) in resting_water:
                output.write('W')
            elif (x, y) in flowing_water:
                output.write('~')
            else:
                output.write(' ')
        output.write('\n')
    # Only close the handle we opened ourselves (never sys.stdout).
    if not stdout:
        output.close()
def is_contained(x, y, min_x, max_x, clay, water):
    """Return whether (x, y) lies in a basin: supported from below by
    clay/resting water all the way out to a clay wall on both sides."""
    # BUG FIX: range() takes no keyword arguments; the original
    # `range(x, min_x - 1, step=-1)` raised TypeError on the first call.
    for left_x in range(x, min_x - 1, -1):
        # Any gap in the supporting floor means water leaks out here.
        if (left_x, y + 1) not in clay and (left_x, y + 1) not in water:
            return False
        if (left_x, y) in clay:
            # Found the left wall; now look for a matching wall to the right.
            for right_x in range(x, max_x + 1):
                if (right_x, y + 1) not in clay and (right_x, y + 1) not in water:
                    return False
                if (right_x, y) in clay:
                    return True
    # No enclosing pair of walls found (the original fell through returning
    # None, which is equally falsy for the caller's boolean test).
    return False
def simulate(clay):
    """Pour water from (500, 0) and iterate to a fixed point.

    Returns (total water cells within the clay's y-range, resting cells).
    """
    flowing_water = {(500, 0)}
    resting_water = set()
    min_x = min(x for x, y in clay)
    max_x = max(x for x, y in clay)
    min_y = min(y for x, y in clay)
    max_y = max(y for x, y in clay)
    # Repeat until neither water set changes size (fixed point).
    last_size = (0, 0)
    while last_size != (len(flowing_water), len(resting_water)):
        last_size = (len(flowing_water), len(resting_water))
        # sorted() snapshots the set, so mutating it inside the loop is safe;
        # processing top-down (by y) lets water propagate in fewer passes.
        for x, y in sorted(flowing_water, key=lambda pos: pos[1]):
            if (x, y + 1) not in clay and (x, y + 1) not in resting_water:
                # Nothing below: fall, unless already past the lowest clay.
                if y <= max_y:
                    flowing_water.add((x, y + 1))
            else:
                # Supported: settle if enclosed, otherwise spread sideways.
                if is_contained(x, y, min_x, max_x, clay, resting_water):
                    flowing_water.remove((x, y))
                    resting_water.add((x, y))
                if (x - 1, y) not in clay and (x - 1, y) not in resting_water:
                    flowing_water.add((x - 1, y))
                if (x + 1, y) not in clay and (x + 1, y) not in resting_water:
                    flowing_water.add((x + 1, y))
    # Only cells within [min_y, max_y] count toward the puzzle answer.
    return len(list(filter(lambda pos: min_y <= pos[1] <= max_y, flowing_water | resting_water))), len(resting_water)
# assert(simulate(TEST) == (57, 29))
# Part one counts all wet cells; part two counts only resting water.
part_one, part_two = simulate(BLOCKS)
print("Part One: {}".format(part_one))
print("Part Two: {}".format(part_two))
| true |
6c83e972cb87aeefabaa6b192d12654b002d45f6 | Python | Roboy/roboy_smells | /classification/cnn1d_latent.py | UTF-8 | 3,927 | 3 | 3 | [
"BSD-3-Clause"
] | permissive | import os
from tensorflow.keras.layers import Conv1D, Flatten, Add, Dense, Layer, Multiply
from tensorflow.keras import Model
import numpy as np
import classification.triplet_util as tu
import classification.data_loading as dl
"""
This file describes the model used for the 1dCNN with the triplet loss and outputs predictions in a high dimensional
latent space.
"""
def load_data(path: str, num_triplets_train: int = 300, num_triplets_val: int = 300) -> (np.ndarray, np.ndarray):
    """Read measurements from `path` and build triplet training batches.

    :param path: directory containing the measurement files
    :param num_triplets_train: number of triplets in the training set
    :param num_triplets_val: number of triplets in the validation set
    :return: (train_batch, val_batch) ready for the triplet-loss training
    """
    measurements = dl.get_measurements_from_dir(path)
    # 70/30 split of whole measurements between train and validation.
    train_ms, val_ms = dl.train_test_split(measurements, 0.7)
    train_triplets, _ = tu.create_triplets(train_ms, num_triplets_train)
    val_triplets, _ = tu.create_triplets(val_ms, num_triplets_val)
    return tu.getInputBatchFromTriplets(train_triplets, val_triplets)
####################
# MODEL SETUP
####################
class RecurrentLayer(Layer):
    """One gated residual block of the WaveNet-style architecture.

    Produces a skip-connection output and a residual output.
    """
    def __init__(self, dilation_rate=1, filter_size=64):
        """
        :param dilation_rate: exponent for the causal convolutions' dilation (2**rate)
        :param filter_size: number of filters in every convolution
        """
        super(RecurrentLayer, self).__init__()
        dilation = 2 ** dilation_rate
        # Gated activation unit: a tanh "filter" modulated by a sigmoid "gate".
        self.sigm_out = Conv1D(filter_size, 2, dilation_rate=dilation, padding='causal', activation='sigmoid')
        self.tanh_out = Conv1D(filter_size, 2, dilation_rate=dilation, padding='causal', activation='tanh')
        # 1x1 convolution producing the skip / residual contribution.
        self.same_out = Conv1D(filter_size, 1, padding='same')

    def call(self, x):
        """Forward pass.

        :param x: input tensor
        :return: (skip_connection, residual_output)
        """
        gated = Multiply()([self.tanh_out(x), self.sigm_out(x)])
        projected = self.same_out(gated)
        residual = Add()([x, projected])
        return projected, residual
class Model1DCNN(Model):
    """
    WaveNet-style 1-D CNN mapping an input sequence to a 20-dimensional
    latent embedding (trained elsewhere with a triplet loss).
    """
    def __init__(self, dilations: int = 3, filter_size: int =64, input_shape: tuple=(64, 49)):
        """
        :param dilations: number of dilations ("hidden" layers in the recurrent architecture)
        :param filter_size: filter size of the CNN
        :param input_shape: input shape of the network
            (assumed (timesteps, channels) — TODO confirm against the data loader)
        """
        super(Model1DCNN, self).__init__()
        self.residual = []  # gated residual blocks, one per dilation level
        self.dilations = dilations
        # Initial causal convolution lifting the input to filter_size channels.
        self.causal = Conv1D(filter_size, 2, padding='causal', input_shape=input_shape)
        # Dilation rate doubles per block: 2**1, 2**2, ..., 2**dilations.
        for i in range(1, dilations + 1):
            self.residual.append(RecurrentLayer(dilation_rate=i, filter_size=filter_size))
        # Post-processing of the summed skip connections.
        self.same_out_1 = Conv1D(filter_size, 1, padding='same', activation='relu')
        self.same_out_2 = Conv1D(8, 1, padding='same', activation='relu')
        # Dense head producing the 20-dim latent vector (no final activation).
        self.d1 = Dense(400, activation='relu')
        self.d2 = Dense(200, activation='relu')
        self.d3 = Dense(20)
    def call(self, x):
        """
        Forward pass of the network.
        :param x: input to the network
        :return: output of the network (latent space)
        """
        x_skips = []
        x = self.causal(x)
        # Each residual block contributes a skip tensor and refines x.
        for i in range(self.dilations):
            x_skip, x = self.residual[i](x)
            x_skips.append(x_skip)
        # Sum all skip connections, as in WaveNet.
        x = Add()(x_skips)
        x = self.same_out_1(x)
        x = self.same_out_2(x)
        x = Flatten()(x)
        x = self.d1(x)
        x = self.d2(x)
        return self.d3(x)
| true |
893ad42a015ea29f6b453f15b87de81c52d5252b | Python | mlipatov/paint_atmospheres | /pa/usr/04_temperature.py | UTF-8 | 6,706 | 2.671875 | 3 | [
"MIT"
] | permissive | # Adapted from the code in pa.lib.map.F()
# Output: a plot of the relative error in temperature correction for omega in [0, 0.999]
from pa.lib import fit as ft
from pa.lib import util as ut
import numpy as np
from mpmath import mp
import math
import sys
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rc
import time
iodir = '../../'
def rho(x, omega):
    """Dimensionless Roche-model radius at x = |cos(theta)| for rotational
    velocity omega (numpy arrays of equal shape).

    Entries with x == 1 use the closed form 1/f(omega); the rest use the
    general trigonometric solution.
    """
    result = np.empty_like(omega)
    at_one = x == 1
    result[at_one] = 1. / f(omega[at_one])
    xr = x[~at_one]
    om = omega[~at_one]
    result[~at_one] = (2*np.sqrt(2 + om**2) * \
        np.sin(np.arcsin((3*np.sqrt(3 - 3*xr**2)*om) / (2 + om**2)**1.5)/3.)) / \
        (np.sqrt(3 - 3*xr**2)*om)
    return result

def f(omega):
    """Rotational factor 1 + omega^2 / 2."""
    return 1 + omega**2 / 2

def F_0(omega):
    """Temperature correction F at x = 0."""
    return (1 - omega**2) ** (-2./3)

def F_1(omega):
    """Temperature correction F at x = 1."""
    return np.exp((2./3) * omega**2 * (1 / f(omega))**3)
# Smallest x (a.k.a. x_b) down to which the full Newton step function is
# used; the series expansion takes over below it.
#   omega : grid of rotational velocities
#   k     : order-one factor of proportionality between the error in the
#           full step function and that in the series expansion
#   q     : resolution of floating point numbers
def X1(omega, k, q):
    safety = 0.9  # a factor that's a little less than 1
    # omega below which the asymptotic formula would yield values above 1
    omega_lim = safety * (2./(85*k))**(1./4) * 3**(1./2) * q**(1./4)
    result = np.empty_like(omega)
    small = omega < omega_lim
    result[small] = 1
    big = ~small
    result[big] = q**(1./6) * (2./(85*k))**(1/6) * \
        ( 3**(1./3) * omega[big]**(-2./3) - \
          3**(-2./3) * (199./255) * omega[big]**(4./3) - \
          3**(-2./3) * (29123./65025) * omega[big]**(10./3) )
    # clip any slight overshoot back to the physical maximum x = 1
    result[result > 1] = 1
    return result
# Helper combining the temperature correction F with x = |cos(theta)|:
# G(F, x) = sqrt(F * (1 - x^2) + x^2).
def G(F, x):
    return np.sqrt(F * (1 - x**2) + x**2)
# Full Newton's-method step function for the temperature correction.
#   F, x  : current iterate and |cos(theta)|
#   G     : precomputed helper G(F, x)
#   rho   : dimensionless radius at x
#   omega : rotational velocity
def dF_full(F, x, G, rho, omega):
    prefactor = -2 * F * G**2
    term_flat = (1 - G) / x**2
    term_rot = (-1./3) * G * rho**3 * omega**2
    term_log = G * np.log( np.sqrt(F) * (1 + x) / (x + G) ) / x**3
    return prefactor * (term_flat + term_rot + term_log)
# Same step function as dF_full, evaluated with mpmath's arbitrary
# precision (sqrt and log applied elementwise via mp scalars).
def dF_full_prec(F, x, G, rho, omega):
    prefactor = -2 * F * G**2
    term_flat = (1 - G) / x ** 2
    term_rot = (-1./3) * G * rho**3 * omega**2
    logarg = np.array([mp.sqrt(y) for y in F]) * (1 + x) / (x + G)
    term_log = G * np.array([mp.log(y) for y in logarg]) / x ** 3
    return prefactor * (term_flat + term_log + term_rot)
# Series approximation (to third order) of the Newton's-method step
# function, used where the full expression loses precision.
def dF_approx(F, x, omega):
    # frequently reused squares
    x2 = x**2
    o2 = omega**2
    return (2*F)/3. + (2*x2)/5. - F**1.5*x2*(1 - o2) - \
        (F**2.5*(10*(1 - o2)**2 + 3*x2*(-3 + 8*o2)))/(15.*(1 - o2))
nm = 15 # number of steps to run the double-precision versions of the algorithm
nmp = 20 # number of steps to run the higher precision version
# omega_max = 0.999
# Log-spaced grid of (1 - omega), flipped so omega increases toward 0.999.
delta = np.logspace(-3, 0, num=400, base=10)
omega = np.flip(1 - delta)
# omega = np.linspace(0, omega_max, 200)
o2 = omega**2
F0 = F_0(omega) # F at x = 0
F1 = F_1(omega) # F at x = 1
k = 100 # a parameter for estimating this value of x
q = np.finfo(float).eps # resolution of floating point numbers
# optimal smallest value of x for which to compute using Newton's method
xb = X1(omega, k, q)
# rho at these values of x
rho_b = rho(xb, omega)
# initialize the result arrays (to the half-way point in the possible range)
F_full = np.full_like(omega, (F0 + F1) / 2)
F_approx = np.full_like(omega, (F0 + F1) / 2)
# Etalon array promoted to mpmath scalars for the high-precision run.
F_etalon = mp.mpf(1) * np.full_like(omega, (F0 + F1) / 2)
# Newton's algorithm using the two variants of double precision
start = time.time()
for i in range(nm):
    # helper function
    G_full = G(F_full, xb)
    G_approx = G(F_approx, xb)
    # the new values of F at the locations
    # where we use the full Newton's method step function
    F_full = F_full + dF_full(F_full, xb, G_full, rho_b, omega)
    # the new values of F at the locations
    # where we use the series expansion of Newton's method step function
    F_approx = F_approx + dF_approx(F_approx, xb, omega)
    # check if we end up outside the bounds on F
    # and come back into the bounds if we did
    m = (F_full < F1); F_full[ m ] = F1[ m ]
    m = (F_full > F0); F_full[ m ] = F0[ m ]
    m = (F_approx < F1); F_approx[ m ] = F1[ m ]
    m = (F_approx > F0); F_approx[ m ] = F0[ m ]
end = time.time()
print('Time for the two sets of double precision evaluations in seconds: ' + str(end - start), flush=True)
# Newton's algorithm using the full expression method with higher precision
xb = mp.mpf(1) * xb
F0 = mp.mpf(1) * F0
F1 = mp.mpf(1) * F1
mp.dps = 100 # number of digits after decimal point in higher precision calculations
start = time.time()
for i in range(nm):
    # helper function
    G_etalon = G(F_etalon, xb)
    # the new values of F at the locations
    # where we use the etalon Newton's method step function
    F_etalon = F_etalon + dF_full_prec(F_etalon, xb, G_etalon, rho_b, omega)
    # check if we end up outside the bounds on F
    # and come back into the bounds if we did
    m = (F_etalon < F1); F_etalon[ m ] = F1[ m ]
    m = (F_etalon > F0); F_etalon[ m ] = F0[ m ]
    # # uncomment the following four lines to see that the etalon values converge
    # if i > 0:
    # 	diff = np.abs(F_etalon - F_prev)
    # 	print(i + 1, float(diff.max()))
    # F_prev = np.copy(F_etalon)
end = time.time()
print('Time for the high precision evaluations: ' + str(end - start), flush=True)
# Relative errors of both double-precision variants against the etalon.
dfull = np.abs(F_full/F_etalon - 1).astype(float)
dapprox = np.abs(F_approx/F_etalon - 1).astype(float)
print('k = A*B ' + str(k))
print('Maximum error using full formula: ' + str(dfull.max()))
print('Maximum error using series approximation: ' + str(dapprox.max()))
diff = np.concatenate((dfull, dapprox))
max_diff = np.max(diff)
min_diff = np.min(diff)
# Log-log error plot: relative error in F(x_b) versus (1 - omega).
plt.rcParams.update({'font.size': 18})
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=True)
# convergence plot figure
fig = plt.figure()
# axes
ax = plt.axes()
ax.set_yscale('log')
ax.set_xscale('log')
ax.invert_xaxis()
ax.set_ylim(q / 1e3, max_diff * 4)
ax.scatter(1 - omega, dapprox, marker='o', facecolors='none', edgecolors='g', s=6)
ax.scatter(1 - omega, dfull, marker='o', facecolors='b', edgecolors='b', s=6)
om_label = [0, 0.9, 0.99, 0.999]
ax.set_xticks(1 - np.asarray(om_label))
ax.set_xticklabels(['%g' % x for x in om_label])
ax.set_xlim(1.2, 1e-3 * 0.8)
ax.set_xlabel(r'$\omega$')
ax.set_ylabel(r'$\left|\delta F(x_b) \,/\, F(x_b)\right|$', labelpad=5)
fig.savefig(iodir + 'error_F.pdf', dpi=200, bbox_inches='tight')
| true |