blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e5455f961d5cf3417746f87eb7886aacaeac5ac | 53522e77824385c45a61a960e0d1b3b4975c6f98 | /软件第九次作业/软件161/陈晓莉2016021007/首次使用jieba.py | 9f8b87ab417da5192626f06a3df8d1a22e54aa5d | [] | no_license | ZAKERR/-16-python- | f5ab4789f83b37975612844cb645dea411facd86 | e9e91c5217a2583ea2f05ec2dc1365eed989a8ce | refs/heads/master | 2020-03-28T15:59:08.472526 | 2019-04-10T13:36:33 | 2019-04-10T13:36:33 | 148,646,442 | 13 | 14 | null | 2019-03-15T09:39:49 | 2018-09-13T14:02:27 | Python | UTF-8 | Python | false | false | 1,283 | py | import jieba
from collections import Counter
from wordcloud import WordCloud
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
# Word segmentation: cut the full text of "西游记" (Journey to the West)
# into tokens with jieba.
with open("西游记.txt",'r',encoding='utf-8') as f:
    article=f.read()
words=jieba.cut(article)
# jieba.cut returns a one-shot generator; materialise it so the tokens can
# be reused below.
wordlist=list(words)
'''
#统计词频
c=Counter(words).most_common(100)
with open("西游记.txt","w",encoding="utf-8")as fw:
    for x in c:
        if x[0] not in [",","。","你","我","他"]:
            fw.write("{0},{1}\n".format(x[0],x[1]))
'''
# Build the word cloud: join the tokens, use heart.png as the shape mask,
# render, save to wc.png and display.
listStr="/".join(wordlist)
image1=Image.open("heart.png")
image2=np.array(image1)
# NOTE(review): font_path is a Windows-only absolute path — will fail on
# other platforms; confirm before reuse.
wc=WordCloud(background_color="white",
             mask=image2,
             max_words=800,
             font_path="C:\Windows\Fonts\simfang.ttf",
             max_font_size=100,
             random_state=30,
             margin=2)
wc.generate(listStr)
plt.figure("wc")
wc.to_file("wc.png")
plt.imshow(wc)
plt.axis("off")
plt.show()
'''
#jieba.load_userdict("userdict.txt")
jieba.suggest_freq(('孔明','曰'),True)
with open("马蹄下的断枪.txt",'r',encoding='utf-8') as f:
    str1=f.read()
cut = jieba.cut(str1)
with open("马蹄下的断枪.txt",'w',encoding='utf-8') as f:
    f.write(" ".join(cut))
'''
| [
"noreply@github.com"
] | noreply@github.com |
1b64bbbf63d56a87bdaaedc8d78df89b1ed56b1a | 15c214b5885fceed927e478d2946070f2870bdef | /TG-LSTM.py | e2e322521ab26b92bc1c320cbea44111aeeaf793 | [] | no_license | huihuid/TG-LSTM-network-for-time-series-prediction | 247cfe25851d06e0f35b62a4a4f9ab5fecef5d8d | 4f166ed9b43aa9a6b9dd52f5c37d9c40402daee0 | refs/heads/master | 2022-07-02T17:10:51.983931 | 2020-04-26T23:17:09 | 2020-04-26T23:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,992 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 10 10:31:40 2018
@author: Wendong Zheng
"""
from math import sqrt
from numpy import concatenate
import numpy as np
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras import metrics
from keras import regularizers
from keras import optimizers
from ind_rnn import IndRNN
from keras.layers.normalization import BatchNormalization
from custom_layers import LSTM_Custom
np.random.seed(1337) # for reproducibility
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Each output row pairs the lagged observations (t-n_in .. t-1) with the
    forecast horizon (t .. t+n_out-1), one column per variable per offset.

    Args:
        data: 2-D observations (plain list, or array-like with ``.shape``).
        n_in: number of lag steps to use as input.
        n_out: number of steps to use as output.
        dropnan: drop the rows left incomplete by the shifting.

    Returns:
        pandas.DataFrame with columns named 'var<j>(t-<i>)', 'var<j>(t)'
        and 'var<j>(t+<i>)'.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, headers = [], []
    # lagged input columns: t-n_in, ..., t-1
    for offset in range(n_in, 0, -1):
        shifted.append(frame.shift(offset))
        headers.extend(f'var{var + 1}(t-{offset})' for var in range(n_vars))
    # forecast columns: t, t+1, ..., t+n_out-1
    for offset in range(n_out):
        shifted.append(frame.shift(-offset))
        if offset == 0:
            headers.extend(f'var{var + 1}(t)' for var in range(n_vars))
        else:
            headers.extend(f'var{var + 1}(t+{offset})' for var in range(n_vars))
    agg = concat(shifted, axis=1)
    agg.columns = headers
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# load dataset (hourly PM2.5 data; first column is used as the index)
dataset = read_csv('pollution_pm2.5.csv', header=0, index_col=0)
values = dataset.values
# integer encode direction (column 4 is categorical text in this dataset)
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
# ensure all data is float
values = values.astype('float32')
# normalize all columns to [0, 1]; this fitted scaler is reused later to
# invert predictions back to real PM2.5 units
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning: one lag step (t-1) as input, time t as output
reframed = series_to_supervised(scaled, 1, 1)
# drop the time-t columns we don't want to predict, leaving the 8 lagged
# features var1..var8(t-1) plus the single target var1(t)
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
print(reframed.head())
# split into train and test sets
values = reframed.values
n_train_hours = 548 * 24  # 548 days * 24 h = first 1.5 years for training (365*24*2 would be 2 years)
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs (last column is the target var1(t))
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# identical copies so each of the four models below gets its own validation pair
test_X1, test_y1 = test[:, :-1], test[:, -1]
test_X2, test_y2 = test[:, :-1], test[:, -1]
test_X3, test_y3 = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features] as Keras RNNs expect
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
test_X1 = test_X1.reshape((test_X1.shape[0], 1, test_X1.shape[1]))
test_X2 = test_X2.reshape((test_X2.shape[0], 1, test_X2.shape[1]))
test_X3 = test_X3.reshape((test_X3.shape[0], 1, test_X3.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#1 layer variants of the four compared models
# design network LSTM (baseline)
print('Build LSTM model...')
model = Sequential()
model.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# design network TG-LSTM ("Our" model)
# NOTE(review): the only visible difference from the baseline is
# implementation=2, which in stock Keras is a speed/memory trade-off rather
# than a different cell; presumably this relies on a locally patched LSTM
# implementation — confirm.
print('Build Our model...')
model1 = Sequential()
model1.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),implementation=2))
model1.add(Dense(1))
model1.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history1 = model1.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X1, test_y1), verbose=2, shuffle=False)
# IndRNN comparison model (recurrent clipping disabled via -1, no dropout)
print('Build IndRNN model...')
model2 = Sequential()
model2.add(IndRNN(128, input_shape=(train_X.shape[1], train_X.shape[2]),recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0
))
model2.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='mae',optimizer='adam',metrics=['mae'])
history2 = model2.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X2, test_y2), verbose=2, shuffle=False)
# design network LSTM+Zoneout comparison model
print('Build LSTM+Zoneout model...')
model3 = Sequential()
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.2,
                       input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(Dense(1))
model3.compile(loss='mae',optimizer='adam',metrics=['mae'])
# fit network
history3 = model3.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X3, test_y3), verbose=2, shuffle=False)
'''
#2-layer
# design network LSTM
print('Build LSTM model...')
model = Sequential()
model.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),return_sequences=True))
model.add(LSTM(128, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# design network LSTM-modify
print('Build Our model...')
model1 = Sequential()
model1.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),recurrent_dropout=0.1,implementation=2,return_sequences=True))
model1.add(LSTM(128,implementation=2,return_sequences=False,recurrent_dropout=0.1))
model1.add(Dense(1))
model1.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history1 = model1.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X1, test_y1), verbose=2, shuffle=False)
#IndRNN
print('Build IndRNN model...')
model2 = Sequential()
model2.add(IndRNN(128, input_shape=(train_X.shape[1], train_X.shape[2]),recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=False))#默认值dropout=0.0, recurrent_dropout=0.0,用先前研究提到的6层IndRNN
model2.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='mae',optimizer='adam',metrics=['mae'])
history2 = model2.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X2, test_y2), verbose=2, shuffle=False)
# design network LSTM+zoneout
print('Build LSTM+Zoneout model...')
model3 = Sequential()
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=False))
model3.add(Dense(1))
model3.compile(loss='mae',optimizer='adam',metrics=['mae'])
# fit network
history3 = model3.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X3, test_y3), verbose=2, shuffle=False)
'''
'''
#6-layer
# design network LSTM
print('Build LSTM model...')
model = Sequential()
model.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# design network LSTM-modify
print('Build Our model...')
model1 = Sequential()
model1.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128,implementation=2,return_sequences=False))
model1.add(Dense(1))
model1.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history1 = model1.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X1, test_y1), verbose=2, shuffle=False)
#IndRNN
print('Build IndRNN model...')
model2 = Sequential()
model2.add(IndRNN(128, input_shape=(train_X.shape[1], train_X.shape[2]),recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=False))#默认值dropout=0.0, recurrent_dropout=0.0,用先前研究提到的6层IndRNN
model2.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='mae',optimizer='adam',metrics=['mae'])
history2 = model2.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X2, test_y2), verbose=2, shuffle=False)
# design network LSTM+zoneout
print('Build LSTM+Zoneout model...')
model3 = Sequential()
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=True,
input_shape=(train_X.shape[1], train_X.shape[2])))#unit_size=128
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05,dropout=0.3,return_sequences=False))
model3.add(Dense(1))
model3.compile(loss='mae',optimizer='adam',metrics=['mae'])
# fit network
history3 = model3.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X3, test_y3), verbose=2, shuffle=False)
'''
'''
#21-layer
# design network LSTM
print('Build LSTM model...')
model = Sequential()
model.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# design network LSTM-modify
print('Build Our model...')
model1 = Sequential()
model1.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128, implementation=2,return_sequences=True))
model1.add(LSTM(128,implementation=2,return_sequences=False))
model1.add(Dense(1))
model1.compile(loss='mae', optimizer='adam',metrics=['mae'])
# fit network
history1 = model1.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X1, test_y1), verbose=2, shuffle=False)
#IndRNN
print('Build IndRNN model...')
model2 = Sequential()
model2.add(IndRNN(128, input_shape=(train_X.shape[1], train_X.shape[2]),recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.2, recurrent_dropout=0.1,
return_sequences=True))
model2.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
return_sequences=False))#默认值dropout=0.0, recurrent_dropout=0.0,用先前研究提到的6层IndRNN
model2.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='mae',optimizer='adam',metrics=['mae'])
history2 = model2.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X2, test_y2), verbose=2, shuffle=False)
'''
# plot the training-loss curves of all four models on one axis
pyplot.ylabel("Train loss value")
pyplot.xlabel("The number of epochs")
pyplot.title("Loss function-epoch curves")
pyplot.plot(history.history['loss'], label='train_LSTM')
pyplot.plot(history2.history['loss'], label='train_IndRNN')
pyplot.plot(history3.history['loss'], label='train_LSTM+Zoneout')
pyplot.plot(history1.history['loss'], label='train_Our')
pyplot.legend()
pyplot.savefig('Figure-PM 2.5-train-loss.png', dpi=300)
pyplot.show()
# plot the validation-loss curves (same four models, held-out data)
pyplot.ylabel("Validation Loss value")
pyplot.xlabel("The number of epochs")
pyplot.title("Loss function-epoch curves")
pyplot.plot(history.history['val_loss'], label='val_LSTM')
pyplot.plot(history2.history['val_loss'], label='val_IndRNN')
pyplot.plot(history3.history['val_loss'], label='val_LSTM+Zoneout')
pyplot.plot(history1.history['val_loss'], label='val_Our')
pyplot.legend()
pyplot.savefig('Figure-PM 2.5-val-loss.png', dpi=300)
pyplot.show()
# make a prediction with the LSTM baseline, then flatten its input back to
# 2D [samples, features] so it can be passed through the scaler below
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# make a prediction with TG-LSTM ("Our")
yhat1 = model1.predict(test_X1)
test_X1 = test_X1.reshape((test_X1.shape[0], test_X1.shape[2]))
# make a prediction with IndRNN
yhat2 = model2.predict(test_X2)
test_X2 = test_X2.reshape((test_X2.shape[0], test_X2.shape[2]))
# make a prediction with LSTM+Zoneout
yhat3 = model3.predict(test_X3)
test_X3 = test_X3.reshape((test_X3.shape[0], test_X3.shape[2]))
# invert scaling for forecast LSTM: the scaler was fitted on all 8 columns,
# so pair the 1-column prediction with the other 7 feature columns before
# inverse_transform, then keep column 0 (the PM2.5 value)
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for forecast TG-LSTM ("Our")
inv_yhat1 = concatenate((yhat1, test_X1[:, 1:]), axis=1)
inv_yhat1 = scaler.inverse_transform(inv_yhat1)
inv_yhat1 = inv_yhat1[:,0]
# invert scaling for forecast IndRNN
inv_yhat2 = concatenate((yhat2, test_X2[:, 1:]), axis=1)
inv_yhat2 = scaler.inverse_transform(inv_yhat2)
inv_yhat2 = inv_yhat2[:,0]
# invert scaling for forecast LSTM+Zoneout
inv_yhat3 = concatenate((yhat3, test_X3[:, 1:]), axis=1)
inv_yhat3 = scaler.inverse_transform(inv_yhat3)
inv_yhat3 = inv_yhat3[:,0]
# invert scaling for the actual target values.
# BUG FIX: the original code ran scaler.inverse_transform(test_X) and took
# column 0, but column 0 of test_X is the PM2.5 value at time t-1 (an input
# feature), so every RMSE/MAE below was computed against a series lagged by
# one hour.  The true target is test_y (value at time t); pair it with the
# remaining 7 feature columns so the 8-column scaler can invert it, then
# keep column 0 — mirroring how the forecasts are inverted above.
# actual for LSTM
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# actual for TG-LSTM ("Our")
test_y1 = test_y1.reshape((len(test_y1), 1))
inv_y1 = concatenate((test_y1, test_X1[:, 1:]), axis=1)
inv_y1 = scaler.inverse_transform(inv_y1)
inv_y1 = inv_y1[:,0]
# actual for IndRNN
test_y2 = test_y2.reshape((len(test_y2), 1))
inv_y2 = concatenate((test_y2, test_X2[:, 1:]), axis=1)
inv_y2 = scaler.inverse_transform(inv_y2)
inv_y2 = inv_y2[:,0]
# actual for LSTM+Zoneout
test_y3 = test_y3.reshape((len(test_y3), 1))
inv_y3 = concatenate((test_y3, test_X3[:, 1:]), axis=1)
inv_y3 = scaler.inverse_transform(inv_y3)
inv_y3 = inv_y3[:,0]
# calculate RMSE and MAE for LSTM (scores are in original PM2.5 units)
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
mae = mean_absolute_error(inv_y, inv_yhat)
print('LSTM Test RMSE: %.3f' % rmse)
print('LSTM Test MAE: %.3f' % mae)
# calculate RMSE and MAE for IndRNN
rmse2 = sqrt(mean_squared_error(inv_y2, inv_yhat2))
mae2 = mean_absolute_error(inv_y2, inv_yhat2)
print('IndRNN Test RMSE: %.3f' % rmse2)
print('IndRNN Test MAE: %.3f' % mae2)
# calculate RMSE and MAE for LSTM+Zoneout
rmse3 = sqrt(mean_squared_error(inv_y3, inv_yhat3))
mae3 = mean_absolute_error(inv_y3, inv_yhat3)
print('LSTM+Zoneout Test RMSE: %.3f' % rmse3)
print('LSTM+Zoneout Test MAE: %.3f' % mae3)
# calculate RMSE and MAE for TG-LSTM ("Our")
rmse1 = sqrt(mean_squared_error(inv_y1, inv_yhat1))
mae1 = mean_absolute_error(inv_y1, inv_yhat1)
print('Our method Test RMSE: %.3f' % rmse1)
print('Our method Test MAE: %.3f' % mae1)
# overlay the first 96 hours (4 days) of actual values and each model's
# predictions, then save the comparison figure
pyplot.figure(figsize=(20,10))
pyplot.title('PM 2.5(the next 96 hours)')
pyplot.xlabel('Time range(h)')
pyplot.ylabel(' PM2.5 range')
pyplot.plot(inv_y[:96],label='true')
pyplot.plot(inv_yhat[:96],'r--',label='predictions_LSTM')
pyplot.plot(inv_yhat2[:96],'c-.',label='predictions_IndRNN')
pyplot.plot(inv_yhat3[:96],'k:',label='predictions_LSTM+Zoneout')
pyplot.plot(inv_yhat1[:96],'g-*',label='predictions_Our')
pyplot.legend()
pyplot.savefig('Figure-PM 2.5.png', dpi=300)
pyplot.show() | [
"noreply@github.com"
] | noreply@github.com |
3c3a9f520cc8333b8fc3da19fc215ff5873b8cdd | 137337f800c10d6657803cf2745aaf4c7136a5c3 | /mainapp/migrations/0035_auto_20210207_2240.py | a84d821b4f13b3ea80b36810ac9f07c8c7bbc47d | [] | no_license | deft727/Django-ecommerce | c82d4cf691cb75354b474f3f58fc3dac5b696803 | f9c04a30fa7af60d59d05fe89f6e15f123efcddb | refs/heads/main | 2023-03-07T05:05:02.285901 | 2021-02-18T19:12:33 | 2021-02-18T19:12:33 | 339,198,625 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # Generated by Django 3.0.8 on 2021-02-07 20:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choice set of Order.status_pay."""

    dependencies = [
        ('mainapp', '0034_auto_20210205_2013'),
    ]

    operations = [
        # Replace the status_pay field definition with the full set of
        # payment states (paid / declined / payment error / cash on delivery
        # / awaiting payment / refunded); default stays 'nal'.
        migrations.AlterField(
            model_name='order',
            name='status_pay',
            field=models.CharField(choices=[('pay', 'Оплачен'), ('not_pay', 'Отклонен'), ('miss', 'Ошибка при оплате'), ('nal', 'Наложенный платеж'), ('wait', 'Ожидание платежа'), ('reversed', 'Платеж возвращен')], default='nal', max_length=100, verbose_name='Оплата'),
        ),
    ]
| [
"deft727@gmail.com"
] | deft727@gmail.com |
88d039f99f633131187e0d42444fb21b95fb6709 | 0a7e7dafe1f2a75f15bf6e1908616863e6a9db4c | /Task 5/gameparser.py | 3d853e677cdd39688049d95ae8ae2b64cd30b676 | [] | no_license | EuanMorgan70/GameTemplates | f564e05a9233ec5c35524c13e027917ea07abf2a | ae1aa2eee2db82a8ccb3e47f74d8a84a70d11186 | refs/heads/master | 2020-04-01T21:17:58.661197 | 2018-10-19T09:49:51 | 2018-10-19T09:49:51 | 153,648,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,312 | py | import string
# List of "unimportant" filler words that normalise_input() strips from the
# player's input before command matching (feel free to add more).
skip_words = ['a', 'about', 'all', 'an', 'another', 'any', 'around', 'at',
              'bad', 'beautiful', 'been', 'better', 'big', 'can', 'every', 'for',
              'from', 'good', 'have', 'her', 'here', 'hers', 'his', 'how',
              'i', 'if', 'in', 'into', 'is', 'it', 'its', 'large', 'later',
              'like', 'little', 'main', 'me', 'mine', 'more', 'my', 'now',
              'of', 'off', 'oh', 'on', 'please', 'small', 'some', 'soon',
              'that', 'the', 'then', 'this', 'those', 'through', 'till', 'to',
              'towards', 'until', 'us', 'want', 'we', 'what', 'when', 'why',
              'wish', 'with', 'would']
def filter_words(words, skip_words):
    """Return a copy of *words* with every entry found in *skip_words* removed.

    For example:
    >>> filter_words(["help", "me", "please"], ["me", "please"])
    ['help']
    >>> filter_words(["go", "south"], skip_words)
    ['go', 'south']
    """
    return [word for word in words if word not in skip_words]
def remove_punct(text):
    """Strip every punctuation mark from *text*.

    Spaces are not punctuation and are kept, as are all other characters,
    in their original order. For example:
    >>> remove_punct("Hello, World!")
    'Hello World'
    >>> remove_punct(",go!So.?uTh")
    'goSouTh'
    """
    return "".join(ch for ch in text if ch not in string.punctuation)
def normalise_input(user_input):
    """Turn raw player input into a list of significant command words.

    Punctuation is removed, the text is lower-cased and split into words
    (extra whitespace between words is discarded), and all "unimportant"
    filler words are dropped via filter_words() and skip_words.
    For example:
    >>> normalise_input(" Go south! ")
    ['go', 'south']
    >>> normalise_input("Now, drop the sword please.")
    ['drop', 'sword']
    >>> normalise_input("How about I go through that little passage to the south...")
    ['go', 'passage', 'south']
    """
    cleaned = remove_punct(user_input).lower()
    # str.split() with no argument also swallows leading, trailing and
    # repeated whitespace, so no separate strip() is needed
    return filter_words(cleaned.split(), skip_words)
#
# COMPLETE ME!
#
| [
"noreply@github.com"
] | noreply@github.com |
73268d8e78be08959b7a0ae204f64a99e367dc91 | ac47074bcf749273941ab01213bb6d1f59c40c99 | /project/multi_factor/alpha_model/exposure/alpha_factor_dividend_12m.py | 578ecd49441115d3a844ec792f25ce7045c363c4 | [] | no_license | xuzhihua95/quant | c5561e2b08370610f58662f2871f1f1490681be2 | c7e312c70d5f400b7e777d2ff4c9f6f223eabfee | refs/heads/master | 2020-05-19T17:04:08.796981 | 2019-04-24T02:50:29 | 2019-04-24T02:50:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | from quant.stock.date import Date
from quant.stock.stock import Stock
from quant.project.multi_factor.alpha_model.exposure.alpha_factor import AlphaFactor
class AlphaDividend12m(AlphaFactor):
    """
    Trailing-12-month dividend yield factor, refreshed from the most
    recent financial report (disclosure date = latest report).
    Captures the valuation dimension of the alpha model.
    """

    def __init__(self):
        AlphaFactor.__init__(self)
        self.exposure_path = self.data_path
        self.raw_factor_name = 'alpha_raw_dividend_12m'

    def cal_factor_exposure(self, beg_date, end_date):
        """ Compute and store the raw factor exposure over [beg_date, end_date]. """
        # stored data is in percent; convert to a fraction
        yield_12m = Stock().read_factor_h5("dividendyield2") / 100
        start = Date().change_to_str(beg_date)
        end = Date().change_to_str(end_date)
        window = yield_12m.loc[:, start:end]
        # drop dates on which every stock is missing
        exposure = window.T.dropna(how='all').T
        self.save_alpha_factor_exposure(exposure, self.raw_factor_name)
if __name__ == "__main__":
    # Ad-hoc run: recompute the exposure from 2004-01-01 up to today.
    from datetime import datetime
    beg_date = '20040101'
    end_date = datetime.today()
    # renamed from `self`: that name is misleading at module scope
    factor = AlphaDividend12m()
    factor.cal_factor_exposure(beg_date, end_date)
| [
"1119332482@qq.com"
] | 1119332482@qq.com |
3a338b3bce0ae7855f28acdbf3a14a2360a75451 | 0de28d10ee8ed7d3615413584fb59a968593fb68 | /tests/auth_token/commands/test_authenticate_token_command.py | c94c6dff4948670cfd6469f2066ba2a5ba07cf47 | [
"MIT"
] | permissive | westofpluto/django_custom_auth_user | e501aeb28709bae26042031e9ca10c9a569c3f38 | e8dd1bbbdf943982d68a3183b4931a34b2b2c3f5 | refs/heads/master | 2020-03-09T14:38:50.859972 | 2018-05-03T01:41:08 | 2018-05-03T01:41:08 | 128,839,664 | 0 | 0 | MIT | 2018-05-03T01:41:09 | 2018-04-09T22:10:29 | Python | UTF-8 | Python | false | false | 1,725 | py | # -*- coding: utf-8
# Core
import pytest
from mixer.backend.django import mixer
# Models
from custom_auth_user.models import User
from custom_auth_user.models import AuthToken
# Store
from custom_auth_user.auth_token.store import AuthTokenStore
# Commands
from custom_auth_user.auth_token.commands.authenticate_token_command \
import authenticate_token
@pytest.mark.django_db
class TestAuthenticateTokenCommand():
    """Tests for the authenticate_token command."""

    @pytest.fixture
    def auth_token_store(self):
        """Provide a fresh AuthTokenStore per test."""
        return AuthTokenStore()

    def test_authenticate_token_command(self, auth_token_store):
        """A bad token yields None; a known token yields the user."""
        mixer.blend(AuthToken, token='test_token')
        missing = authenticate_token(
            auth_token_store=auth_token_store,
            auth_token='invalid')
        assert missing is None, 'Should not be authenticated by token'
        found = authenticate_token(
            auth_token_store=auth_token_store,
            auth_token='test_token')
        assert found, 'Should be authenticated by token'

    def test_authenticate_disabled_user(self, auth_token_store):
        """A valid token must not authenticate a disabled user."""
        owner = mixer.blend(User, is_disabled=True)
        mixer.blend(AuthToken, token='test_token', user=owner)
        result = authenticate_token(
            auth_token_store=auth_token_store,
            auth_token='test_token')
        assert result is None, 'Should not authenticate disabled user'

    def test_authenticate_inactive_user(self, auth_token_store):
        """A valid token must not authenticate an inactive user."""
        owner = mixer.blend(User, is_active=False)
        mixer.blend(AuthToken, token='test_token', user=owner)
        result = authenticate_token(
            auth_token_store=auth_token_store,
            auth_token='test_token')
        assert result is None, 'Should not authenticate inactive user'
| [
"anthon.alindada.435@gmail.com"
] | anthon.alindada.435@gmail.com |
83df69486edacf78980c1b67c388516b4134c5e6 | 29dd3c6fcb20252ada254a342eae87367e55e010 | /manage.py | ea2e7224478317d5b7b73667027a3e389d23d694 | [] | no_license | Lazi-Algorithm/DjangoBlogApp | b4caa17c341a87f9c0b76a5be1d122f0f9c134e0 | 6f4f6cef67dca0f216afaae6bd72be80c0c91255 | refs/heads/main | 2023-04-18T00:38:43.567342 | 2021-04-27T20:53:20 | 2021-04-27T20:53:20 | 362,244,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ablog.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"eloghosaefficiency@gmail.com"
] | eloghosaefficiency@gmail.com |
7f23664b7bbc4be12bd5c23a8f685cf41f098106 | f6aac61a48a87743be9c40fecdc24344bae4d263 | /scripts/gfs/gfs2iemre.py | 062adac7a62c91781a649ef342cf23c96977f333 | [
"MIT"
] | permissive | akrherz/iem | 8714d99b371c8818f7cdde73dd24639e9fc7d42b | 178015584b7fb5b585f65be6013eaf16fb6db0c7 | refs/heads/main | 2023-08-19T02:58:24.507782 | 2023-08-18T12:08:31 | 2023-08-18T12:08:31 | 4,253,774 | 118 | 74 | MIT | 2023-09-14T18:28:41 | 2012-05-07T20:32:59 | Python | UTF-8 | Python | false | false | 6,702 | py | """Copy GFS grib data to IEMRE grid...
Run from RUN_50_AFTER.sh
"""
import shutil
import subprocess
import sys
from datetime import date, timedelta
import numpy as np
import pygrib
from pyiem import iemre
from pyiem.util import logger, ncopen, utc
from scipy.interpolate import NearestNDInterpolator
LOG = logger()
def create(ts):
"""
Create a new NetCDF file for a year of our specification!
"""
fn = "/mesonet/data/iemre/gfs_current_new.nc"
with ncopen(fn, "w") as nc:
nc.title = "GFS on IEMRE Grid."
nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
nc.gfs_forecast = f"{ts:%Y-%m-%dT%H:%M:%SZ}"
nc.history = f"{date.today():%d %B %Y} Generated"
# Setup Dimensions
nc.createDimension("lat", iemre.NY)
nc.createDimension("lon", iemre.NX)
# store 20 days worth, to be safe of future changes
nc.createDimension("time", 20)
# Setup Coordinate Variables
lat = nc.createVariable("lat", float, ("lat"))
lat.units = "degrees_north"
lat.long_name = "Latitude"
lat.standard_name = "latitude"
lat.bounds = "lat_bnds"
lat.axis = "Y"
lat[:] = iemre.YAXIS
lon = nc.createVariable("lon", float, ("lon"))
lon.units = "degrees_east"
lon.long_name = "Longitude"
lon.standard_name = "longitude"
lon.bounds = "lon_bnds"
lon.axis = "X"
lon[:] = iemre.XAXIS
tm = nc.createVariable("time", float, ("time",))
tm.units = f"Days since {ts:%Y-%m-%d} 00:00:0.0"
tm.long_name = "Time"
tm.standard_name = "time"
tm.axis = "T"
tm.calendar = "gregorian"
# Placeholder
tm[:] = np.arange(0, 20)
high = nc.createVariable(
"high_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
high.units = "K"
high.scale_factor = 0.01
high.long_name = "2m Air Temperature 12 Hour High"
high.standard_name = "2m Air Temperature"
high.coordinates = "lon lat"
low = nc.createVariable(
"low_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
low.units = "K"
low.scale_factor = 0.01
low.long_name = "2m Air Temperature 12 Hour Low"
low.standard_name = "2m Air Temperature"
low.coordinates = "lon lat"
ncvar = nc.createVariable(
"tsoil", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
ncvar.units = "K"
ncvar.scale_factor = 0.01
ncvar.long_name = "0-10 cm Average Soil Temperature"
ncvar.standard_name = "0-10 cm Average Soil Temperature"
ncvar.coordinates = "lon lat"
ncvar = nc.createVariable(
"p01d", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
ncvar.units = "mm"
ncvar.scale_factor = 0.01
ncvar.long_name = "Precipitation Accumulation"
ncvar.standard_name = "precipitation_amount"
ncvar.coordinates = "lon lat"
def merge_grib(nc, now):
"""Merge what grib data we can find into the netcdf file."""
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
lons = None
lats = None
tmaxgrid = None
tmingrid = None
tsoilgrid = None
pgrid = None
hits = 0
for fhour in range(6, 385, 6):
fxtime = now + timedelta(hours=fhour)
grbfn = now.strftime(
f"/mesonet/tmp/gfs/%Y%m%d%H/gfs.t%Hz.sfluxgrbf{fhour:03.0f}.grib2"
)
grbs = pygrib.open(grbfn)
for grb in grbs:
name = grb.shortName.lower()
if lons is None:
lats, lons = [np.ravel(x) for x in grb.latlons()]
lons = np.where(lons > 180, lons - 360, lons)
if name == "tmax":
if tmaxgrid is None:
tmaxgrid = grb.values
else:
tmaxgrid = np.where(
grb.values > tmaxgrid, grb.values, tmaxgrid
)
elif name == "tmin":
if tmingrid is None:
tmingrid = grb.values
else:
tmingrid = np.where(
grb.values < tmingrid, grb.values, tmingrid
)
elif name == "prate":
# kg/m^2/s over six hours
hits += 1
if pgrid is None:
pgrid = grb.values * 6.0 * 3600
else:
pgrid += grb.values * 6.0 * 3600
# Hacky
elif name == "st" and str(grb).find("0.0-0.1 m") > -1:
if tsoilgrid is None:
tsoilgrid = grb.values
else:
tsoilgrid += grb.values
grbs.close()
# Write tmax, tmin out at 6z
if fxtime.hour == 6:
# The actual date is minus one
days = (fxtime.date() - now.date()).days - 1
if hits == 4:
LOG.info("Writing %s, days=%s", fxtime, days)
nn = NearestNDInterpolator((lons, lats), np.ravel(tmaxgrid))
nc.variables["high_tmpk"][days, :, :] = nn(xi, yi)
nn = NearestNDInterpolator((lons, lats), np.ravel(tmingrid))
nc.variables["low_tmpk"][days, :, :] = nn(xi, yi)
nn = NearestNDInterpolator((lons, lats), np.ravel(pgrid))
nc.variables["p01d"][days, :, :] = nn(xi, yi)
nn = NearestNDInterpolator(
(lons, lats), np.ravel(tsoilgrid / 4.0)
)
nc.variables["tsoil"][days, :, :] = nn(xi, yi)
tmingrid = None
tmaxgrid = None
tsoilgrid = None
hits = 0
def main(argv):
"""Do the work."""
now = utc(*[int(s) for s in argv[1:5]])
# Run every hour, filter those we don't run
if now.hour % 6 != 0:
return
create(now)
with ncopen("/mesonet/data/iemre/gfs_current_new.nc", "a") as nc:
merge_grib(nc, now)
shutil.move(
"/mesonet/data/iemre/gfs_current_new.nc",
"/mesonet/data/iemre/gfs_current.nc",
)
# Archive this as we need it for various projects
cmd = [
"pqinsert",
"-i",
"-p",
(
f"data a {now:%Y%m%d%H%M} bogus "
f"model/gfs/gfs_{now:%Y%m%d%H}_iemre.nc nc"
),
"/mesonet/data/iemre/gfs_current.nc",
]
subprocess.call(cmd)
# Generate 4inch plots based on 6z GFS
if now.hour == 6:
subprocess.call(["python", "gfs_4inch.py"])
if __name__ == "__main__":
main(sys.argv)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
250227ba416df11e77fe26a382553f1b41650339 | 40041373c9d34ffcde9e3f713a3b4154498f39a8 | /cl_main.py | 32adf6737a361475a2a7d5d3fa5d1561737259be | [] | no_license | Justprogramer/text_information_extraction_ch | 421c15b9efbe2fe2f3150bcac660f10b600f4bfe | 9038076bd400c0edc80d04288a7ef26fb7bbfb54 | refs/heads/master | 2020-06-14T12:07:16.147892 | 2019-07-16T06:32:01 | 2019-07-16T06:32:01 | 195,000,359 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,404 | py | # -*-coding:utf-8-*-
import argparse
import os
import pickle
import torch
import torchtext.data as data
from torchtext.vocab import Vectors
import cl_dataset
import cl_model
import cl_train
from ner_tool import ner_tool
parser = argparse.ArgumentParser(description='TextCNN text classifier')
# learning
parser.add_argument('-lr', type=float, default=0.015, help='initial learning rate [default: 0.015]')
parser.add_argument('-momentum', type=float, default=0., help='initial momentum [default: 0.]')
parser.add_argument('-l2_rate', type=float, default=1.0e-8, help='initial l2_rate [default: 1.0e-8]')
parser.add_argument('-lr_decay', type=float, default=0.05, help='initial learning rate ecay [default: 0.05]')
parser.add_argument('-epochs', type=int, default=10000, help='number of epochs for train [default: 256]')
parser.add_argument('-batch-size', type=int, default=64, help='batch size for training [default: 128]')
parser.add_argument('-log-interval', type=int, default=1,
help='how many steps to wait before logging training status [default: 1]')
parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
parser.add_argument('-max_patience', type=int, default=10,
help='iteration numbers to stop without performance increasing')
parser.add_argument('-save-best', type=bool, default=True, help='whether to save when get best performance')
# model
parser.add_argument('-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')
parser.add_argument('-max-norm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]')
parser.add_argument('-embedding-dim', type=int, default=128, help='number of embedding dimension [default: 128]')
parser.add_argument('-position_embedding_dim', type=int, default=20,
help='number of position embedding dimension [default: 5]')
parser.add_argument('-filter-num', type=int, default=100, help='number of each size of filter')
parser.add_argument('-filter-sizes', type=str, default='3,4,5',
help='comma-separated filter sizes to use for convolution')
parser.add_argument('-static', type=bool, default=True, help='whether to use static pre-trained word vectors')
parser.add_argument('-non-static', type=bool, default=True,
help='whether to fine-tune static pre-trained word vectors')
parser.add_argument('-multichannel', type=bool, default=True, help='whether to use 2 channel of word vectors')
parser.add_argument('-pretrained-name', type=str, default='sgns.zhihu.word',
help='filename of pre-trained word vectors')
parser.add_argument('-pretrained-path', type=str, default='pretrained', help='path of pre-trained word vectors')
# device
parser.add_argument('-device', type=int, default=0, help='device to use for iterate data, -1 mean cpu [default: -1]')
# option
parser.add_argument('-snapshot', type=str, default='./snapshot/cl_model.pkl',
help='filename of model snapshot [default: None]')
args = parser.parse_args()
def load_word_vectors(model_name, model_path):
vectors = Vectors(name=model_name, cache=model_path)
return vectors
def load_dataset(text_field, label_field, args, **kwargs):
train_dataset, dev_dataset = cl_dataset.get_dataset('data', text_field, label_field)
if args.static and args.pretrained_name and args.pretrained_path:
vectors = load_word_vectors(args.pretrained_name, args.pretrained_path)
text_field.build_vocab(train_dataset, dev_dataset, vectors=vectors)
else:
text_field.build_vocab(train_dataset, dev_dataset)
args.text_field = text_field
label_field.build_vocab(train_dataset, dev_dataset)
train_iter = data.Iterator.splits(
(train_dataset,),
batch_sizes=(args.batch_size,),
sort_key=lambda x: len(x.text),
**kwargs)
dev_iter = data.Iterator.splits(
(dev_dataset,),
batch_sizes=(args.batch_size,),
sort_key=lambda x: len(x.text),
shuffle=False)
return train_iter[0], dev_iter[0]
print('Loading data...')
text_field = data.Field()
label_field = data.Field(sequential=False)
train_iter, dev_iter = load_dataset(text_field, label_field, args, device=-1, repeat=False, shuffle=True)
args.vocabulary_size = len(text_field.vocab)
if args.static:
args.embedding_dim = text_field.vocab.vectors.size()[-1]
args.vectors = text_field.vocab.vectors
if args.multichannel:
args.static = True
args.non_static = True
args.class_num = len(label_field.vocab) - 1
args.label = label_field.vocab.itos
args.label.remove('<unk>')
args.cuda = args.device != -1 and torch.cuda.is_available()
args.filter_sizes = [int(size) for size in args.filter_sizes.split(',')]
print('Parameters:')
for attr, value in sorted(args.__dict__.items()):
if attr in {'vectors'}:
continue
print('\t{}={}'.format(attr.upper(), value))
text_cnn = cl_model.TextCNN(args)
if args.cuda:
torch.cuda.set_device(args.device)
text_cnn = text_cnn.cuda()
try:
cl_train.train(train_iter, dev_iter, text_cnn, args)
if args.snapshot:
print('\nLoading model from {}...\n'.format(args.snapshot))
text_cnn.load_state_dict(torch.load(args.snapshot))
cl_train.eval(dev_iter, text_cnn, args)
except KeyboardInterrupt:
print('Exiting from training early')
| [
"whupenger@gmail.com"
] | whupenger@gmail.com |
b3fdd146da4c2235de6f496facc12824508d4d65 | 9d0e04a3f0c5819baf0c942e55507453c7079705 | /documentation/__manifest__.py | 23e06387e0cafbe541b8c1ce699829334e708ffb | [
"Apache-2.0"
] | permissive | ElNahoko/HSE_ARNOSH | 491517e87887a1042c69f3fedb0445388f0235e8 | 1a8661db454e6a9e7f775a3ffd58a3936a43bb59 | refs/heads/master | 2020-06-26T17:57:35.516056 | 2019-09-19T21:34:37 | 2019-09-19T21:34:37 | 199,706,193 | 1 | 2 | Apache-2.0 | 2019-08-19T15:55:10 | 2019-07-30T18:28:12 | Python | UTF-8 | Python | false | false | 1,033 | py | # -*- coding: utf-8 -*-
{
'name': "Documentation des STANDARDS HSE",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/12.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base'],
# always loaded
'data': [
'security/ir.model.access.csv',
'views/DOCUMENT_FORM_VIEW.xml',
'views/CATEGORIE_FORM_VIEW.xml',
'report/doc_report.xml',
'report/doc_report_template.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
| [
"noreply@github.com"
] | noreply@github.com |
6c6752a5145547271c14366dbb8732b389e118dc | 715a29d762c7f6ea5ba2e2273d6463ebbfabcb39 | /10/compileSubroutineDec.py | b24d0f478e8a5f553c0b39d243175355f169c257 | [
"MIT"
] | permissive | zivshacham/Nand2Tetris_clone | c72781a9a849a7bb371251dfba0a998e2e0725a7 | 5f91805823b7572263bc31b0b4537aed14d6b4e7 | refs/heads/master | 2023-03-19T23:37:30.344530 | 2020-02-16T13:30:51 | 2020-02-16T13:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | import compileSubroutineBody
import compileParameterList
import handleXML
import verifyXMLline
def compile2xml(text, index):
labelClass = handleXML.writeLabelPairs('subroutineDec')
startIndex = index
currentIndex = index
listIndex = ()
output = []
# Add: <subroutineDec>.
output.append(labelClass[0])
# Move pointer: ('constructor' | 'function' | 'method') ('void' | type)
# subroutineName '('.
while not verifyXMLline.isLeftRoundBracket(text[currentIndex]):
currentIndex += 1
# Move pointer: parameterList.
currentIndex += 1
# Add: ('constructor' | 'function' | 'method') ('void' | type)
# subroutineName '('.
for i in range(startIndex, currentIndex):
output.append(text[i])
# Add & move pointer: parameterList.
listIndex = compileParameterList.compile2xml(text, currentIndex)
output += listIndex[0]
currentIndex = listIndex[1]
# Add: ')'.
output += text[currentIndex]
# Move pointer: subroutineBody.
currentIndex += 1
# Add & move pointer: subroutineBody.
listIndex = compileSubroutineBody.compile2xml(text, currentIndex)
output += listIndex[0]
currentIndex = listIndex[1]
# Add: </subroutineDec>.
output.append(labelClass[1])
return (output, currentIndex)
| [
"5583771+Bozar@users.noreply.github.com"
] | 5583771+Bozar@users.noreply.github.com |
2c9a215099b4f34fbcc0cb91065ff3b3496cab1a | 27077b17fd9149195de9161351319ee24544016e | /eventex/core/migrations/0004_auto_20161011_1421.py | 05d70349ea5b094b1981a25e0b6182947f2cb075 | [] | no_license | rpadilha/eventex | 69a5f57bd30405a1d2da994871357600bc147762 | b89387f9a229476df7592a06e9b2c97008cac31b | refs/heads/master | 2020-04-11T00:12:19.622336 | 2016-11-03T02:53:46 | 2016-11-03T02:53:46 | 68,050,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-11 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_contact'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'verbose_name': 'contato', 'verbose_name_plural': 'contatos'},
),
migrations.AlterField(
model_name='contact',
name='kind',
field=models.CharField(choices=[('E', 'Email'), ('P', 'Telefone')], max_length=1, verbose_name='tipo'),
),
migrations.AlterField(
model_name='contact',
name='speaker',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Speaker', verbose_name='palestrante'),
),
migrations.AlterField(
model_name='contact',
name='value',
field=models.CharField(max_length=255, verbose_name='valor'),
),
]
| [
"padilha@renatoair.local"
] | padilha@renatoair.local |
865ee216e6cfc9d792037c307d5b7a7160b8a831 | cec2ef5ae03f994aa618be4fe5df61619a12257b | /GRLTest/IndicatortargetmorethanworsT/IndicatortargetmorethanworsT/IndicatortargetmorethanworsT.py | a428bcfdfc9ca73c6aa5981371f35e3750851f5e | [] | no_license | m81092/GRLToMath | 40052eb6b4e8ecff544a2d18af408366c1465c8e | 6bd13adeea09700ce738412895c6b81af0456fc5 | refs/heads/master | 2020-06-19T14:02:55.387404 | 2018-06-20T21:57:05 | 2018-06-20T21:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | def IndicatortargetmorethanworsT( Indicator1):
expr = ((100.0) if (Indicator1 >= 300.0) else (((50.0*abs(0.01*Indicator1 - 2.0) + 50.0) if (Indicator1 >= 200.0) else (((-50.0*abs(0.00588235294117647*Indicator1 - 1.17647058823529) + 50.0) if (Indicator1 > 30.0) else (((0) if (True) else None)))))))
return expr
| [
"filuwan@gmail.com"
] | filuwan@gmail.com |
0a3826ebc48e9ed2fae489ac94d5db8824694b1a | 2c1f4b2f03bbe6704c04a06b5eea1bbb4f21752d | /sliced/base.py | 7f3837515f416e9053d906fd06a5b640704024d8 | [
"MIT"
] | permissive | sofianehaddad/sliced | 54eb49920f57b560090bf7da84087c7c622e7bf0 | 243bde236d8c615f9563279a0a2095e2fa2f4650 | refs/heads/master | 2021-05-23T19:05:00.513782 | 2018-06-18T19:38:54 | 2018-06-18T19:38:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,679 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pkg_resources import parse_version
import warnings
import numpy as np
import scipy.linalg as linalg
NUMPY_UNIQUE_COUNTS_VERSION = '1.9.0'
def is_multioutput(y):
"""Whether the target y is multi-output (or multi-index)"""
return hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1
def grouped_sum(array, groups):
"""Sums an array by groups. Groups are assumed to be contiguous by row."""
inv_idx = np.concatenate(([0], np.diff(groups).nonzero()[0] + 1))
return np.add.reduceat(array, inv_idx)
def unique_counts(arr):
"""Determine the unique values and the number of times they occur in a one
dimensional array.
This is a wrapper around numpy's unique function.
In order to keep the numpy dependency below 1.9 this function falls
back to the slow version of getting the unique counts array by counting
the indices of the inverse array.
Parameters
----------
arr : array_like
Input array. This array will be flattened if it is not already 1-D.
Returns
-------
unique : ndarray
The sorted unique values.
unique_counts : ndarray
The number of times each of the unique values compes up in the orginal
array.
"""
if (parse_version(np.__version__) >=
parse_version(NUMPY_UNIQUE_COUNTS_VERSION)):
unique, counts = np.unique(arr, return_counts=True)
else:
unique, unique_inverse = np.unique(arr, return_inverse=True)
counts = np.bincount(unique_inverse)
return unique, counts
def slice_y(y, n_slices=10):
"""Determine non-overlapping slices based on the target variable, y.
Parameters
----------
y : array_like, shape (n_samples,)
The target values (class labels in classification, real numbers
in regression).
n_slices : int (default=10)
The number of slices used when calculating the inverse regression
curve. Truncated to at most the number of unique values of ``y``.
Returns
-------
slice_indicator : ndarray, shape (n_samples,)
Index of the slice (from 0 to n_slices) that contains this
observation.
slice_counts : ndarray, shape (n_slices,)
The number of counts in each slice.
"""
unique_y_vals, counts = unique_counts(y)
cumsum_y = np.cumsum(counts)
# `n_slices` must be less-than or equal to the number of unique values
# of `y`.
n_y_values = unique_y_vals.shape[0]
if n_y_values == 1:
raise ValueError("The target only has one unique y value. It does "
"not make sense to fit SIR or SAVE in this case.")
elif n_slices >= n_y_values:
if n_slices > n_y_values:
warnings.warn(
"n_slices greater than the number of unique y values. "
"Setting n_slices equal to {0}.".format(counts.shape[0]))
# each y value gets its own slice. usually the case for classification
slice_partition = np.hstack((0, cumsum_y))
else:
# attempt to put this many observations in each slice.
# not always possible since we need to group common y values together
# NOTE: This should be ceil, but this package is attempting to
# replicate the slices used by R's DR package which uses floor.
n_obs = np.floor(y.shape[0] / n_slices)
# Loop through the unique y value sums and group
# slices together with the goal of 2 <= # in slice <= n_obs
# Find index in y unique where slice begins and ends
n_samples_seen = 0
slice_partition = [0] # index in y of start of a new slice
while n_samples_seen < y.shape[0] - 2:
slice_start = np.where(cumsum_y >= n_samples_seen + n_obs)[0]
if slice_start.shape[0] == 0: # this means we've reached the end
slice_start = cumsum_y.shape[0] - 1
else:
slice_start = slice_start[0]
n_samples_seen = cumsum_y[slice_start]
slice_partition.append(n_samples_seen)
# turn partitions into an indicator
slice_indicator = np.ones(y.shape[0], dtype=np.int)
for j, (start_idx, end_idx) in enumerate(
zip(slice_partition, slice_partition[1:])):
# this just puts any remaining observations in the last slice
if j == len(slice_partition) - 2:
slice_indicator[start_idx:] = j
else:
slice_indicator[start_idx:end_idx] = j
slice_counts = np.bincount(slice_indicator)
return slice_indicator, slice_counts
| [
"jloyal25@gmail.com"
] | jloyal25@gmail.com |
ca20f677e85788ae5a2d6c3252f6ebbd0cc52688 | 0e3b4f7ef8ff40391fa21c6d5b1e7c8f12179b03 | /codeforces/F.py | db421dbf1a1183d0c8c568bad10866660d8f0c71 | [
"MIT"
] | permissive | pavponn/machine-learning | c5bcc82bfe1cd03409321e5ba7e540f9b69bbc20 | 95ab573556a72fb5d16761cb8136d2896ae55263 | refs/heads/master | 2023-02-06T05:09:59.815128 | 2020-12-22T22:09:38 | 2020-12-22T22:09:38 | 296,904,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | from typing import Dict, Tuple, Set
import math
EPS = 1e-10
def calculate_likelihood_probabilities(k: int, alpha: int, classes_num: Dict[int, int], all_words: Set[str],
word_in_class_k: Dict[Tuple[str, int], int]) -> Dict[Tuple[str, int], Tuple[float, float]]:
likelihood_probabilities: Dict[Tuple[str, int], Tuple[float, float]] = {}
for word in all_words:
for cl in range(k):
count = 0
if (word, cl) in word_in_class_k:
count = word_in_class_k[(word, cl)]
numerator = count + alpha + 0.000
denominator = classes_num[cl] + alpha * 2 + 0.0000
likelihood_probabilities[(word, cl)] = (numerator, denominator)
return likelihood_probabilities
def solve():
k = int(input())
lambdas = [int(x) for x in input().split()]
alpha = int(input())
n = int(input())
all_words: Set[str] = set()
word_in_class_k: Dict[Tuple[str, int], int] = {}
classes_num = {}
for cl in range(k):
classes_num[cl] = 0
for _ in range(n):
line = [x for x in input().split()]
this_class = int(line.pop(0)) - 1
this_length = int(line.pop(0))
this_words = set(line)
for w in this_words:
all_words.add(w)
classes_num[this_class] += 1
for word in this_words:
if (word, this_class) not in word_in_class_k:
word_in_class_k[(word, this_class)] = 0
word_in_class_k[(word, this_class)] += 1
likelihood_probabilities: Dict[Tuple[str, int], Tuple[float, float]] = \
calculate_likelihood_probabilities(k, alpha, classes_num, all_words, word_in_class_k)
m = int(input())
for i in range(m):
line = [x for x in input().split()]
this_length = int(line.pop(0))
this_words = set(line)
num = [0] * k
for cl in range(k):
this_num = 0
this_num += math.log(lambdas[cl] * (EPS + classes_num[cl] / n))
for word in all_words.difference(this_words):
this_num += math.log(1 - (likelihood_probabilities[(word, cl)][0] / likelihood_probabilities[(word, cl)][1]))
for word in this_words.intersection(all_words):
this_num += math.log(likelihood_probabilities[(word, cl)][0] / likelihood_probabilities[(word, cl)][1])
num[cl] = this_num
max_num = max(num)
snd = 0
for ln_pr in num:
snd += math.exp(ln_pr - max_num)
snd = math.log(snd) + max_num
for cl in range(k):
if cl != k - 1:
print(math.exp(num[cl] - snd), end=' ')
else:
print(math.exp(num[cl] - snd))
solve() | [
"pavponn@yandex.ru"
] | pavponn@yandex.ru |
112a913a50c7b49e221c725f44b22096245022c1 | 2fa102b151d19cf6fc2cfe5a42df17e8ba90eb9d | /task-management-api/app/main/controller/TaskController.py | d1230092cba559c568070210823291b9815d2cf9 | [] | no_license | sbsanjaybharti/tms | 36fdb49a122b0bfdf612c05956ff6c266c54e7aa | a9140f1eac2627ecec67a8e821095349608a3436 | refs/heads/master | 2023-02-08T01:28:35.424785 | 2020-05-25T05:29:33 | 2020-05-25T05:29:33 | 226,490,976 | 0 | 0 | null | 2023-02-02T06:41:44 | 2019-12-07T10:08:58 | Python | UTF-8 | Python | false | false | 3,305 | py | import http.client
import os, json
import requests
from flask import request, session, jsonify, Flask
from flask_cors import cross_origin
from flask_restplus import Resource
from ..decorator.AuthDecorator import token_required
from ..utility.ErrorHandler import responseData
from ..utility.validation import Validation
from ..service.TaskService import TaskService
from ..dto.dto import TaskDto
api = TaskDto.api
task_create = TaskDto.task_create
task_list = TaskDto.task_list
task_update = TaskDto.task_update
parser = api.parser()
parser.add_argument('Authorization', type=str, location='headers')
@api.route('/')
@api.header('Authorization: bearer', 'JWT TOKEN', required=True)
@api.doc(parser=parser)
class TaskController(Resource):
"""
Create Task
"""
@cross_origin(headers=['Content-Type', 'Authorization'])
@api.expect(task_create, validate=True)
@token_required
def post(self):
"""
API to create task
## Implementation Notes
__Access__ : Admin
"""
patch_data = request.json
validation = Validation.createTask(patch_data)
if validation is not None:
return responseData(validation)
task = TaskService.create(patch_data)
return responseData(task)
# list user of current logged in user
# user->organization-> list all user of that organization
@cross_origin(headers=['Content-Type', 'Authorization'])
# @api.doc(params={'page': 'Pagination no. of page'})
@api.doc(params={'page': 'Pagination no. of page'})
# @api.marshal_list_with(task_list, envelope='data')
@token_required
def get(self):
"""
API to list task
## Implementation Notes
__Access__ : Admin
"""
# get user list
args = request.args
return responseData(TaskService.list(args))
@api.route('/<id>')
@api.header('Authorization: bearer', 'JWT TOKEN', required=True)
@api.doc(parser=parser)
class TaskViewController(Resource):
@cross_origin(headers=['Content-Type', 'Authorization'])
@token_required
def get(self, id):
"""
API to get the task
## Implementation Notes
__Access__ : Admin
"""
return responseData(TaskService.get(id))
# Edit user detail(first name, last name, email)
# return user data with auth0 id
@cross_origin(headers=['Content-Type', 'Authorization'])
@api.expect(task_update, validate=True)
@token_required
def put(self, id):
"""
API to update the task
## Implementation Notes
__Access__ : Admin
"""
patch_data = request.json
validation = Validation.UpdateTask(patch_data)
if validation is not None:
return responseData(validation)
return responseData(TaskService.edit(patch_data, id))
@api.route('/process/<id>')
@api.header('Authorization: bearer', 'JWT TOKEN', required=True)
@api.doc(parser=parser)
class TaskProcessController(Resource):
@cross_origin(headers=['Content-Type', 'Authorization'])
@token_required
def get(self, id):
"""
API to get the task
## Implementation Notes
__Access__ : Admin
"""
return responseData(TaskService.process(id))
| [
"sanjay@cynixlabs.com"
] | sanjay@cynixlabs.com |
1fbf18053d3044b77888255824e709eae15f86dd | 33169e8e3a3b7aac2ac2e66ee3a5156729424190 | /CSES/problem3.py | 1f1627f98bcbe03e76f3c9389a6ba34370111275 | [] | no_license | gilleseulemans/CSES-problems | 167717a88ae8d8e88258706fe6b4e1b472562b02 | 5d830940f2908850b2d93d7f9f6fb7ee70f39c7b | refs/heads/main | 2023-07-30T04:44:58.788853 | 2021-09-11T12:01:41 | 2021-09-11T12:01:41 | 405,367,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | def main():
n = input()
list1 = []
list2 = [1]
counter = 1
for i in n:
list1.append(i)
current = list1[0]
for i in range(len(list1) - 1):
if current == list1[i+1]:
counter += 1
list2.append(counter)
else:
counter = 1
current = list1[i+1]
list2.sort()
print(list2[len(list2) -1])
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
dc57e4b3c1c4954f61c5cb315bb48277c0c10ea5 | 42e85e88b8936942eb9e5ed068034c9579384586 | /pipeline_logic/omop/python/schemas.py | 7d87b889016dcae33fda3424b828096f1cafdd78 | [] | no_license | dr-you-group/Data-Ingestion-and-Harmonization | 55b634d8a7abe22cc7f06b3b0bce27467c6720ca | 145aec62daa5df450c94180d5252dd3bc23f0eae | refs/heads/master | 2023-08-25T15:25:59.934816 | 2021-10-07T15:27:07 | 2021-10-07T15:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,408 | py | from collections import OrderedDict
from pyspark.sql import types as T
from pyspark.sql.types import StructType, StructField
def schema_dict_to_struct(schema_dict, all_string_type):
field_list = []
for col_name, col_type in schema_dict.items():
if all_string_type:
field_list.append(StructField(col_name, T.StringType(), True))
else:
field_list.append(StructField(col_name, col_type, True))
struct_schema = StructType(field_list)
return struct_schema
def schema_dict_all_string_type(schema_dict, all_lowercase=False):
    """Return an OrderedDict mapping each column of ``schema_dict`` to
    ``StringType``, optionally lower-casing the column names.

    The key order of the input mapping is preserved.
    """
    string_schema = OrderedDict()
    for original_name in schema_dict:
        key = original_name.lower() if all_lowercase else original_name
        string_schema[key] = T.StringType()
    return string_schema
# Full column inventory for each OMOP domain table: maps the lowercase table
# name to an ordered {COLUMN_NAME -> pyspark DataType} mapping. OrderedDict
# preserves column order. Presumably consumed via schema_dict_to_struct()
# above to build Spark schemas -- confirm with callers.
complete_domain_schema_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
        ('CARE_SITE_NAME', T.StringType()),
        ('PLACE_OF_SERVICE_CONCEPT_ID', T.IntegerType()),
        ('LOCATION_ID', T.LongType()),
        ('CARE_SITE_SOURCE_VALUE', T.StringType()),
        ('PLACE_OF_SERVICE_SOURCE_VALUE', T.StringType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        ('CONDITION_ERA_END_DATE', T.DateType()),
        ('CONDITION_OCCURRENCE_COUNT', T.IntegerType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        ('CONDITION_START_DATETIME', T.TimestampType()),
        ('CONDITION_END_DATE', T.DateType()),
        ('CONDITION_END_DATETIME', T.TimestampType()),
        ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
        ('STOP_REASON', T.StringType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('CONDITION_SOURCE_VALUE', T.StringType()),
        ('CONDITION_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_STATUS_SOURCE_VALUE', T.StringType()),
        ('CONDITION_STATUS_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        ('DEATH_DATETIME', T.TimestampType()),
        ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
        ('CAUSE_CONCEPT_ID', T.IntegerType()),
        ('CAUSE_SOURCE_VALUE', T.StringType()),
        ('CAUSE_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        ('DRUG_ERA_END_DATE', T.DateType()),
        ('DRUG_EXPOSURE_COUNT', T.IntegerType()),
        ('GAP_DAYS', T.IntegerType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        ('DRUG_EXPOSURE_START_DATETIME', T.TimestampType()),
        ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        ('DRUG_EXPOSURE_END_DATETIME', T.TimestampType()),
        ('VERBATIM_END_DATE', T.DateType()),
        ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
        ('STOP_REASON', T.StringType()),
        ('REFILLS', T.IntegerType()),
        ('QUANTITY', T.FloatType()),
        ('DAYS_SUPPLY', T.IntegerType()),
        ('SIG', T.StringType()),
        ('ROUTE_CONCEPT_ID', T.IntegerType()),
        ('LOT_NUMBER', T.StringType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('DRUG_SOURCE_VALUE', T.StringType()),
        ('DRUG_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ROUTE_SOURCE_VALUE', T.StringType()),
        ('DOSE_UNIT_SOURCE_VALUE', T.StringType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
        ('ADDRESS_1', T.StringType()),
        ('ADDRESS_2', T.StringType()),
        ('CITY', T.StringType()),
        ('STATE', T.StringType()),
        ('ZIP', T.StringType()),
        ('COUNTY', T.StringType()),
        ('LOCATION_SOURCE_VALUE', T.StringType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        ('MEASUREMENT_DATETIME', T.TimestampType()),
        ('MEASUREMENT_TIME', T.StringType()),
        ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
        ('OPERATOR_CONCEPT_ID', T.IntegerType()),
        ('VALUE_AS_NUMBER', T.FloatType()),
        ('VALUE_AS_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('RANGE_LOW', T.FloatType()),
        ('RANGE_HIGH', T.FloatType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('MEASUREMENT_SOURCE_VALUE', T.StringType()),
        ('MEASUREMENT_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('UNIT_SOURCE_VALUE', T.StringType()),
        ('VALUE_SOURCE_VALUE', T.StringType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        ('OBSERVATION_DATETIME', T.TimestampType()),
        ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
        ('VALUE_AS_NUMBER', T.FloatType()),
        ('VALUE_AS_STRING', T.StringType()),
        ('VALUE_AS_CONCEPT_ID', T.IntegerType()),
        ('QUALIFIER_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('OBSERVATION_SOURCE_VALUE', T.StringType()),
        ('OBSERVATION_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('UNIT_SOURCE_VALUE', T.StringType()),
        ('QUALIFIER_SOURCE_VALUE', T.StringType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('MONTH_OF_BIRTH', T.IntegerType()),
        ('DAY_OF_BIRTH', T.IntegerType()),
        ('BIRTH_DATETIME', T.TimestampType()),
        ('RACE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
        ('LOCATION_ID', T.LongType()),
        ('PROVIDER_ID', T.LongType()),
        ('CARE_SITE_ID', T.LongType()),
        ('PERSON_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('RACE_SOURCE_VALUE', T.StringType()),
        ('RACE_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_SOURCE_VALUE', T.StringType()),
        ('ETHNICITY_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        ('PROCEDURE_DATETIME', T.TimestampType()),
        ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
        ('MODIFIER_CONCEPT_ID', T.IntegerType()),
        ('QUANTITY', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('PROCEDURE_SOURCE_VALUE', T.StringType()),
        ('PROCEDURE_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('MODIFIER_SOURCE_VALUE', T.StringType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
        ('PROVIDER_NAME', T.StringType()),
        ('NPI', T.StringType()),
        ('DEA', T.StringType()),
        ('SPECIALTY_CONCEPT_ID', T.IntegerType()),
        ('CARE_SITE_ID', T.LongType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_SOURCE_VALUE', T.StringType()),
        ('SPECIALTY_SOURCE_VALUE', T.StringType()),
        ('SPECIALTY_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('GENDER_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        ('VISIT_START_DATETIME', T.TimestampType()),
        ('VISIT_END_DATE', T.DateType()),
        ('VISIT_END_DATETIME', T.TimestampType()),
        ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('CARE_SITE_ID', T.LongType()),
        ('VISIT_SOURCE_VALUE', T.StringType()),
        ('VISIT_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ADMITTING_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ADMITTING_SOURCE_VALUE', T.StringType()),
        ('DISCHARGE_TO_CONCEPT_ID', T.IntegerType()),
        ('DISCHARGE_TO_SOURCE_VALUE', T.StringType()),
        ('PRECEDING_VISIT_OCCURRENCE_ID', T.IntegerType()),
    ]),
}
# Minimal subset of columns per domain table. Same shape as
# complete_domain_schema_dict but lists only the columns that must be
# present in an incoming submission.
required_domain_schema_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        ('CONDITION_ERA_END_DATE', T.DateType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        ('DRUG_ERA_END_DATE', T.DateType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('RACE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        ('VISIT_END_DATE', T.DateType()),
        ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
    ])
}
# Required columns that are essential.
# Records should be dropped if they contain null in any of these columns,
# not just warned about. The commented-out entries are required columns
# that are deliberately NOT treated as drop-worthy when null.
null_cols_to_drop_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        # ('CONDITION_ERA_END_DATE', T.DateType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        # ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        # ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        # ('UNIT_CONCEPT_ID', T.IntegerType()),
        # ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        # ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        # ('DRUG_ERA_END_DATE', T.DateType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        # ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        # ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        # ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        # ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        # ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        # ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        # ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        # ('RACE_CONCEPT_ID', T.IntegerType()),
        # ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        # ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        # ('VISIT_END_DATE', T.DateType()),
        # ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
    ])
}
# Spark schema for the site submission MANIFEST file. Every field is kept as
# a nullable string (dates/flags included) -- presumably parsed downstream;
# confirm with consumers before tightening the types.
manifest_schema = T.StructType([
    T.StructField("SITE_ABBREV", T.StringType(), True),
    T.StructField("SITE_NAME", T.StringType(), True),
    T.StructField("CONTACT_NAME", T.StringType(), True),
    T.StructField("CONTACT_EMAIL", T.StringType(), True),
    T.StructField("CDM_NAME", T.StringType(), True),
    T.StructField("CDM_VERSION", T.StringType(), True),
    T.StructField("VOCABULARY_VERSION", T.StringType(), True),
    T.StructField("N3C_PHENOTYPE_YN", T.StringType(), True),
    T.StructField("N3C_PHENOTYPE_VERSION", T.StringType(), True),
    T.StructField("SHIFT_DATE_YN", T.StringType(), True),
    T.StructField("MAX_NUM_SHIFT_DAYS", T.StringType(), True),
    T.StructField("RUN_DATE", T.StringType(), True),
    T.StructField("UPDATE_DATE", T.StringType(), True),
    T.StructField("NEXT_SUBMISSION_DATE", T.StringType(), True),
    T.StructField("CONTRIBUTION_DATE", T.StringType(), True),
])
| [
"stephanie.hong@jhu.edu"
] | stephanie.hong@jhu.edu |
6417ec0d6ce8a4da8cedd2d42a6c46f353d902ef | 91de8c735941b523148df362b956a11b6693c0a0 | /dashboard/routes.py | 539a0c356e59de2353d4684ce161f96e49e44afd | [] | no_license | adeoti-ade/worldbank_dashboard | 96891903e24d6725927fb8ca99b7a4acaa8c4679 | 0acb63b2d77575f7c0aa85bcbbd8335a6650b650 | refs/heads/master | 2022-04-22T19:57:29.129306 | 2020-04-22T17:53:25 | 2020-04-22T17:53:25 | 252,763,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from dashboard import app
# from flask import Flask
# app = Flask(__name__)
from flask import render_template
@app.route("/")
def index():
    """Serve the dashboard landing page (templates/index.html)."""
    return render_template('index.html')
# @app.route("/")
# def get_news():
# return "no news is good news"
if __name__ == '__main__':
app.run(port=5000, debug=True) | [
"gboyega.a@credpal.com"
] | gboyega.a@credpal.com |
a4fdd6567140f0f569eb565c51d1dbe9bb4152a3 | 49484b879d6c7117ceb1e72dd92621ca7626406d | /old_rank.py | db91c73c446980ac0fabae08102a1796eb06b8af | [] | no_license | magicray/magicray.github.io | b1d29dcdbfc0011f2b43e0dd5df97e3952f0fc70 | 18b757c83a25f61832c34177da2d4a449f72a09c | refs/heads/master | 2023-08-23T15:25:32.209025 | 2023-08-23T00:42:36 | 2023-08-23T00:42:36 | 180,509,807 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,675 | py | import re
import bs4
import math
import json
import time
import argparse
import requests
from logging import critical as log
# (screen slug, session id) pairs for the screener.in screens scraped by
# download() below.
# SECURITY NOTE(review): these are hard-coded session cookies committed to
# source control; they grant account access and will expire -- move them to
# environment variables or a secrets store.
value_screen = ('903587/value', '5ruq9mkugbh7rqdi6q022ld8ji8zg5ki')
growth_screen = ('879125/growth', 'bbv9rqsz9qblhxhp4efhtv5qvjah7mof')
quality_screen = ('878969/quality', '36g81fd47dtp2sfxv18ymxolc36e65o5')
def download(screen, sessionid):
    """Scrape every result page of a screener.in screen.

    Parameters:
        screen: screen slug, e.g. '903587/value'.
        sessionid: value for the `sessionid` cookie (authenticated session).

    Returns:
        dict mapping company name (second table cell) -> dict of
        {column_title: value}, values coerced to float where possible.

    Raises:
        AssertionError if any page request does not return HTTP 200.
    """
    url = 'https://www.screener.in/screens/{}/?include_old=yes&page='
    url = url.format(screen).lower()
    rows = list()
    headers = list()
    # Highest serial number seen so far; serial numbers repeating/decreasing
    # signals that pagination has wrapped around.
    s_no_max = 0
    for i in range(1000):
        r = requests.get(url + str(i+1), headers={
            'accept-encoding': 'gzip',
            'cookie': 'sessionid={};'.format(sessionid)})
        assert(200 == r.status_code)
        log('downloaded {}'.format(url + str(i+1)))
        page = bs4.BeautifulSoup(r.content, 'lxml')
        # Collect column titles from <th><a> elements, normalised to
        # lowercase snake_case; order of first appearance is preserved.
        for h in page.select('th a'):
            title = re.sub(r'\W+', '_', h.text).strip('_').lower()
            if title not in headers:
                headers.append(title)
        flag = False
        for r in page.select('tr'):
            row = list()
            for c in r.children:
                if 'td' == c.name:
                    row.append(c.text.strip())
                    flag = True
            # Assumes the first cell is a serial number like '12.' and the
            # second is the company name -- screener.in table layout.
            if row and row[0].strip():
                s_no = int(row[0].strip('.'))
                if s_no > s_no_max:
                    rows.append(row)
                    s_no_max = s_no
                else:
                    # Serial number did not advance: duplicate page.
                    flag = False
                    break
        if flag is False and rows:
            break
        # To avoid flooding the server with requests and getting thrown out
        time.sleep(1)
    # Re-shape rows into {company: {column: value}}, skipping the serial
    # number and name columns; numeric strings become floats.
    result = dict()
    for row in rows:
        d = result.setdefault(row[1], dict())
        for i in range(len(headers)-2):
            try:
                d[headers[i+2]] = float(row[i+2])
            except Exception:
                d[headers[i+2]] = row[i+2]
    return result
def rank(field, data, descending=True):
    """Rank items of ``data`` by the value of ``field``.

    Parameters:
        field: key to rank on; every value dict in ``data`` must contain it.
        data: mapping of item name -> dict of field values.
        descending: when True (default) the largest field value gets rank 0.

    Returns:
        dict mapping item name -> integer rank (0 is best).

    Ties on the field value are broken by comparing names in the same
    direction, because (value, name) tuples are compared element-wise.
    """
    # Idiom cleanup: the original shadowed both the function name (`rank`)
    # and used a misleading loop variable (`ebit`); behaviour is unchanged.
    ordered = sorted(((v[field], k) for k, v in data.items()),
                     reverse=descending)
    return {name: position for position, (_value, name) in enumerate(ordered)}
def median(field, data):
    """Return ``(max, middle, min)`` of ``field`` over all items in ``data``.

    Note: despite the name, the middle element is the upper median for
    even-sized inputs (no averaging is done).
    """
    ordered = sorted(item[field] for item in data.values())
    middle = ordered[len(ordered) // 2]
    return ordered[-1], middle, ordered[0]
def portfolio(args):
    """Build, print and persist a ranked stock portfolio.

    Reads/refreshes a 24h cache of screener data (universe.json), drops
    rows with missing fields, ranks the biggest ``args.top`` stocks on
    quality/growth/value/stability, prints a table sized by
    ``args.count``/``args.amount``, and rewrites magicrank.json.

    NOTE(review): mutates ``args`` in place and does console + file I/O.
    """
    # The criteria below describe the screener.in screens feeding this
    # universe (documentation only; not enforced here):
    # OPM > 0 AND
    # Return on equity > 0 AND
    # Return on assets > 0 AND
    # Return on invested capital > 0 AND
    # Return on capital employed > 0 AND
    #
    # Sales growth > 0 AND
    # Profit growth > 0 AND
    # Operating profit growth > 0 AND
    #
    # Earnings yield > 0 AND
    # Price to Sales > 0 AND
    # Price to Earning > 0 AND
    # Price to book value > 0 AND
    #
    # EPS > 0 AND
    # EBIT > 0 AND
    # Net profit > 0 AND
    # Profit after tax > 0 AND
    # Operating profit > 0 AND
    #
    # EBIT latest quarter > 0 AND
    # EBIT preceding quarter > 0 AND
    # Operating profit latest quarter > 0 AND
    # Operating profit preceding quarter > 0 AND
    # Operating profit 2quarters back > 0 AND
    # Operating profit 3quarters back > 0 AND
    #
    # Sales > Net profit AND
    # Sales > Operating profit AND
    #
    # Current ratio > 1 AND
    # Net worth > 0 AND
    # Book value > 0 AND
    # Total Assets > 0
    # Load the cached universe if it is less than a day old; otherwise
    # re-download all three screens and merge them per company.
    filename = 'universe.json'
    try:
        data = json.load(open(filename))
        assert(data['timestamp'] > time.time() - 86400)
    except Exception:
        data = dict()
        for screen, sessionid in (value_screen, growth_screen, quality_screen):
            for key, value in download(screen, sessionid).items():
                if key in data:
                    data[key].update(value)
                else:
                    data[key] = value
        data = dict(timestamp=int(time.time()), data=data)
        with open(filename, 'w') as fd:
            json.dump(data, fd)
    # Keep only companies with no blank fields; derive p_o (price to
    # operating profit) for them.
    tmp = dict()
    for k, v in data['data'].items():
        v.pop('5yrs_return', None)
        if all('' != y for y in v.values()):
            tmp[k] = v
            v['p_o'] = v['mar_cap_rs_cr'] / v['op_12m_rs_cr']
        else:
            log('incomplete data : %s', k)
    if not args.top:
        args.top = int(len(tmp)/2)
    if not args.count:
        args.count = args.top
    # Statistics is likely to work more reliable for bigger companies,
    # pick biggest args.top stocks by market cap
    mcap = rank('op_12m_rs_cr', tmp)
    #mcap = rank('mar_cap_rs_cr', tmp)
    final_rank = [(mcap[name], name) for name in mcap]
    biggest = set([name for rank, name in sorted(final_rank)[:args.top]])
    data = {k: v for k, v in tmp.items() if k in biggest}
    assert(len(data) == args.top)
    t = time.time()
    log('columns(%d) rows(%d) msec(%d)',
        len(data[list(data.keys())[0]]), len(data), (time.time()-t)*1000)
    # Columns shown in the printed table and averaged at the bottom.
    columns = ('roce', 'roe',
               # 'qtr_sales_var', 'qtr_profit_var',
               'earnings_yield', 'p_e',
               'mar_cap_rs_cr', 'cmp_rs')
    # Rank on Profitability
    roe = rank('roe', data)
    roe_3yr = rank('roe_3yr', data)
    roe_5yr = rank('roe_5yr', data)
    roce = rank('roce', data)
    roce_3yr = rank('roce_3yr', data)
    roce_5yr = rank('roce_5yr', data)
    roic = rank('roic', data)
    opm = rank('opm', data)
    opm_5yr = rank('5yr_opm', data)
    roa = rank('roa_12m', data)
    roa_3yr = rank('roa_3yr', data)
    roa_5yr = rank('roa_5yr', data)
    # Rank on Growth
    sales_growth = rank('sales_growth', data)
    sales_growth_3yr = rank('sales_var_3yrs', data)
    sales_growth_5yr = rank('sales_var_5yrs', data)
    sales_growth_yoy = rank('qtr_sales_var', data)
    profit_growth = rank('profit_growth', data)
    profit_growth_3yr = rank('profit_var_3yrs', data)
    profit_growth_5yr = rank('profit_var_5yrs', data)
    profit_growth_yoy = rank('qtr_profit_var', data)
    op_profit_growth = rank('opert_prft_gwth', data)
    # Rank on Valuation
    pe = rank('p_e', data, False)
    ps = rank('cmp_sales', data, False)
    pb = rank('cmp_bv', data, False)
    po = rank('p_o', data, False)
    e_yield = rank('earnings_yield', data)
    # Rank on Stability
    sales = rank('sales_rs_cr', data)
    np = rank('np_12m_rs_cr', data)
    op = rank('op_12m_rs_cr', data)
    debteq = rank('debt_eq', data, False)
    stats = {f: median(f, data) for f in columns}
    # Composite score: average rank within each bucket, summed across
    # buckets; lower is better.
    final_rank = [(
        # Quality
        (roce[name] + roe[name] + opm[name] + roa[name] +
         roce_3yr[name] + roe_3yr[name] + roa_3yr[name] +
         roce_5yr[name] + roe_5yr[name] + opm_5yr[name] + roa_5yr[name] +
         roic[name]) / 12 +
        # Growth
        (sales_growth[name] + profit_growth[name] +
         sales_growth_3yr[name] + profit_growth_3yr[name] +
         sales_growth_5yr[name] + profit_growth_5yr[name] +
         sales_growth_yoy[name] + profit_growth_yoy[name] +
         op_profit_growth[name]) / 9 +
        # Value
        (pe[name] + pb[name] + ps[name] + po[name] + e_yield[name]) / 5 +
        # Stability
        (sales[name] + np[name] + op[name] + debteq[name]) / 4,
        name) for name in roe]
    def print_header():
        # Shared table header used at the top and bottom of the report.
        headers = '{:16s}' + '{:>8s}' * 9
        print(headers.format(time.strftime('%Y-%m-%d'),
                             'ROCE', 'ROE',
                             'SALES', 'PROFIT',
                             'YIELD', 'P/E',
                             'MCAP', 'CMP', 'QTY'))
    print_header()
    for i, f in enumerate(('Max', 'Median')):
        print(('%s\t\t' + '%8.2f' * 4 + '%8d%8d') % (
            f,
            stats['roce'][i],
            stats['roe'][i],
            # stats['qtr_sales_var'][i],
            # stats['qtr_profit_var'][i],
            stats['earnings_yield'][i],
            stats['p_e'][i],
            stats['mar_cap_rs_cr'][i],
            stats['cmp_rs'][i]))
    print('-' * 88)
    avg = {k: 0 for k in columns}
    avg['count'] = 0
    # A fractional --count is interpreted as a fraction of the universe;
    # a negative --count selects from the bottom of the ranking instead.
    if int(args.count) != args.count:
        args.count = args.count * len(final_rank)
    args.count = int(args.count)
    start = 0
    args.count = args.count if args.count else len(final_rank)
    if args.count < 0:
        args.count *= -1
        start = len(final_rank) - args.count
    per_stock = args.amount / args.count
    count = 0
    stock_list = list()
    # Allocate --amount across the selected stocks, roughly per_stock each,
    # printing one row per stock.
    for n, (_, name) in enumerate(sorted(final_rank)[start:start+args.count]):
        v = data[name]
        v['name'] = name
        v['rank'] = count+1
        stock_list.append(v)
        qty = 0
        available = per_stock if args.amount > per_stock else args.amount
        qty = math.ceil(available / v['cmp_rs'])
        if qty*v['cmp_rs'] > max(available, args.amount):
            qty -= 1
        if args.amount and qty < 1:
            break
        args.amount -= qty*v['cmp_rs']
        print(('%-16s' + '%8.2f' * 4 + '%8d%8d%8d') % (
            name, v['roce'], v['roe'],
            # v['qtr_sales_var'], v['qtr_profit_var'],
            v['earnings_yield'], v['p_e'],
            v['mar_cap_rs_cr'], v['cmp_rs'],
            qty))
        count += 1
        for k in columns:
            avg[k] += v[k]
        avg['count'] += 1
    for k in columns:
        avg[k] /= avg['count']
    # Persist the new ranking and maintain a 90-day "sold" list of names
    # that dropped out of the top half since the previous run.
    with open('magicrank.json') as fd:
        prev = json.load(fd)
    prev_names = set([s['name'] for s in prev['data'] if s['rank'] <= len(prev['data'])/2])
    stock_names = set([s['name'] for s in stock_list if s['rank'] <= args.top/2])
    with open('magicrank.json', 'w') as fd:
        ts = int(time.time())
        sold = prev.get('sold', {})
        sold.update({s: ts for s in set(prev_names) - set(stock_names)})
        for s in list(sold.keys()):
            if s in stock_names:
                sold.pop(s)
        json.dump(dict(
            data=stock_list,
            date=int(time.time()),
            symbol=prev['symbol'],
            sold={k: v for k, v in sold.items() if v+86400*90 > ts},
            url='https://www.screener.in/screens/290555/universe/'),
            fd, sort_keys=True, indent=4)
    print('-' * 88)
    print_header()
    print(('%-16s' + '%8.2f' * 4 + '%8d%8d') % (
        'Average', avg['roce'], avg['roe'],
        # avg['qtr_sales_var'], avg['qtr_profit_var'],
        avg['earnings_yield'], avg['p_e'],
        avg['mar_cap_rs_cr'], avg['cmp_rs']))
def main():
    """Parse command-line options and run the portfolio builder."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--amount', dest='amount', type=int, default=0)
    arg_parser.add_argument('--count', dest='count', type=float)
    arg_parser.add_argument('--top', dest='top', type=int, default=500)
    options = arg_parser.parse_args()
    portfolio(options)
# Script entry point; the guard keeps the module importable without
# side effects.
if __name__ == '__main__':
    main()
| [
"bhsingh@gmail.com"
] | bhsingh@gmail.com |
a3596d0458068c035f819299c4c073136628a1bd | af4f73a15837c2b06f632611eac07a3f44110007 | /client/node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi | a949df6fc8d73bb3bcc9c963214059ab8b226f28 | [
"MIT"
] | permissive | m3mber/ES_2020_2021 | 6664e9cee0665f304c4ad6d200ed00115c5ed6bc | a57a0384c905295af4f518283ffd8f93c121f11f | refs/heads/main | 2023-06-01T06:38:45.396519 | 2021-06-30T01:15:01 | 2021-06-30T01:15:01 | 369,866,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,493 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/vv/Library/Caches/node-gyp/10.16.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/vv/.npm-init.js",
"userconfig": "/Users/vv/.npmrc",
"cidr": "",
"node_version": "10.16.3",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/vv/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.13 node/v10.16.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"unsafe_perm": "true",
"onload_script": "",
"tmp": "/var/folders/rg/r41gm_0d6s7d7c2dvbj55fd00000gn/T",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"valentino.vukadinovic013@gmail.com"
] | valentino.vukadinovic013@gmail.com |
8c5396bc02f8374ca2fdbef23af507552d0e8a17 | 75c2b6336b06d1dc2ac2fd49d6554f75708a7fe0 | /Compare Images/compare.py | 50777400106786cbad05667121c13f7a8114c1de | [] | no_license | SanjayKumarTS/Computer-Vision | c3fd8e377b43928077219bea342ced7a606e38da | 23099db4f500bc4d2c011dcfa945ddf62bd9e80e | refs/heads/master | 2020-04-18T06:15:50.912097 | 2019-01-24T06:12:09 | 2019-01-24T06:12:09 | 167,310,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | # import the necessary packages
from skimage.measure import compare_ssim
import argparse
import imutils
import cv2
from skimage import feature
import matplotlib.pyplot as plt
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
help="first input image")
ap.add_argument("-s", "--second", required=True,
help="second")
args = vars(ap.parse_args())
# load the two input images
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
# convert the images to grayscale
# grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
# grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
grayA = cv2.imread(args["first"], 0)
grayB = cv2.imread(args["second"], 0)
# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 128, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
# compute the bounding box of the contour and then draw the
# bounding box on both input images to represent where the two
# images differ
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
# edges1 = feature.canny(grayA, sigma=3)
# edges2 = feature.canny(grayB, sigma=3)
# fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
# sharex=True, sharey=True)
# ax2.imshow(edges1, cmap=plt.cm.gray)
# ax2.axis('off')
# ax2.set_title('Canny filter1', fontsize=20)
# plt.show()
# ax2.imshow(edges2, cmap=plt.cm.gray)
# ax2.axis('off')
# ax2.set_title('Canny filter2', fontsize=20)
# plt.show()
# show the output images
cv2.imshow("Original", imageA)
cv2.imshow("Modified", imageB)
cv2.imshow("Diff", diff)
cv2.imshow("Thresh", thresh)
cv2.waitKey(0)
| [
"32599300+SanjayKumarTS@users.noreply.github.com"
] | 32599300+SanjayKumarTS@users.noreply.github.com |
da325578a57f0f5949a3625ee61b64b1612a13c1 | 04f948d94cf288eafccf2b513078aeed77e3faef | /prof.py | a35159b88b3feed2074e0fcec867c1df8d0ddf85 | [
"Apache-2.0"
] | permissive | jdily/qpth | a9d0e5a662c407e6b6a92a25962040f0a2834ce8 | 296c01775ac82e7890aa688839f39fff6a6cb681 | refs/heads/master | 2021-01-21T12:58:33.373545 | 2017-05-16T15:02:12 | 2017-05-16T15:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | #!/usr/bin/env python3
import argparse
import sys
import numpy as np
import numpy.random as npr
import qpth.solvers.pdipm.single as pdipm_s
import qpth.solvers.pdipm.batch as pdipm_b
import itertools
import time
import torch
import gurobipy as gpy
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
import setproctitle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--nTrials', type=int, default=10)
args = parser.parse_args()
setproctitle.setproctitle('bamos.optnet.prof')
npr.seed(0)
prof(args)
def prof(args):
print('| \# Vars | \# Batch | Gurobi | single | batched |')
print('|----------+----------+--------+--------+---------|')
# for nz, nBatch in itertools.product([100,500], [1, 64, 128]):
for nz, nBatch in itertools.product([100], [1, 64, 128]):
times = []
for i in range(args.nTrials):
times.append(prof_instance(nz, nBatch))
times = np.array(times)
print(("| {:5d} " * 2 + "| ${:.5e} \pm {:.5e}$ s " * 3 + '|').format(
*([nz, nBatch] + [item for sublist in zip(times.mean(axis=0), times.std(axis=0))
for item in sublist])))
def prof_instance(nz, nBatch, cuda=True):
nineq, neq = 100, 0
assert(neq == 0)
L = npr.rand(nBatch, nz, nz)
Q = np.matmul(L, L.transpose((0, 2, 1))) + 1e-3 * np.eye(nz, nz)
G = npr.randn(nBatch, nineq, nz)
z0 = npr.randn(nBatch, nz)
s0 = npr.rand(nBatch, nineq)
p = npr.randn(nBatch, nz)
h = np.matmul(G, np.expand_dims(z0, axis=(2))).squeeze(2) + s0
A = npr.randn(nBatch, neq, nz)
b = np.matmul(A, np.expand_dims(z0, axis=(2))).squeeze(2)
zhat_g = []
gurobi_time = 0.0
for i in range(nBatch):
m = gpy.Model()
zhat = m.addVars(nz, lb=-gpy.GRB.INFINITY, ub=gpy.GRB.INFINITY)
obj = 0.0
for j in range(nz):
for k in range(nz):
obj += 0.5 * Q[i, j, k] * zhat[j] * zhat[k]
obj += p[i, j] * zhat[j]
m.setObjective(obj)
for j in range(nineq):
con = 0
for k in range(nz):
con += G[i, j, k] * zhat[k]
m.addConstr(con <= h[i, j])
m.setParam('OutputFlag', False)
start = time.time()
m.optimize()
gurobi_time += time.time() - start
t = np.zeros(nz)
for j in range(nz):
t[j] = zhat[j].x
zhat_g.append(t)
p, L, Q, G, z0, s0, h = [torch.Tensor(x) for x in [p, L, Q, G, z0, s0, h]]
if cuda:
p, L, Q, G, z0, s0, h = [x.cuda() for x in [p, L, Q, G, z0, s0, h]]
if neq > 0:
A = torch.Tensor(A)
b = torch.Tensor(b)
else:
A, b = [torch.Tensor()] * 2
if cuda:
A = A.cuda()
b = b.cuda()
# af = adact.AdactFunction()
single_results = []
start = time.time()
for i in range(nBatch):
A_i = A[i] if neq > 0 else A
b_i = b[i] if neq > 0 else b
U_Q, U_S, R = pdipm_s.pre_factor_kkt(Q[i], G[i], A_i)
single_results.append(pdipm_s.forward(p[i], Q[i], G[i], A_i, b_i, h[i],
U_Q, U_S, R))
single_time = time.time() - start
start = time.time()
Q_LU, S_LU, R = pdipm_b.pre_factor_kkt(Q, G, A)
zhat_b, nu_b, lam_b, s_b = pdipm_b.forward(p, Q, G, h, A, b, Q_LU, S_LU, R)
batched_time = time.time() - start
# Usually between 1e-4 and 1e-5:
# print('Diff between gurobi and pdipm: ',
# np.linalg.norm(zhat_g[0]-zhat_b[0].cpu().numpy()))
# import IPython, sys; IPython.embed(); sys.exit(-1)
# import IPython, sys; IPython.embed(); sys.exit(-1)
# zhat_diff = (single_results[0][0] - zhat_b[0]).norm()
# lam_diff = (single_results[0][2] - lam_b[0]).norm()
# eps = 0.1 # Pretty relaxed.
# if zhat_diff > eps or lam_diff > eps:
# print('===========')
# print("Warning: Single and batched solutions might not match.")
# print(" + zhat_diff: {}".format(zhat_diff))
# print(" + lam_diff: {}".format(lam_diff))
# print(" + (nz, neq, nineq, nBatch) = ({}, {}, {}, {})".format(
# nz, neq, nineq, nBatch))
# print('===========')
return gurobi_time, single_time, batched_time
if __name__ == '__main__':
main()
| [
"bamos@cs.cmu.edu"
] | bamos@cs.cmu.edu |
959d18ae3024dfaa89aa2fc9610817389fe4a1cb | 62ec7aa1361416c29583a1ca247fd54f2bd185a4 | /test_fr/settings.py | fdf2b5c69bbf075bb6849b2bcdafb808d154737d | [] | no_license | xinchao-bojan/test_fr | 20d3bea5908646c0320ab10a829984a385ec94e2 | e023cfb06041613aca7fe24cc7d02875367dbc41 | refs/heads/main | 2023-05-27T01:47:19.984770 | 2021-06-13T10:23:52 | 2021-06-13T10:23:52 | 376,494,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | """
Django settings for test_fr project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from datetime import timedelta
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-+(!26^0x18&72ntzd^g4e$+ettigr64-(q(io=*6_qyje&vhae'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main_app',
'custom_user',
'corsheaders',
'rest_framework',
'djoser',
'drf_yasg',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
ROOT_URLCONF = 'test_fr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_fr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else:
DATABASES = {
'default':
{
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_fr',
'USER': 'bojan',
'PASSWORD': '789256',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(days=30),
'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
'AUTH_HEADER_TYPES': ('Bearer',),
}
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
'USERNAME_RESET_CONFIRM_URL': '#/username/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': '#/activate/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': False,
}
CORS_ORIGIN_ALLOW_ALL = True # If this is used then `CORS_ORIGIN_WHITELIST` will not have any effect
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_REGEX_WHITELIST = [
'http://localhost:3030',
]
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header'
}
}
}
AUTH_USER_MODEL = "custom_user.CustomUser"
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# STATICFILES_DIRS = [(os.path.join(BASE_DIR, 'static_dev'))]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"sk.schooldude@gmail.com"
] | sk.schooldude@gmail.com |
c1d4d9b077db0b1868665eae4933ae3061f14e62 | e2deae038ca17daad3e496fe8c517acc77bd7175 | /protein-translation/protein_translation.py | 4036d77aef47aa9d344d84be018844ba942c90cc | [] | no_license | eureka84/exercism-python | d2bb9248d4eeb9ab9ccefc4f847a595d1401d7eb | 3cf0e2abccc866469f9b126217906065b5bb9eb0 | refs/heads/master | 2020-04-28T01:50:10.636420 | 2019-03-29T18:40:58 | 2019-03-29T18:40:58 | 174,874,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from itertools import takewhile
aminoAcidsDictionary = dict([
("AUG", "Methionine"),
("UUU", "Phenylalanine"),
("UUC", "Phenylalanine"),
("UUA", "Leucine"),
("UUG", "Leucine"),
("UCU", "Serine"),
("UCC", "Serine"),
("UCA", "Serine"),
("UCG", "Serine"),
("UAU", "Tyrosine"),
("UAC", "Tyrosine"),
("UGU", "Cysteine"),
("UGC", "Cysteine"),
("UGG", "Tryptophan"),
("UAA", "STOP"),
("UAG", "STOP"),
("UGA", "STOP")
])
def proteins(strand):
codons = split_every_n_chars(strand, 3)
candidate_sequence = map(lambda c: parse_codon(c), codons)
return list(takewhile(lambda a: a != "STOP", candidate_sequence))
def split_every_n_chars(string, n):
return [str(string[i:i + n]) for i in range(0, len(string), n)]
def parse_codon(strand):
return aminoAcidsDictionary[strand] | [
"angelosciarra@ymail.com"
] | angelosciarra@ymail.com |
d6e78f5990e9b9bcb0a179d6107e7ba267053399 | ad13e1c4fb65b810cd6d7232ab618e29d864a3d9 | /data_analysis/data_group.py | 48b65b398dd9cde9c7d74b6d5f3f178c9d065c22 | [] | no_license | shuaiyin/pystudy | 688994f728e6a8e678325e63b3d8caa446caae20 | fe017cb94582b5e3af68807915447ea99983b742 | refs/heads/master | 2021-01-13T10:27:53.808830 | 2017-05-07T08:17:03 | 2017-05-07T08:17:03 | 63,121,870 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,092 | py | #coding=utf-8
from pandas import DataFrame,Series
import pandas as pd
import numpy as np
import sys
df = DataFrame({'key1':['a','a','b','b','a'],
'key2':['one','two','one','two','one'],
'data1':np.random.randn(5)})
print df
#group by key1 and calculate the average value of column data1
groupd = df['data1'].groupby(df['key1'])
print groupd
means = groupd.mean()
#using two keys to group and calculate the average value (using in Series)
means = df['data1'].groupby([df['key1'],df['key2']]).mean()
print means
#the key of group by can be every array the length of which is approprivate
states = np.array(['Ohio','California','California','Ohio','Ohio'])
years = np.array([2005,2005,2006,2005,2006])
means = df['data1'].groupby([states,years]).mean()
print means
#using column name as group key
means = df.groupby('key1').mean()
"""
there is no column key2 in the output,for that the data type of column key2 is string and not num !! so it will be filtered
"""
print means
print df.groupby(['key1','key2']).size()
for name,group in df.groupby('key1'):
print 'name is ',name
print 'value is: \n',group
print '\n----------------\n'
for (k1,k2),group in df.groupby(['key1','key2']):
print 'the key name is ',k1,k2
print 'the value is:\n',group
people = DataFrame(np.random.randn(5,5),columns=['a','b','c','d','e'],index=['Joe','Steve','Wes','Jim','Travis'])#索引值为人的名字
print people
"""
a b c d e
Joe 1.304699 0.100459 -0.000408 -1.095217 -1.142781
Steve -1.224551 0.478045 -1.328901 -0.365792 -1.339277
Wes 0.330814 -0.768008 -0.599442 -0.854585 -0.174300
Jim 0.701609 -1.466142 -0.207906 -0.870489 0.963129
Travis -2.215134 -0.821001 0.361285 -0.935930 -0.472026
"""
people.ix[2:3,['b','c']] = np.nan #add some NA value
print people
"""
a b c d e
Joe 1.304699 0.100459 -0.000408 -1.095217 -1.142781
Steve -1.224551 0.478045 -1.328901 -0.365792 -1.339277
Wes 0.330814 NaN NaN -0.854585 -0.174300
Jim 0.701609 -1.466142 -0.207906 -0.870489 0.963129
Travis -2.215134 -0.821001 0.361285 -0.935930 -0.472026
"""
mapping = {'a':'red','b':'red','c':'blue','d':'blue','e':'red','f':'orange'}
#那么分组是可以按照index(索引行)或者column(列)来进行分组的,那么默认情况下是按照index来进行分组的,那么如果想指定column进行分组的话,需要设置axis参数为1
by_column = people.groupby(mapping,axis=1)
print by_column.sum()#做一次列的汇总求和
"""
blue red
Joe -1.095625 0.262377
Steve -1.694693 -2.085783
Wes -0.854585 0.156513
Jim -1.078395 0.198596
Travis -0.574644 -3.508161
"""
print by_column.count()#做一次列的汇总求总数
"""
blue red
Joe 2 3
Steve 2 3
Wes 1 2
Jim 2 3
Travis 2 3
"""
# 通过函数进行分组,相较于字典或Series,python函数在定义分组映射关系时可以更有创意且更为抽象,任何被当做分组建的函数都会在各个索引值上被调用一次,其返回值会被用作分组名称
print people.groupby(len).sum()#using fuction len as group function
"""
a b c d e
3 -0.817955 0.202583 1.424850 1.375932 0.589461
5 -0.865928 0.517076 -0.981535 0.816557 1.303144
6 1.700588 1.281608 0.025498 -0.415192 0.114043
"""
print people.groupby(len).count()
"""
a b c d e
3 3 2 2 3 3
5 1 1 1 1 1
6 1 1 1 1 1
"""
#将函数与跟数组,列表。字典,Series混合使用也不是问题,因为任何东西都会被转换为数组。
key_list = ['one','one','one','two','two']
print people
print people.groupby([len,key_list]).sum()
print people.groupby(len).sum()
"""
a b c d e
Joe -0.044850 1.446475 -0.354495 -0.892443 -0.415122
Steve -0.941921 -1.141826 -0.947607 -0.854944 1.867269
Wes -1.419970 NaN NaN -0.339313 -2.458381
Jim -0.397000 1.715947 -0.654819 -1.420298 -0.806450
Travis -1.463469 0.356982 0.131443 1.245837 -0.365482
------优先按照长度来分组,那么发现这个key_list的长度恰好和索引的长度相同,那么第二级分组就靠它了
a b c d e
3 one -1.464820 1.446475 -0.354495 -1.231757 -2.873503
two -0.397000 1.715947 -0.654819 -1.420298 -0.806450
5 one -0.941921 -1.141826 -0.947607 -0.854944 1.867269
6 two -1.463469 0.356982 0.131443 1.245837 -0.365482
-------
a b c d e
3 -1.861820 3.162422 -1.009314 -2.652055 -3.679954
5 -0.941921 -1.141826 -0.947607 -0.854944 1.867269
"""
df = DataFrame({
'A':['foo','bar','foo','bar','foo','bar','foo','foo'],
'B':['one','one','two','three','two','two','one','three'],
'C': np.random.randn(8),
'D':np.random.randn(8)
})
print df
result = df.groupby('A').min()
print result
"""
A B C D
0 foo one 1.271095 0.524734
1 bar one -1.606482 0.945581
2 foo two -1.770528 -2.329267
3 bar three -0.525324 -0.197216
4 foo two -0.572990 1.313470
5 bar two -0.319865 -0.241170
6 foo one 0.126530 0.443100
7 foo three -0.956525 -1.255222
----这里按照A列进行分组,那么这里求得分组之后分组中数据的最小值(当然对于str类型的数据是无法输出的)
B C D
A
bar one -1.606482 -0.241170
foo one -1.770528 -2.329267
"""
##根据索引级别分组,层次化索引数据集最方便的地方就在于他能够根据索引级别进行聚合。要实现该目的通过level关键字传入级别编号或名称即可
ls = [['US','US','US','JP','JP'],[1,3,5,1,3]]
columns = pd.MultiIndex.from_arrays(ls,names=['city','tenor'])
hier_df = DataFrame(np.random.randn(4,5),columns=columns)
print hier_df
print hier_df.groupby(level='city',axis=1).count()#
print hier_df.groupby(level='tenor',axis=1).count()#
"""
##根据索引级别分组,层次化索引数据集最方便的地方就在于他能够根据索引级别进行聚合。要实现该目的通过level关键字传入级别编号或名称即可
city US JP
tenor 1 3 5 1 3
0 0.781725 1.171544 -0.743763 0.887777 -0.487526
1 -0.162591 -0.510159 -0.898424 0.341528 2.143882
2 -0.204438 -0.709068 -3.320502 1.403123 1.065139
3 0.965160 0.898632 -0.390778 0.036086 1.621391
--------
city JP US
0 2 3
1 2 3
2 2 3
3 2 3
--------
tenor 1 3 5
0 2 2 1
1 2 2 1
2 2 2 1
3 2 2 1
"""
df = DataFrame({'data1':np.random.randn(5),
'data2':np.random.randn(5),
'key1':['a','a','b','b','a'],
'key2':['one','two','one','two','one']})
print df
grouped = df.groupby('key1')
print grouped['data1'].quantile(0.9)
"""
key1
a 0.543716
b 0.870144
Name: data1, dtype: float64
"""
####如果要使用你自己的聚合函数,只需传入aggregate或agg方法即可
df = DataFrame({'data1':np.random.randn(5),
'data2':np.random.randn(5),
'key1':['a','a','b','b','a'],
'key2':['one','two','one','two','one']})
print df
grouped = df.groupby('key1')
def peak_to_peak(arr):
print '\nthe arr is\n',arr,'\n'
return arr.max() - arr.min()
print grouped.agg(peak_to_peak)
"""
data1 data2 key1 key2
0 -1.195139 0.874116 a one
1 -0.824895 1.735717 a two
2 -1.097719 0.243204 b one
3 0.906202 -0.072424 b two
4 -0.357310 1.201323 a one
-----------
如果要使用你自己的聚合函数,只需要将其传入aggregate或agg方法即可
data1 data2
key1
a 0.837828 0.861601
b 2.003922 0.315628
"""
# def test(x):
# return pd.Series([x,x+1],index=['x','x+1'])
# grouped = df.groupby('A')
# print grouped['C'].apply(test)
# # print grouped['C'].apply(lambda x : x.describe())
sys.exit(0)
| [
"yshuaicode@gmail.com"
] | yshuaicode@gmail.com |
875c0f821af2f07ad32cab2bdcedef57dd82e2a5 | a26ecf8a24ed20ed9ee4728fa189cc9168f4416b | /library/__init__.py | 09902a0aa53c4e5e72e8ac4cf82bc0737e8102b8 | [] | no_license | mfa/addition_seq2seq_allennlp | c3cf543c65a939aa33ed7aa74f9bf0457f913530 | e8176b33cd6ce375f13d9e720aa4d92a4f210912 | refs/heads/master | 2020-04-25T23:41:27.294094 | 2019-03-10T13:25:15 | 2019-03-10T13:25:15 | 173,153,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | from library.data import AdditionSeq2SeqDatasetReader
| [
"andreas@madflex.de"
] | andreas@madflex.de |
44e428fed52f42a36ffdae0738e5c259ee1aec43 | eafd1a20588db93fce6d6b0fe13bf9be5ea36293 | /make_plots.py | 3aafa8a0ebd5d5fcb019518b40c89fd4ce39ca8c | [] | no_license | isaakh12/final_project | 885124d50ec66b2dad647a562aaa12b98c1c4128 | 70b85de1ef90072860a480db5accb1dca425567c | refs/heads/master | 2023-05-04T06:02:13.540575 | 2021-05-26T06:58:21 | 2021-05-26T06:58:21 | 362,218,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | def make_plots(subset_df, interp_df1, interp_df2, transect_number, var, min_var, max_var):
'''
Makes interpolation and raw data plots
Input: Subset dataframe, interpolation grid, transect number, variable, minimum value, maximum values
Output: Raw data figure, Interpolation figure
'''
from matplotlib import pyplot as plt
grid = plt.GridSpec(2, 3, wspace=0, hspace=0) #set gridspace
plt.figure(figsize = (18,8))
plt.suptitle("Transect {:d}".format(transect_number), size = 20)
plt.subplot(grid[0, 1])
#graph raw data
plt.scatter(subset_df['distance'], subset_df['rangetobot'], c=subset_df[var], s = 3)
plt.clim(min_var, max_var)
#set labels depending on variable used
if(var == 'oxygen'):
plt.colorbar(label = "Oxygen [\u03BCmol/L]")
elif(var == 'temp'):
plt.colorbar(label = "Temperature [\N{DEGREE SIGN}C]")
elif(var == 'salinity'):
plt.colorbar(label = "Salinity [PSU]")
plt.title("Original Data")
plt.xlabel("Transect Distance [m]")
plt.ylabel("Depth above seafloor [m]")
plt.subplot(grid[1, 0])
#visualize interpolation (transposed so it appears correct)
plt.imshow(interp_df1.T, extent=(0,max(subset_df['distance']),0,max(subset_df['rangetobot'])), origin='lower', aspect = 'auto', cmap='viridis')
plt.plot(subset_df['distance'], subset_df['rangetobot'], 'x', ms = 1, c = 'k')
plt.clim(min_var, max_var)
if(var == 'oxygen'):
plt.colorbar(label = "Oxygen [\u03BCmol/L]")
elif(var == 'temp'):
plt.colorbar(label = "Temperature [\N{DEGREE SIGN}C]")
elif(var == 'salinity'):
plt.colorbar(label = "Salinity [PSU]")
plt.title("Linear Interpolation")
plt.xlabel("Transect Distance [m]")
plt.ylabel("Depth above seafloor [m]")
plt.subplot(grid[1, 2])
#visualize interpolation (transposed so it appears correct)
plt.imshow(interp_df2.T, extent=(0,max(subset_df['distance']),0,max(subset_df['rangetobot'])), origin='lower', aspect = 'auto', cmap='viridis')
plt.plot(subset_df['distance'], subset_df['rangetobot'], 'x', ms = 1, c = 'k')
plt.clim(min_var, max_var)
if(var == 'oxygen'):
plt.colorbar(label = "Oxygen [\u03BCmol/L]")
elif(var == 'temp'):
plt.colorbar(label = "Temperature [\N{DEGREE SIGN}C]")
elif(var == 'salinity'):
plt.colorbar(label = "Salinity [PSU]")
plt.title("Cubic Spline Interpolation")
plt.xlabel("Transect Distance [m]")
plt.ylabel("Depth above seafloor [m]")
plt.show()
| [
"ihaberman@mlml.calstate.edu"
] | ihaberman@mlml.calstate.edu |
1f1c6c2bd258f7cf84354d7bf9675b2f2e93c4d1 | 3e6592da31da5ab872a20ab9b97daef85d183d4e | /cryspy/B_parent_classes/preocedures.py | 66f1faa79b5b4aa7388c65c3b9a5e92674da25f3 | [
"MIT"
] | permissive | ikibalin/cryspy | 248b821d8e63cc1ffff07caee5b4b0591a3d5fa6 | 7452415b87f948bb75fb5af96fe414fb1165c71e | refs/heads/master | 2023-07-22T04:14:11.153574 | 2022-11-25T13:48:33 | 2022-11-25T13:48:33 | 178,411,703 | 5 | 2 | NOASSERTION | 2022-04-15T14:26:37 | 2019-03-29T13:36:00 | Python | UTF-8 | Python | false | false | 353 | py | from .cl_3_data import DataN
def take_items_by_class(global_obj, l_class) -> list:
l_res = []
for item in global_obj.items:
if isinstance(item, l_class):
l_res.append(item)
elif isinstance(item, DataN):
l_res_data = take_items_by_class(item, l_class)
l_res.extend(l_res_data)
return l_res | [
"noreply@github.com"
] | noreply@github.com |
3d3444f908c3d7439d1aa9465070c7661a020e8b | e7eaf928ecf661b62ba5a8814fe3fdf6dfc85c82 | /parallel/update_fam.py | 0bb27d259c652ce0da242e5c910155ab43de88d4 | [] | no_license | kannz6/cchmcProjects | 8ac820c21a943c5937bf6efc8dc290b201c890c5 | e7dc4b50578488a43804c5873b1192cf48dca78e | refs/heads/master | 2021-04-30T16:31:03.965864 | 2017-08-18T17:20:09 | 2017-08-18T17:20:09 | 80,047,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | #!/usr/bin/env python
#Andrew Rupert 4-6-17
import fileinput
import os
import re
import sys
######################
#bulk vcf (yale)
######################
# r_parse_base_id=re.compile(r'^(.*?)(-\d\d)?$')
# new_columns = []
path = "tmp_fam.txt"
# tmpFamFileWriter = open("tmp_fam.txt", "w+")
# for line in fileinput.input():
# line=line.strip()
# columns=line.split(' ')
# new_columns=columns[:]
# #print(columns)
# #print(new_columns)
# match = r_parse_base_id.match(columns[0])
# family_id = match.group(1)
# gender = '0'
# mother_id = '0'
# father_id = '0'
# if match.group(2) == '-01':
# gender = '2'
# elif match.group(2) == '-02':
# gender = '1'
# elif match.group(2) is None:
# mother_id = family_id + '-01'
# father_id = family_id + '-02'
# else:
# raise ValueError('HEEELP')
# new_columns[0] = family_id
# new_columns[2] = mother_id
# new_columns[3] = father_id
# new_columns[4] = gender
# tmpFamFileWriter.write(' '.join(new_columns))
# tmpFamFileWriter.write("\n")
# # print(' '.join(new_columns))#bulk yale vcf use > to write to file in
# tmpFamFileWriter.close()
######################
doneFileNames = [];
curretDirectoryFiles = [];
def joinPaths ( rt, fname ):
filepath = os.path.join( rt, fname )
doneFileNames .append( fname ) # Add it to the list.
def getFileNames( directory ):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
# Walk the tree.
[ ([joinPaths(root, f) for f in files],[ curretDirectoryFiles.append(d) for d in directories]) for root, directories, files in os.walk( directory ) ]
return doneFileNames # Self-explanatory.
######################
#fastq to kin pipeline
######################
# r_parse_base_id=re.compile(r'^(.*?)(-\d\d)?$')
r_parse_base_id=re.compile(r'^((.*?)(-\d+|(-\d+-\d)))?$')
new_columns = []
path = "tmp_fam.txt"
tmpFamFileWriter = open("tmp_fam.txt", "w+")
for i,line in enumerate(fileinput.input()):
line=line.strip()
columns=line.split(' ')
new_columns=columns[:]
# print(columns)
# print(new_columns)
_id = new_columns[1].split(".")
# print(_id)
_id = _id[1]
family_id = _id; filesDict = {};
match = r_parse_base_id.match(columns[0])
family_id = match.group(1)
gender = '0'
mother_id = '0'
father_id = '0'
# print("family id: {0}\nid: {1}\nmatch group 0: {2}\n".format(family_id,_id,match.group(0)))
if (i == 0 and _id.count("-") == 2):
directoryFileNames = getFileNames( os.getcwd() )
listOfBamFileNames = filter( (lambda x : re.match( r'aligned-sorted.*bam$', x) ), directoryFileNames )
[ filesDict.update({x.split(".")[1][0:7]:x.split(".")[1]+":"+x}) if x.split(".")[1].count("-") == 2 else filesDict.update({x.split(".")[1][0:10]:x.split(".")[1]+":"+x}) for x in listOfBamFileNames if ( len(x.split(".")[1]) == 9 and os.path.getsize(x) > filesDict.get(x.split(".")[1][0:7]) ) or ( len(x.split(".")[1]) == 12 and os.path.getsize(x) > filesDict.get(x.split(".")[1][0:10]))]
# sys.exit("filesDict: {0}".format(filesDict))
new_columns[0] = family_id
new_columns[1] = _id
new_columns[2] = filesDict.get(family_id+"-01").split(":")[0]
new_columns[3] = filesDict.get(family_id+"-02").split(":")[0]
new_columns[4] = gender
elif(i == 0 and _id.count("-") == 1):
new_columns[0] = family_id
new_columns[1] = _id
new_columns[2] = family_id + '-01'
new_columns[3] = family_id + '-02'
new_columns[4] = gender
elif (i % 2) == 1:
if ("01" in _id.split("-")):
gender = '2'
elif ("02" in _id.split("-")):
gender = '1'
new_columns[0] = family_id
new_columns[1] = _id
new_columns[4] = gender
elif (i % 2 ) == 0:
if ("01" in _id.split("-")):
gender = '2'
elif ("02" in _id.split("-")):
gender = '1'
new_columns[0] = family_id
new_columns[1] = _id
new_columns[4] = gender
tmpFamFileWriter.write(' '.join(new_columns))
tmpFamFileWriter.write("\n")
# print(' '.join(new_columns))#bulk yale vcf use > to write to file in
tmpFamFileWriter.close()
######################
| [
"bryan.kanu@cchmc.org"
] | bryan.kanu@cchmc.org |
3f6e2abacfeac461a57ba7a45a1cf5a7fed12415 | a275c7e4161c89ed3ee6289b75ad1d017634baab | /kontrollbank/pipelines.py | fb4ba7933de52e17d5cffd84c31fac2ff44fb0a5 | [] | no_license | SimeonYS/Oesterreichische-Kontrollbank-AG | c277d179aa41990458fbed76143fb48c0d8346d2 | f2aa83979c1faa52fdc18fb2802222af0de2d0e3 | refs/heads/main | 2023-04-18T01:17:55.803542 | 2021-04-29T06:34:11 | 2021-04-29T06:34:11 | 339,081,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import sqlite3
class KontrollbankPipeline:
# Database setup
conn = sqlite3.connect('KontrollBank.db')
c = conn.cursor()
def open_spider(self, spider):
self.c.execute("""CREATE TABLE IF NOT EXISTS articles
(date text, title text, link text, content text)""")
def process_item(self, item, spider):
self.c.execute("""SELECT * FROM articles WHERE title = ? AND date = ?""",
(item.get('title'), item.get('date')))
duplicate = self.c.fetchall()
if len(duplicate):
return item
print(f"New entry added at {item['link']}")
# Insert values
self.c.execute("INSERT INTO articles (date, title, link, content)"
"VALUES (?,?,?,?)", (item.get('date'), item.get('title'), item.get('link'), item.get('content')))
self.conn.commit() # commit after every entry
return item
def close_spider(self, spider):
self.conn.commit()
self.conn.close()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
24c9e6984a30f735e585bc692d8a87a374630a02 | 88f5893f223949c2db7b30d44205482384c4e855 | /hardnet/text_detector.py | 447abe3b14017ec41eb2bf0d3128da0aa5732ca5 | [] | no_license | Danee-wawawa/myhardnet | 4bcd132653331d4738cbe3e94a73c07ee3517cf5 | a1828495570da8260f625757c47f8c7791424271 | refs/heads/master | 2021-10-10T23:57:31.651980 | 2019-01-19T11:43:06 | 2019-01-19T11:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,469 | py | import copy
import random
import argparse
from rcnn.config import default, generate_config
from rcnn.symbol import *
from rcnn.utils.load_model import load_param
from rcnn.core.module import MutableModule
from rcnn.processing.bbox_transform import nonlinear_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, gpu_nms_wrapper
from rcnn.tools.minimum_bounding import minimum_bounding_rectangle
bbox_pred = nonlinear_pred
import numpy as np
import os
from scipy import io
import cv2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import zipfile
MERGE_THRESH = 1.1 #no merge
class OneDataBatch():
def __init__(self,img):
im_info = mx.nd.array([[img.shape[0],img.shape[1],1.0]])
img = np.transpose(img,(2,0,1))
img = img[np.newaxis,(2,1,0)]
self.data = [mx.nd.array(img),im_info]
self.label = None
self.provide_label = None
self.provide_data = [("data",(1,3,img.shape[2],img.shape[3])),("im_info",(1,3))]
class TextDetector:
    """Mask-R-CNN-style scene-text detector built on MXNet/rcnn.

    `detect` runs the network at one or more scales, NMS-filters the boxes,
    optionally suppresses/merges overlapping instance masks, and returns a
    list of 4x2 int arrays (minimum bounding rectangles, (x, y) point order).
    """
    def __init__(self, network, prefix, epoch, ctx_id=0, mask_nms=True):
        # network: symbol-name suffix resolved to get_<network>_mask_test.
        # prefix/epoch: checkpoint location passed to load_param.
        # mask_nms: when True, a second mask-IoU suppression stage runs in detect().
        self.ctx_id = ctx_id
        self.ctx = mx.gpu(self.ctx_id)
        self.mask_nms = mask_nms
        #self.nms_threshold = 0.3
        #self._bbox_pred = nonlinear_pred
        if not self.mask_nms:
            self.nms = gpu_nms_wrapper(config.TEST.NMS, self.ctx_id)
        else:
            # Box NMS uses the looser RPN threshold; masks are merged/suppressed later.
            self.nms = gpu_nms_wrapper(config.TEST.RPN_NMS_THRESH, self.ctx_id)
        #self.nms = py_nms_wrapper(config.TEST.NMS)
        sym = eval('get_' + network + '_mask_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
        #arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
        arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=self.ctx, process=True)
        split = False
        # Largest input the module is bound for; bigger images would need re-binding.
        max_image_shape = (1,3,1024,1024)
        #max_image_shape = (1,3,1200,2200)
        max_data_shapes = [("data",max_image_shape),("im_info",(1,3))]
        mod = MutableModule(symbol = sym, data_names = ["data","im_info"], label_names= None,
                              max_data_shapes = max_data_shapes, context=self.ctx)
        mod.bind(data_shapes = max_data_shapes, label_shapes = None, for_training=False)
        mod.init_params(arg_params=arg_params, aux_params=aux_params)
        self.model = mod
        pass
    def detect(self, img, scales=[1.], thresh=0.5):
        # NOTE(review): mutable default `scales=[1.]` — harmless here (never
        # mutated) but a tuple default would be safer.
        # NOTE(review): np.int is removed in NumPy >= 1.24; this code needs an
        # old NumPy or a rename to int/np.int64 — confirm the pinned version.
        ret = []
        #scale = scales[0]
        dets_all = None
        masks_all = None
        # Forward pass per scale; detections from all scales are pooled before NMS.
        for scale in scales:
            if scale!=1.0:
                nimg = cv2.resize(img, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
            else:
                nimg = img
            im_size = nimg.shape[0:2]
            #im_info = mx.nd.array([[nimg.shape[0],nimg.shape[1],1.0]])
            #nimg = np.transpose(nimg,(2,0,1))
            #nimg = nimg[np.newaxis,(2,1,0)]
            #nimg = mx.nd.array(nimg)
            #db = mx.io.DataBatch(data=(nimg,im_info))
            db = OneDataBatch(nimg)
            self.model.forward(db, is_train=False)
            results = self.model.get_outputs()
            output = dict(zip(self.model.output_names, results))
            rois = output['rois_output'].asnumpy()[:, 1:]
            scores = output['cls_prob_reshape_output'].asnumpy()[0]
            bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
            mask_output = output['mask_prob_output'].asnumpy()
            # Decode RPN rois + regression deltas into boxes, clipped to the image.
            pred_boxes = bbox_pred(rois, bbox_deltas)
            pred_boxes = clip_boxes(pred_boxes, [im_size[0],im_size[1]])
            boxes= pred_boxes
            label = np.argmax(scores, axis=1)
            label = label[:, np.newaxis]
            cls_ind = 1 #text class
            # Divide by scale to map boxes back to original-image coordinates.
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)] / scale
            cls_masks = mask_output[:, cls_ind, :, :]
            cls_scores = scores[:, cls_ind, np.newaxis]
            #print cls_scores.shape, label.shape
            # Keep detections scored above `thresh` whose argmax class is text.
            keep = np.where((cls_scores >= thresh) & (label == cls_ind))[0]
            dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            masks = cls_masks[keep, :, :]
            if dets.shape[0]==0:
                continue
            if dets_all is None:
                dets_all = dets
                masks_all = masks
            else:
                dets_all = np.vstack((dets_all, dets))
                masks_all = np.vstack((masks_all, masks))
            #scores = dets[:,4]
            #index = np.argsort(scores)[::-1]
            #dets = dets[index]
            #print(dets)
        if dets_all is None:
            # No detections at any scale: empty (0, 2) array as sentinel.
            return np.zeros( (0,2) )
        dets = dets_all
        masks = masks_all
        # Box-level NMS across all scales.
        keep = self.nms(dets)
        dets = dets[keep, :]
        masks = masks[keep, :, :]
        # Paste each (binarised) mask into a full-image canvas; mask_n holds
        # each mask's foreground pixel count for the IoU computation below.
        det_mask = np.zeros( (dets.shape[0],)+img.shape[0:2], dtype=np.int )
        mask_n = np.zeros( (dets.shape[0],), dtype=np.int )
        invalid = np.zeros( (dets.shape[0],), dtype=np.int )
        for i in range(dets.shape[0]):
            bbox_i = dets[i, :4]
            #if bbox[2] == bbox[0] or bbox[3] == bbox[1] or bbox[0] == bbox[1] or bbox[2] == bbox[3] :
            if bbox_i[2] == bbox_i[0] or bbox_i[3] == bbox_i[1] :
                # Degenerate (zero width/height) box: mark invalid and skip.
                invalid[i] = 1
                continue
            score_i = dets[i, -1]
            #bbox_i = map(int, bbox_i)
            bbox_i = bbox_i.astype(np.int)
            mask_i = masks[i, :, :]
            mask_i = cv2.resize(mask_i, (bbox_i[2] - bbox_i[0], (bbox_i[3] - bbox_i[1])), interpolation=cv2.INTER_LINEAR)
            #avg_mask = np.mean(mask_i[mask_i>0.5])
            #print('det', i, 'mask avg', avg_mask)
            mask_i[mask_i > 0.5] = 1
            mask_i[mask_i <= 0.5] = 0
            det_mask[i, bbox_i[1]: bbox_i[3], bbox_i[0]: bbox_i[2]] += mask_i.astype(np.int)
            mask_n[i] = np.sum(mask_i==1)
        if self.mask_nms:
            # Mask-level suppression: drop later (lower-ranked) masks whose mask
            # IoU with an earlier one exceeds config.TEST.NMS; MERGE_THRESH=1.1
            # means the merge branch never fires (see module constant).
            for i in range(dets.shape[0]):
                if invalid[i]>0:
                    continue
                mask_i = det_mask[i]
                ni = mask_n[i]
                merge_list = []
                for j in range(i+1, dets.shape[0]):
                    if invalid[j]>0:
                        continue
                    mask_j = det_mask[j]
                    nj = mask_n[j]
                    mask_inter = mask_i+mask_j
                    nij = np.sum(mask_inter==2)
                    iou = float(nij)/(ni+nj-nij)
                    if iou>=config.TEST.NMS:
                    #if iou>=0.7:
                        invalid[j] = 1
                        if iou>=MERGE_THRESH:
                            merge_list.append(j)
                            #mask_i = np.logical_or(mask_i, mask_j, dtype=np.int).astype(np.int)
                            #det_mask[i] = mask_i
                #print(mask_i)
                for mm in merge_list:
                    _mask = det_mask[mm]
                    mask_i = np.logical_or(mask_i, _mask, dtype=np.int)
                if len(merge_list)>0:
                    det_mask[i] = mask_i.astype(np.int)
        # Fit a minimum-area rectangle to every surviving mask. The column swap
        # below converts (row, col) corner output to (x, y) ordering — presumably
        # minimum_bounding_rectangle returns row/col; verify against its source.
        for i in range(dets.shape[0]):
            if invalid[i]>0:
                continue
            mask_i = det_mask[i]
            mini_box = minimum_bounding_rectangle(mask_i)
            mini_boxt = np.zeros((4,2))
            mini_boxt[0][0] = mini_box[0][1]
            mini_boxt[0][1] = mini_box[0][0]
            mini_boxt[1][0] = mini_box[1][1]
            mini_boxt[1][1] = mini_box[1][0]
            mini_boxt[2][0] = mini_box[2][1]
            mini_boxt[2][1] = mini_box[2][0]
            mini_boxt[3][0] = mini_box[3][1]
            mini_boxt[3][1] = mini_box[3][0]
            mini_box = mini_boxt
            mini_box = np.int32(mini_box)
            ret.append(mini_box)
            #scores.append(score_i)
            #print("---------------",mini_box)
            #cv2.polylines(im, [mini_box], 1, (255,255,255))
            #submit_path = os.path.join(submit_dir,'res_img_{}.txt'.format(index))
            #result_txt = open(submit_path,'a')
            #for i in range(0,4):
            #    result_txt.write(str(mini_box[i][0]))
            #    result_txt.write(',')
            #    result_txt.write(str(mini_box[i][1]))
            #    if i < 3:
            #        result_txt.write(',')
            #result_txt.write('\r\n')
            #result_txt.close()
        return ret
| [
"sdldd520@163.com"
] | sdldd520@163.com |
368836164c10f8ad6ad4d6a65f978972ca0679ef | 66627d6bd2241be0bd0d7ccc7774ff4b003ea942 | /voicescore/voicescore.py | d0e402909b037ab58ec4ca54789ceee49787f355 | [
"MIT"
] | permissive | ThePheonixGuy/ThePhoenixCogs | 72173b8057b308feb7312d66151f2ce07a6dc4d3 | dec9508955037fde4edf60e0f724fd66fb4eb257 | refs/heads/master | 2021-01-11T13:50:45.306067 | 2017-05-25T20:19:11 | 2017-05-25T20:19:11 | 86,629,943 | 1 | 0 | null | 2017-04-11T22:51:40 | 2017-03-29T21:13:27 | Python | UTF-8 | Python | false | false | 9,350 | py | import discord
import os
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from copy import deepcopy
import time
import asyncio
import datetime
class VoiceScore:
    """Red-DiscordBot cog: accrues per-member "voice score" for time spent
    active in eligible voice channels and pays out Economy credits whenever a
    member's score crosses the configured threshold.

    Fixes over the previous revision:
      * ``setchannel`` called ``_getchannel`` without its required ``server``
        argument (TypeError at runtime).
      * ``getpayoutscore`` referenced an undefined ``server`` (NameError).
      * ``_finditem`` descended into only the *first* nested dict, so channel
        lookups silently failed on multi-server bots.
      * the activity log's format string had one placeholder too few, so the
        paid-out member list was never written.
    """
    def __init__(self, bot):
        self.bot = bot
        # Persisted state lives under data/voicescore/.
        self.settings_path = "data/voicescore/settings.json"
        self.settings = dataIO.load_json(self.settings_path)
        self.scores_path = "data/voicescore/scores.json"
        self.scores = dataIO.load_json(self.scores_path)
        self.eligibleChannels_path = "data/voicescore/channels.json"
        # Transient bookkeeping, rebuilt on every voice_state pass.
        self.activeVCUsers = {}
        self.eligibleChannels = {}
        self.activeVClist = []
        self.payoutMembers = []
        self.timeLast = int(time.time())
        self._setupdefaults()
    @commands.command(pass_context=True)
    async def setchannel(self, ctx):
        """ This sets the channel which you send the command to as the text channel for announcements"""
        channel = ctx.message.channel
        server = ctx.message.server
        self.settings[server.id]["ChannelID"] = channel.id
        self.settings[server.id]["ChannelName"] = channel.name
        self.save_settings()
        await self.bot.say("Set this channel for all Voice state Announcements")
        # BUGFIX: _getchannel takes (ctx, server); the server arg was missing.
        await self._getchannel(ctx, server)
    @commands.command(pass_context=True)
    async def getchannel(self, ctx):
        """Returns the set announcement channel. Try using setchannel first."""
        server = ctx.message.server
        await self._getchannel(ctx, server)
    @commands.command(pass_context=True)
    async def setpayoutscore(self, ctx, message: int):
        """Sets the payout for when a user crosses the score threshold"""
        server = ctx.message.server
        self.settings[server.id]["CreditsPerScore"] = message
        self.save_settings()
        await self.bot.say("New Payout set to {}".format(self.settings[server.id]["CreditsPerScore"]))
    @commands.command(pass_context=True)
    async def getpayoutscore(self, ctx):
        """Returns the current payout for when a user crosses the score threshold."""
        # BUGFIX: `server` was previously undefined here (NameError).
        server = ctx.message.server
        await self.bot.say("Payout set to {}".format(self.settings[server.id]["CreditsPerScore"]))
    @commands.command(pass_context=True)
    async def get_all_vcmembers(self, ctx):
        """Lists everyone currently counted as active in voice channels."""
        await self.bot.say("***Current users active in Voice Channels:*** \n ```{}```".format(await self.voice_state(ctx.message.author, ctx.message.author)))
    @commands.command(pass_context=True)
    async def get_score(self, ctx):
        """Tells the caller their accumulated voice score."""
        member = ctx.message.author
        server = ctx.message.server
        if server.id in self.scores and member.id in self.scores[server.id]:
            output = self.scores[server.id][member.id]
            await self.bot.say("{}, your score is {}".format(member.mention, output))
        else:
            await self.bot.say("{}, you have no score yet! Connect to a voice channel to earn some.".format(member.mention))
    @commands.command(pass_context=True)
    async def unixtime(self, ctx):
        """Debug helper: prints and announces the current unix / wall-clock time."""
        print("Unix Time: {}".format(time.time()))
        print("DateTime: {}".format(time.time()))
        printtime = datetime.datetime.fromtimestamp(int(time.time()))
        await self.bot.say("Unix Time:{} \nDate Time: {}".format(time.time(), printtime))
    def _setupdefaults(self):
        """Seed default settings for every server the bot is currently in."""
        for server in self.bot.servers:
            sid = server.id
            if sid not in self.settings:
                self.settings[sid] = {"ChannelID": 0, "ChannelName": "none",
                                      "CreditsPerScore": 250, "ScoreThreshold": 1800}
    async def _getchannel(self, ctx, server):
        """Announce the configured announcement channel for `server`."""
        channelID = self.settings[server.id]["ChannelID"]
        channelName = self.settings[server.id]["ChannelName"]
        await self.bot.say("Name: {} \nID: {}".format(channelName, channelID))
    async def voice_state_message_primer(self, message):
        """Delayed re-scan trigger driven by ordinary chat messages."""
        author = message.author
        await asyncio.sleep(5)
        await self.voice_state(author, author)
        return
    async def voice_state(self, userbefore, userafter):
        """Scoring pass, wired to on_voice_state_update.

        Awards elapsed-time score (plus a 10%-per-active-member bonus) to every
        eligible member, pays out threshold crossings, logs the round, then
        recomputes channel-eligibility flags for the *next* round (eligibility
        intentionally lags one update behind). Returns the scored member names.
        """
        server = userbefore.server
        sid = server.id
        afkChannel = userbefore.server.afk_channel
        timeNow = int(time.time())
        self._ensure_server_maps(sid)
        # Pass 1: record (for the log) which channels currently hold 2+ members
        # and aren't the AFK channel. The eligibility *flags* consumed below
        # were written at the end of the previous round.
        tempEligible = []
        for currServer in self.bot.servers:
            self._ensure_server_maps(currServer.id)
            for channel in currServer.channels:
                if len(channel.voice_members) - 1 > 0 and channel != afkChannel:
                    tempEligible.append("{}".format(channel))
        self.saveChannels()
        # Pass 2: members counted as active — in a flagged channel, not
        # self-deafened, not AFK, not a bot.
        tempVClist = []
        for member in server.members:
            if member.voice_channel is not None:
                vcID = member.voice.voice_channel.id
                if self._finditem(self.eligibleChannels, vcID):
                    if not member.self_deaf and not member.is_afk and not member.bot:
                        tempVClist.append(member)
        self.activeVClist = tempVClist
        totalVCmembers = len(self.activeVClist)
        # Score = elapsed seconds + (elapsed // 10) per active member.
        tempNameList = []
        timeBetween = timeNow - self.timeLast
        adjustAmount = int(timeBetween / 10) * totalVCmembers
        adjustedScore = timeBetween + adjustAmount
        for member in self.activeVClist:
            if member.id not in self.scores[sid]:
                self.scores[sid][member.id] = 0
            self.scores[sid][member.id] += adjustedScore
            # checkScores pays out and returns the post-payout remainder.
            self.scores[sid][member.id] = self.checkScores(server, member)
            tempNameList.append(member.name)
        # Append a round summary to the activity log.
        timestamp = datetime.datetime.fromtimestamp(int(time.time()))
        scoreGiven = "Score given: {} \nMembers: {}".format(adjustedScore, tempNameList)
        eligChannels = "Eligible Channels: {}".format(tempEligible)
        if len(self.payoutMembers) > 0:
            payOutMems = "Payed out members: {}".format(self.payoutMembers)
        else:
            payOutMems = "No Members payed out"
        # BUGFIX: the format string previously had only three placeholders, so
        # the payout list was silently dropped from the log.
        with open("data/voicescore/log.txt", "a") as log_file:
            log_file.write("\nTime: \n{} \n{} \n{} \n{}".format(timestamp, scoreGiven, eligChannels, payOutMems))
        # Pass 3: recompute and persist eligibility flags for the next round.
        for currServer in self.bot.servers:
            for channel in currServer.channels:
                vcMembers = len(channel.voice_members)
                if vcMembers - 1 > 0:
                    if channel != afkChannel:
                        self.eligibleChannels[currServer.id][channel.id] = True
                    else:
                        # AFK channel: leave the previous flag untouched.
                        print("{} users now AFK".format(vcMembers))
                else:
                    self.eligibleChannels[currServer.id][channel.id] = False
        self.saveChannels()
        self.saveScores()
        self.timeLast = int(time.time())
        self.payoutMembers = []
        return tempNameList
    def _ensure_server_maps(self, sid):
        """Make sure the per-server sub-dicts exist for the given server id."""
        if sid not in self.activeVCUsers:
            self.activeVCUsers[sid] = {}
        if sid not in self.eligibleChannels:
            self.eligibleChannels[sid] = {}
        if sid not in self.scores:
            self.scores[sid] = {}
    def checkScores(self, server, member):
        """Pay out if the member crossed the threshold; return the remaining score."""
        currScore = self.scores[server.id][member.id]
        threshold = int(self.settings[server.id]["ScoreThreshold"])
        if currScore >= threshold:
            currScore -= threshold
            self.payOut(member, server.id)
        return currScore
    def payOut(self, member, sid):
        """Deposit the configured credits through the Economy cog's bank."""
        econ = self.bot.get_cog('Economy')
        if econ is None:
            print("Error loading economy cog.")
            return
        basePot = self.settings[sid]["CreditsPerScore"]
        if econ.bank.account_exists(member):
            econ.bank.deposit_credits(member, basePot)
            self.payoutMembers.append(member.name)
        else:
            print("User {} has no account, failed to pay".format(member.name))
    def saveChannels(self):
        dataIO.save_json(self.eligibleChannels_path, self.eligibleChannels)
    def saveScores(self):
        dataIO.save_json(self.scores_path, self.scores)
    def save_settings(self):
        dataIO.save_json(self.settings_path, self.settings)
    def _finditem(self, mydict, key):
        """Recursively look up `key` in a dict of (nested) dicts.

        BUGFIX: the old version probed only the first nested dict and returned
        its result unconditionally, so keys living under any later sub-dict
        (i.e. any server but the first) were never found.
        """
        if key in mydict:
            return mydict[key]
        for v in mydict.values():
            if isinstance(v, dict):
                found = self._finditem(v, key)
                if found is not None:
                    return found
        return None
def check_folders():
    """Ensure the cog's data directory exists, creating it on first run."""
    data_dir = "data/voicescore"
    if os.path.exists(data_dir):
        print("Voicescore Folder found successfully")
    else:
        print("Creating voicescore default directory")
        os.makedirs(data_dir)
def check_files():
    """Create empty default JSON files for settings/scores when missing or invalid."""
    targets = (
        ("data/voicescore/settings.json", "settings.json", "Settings"),
        ("data/voicescore/scores.json", "scores.json", "Scores"),
    )
    for path, short_name, label in targets:
        if dataIO.is_valid_json(path):
            current = dataIO.load_json(path)
            print("{} found successfully".format(label))
        else:
            print("Creating default {}...".format(short_name))
            dataIO.save_json(path, {})
def setup(bot):
    """Entry point used by Red-DiscordBot to load this cog."""
    check_folders()
    check_files()
    cog = VoiceScore(bot)
    # Re-run the scorer on every voice-state change.
    bot.add_listener(cog.voice_state, "on_voice_state_update")
    bot.add_cog(cog)
| [
"reynolds.j.a.119@gmail.com"
] | reynolds.j.a.119@gmail.com |
6e8025869d890b774b88472415b3371f8dcb6d48 | b457143523c492ac5293df24adfe8b6f9a667ee3 | /abc/ABC121/c.py | d51b777fd365e52fc482f9273e22bb9bb5c49079 | [] | no_license | d-yuji/atcoder_study | a9cb39ff9808321d8984cfb5f6a38ba1f543b880 | ea59b8f3709aa2286de1809b4e999f0c7f108e0f | refs/heads/master | 2020-05-03T22:55:45.791737 | 2019-05-26T10:04:40 | 2019-05-26T10:04:40 | 178,853,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # coding:utf-8
def main():
    """AtCoder ABC121 C: buy M drinks at minimum cost, greedily by unit price."""
    N, M = map(int, input().split())
    offers = [tuple(map(int, input().split())) for _ in range(N)]
    offers.sort()  # cheapest unit price first
    remaining = M
    total = 0
    for price, stock in offers:
        take = min(stock, remaining)
        total += price * take
        remaining -= take
        if remaining == 0:
            break
    print(total)


if __name__ == "__main__":
    main()
"d.yuji.fm@gmail.com"
] | d.yuji.fm@gmail.com |
99c86317623eebc3408c6fbd9cbef298f9049dc0 | 7d1bd4868e4a9ef612003ba15e34bf247cf1a42c | /swp/manage.py | be1f3aa4f4e273d633e8263d8119311e124f02cd | [] | no_license | Student-Welfare-Portal/Web-App-Django | 45f7569ce1b5c67deb54231864a49017d2d86831 | f51b791aed2746fe525e4633c9538837a1a35585 | refs/heads/master | 2020-03-31T20:24:37.089227 | 2019-01-20T10:30:58 | 2019-01-20T10:30:58 | 152,539,070 | 0 | 6 | null | 2018-12-11T03:15:51 | 2018-10-11T06:04:02 | HTML | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
import os
import sys
# Standard Django management entry point: configure the settings module and
# delegate to django-admin's command dispatcher.
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "swp.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"="
] | = |
64cab81a648693fce875315066c566357a973b30 | 6a9a8921671d4d0c69f901993043f95bf1859b3d | /manage.py | a828b75b5152660801b21204a5e8b13afd30ab4f | [] | no_license | leedj93/ggaggoong | 061d02fcbf20e7fc3f5ca55a45ead55941a63521 | 749c97d58b8ba67b2c64c83d9d944f421e7c8ab6 | refs/heads/master | 2023-09-06T09:40:04.806505 | 2021-11-09T06:59:19 | 2021-11-09T06:59:19 | 422,127,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before any command dispatch.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ggaggoong.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"taewankim0925@likelion.org"
] | taewankim0925@likelion.org |
d4e5bd956f39f41ddb8b283bb4e69983f46cd8a8 | cd8b12c1c4f512336b3ea5c7a87e8623c1f80c79 | /services/amundsen/amundsendatabuilder/databuilder/extractor/dashboard/tableau/tableau_dashboard_extractor.py | 4bbac12c592aa4ae988334997b92ec80c18cbfb5 | [
"Apache-2.0"
] | permissive | irvcaza/datalake4os | c9c29d38997e0fc01b29852fde61d608521f96f6 | bfe9152e9527ecc3a4928e0d93df1118152025e2 | refs/heads/main | 2023-04-05T16:47:52.547573 | 2021-04-13T18:28:23 | 2021-04-13T18:28:23 | 341,649,613 | 0 | 0 | Apache-2.0 | 2021-02-23T18:23:35 | 2021-02-23T18:23:35 | null | UTF-8 | Python | false | false | 5,754 | py | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import (
Any, Dict, Iterator, List,
)
from pyhocon import ConfigFactory, ConfigTree
import databuilder.extractor.dashboard.tableau.tableau_dashboard_constants as const
from databuilder import Scoped
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.dashboard.tableau.tableau_dashboard_utils import (
TableauDashboardUtils, TableauGraphQLApiExtractor,
)
from databuilder.extractor.restapi.rest_api_extractor import STATIC_RECORD_DICT
from databuilder.transformer.base_transformer import ChainedTransformer, Transformer
from databuilder.transformer.dict_to_model import MODEL_CLASS, DictToModel
from databuilder.transformer.timestamp_string_to_epoch import FIELD_NAME, TimestampStringToEpoch
LOGGER = logging.getLogger(__name__)
class TableauGraphQLApiMetadataExtractor(TableauGraphQLApiExtractor):
    """
    Implements the extraction-time logic for parsing the GraphQL result and transforming into a dict
    that fills the DashboardMetadata model. Allows workbooks to be excluded based on their project.
    """
    CLUSTER = const.CLUSTER
    EXCLUDED_PROJECTS = const.EXCLUDED_PROJECTS
    TABLEAU_BASE_URL = const.TABLEAU_BASE_URL
    def execute(self) -> Iterator[Dict[str, Any]]:
        """Run the configured GraphQL query and yield one DashboardMetadata-shaped
        dict per workbook, skipping workbooks in excluded projects."""
        response = self.execute_query()
        # Drop workbooks whose top-level project is on the exclusion list.
        workbooks_data = [workbook for workbook in response['workbooks']
                          if workbook['projectName'] not in
                          self._conf.get_list(TableauGraphQLApiMetadataExtractor.EXCLUDED_PROJECTS)]
        base_url = self._conf.get(TableauGraphQLApiMetadataExtractor.TABLEAU_BASE_URL)
        for workbook in workbooks_data:
            # Project -> dashboard_group, workbook -> dashboard (see class docs).
            data = {
                'dashboard_group': workbook['projectName'],
                'dashboard_name': TableauDashboardUtils.sanitize_workbook_name(workbook['name']),
                'description': workbook.get('description', ''),
                'created_timestamp': workbook['createdAt'],
                'dashboard_group_url': f'{base_url}/#/projects/{workbook["projectVizportalUrlId"]}',
                'dashboard_url': f'{base_url}/#/workbooks/{workbook["vizportalUrlId"]}/views',
                'cluster': self._conf.get_string(TableauGraphQLApiMetadataExtractor.CLUSTER)
            }
            yield data
class TableauDashboardExtractor(Extractor):
    """
    Extracts core metadata about Tableau "dashboards".
    For the purposes of this extractor, Tableau "workbooks" are mapped to Amundsen dashboards, and the
    top-level project in which these workbooks preside is the dashboard group. The metadata it gathers is:
        Dashboard name (Workbook name)
        Dashboard description (Workbook description)
        Dashboard creation timestamp (Workbook creation timestamp)
        Dashboard group name (Workbook top-level folder name)
    Uses the Metadata API: https://help.tableau.com/current/api/metadata_api/en-us/index.html
    """
    API_BASE_URL = const.API_BASE_URL
    API_VERSION = const.API_VERSION
    CLUSTER = const.CLUSTER
    EXCLUDED_PROJECTS = const.EXCLUDED_PROJECTS
    SITE_NAME = const.SITE_NAME
    TABLEAU_BASE_URL = const.TABLEAU_BASE_URL
    TABLEAU_ACCESS_TOKEN_NAME = const.TABLEAU_ACCESS_TOKEN_NAME
    TABLEAU_ACCESS_TOKEN_SECRET = const.TABLEAU_ACCESS_TOKEN_SECRET
    VERIFY_REQUEST = const.VERIFY_REQUEST
    def init(self, conf: ConfigTree) -> None:
        """Build the GraphQL extractor plus the transformer chain
        (timestamp string -> epoch, then dict -> DashboardMetadata model)."""
        self._conf = conf
        self.query = """query {
            workbooks {
                id
                name
                createdAt
                description
                projectName
                projectVizportalUrlId
                vizportalUrlId
            }
        }"""
        self._extractor = self._build_extractor()
        transformers: List[Transformer] = []
        # Convert the ISO 'created_timestamp' string into an epoch integer.
        timestamp_str_to_epoch_transformer = TimestampStringToEpoch()
        timestamp_str_to_epoch_transformer.init(
            conf=Scoped.get_scoped_conf(self._conf, timestamp_str_to_epoch_transformer.get_scope()).with_fallback(
                ConfigFactory.from_dict({FIELD_NAME: 'created_timestamp', })))
        transformers.append(timestamp_str_to_epoch_transformer)
        # Turn the raw dict into a DashboardMetadata model instance.
        dict_to_model_transformer = DictToModel()
        dict_to_model_transformer.init(
            conf=Scoped.get_scoped_conf(self._conf, dict_to_model_transformer.get_scope()).with_fallback(
                ConfigFactory.from_dict(
                    {MODEL_CLASS: 'databuilder.models.dashboard.dashboard_metadata.DashboardMetadata'})))
        transformers.append(dict_to_model_transformer)
        self._transformer = ChainedTransformer(transformers=transformers)
    def extract(self) -> Any:
        """Pull the next record from the GraphQL extractor and transform it;
        returns None when the extractor is exhausted."""
        record = self._extractor.extract()
        if not record:
            return None
        return self._transformer.transform(record=record)
    def get_scope(self) -> str:
        """Config scope used to namespace this extractor's settings."""
        return 'extractor.tableau_dashboard_metadata'
    def _build_extractor(self) -> TableauGraphQLApiMetadataExtractor:
        """
        Builds a TableauGraphQLApiMetadataExtractor. All data required can be retrieved with a single GraphQL call.
        :return: A TableauGraphQLApiMetadataExtractor that provides core dashboard metadata.
        """
        extractor = TableauGraphQLApiMetadataExtractor()
        tableau_extractor_conf = Scoped.get_scoped_conf(self._conf, extractor.get_scope()) \
            .with_fallback(self._conf) \
            .with_fallback(ConfigFactory.from_dict({TableauGraphQLApiExtractor.QUERY: self.query,
                                                    STATIC_RECORD_DICT: {'product': 'tableau'}}))
        extractor.init(conf=tableau_extractor_conf)
        return extractor
| [
"abxda@outlook.com"
] | abxda@outlook.com |
f6c0293401098e66a36e782ca564cf4be1bf6bdc | 67bae06a2f69735fd39d1e0c0cda1e5d59559a8e | /prog1/hw1.py | caa3349e0348c8d86b35b947a93f5fd378753fe7 | [] | no_license | andrewlkraft/cs165b | 0e60fe9d345a105cdebe57eb94fee2e02c0c2d8a | f90b5fc8918ba53132eaf4036be837ade6c1c4fb | refs/heads/main | 2023-02-28T08:50:52.886231 | 2021-01-31T21:44:09 | 2021-01-31T21:44:09 | 306,977,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,722 | py | import numpy as np, time, matplotlib.pyplot as plt
# If True, cross-validation folds are averaged into one model; if False, the
# single best-MSE fold wins (see SGDSolver.training).
TAKE_AVG = True
# Number of cross-validation folds (index i belongs to fold i % KXVAL).
KXVAL = 8
# Default epoch count and early-stop MSE threshold for the script below.
EPOCHS = 300
EPSILON = 0.0001
class SGDSolver():
    """Ridge-regularized linear regression trained by gradient descent with a
    grid search over learning rate / regularization and k-fold cross-validation.
    Learned parameters are kept on the instance (self.w, self.b, self.mse)."""
    def __init__(self, path):
        """ load input dataset specified in path and split data into train and validation. Hint: you can
            store both training and testing features and target vector as class variables """
        # Rows are CSV lines; the first column is dropped (presumably an id —
        # verify against the dataset header), leaving 7 features + 1 target.
        tmp = []
        try:
            with open(path) as f:
                f.readline() # read past title line
                for line in f:
                    tmp.append([float(x) for x in line.split(',')[1:]])
        except Exception as e:
            print('Could not open file:\n%s' % e)
            exit(0)
        self.rng = np.random.default_rng()
        self.rng.shuffle(tmp)
        self.x = np.array(tmp)[:,:7]
        self.y = np.array(tmp)[:,7]
        self.w = np.empty(7)
        self.b = 0
        self.mse = np.inf
    def training(self, alpha, lam, nepoch, epsilon):
        """ Training process of linear regression will happen here. User will provide
            learning rate alpha, regularization term lam, specific number of training epoches,
            and a variable epsilon to specify pre-mature end condition,
            ex. if error < epsilon, training stops. Hint: You can store both weight and
            bias as class variables, so other functions can directly use them """
        # NOTE(review): the `alpha` and `lam` arguments are ignored — the grids
        # below are hard-coded geometric sequences. Either use the arguments as
        # grid bounds or drop them from the signature.
        tmp_l = 1000
        tmp_a = 1e-10
        lambdas = [tmp_l]
        alphas = [tmp_a]
        while tmp_l < 1e6:
            tmp_l = tmp_l * 3
            lambdas.append(tmp_l)
        while tmp_a < 1e-6:
            tmp_a = tmp_a * 3
            alphas.append(tmp_a)
        n = len(self.x)
        # 2 nested loops to perform the grid search of alpha and lambda
        for a in alphas:
            for l in lambdas:
                b_avg = 0
                w_avg = np.zeros(7)
                if TAKE_AVG:
                    mse_avg = 0
                else:
                    mse_avg = np.inf
                # k-fold cross-validation: fold `xval` is held out for validation.
                for xval in range(KXVAL):
                    b = self.rng.random()
                    w = self.rng.random(7)
                    # perform SGD number of times = nepoch
                    mse = np.inf
                    for iteration in range(nepoch):
                        # each iteration calculates b_grad and w_grad once
                        b_grad = 0
                        w_grad = np.zeros(7)
                        for index in range(n):
                            # only use sample in gradient calculation if its index is not equal to val mod self.k, ie only if in training set
                            if index % KXVAL != xval:
                                # calculate the part of the gradient for each entry, and add it to the gradient overall
                                # NOTE(review): `=` overwrites the accumulator each
                                # sample, so only the LAST training sample's gradient
                                # is used — the comment above says "add"; this very
                                # likely should be `+=` for both w_grad and b_grad.
                                inner = 2 / n * (b + np.dot(w, self.x[index]) - self.y[index])
                                w_grad = inner * self.x[index] + l * w / n
                                b_grad = inner
                        # Step with a 1/sqrt(t) decayed learning rate.
                        tmp_b = b - a / np.sqrt(iteration + 1) * b_grad
                        tmp_w = w - a / np.sqrt(iteration + 1) * w_grad
                        # perform SGD
                        # evaluate mse
                        valid = 0
                        training = 0
                        for index in range(n):
                            if index % KXVAL == xval:
                                valid = valid + (tmp_b + np.dot(tmp_w, self.x[index]) - self.y[index])**2 / n
                            else:
                                # NOTE(review): `training` is accumulated but never used.
                                training = training + (tmp_b + np.dot(tmp_w, self.x[index]) - self.y[index])**2 / n
                        if valid > mse:
                            break
                        # if mse went down (or stayed level) since last epoch, keep going
                        mse = valid
                        b = tmp_b
                        w = tmp_w
                        # if mse is low enough, break because of diminishing returns
                        if mse < epsilon:
                            break
                    # take the results of model building from this training and validation test and add to average
                    # OR select if less than running minimum, if TAKE_AVG is set to False
                    if TAKE_AVG == True:
                        b_avg = b_avg + b / KXVAL
                        w_avg = w_avg + w / KXVAL
                        mse_avg = mse_avg + mse / KXVAL
                    else:
                        if mse < mse_avg:
                            mse_avg = mse
                            b_avg = b
                            w_avg = w
                # Keep the best (a, l) combination seen so far.
                if mse_avg < self.mse:
                    self.mse = mse_avg
                    self.b = b_avg
                    self.w = w_avg
    def testing(self, testX):
        """ predict the Y value based on testing data input and ground truth data """
        # Returns an (n, 1) column of predictions testX @ w + b.
        n = len(testX)
        testY = np.zeros((n,1))
        for index in range(n):
            testY[index] = testX[index] @ self.w + self.b
        return testY
""" Training Process: You only need to modify nepoch, epsilon of training method,
this is for autograding """
model = SGDSolver('tests/train.csv')
# Compute the time to do grid search on training
start = time.time()
model.training([10**-10, 10], [1, 1e10], EPOCHS, EPSILON)
end = time.time()
print('---COMPLETE---\nTRAINING:\ntraining time:\t%s\nepochs:\t%s\nepsilon:\t%s\nMODEL:\nmse:\t%s\nkxval:\t%s\navging:\t%s\nb:\t%s\nw:\n%s' % (end - start, EPOCHS, EPSILON, model.mse, KXVAL, TAKE_AVG, model.b, model.w)) | [
"43654559+AndrewKraft@users.noreply.github.com"
] | 43654559+AndrewKraft@users.noreply.github.com |
2f0f93ea3526d06951c7ec1abdf4d5cdba900116 | d3384e773b4cd82d7c422654176a1c141ecc45ac | /RNNTutorials/NonLinearApproximation_MIMO_Chaos.py | d3d214162e9e2877ad0c389d700abd7905ba0d44 | [
"MIT"
] | permissive | brandonbraun653/ValkyrieRNN | fd724ee9615c2dd61da987324c8c05f963ac27a2 | 532d2f9b1251d151a7f7ef1324ae3250b496193b | refs/heads/master | 2021-05-08T10:53:17.594626 | 2018-03-20T15:21:17 | 2018-03-20T15:21:17 | 119,868,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,359 | py | from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.layers.normalization import batch_normalization
import numpy as np
import pandas as pd
import math
import matplotlib
matplotlib.use('TkAgg') # use('Agg') for saving to file and use('TkAgg') for interactive plot
import matplotlib.pyplot as plt
# Demo script: train a 2-layer simple-RNN (tflearn) to approximate three
# hand-made nonlinear functions of four noisy Mackey-Glass input series.
# NOTE(review): `input` and `output` shadow Python builtins throughout —
# renaming (e.g. to inputs/outputs) would be safer; left as-is here.
input = np.r_[np.array(pd.read_csv('mackeyglass/mackey_glass1.csv')).transpose(),
              np.array(pd.read_csv('mackeyglass/mackey_glass2.csv')).transpose(),
              np.array(pd.read_csv('mackeyglass/mackey_glass3.csv')).transpose(),
              np.array(pd.read_csv('mackeyglass/mackey_glass4.csv')).transpose()]
# Configuration Variables
input_dim = 4                 # Number of parameters the NN will use as input
output_dim = 3                # Number of NN outputs
steps_of_history = 2000       # Selects how large of a sample set will be used to train
batch_len = 128
epoch_len = 15
neurons_per_layer = 128
layer_dropout = (1.0, 1.0)    # (1.0, 1.0) == keep everything, i.e. dropout disabled
# Generate some noise to add on top of the input signal
input += np.random.random(np.shape(input)) * 0.1
# Weird and random non-linear functions for the NN to learn
output1 = np.tan(input[0, :] + np.cos(input[1, :]) - np.tanh(input[0, :])) - 0.5
output2 = np.cos(input[1, :] + np.cos(input[0, :]) - np.sin(input[2, :])*np.cos(input[3, :]))
output3 = np.sin(input[3, :] + input[2, :])
# Stack targets into a (3, T) array.
output = np.r_[output1[None, :],
               output2[None, :],
               output3[None, :]]
print(np.shape(output))
plt.figure(figsize=(16, 4))
plt.suptitle('Output of Non-Linear Function 1')
plt.plot(output[0, :], 'g-', label='Output1')
plt.legend()
plt.figure(figsize=(16, 4))
plt.suptitle('Output of Non-Linear Function 2')
plt.plot(output[1, :], 'g-', label='Output2')
plt.legend()
# NOTE(review): this third figure reuses the 'Function 2'/'Output2' labels —
# presumably meant to read 'Function 3'/'Output3'.
plt.figure(figsize=(16, 4))
plt.suptitle('Output of Non-Linear Function 2')
plt.plot(output[2, :], 'g-', label='Output2')
plt.legend()
# plt.show()
# Generate the input and training data
# Each sample is a (input_dim, steps_of_history) window; its target is the
# output vector one step past the window.
input_seq = []
output_seq = []
for i in range(0, len(input[0, :]) - steps_of_history):
    input_seq.append(input[:, i:i+steps_of_history])  # NOTE: Select all columns for multiple inputs...
    output_seq.append(output[:, i+steps_of_history])  # NOTE: Select all columns for multiple outputs...
trainX = np.reshape(input_seq, [-1, input_dim, steps_of_history])
trainY = np.reshape(output_seq, [-1, output_dim])
print(np.shape(trainX))
print(np.shape(trainY))
# Build the network model
input_layer = tflearn.input_data(shape=[None, input_dim, steps_of_history])
layer1 = tflearn.simple_rnn(input_layer,
                            n_units=neurons_per_layer,
                            activation='relu',
                            return_seq=True,
                            dropout=layer_dropout,
                            name='Layer1')
layer2 = tflearn.simple_rnn(layer1,
                            n_units=neurons_per_layer,
                            activation='sigmoid',
                            dropout=layer_dropout,
                            name='Layer2')
layer3 = tflearn.fully_connected(layer2,
                                 output_dim,
                                 activation='linear',
                                 name='Layer3')
output_layer = tflearn.regression(layer3,
                                  optimizer='adam',
                                  loss='mean_square',
                                  learning_rate=0.002)
# Training
model = tflearn.DNN(output_layer, clip_gradients=0.1, tensorboard_verbose=3)
model.fit(trainX, trainY, n_epoch=epoch_len, validation_set=0.1, batch_size=batch_len)
# Generate a model prediction as a very simple sanity check...
# (predicting on the training set, so this shows fit, not generalization)
predictY = model.predict(trainX)
print(np.shape(trainX))
print(np.shape(predictY))
# Plot the results
plt.figure(figsize=(16, 4))
plt.suptitle('Function 1 Train vs Predict')
plt.plot(trainY[:, 0], 'r-', label='Actual')
plt.plot(predictY[:, 0], 'b-', label='Predicted')
plt.legend()
plt.figure(figsize=(16, 4))
plt.suptitle('Function 2 Train vs Predict')
plt.plot(trainY[:, 1], 'r-', label='Actual')
plt.plot(predictY[:, 1], 'b-', label='Predicted')
plt.legend()
plt.figure(figsize=(16, 4))
plt.suptitle('Function 3 Train vs Predict')
plt.plot(trainY[:, 2], 'r-', label='Actual')
plt.plot(predictY[:, 2], 'b-', label='Predicted')
plt.legend()
plt.show()
"brandonbraun653@gmail.com"
] | brandonbraun653@gmail.com |
4c2cd3335e0930d20697d6176e63bc3d97ace879 | 2bb9159e1466ad4f2635a57bea1ffe97f08d2899 | /src/xlibris/tex.py | 99ce0e2b83404b7b39ca235f806ea54a6e8c1668 | [] | no_license | geodynamics-liberation-front/xlibris | 31da3980df7cea209f8610619aab5fdb1b3c9055 | b473d28a684c08da6c60c887043ab8f35e8669eb | refs/heads/master | 2021-01-23T03:12:29.265803 | 2013-07-14T00:57:02 | 2013-07-14T00:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,888 | py | import pkg_resources
import cPickle as pickle
from unidecode import unidecode
from . import LOG
# Unicode -> LaTeX translation table, shipped with the package as a pickle.
U_T_L=pickle.load(pkg_resources.resource_stream(__name__,"utl.p"))
# BibTeX month abbreviations, indexed by (month number - 1).
MONTHS=['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
# Citation-key template: first author's surname + 4-digit year + optional
# disambiguating suffix (the `iter` field).
BIBTEX_KEY=u'{article.authors[0].surname}{article.earliest_publication.year:04d}{iter}'
def bibkey(article,iter=''):
    """Return an ASCII, lowercase, space-free BibTeX key for *article*.

    *iter* is an optional disambiguating suffix (e.g. 'a', 'b', ...).
    """
    raw_key = BIBTEX_KEY.format(article=article, iter=iter)
    return unidecode(raw_key).lower().replace(' ', '')
"""
@article{ribe1995,
title={The dynamics of plume-ridge interaction, 1: Ridge-centered plumes},
author={Ribe, NM and Christensen, UR and Theissing, J},
journal={Earth and Planetary Science Letters},
volume={134},
number={1},
pages={155--168},
year={1995},
publisher={Elsevier},
doi={10.1016/0012-821X(95)00116-T}
}
"""
def article_to_bibtex(articles):
    """Build BibTeX ``@article`` entries for one article or an iterable of
    articles.

    Returns a dict mapping each generated citation key to its BibTeX
    record (non-LaTeX characters translated via U_T_L).  Keys are
    disambiguated with 'a', 'b', ... suffixes on collision.
    """
    try:
        iterator = iter(articles)
    except TypeError:
        # A single article was passed; wrap it.
        iterator = iter([articles])
    references={}
    for article in iterator:
        pub=article.get_earliest_publication()
        bibtexKey=bibkey(article)
        # On key collision append 'a', 'b', ... until the key is unique.
        i=ord('a')-1
        while bibtexKey in references:
            i+=1
            bibtexKey=bibkey(article,chr(i))
        ref_items=[]
        ref_items.append(u" title={{%s}}" % article.title)
        authors=" and ".join(
                [u"{author.surname}, {author.given_name}".format(author=a)
                for a in article.authors] )
        ref_items.append(u" author={%s}" % authors)
        ref_items.append(u" journal={%s}" % article.issue.journal.title)
        volume = article.issue.volume
        if volume != None and volume != '':
            ref_items.append(u" volume={%s}" % volume)
        number = article.issue.issue
        if number != None and number != '':
            ref_items.append(u" number={%s}" % number)
        first_page=article.first_page
        last_page=article.last_page
        if first_page != None and first_page != '':
            if last_page != None and last_page !='':
                ref_items.append(u" pages={%s--%s}" % (first_page,last_page))
            else:
                ref_items.append(u" pages={%s}" % first_page)
        if pub.month != None and pub.month != '':
            month=pub.month
            try:
                month=MONTHS[int(month)-1]
            # Was a bare `except:`; only conversion/lookup failures should
            # fall back to the raw month string.
            except (ValueError, TypeError, IndexError):
                LOG.warning("Couldn't turn month '%s' into an int",pub.month)
            ref_items.append(u" month={%s}" % month)
        if pub.year != None and pub.year != '':
            ref_items.append(u" year={%s}" % pub.year)
        if article.url != None and article.url != '':
            ref_items.append(u" url={%s}" % article.url)
        ref_items.append(u" doi={%s}" % article.doi)
        reference="@article{%s,\n%s\n}"%(bibtexKey,",\n".join(ref_items))
        references[bibtexKey] = reference.translate(U_T_L)
    return references
| [
"rpetersen@ucsd.edu"
] | rpetersen@ucsd.edu |
853a2c3e89c802451284a5cd194c08367509c856 | 720c931cea25d5aea9d03331a505ff3a03190cd7 | /common/utils.py | 80245a112cd2ae1f2d0b448963b66e9d24d5ed86 | [] | no_license | xijiali/WEBAN | 977a7507e2eeb7b83fbc26fa76cd741ca22f1864 | 6359ea2f1c3e99472eb682560a3ea76bdcc077fe | refs/heads/master | 2022-12-29T15:00:59.525989 | 2020-10-14T02:19:45 | 2020-10-14T02:19:45 | 303,879,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-10 dataset. A sample
9x9 grid of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- show_sample: plot 9x9 sample grid of the dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
def get_train_valid_loader(data_dir,
                           batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           show_sample=False,
                           num_workers=4,
                           pin_memory=False):
    # NOTE(review): the stored copy of this module is truncated here - only
    # the argument validation survives; the loader construction described in
    # the module docstring above is missing from this blob.
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
| [
"xjl@IT-FVFSMHUYH3QD.local"
] | xjl@IT-FVFSMHUYH3QD.local |
855f4f7bb00f0fad2b5d4834ac54a2168322b3cd | 5f6adaf9a8927bd598e25d96040e4a5d34d905fb | /v2/python-crash-course/projects/django/learning_log/ll_env/bin/pip3 | 7ebde5d78b4113b92b25fde2c912493cacadba49 | [] | no_license | jsore/notes | 10d9da625dd8f6d1c1731c6aad40c6bdbf15a23f | 09b658d7425e2f6c0653810a2a73d8b001fb9288 | refs/heads/master | 2021-10-12T17:55:30.501885 | 2021-10-11T21:48:27 | 2021-10-11T21:48:27 | 158,454,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 302 | #!/Users/justin/Core/Dev/Pub/notes/v2/python-crash-course/projects/django/learning_log/ll_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix from argv[0] so pip
    # reports a clean program name, then delegate to pip's internal main.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jus.a.sorensen@gmail.com"
] | jus.a.sorensen@gmail.com | |
3dde995ff788492c63b56d27a881fa0f305b2539 | cedb40e20aa78ea97b70a3e73f636bc04501d626 | /professor.py | 47033ca33a39362c18153b425a518542ebd1bb47 | [
"MIT"
] | permissive | iansandes/school-system | ce6218dd3fd5f9e8a8ca8dc0ea56c183b66670a9 | 20d0ae41e136452bddbf853d41db1913d3daddc1 | refs/heads/master | 2020-05-24T18:11:33.434490 | 2019-05-30T17:37:06 | 2019-05-30T17:37:06 | 187,404,187 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | from funcionário import Funcionario
import pickle
class Professor(Funcionario):
    """Teacher record: extends Funcionario with formation, level and subject."""

    def __init__(self):
        # Professor-specific fields start empty; the base class fills the rest.
        self.formacao = ""
        self.nivel = ""
        self.disciplina = ""
        super().__init__()

    def cadastrarProfessor(self):
        """Prompt for professor data and append the record to professor.pkl."""
        dados_funcionario = super().cadastrarFuncionario()
        self.formacao = input("Digite a formação: ")
        self.disciplina = input("Digite a disciplina: ")
        dados_prof = dict(formacao=self.formacao, nivel=self.nivel,
                          disciplina=self.disciplina)
        dados_funcionario.update(dados_prof)
        try:
            with open('professor.pkl', 'rb') as lista_profs:
                antiga_lista = pickle.load(lista_profs)
            with open('professor.pkl', mode='wb') as lista_profs:
                antiga_lista.append(dados_funcionario)
                lista_profs.write(pickle.dumps(antiga_lista))
        # Was a bare `except:` (it also swallowed KeyboardInterrupt/SystemExit).
        # Only a missing or unreadable pickle should start a fresh file.
        except (OSError, EOFError, pickle.PickleError):
            with open('professor.pkl', mode='wb') as lista_profs:
                lista_profs.write(pickle.dumps([dados_funcionario]))

    def exibirProfessor(self):
        """Print the inherited employee data followed by the professor fields."""
        super().exibirFuncionario()
        print(self.formacao)
        print(self.nivel)
print(self.disciplina) | [
"iansandes15@gmail.com"
] | iansandes15@gmail.com |
b5acca52f06eebd8391f62eef405b92ed82b36ff | 7449282d4d50b3481aa2d272cc218c27682fd6cd | /mnasnet/cifar10/bottlenecks/Sim/f_sim.py | 44e245b360795f776d311493c9ab77a29c4af2ed | [] | no_license | compstruct/fusion-timeloop-model | 3f18b1beb819e71fa45a0b79d01ad3819b48fbc7 | ab4a7ef7140dfa47e3394ed293d223fecff691c5 | refs/heads/main | 2023-06-11T17:26:41.369904 | 2021-06-30T15:54:32 | 2021-06-30T15:54:32 | 381,750,736 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,335 | py | import numpy as np
import sys
import pandas
import os
import subprocess
import time
import yaml
import ResMaker.fusion as fusion
#*****Configurations********
#simulator configs
DELAY = 20
# Mapper parameter pairs [timeout, victory-condition]; populated by sim().
pred_mapper_pars = []
pred_dw_mapper_pars = []
main_mapper_pars = []
#fusion acc. config (number of PEs in the fusion and main arrays)
F_PEs = 128
X_PEs = 8
# Per-layer spatial factor strings for the prediction constraint files,
# keyed by layer name ('decom'/'conv'); populated by sim().
PRED_SPATIAL_GLB = {}
PRED_SPATIAL_DUMMY = {}
#Fusion CNN configs
F_BLOCKS = 13
# Per-block network description lists (indexed 0..F_BLOCKS-1); set by sim().
f_in_ch = []
f_midd_ch = []
f_out_ch = []
f_pixels = []
f_stride = []
f_kernel_size = []
#**************************************************************************************
#Prediction Funstions******************************************************************
#**************************************************************************************
def fix_layer_shape(BLOCK_DIR, layer, block):
    """Rewrite the Timeloop problem shape for one prediction layer in place.

    `layer` is 'decom' (pointwise decompression) or 'conv' (depthwise);
    dimensions come from the module-level per-block tables.
    """
    prob_path = BLOCK_DIR + layer + "/prob/prob.yaml"
    with open(prob_path, 'r') as fh:
        prob = yaml.safe_load(fh)
    instance = prob['problem']['instance']
    if layer == 'conv':
        instance['M'] = f_midd_ch[block]
        instance['Wstride'] = f_stride[block]
        instance['Hstride'] = f_stride[block]
        instance['S'] = f_kernel_size[block]
        instance['R'] = f_kernel_size[block]
    elif layer == 'decom':
        instance['C'] = f_in_ch[block]
        instance['M'] = f_midd_ch[block]
    # The decom layer works at the pre-stride resolution.
    out_pixels = f_pixels[block] * f_stride[block] if layer == 'decom' else f_pixels[block]
    instance['P'] = out_pixels
    instance['Q'] = out_pixels
    with open(prob_path, 'w') as fh:
        yaml.dump(prob, fh)
def fix_block_shape(BLOCK_DIR, block):
    """Rewrite the problem shapes of both prediction layers of one block."""
    for layer_name in ('decom', 'conv'):
        fix_layer_shape(BLOCK_DIR, layer_name, block)
def fix_layer_constraints(BLOCK_DIR, layer, block):
    """Patch the Timeloop constraint file of one prediction layer in place.

    Spatial factors for the shared GLB and the dummy buffer come from the
    PRED_SPATIAL_* tables; for the 'conv' (depthwise) layer the weight
    scratchpad additionally gets the block's kernel size as temporal R/S.
    """
    const_path = BLOCK_DIR + layer + "/constraints/constraints.yaml"
    with open(const_path, 'r') as file:
        const = yaml.safe_load(file)
    for i in range(len(const['architecture_constraints']['targets'])):
        current_const = const['architecture_constraints']['targets'][i]
        if current_const['target'] == 'shared_glb' and current_const['type'] == 'spatial':
            const['architecture_constraints']['targets'][i]['factors'] = PRED_SPATIAL_GLB[layer]
        elif current_const['target'] == 'DummyBuffer' and current_const['type'] == 'spatial':
            const['architecture_constraints']['targets'][i]['factors'] = PRED_SPATIAL_DUMMY[layer]
        elif current_const['target'] == 'weights_spad' and current_const['type'] == 'temporal' and layer == 'conv':
            # BUG FIX: the 'R=' and 'S=' tokens were concatenated without a
            # separator (producing e.g. 'R=3S=3'); every other factor string
            # in this file is space-delimited.
            k = f_kernel_size[block]
            const['architecture_constraints']['targets'][i]['factors'] = 'N=1 Q=1 C=1 R=' + str(k) + ' S=' + str(k)
    with open(const_path, 'w') as file:
        yaml.dump(const, file)
def fix_block_constraints(BLOCK_DIR, block):
    """Patch the constraint files of both prediction layers of one block."""
    for layer_name in ('decom', 'conv'):
        fix_layer_constraints(BLOCK_DIR, layer_name, block)
def fix_pred_mapper(DIR):
    """Apply the prediction mapper settings to both mapper YAML files.

    The default mapper gets pred_mapper_pars, the depthwise mapper gets
    pred_dw_mapper_pars; each pair is [timeout, victory-condition].
    """
    for subdir, pars in (("mapper", pred_mapper_pars),
                         ("dw_mapper", pred_dw_mapper_pars)):
        mapper_path = DIR + subdir + "/mapper.yaml"
        with open(mapper_path, 'r') as fh:
            mapper = yaml.safe_load(fh)
        mapper['mapper']['timeout'] = pars[0]
        mapper['mapper']['victory-condition'] = pars[1]
        with open(mapper_path, 'w') as fh:
            yaml.dump(mapper, fh)
def run_timeloop(DIR, LAYER_DIR, dw=False):
    # Launch timeloop-mapper for one prediction layer; `dw` selects the
    # depthwise mapper configuration instead of the default one.
    if dw == True:
        mapper = DIR+'dw_mapper/mapper.yaml'
    else:
        mapper = DIR+'mapper/mapper.yaml'
    p = subprocess.Popen(['timeloop-mapper', './../accelerator/fusion/predictions/arch.yaml', './../accelerator/fusion/predictions/components/smartbuffer_RF.yaml',\
        './../accelerator/fusion/predictions/components/smartbuffer_SRAM.yaml', mapper, LAYER_DIR+'prob/prob.yaml', LAYER_DIR+'constraints/constraints.yaml',\
        '-o', LAYER_DIR+'output'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    # Poll until the mapper exits. communicate() writes "a" to stdin
    # (presumably the keypress the mapper expects to stop its search -
    # confirm) and then waits for the process, so this loop normally
    # completes after a single iteration.
    while True:
        time.sleep(3)
        p.communicate(input="a")
        if p.poll() != None:
            break
def fusion_pred_sim(DIR):
    """Run Timeloop on the prediction (decom + conv) layers of every block.

    Block 1's directory is the template: each later block directory b<N> is
    recreated from it, then reshaped, re-constrained and simulated.
    """
    PRED_DIR = DIR + "predictions/"
    fix_pred_mapper(PRED_DIR)
    for block in range(F_BLOCKS):
        PRED_BLOCK_DIR = PRED_DIR + "b" + str(block+1) + "/"
        if block != 0:
            # BUG FIX: b<N> is a directory, so test with isdir - the old
            # isfile check never matched, and on re-runs `cp -r` then nested
            # stale copies inside the existing directory.
            if os.path.isdir(PRED_DIR + 'b'+str(block+1)):
                subprocess.run(['rm', '-rf', PRED_DIR + 'b'+str(block+1)])
            subprocess.run(['cp', '-r', PRED_DIR+ 'b'+str(block), PRED_DIR + 'b'+str(block+1)])
        fix_block_shape(PRED_BLOCK_DIR, block)
        fix_block_constraints(PRED_BLOCK_DIR, block)
        t1 = time.time()
        run_timeloop(PRED_DIR, PRED_BLOCK_DIR+"decom/")
        t2 = time.time()
        print("fusion predictions pw block ", block+1, " and took ", t2-t1)
        t1 = time.time()
        run_timeloop(PRED_DIR, PRED_BLOCK_DIR+"conv/", dw=True)
        t2 = time.time()
        print("fusion predictions dw block ", block+1, " and took ", t2-t1)
#****************************************************************************************************
#****************************************************************************************************
#**************************************************************************************
#Main Computation Funstions************************************************************
#**************************************************************************************
def fix_block_constraints_main(BLOCK_DIR, block):
    """Patch the constraint file of one main-computation block in place.

    Temporal GLB factors pin C/M to the block's channel counts; the spatial
    factors are a fixed 8x8 split (NOTE(review): the literal 8s presumably
    track X_PEs - confirm before changing the PE array size).
    """
    const_path = BLOCK_DIR + "constraints/constraints.yaml"
    with open(const_path, 'r') as file:
        const = yaml.safe_load(file)
    for i in range(len(const['architecture_constraints']['targets'])):
        current_const = const['architecture_constraints']['targets'][i]
        if current_const['target'] == 'shared_glb' and current_const['type'] == 'temporal':
            const['architecture_constraints']['targets'][i]['factors'] = 'N=1 R=1 S=1 Q=1 C=' + str(f_in_ch[block]) + ' M=' + str(f_out_ch[block])
        elif current_const['target'] == 'shared_glb' and current_const['type'] == 'spatial':
            const['architecture_constraints']['targets'][i]['factors'] = 'N=1 R=1 S=1 C=1 M=1 P=' + str(8) + ' Q=' + str(1)
        elif current_const['target'] == 'DummyBuffer' and current_const['type'] == 'spatial':
            const['architecture_constraints']['targets'][i]['factors'] = 'N=1 R=1 S=1 C=1 M=1 P=1 Q=' + str(8)
        elif current_const['target'] == 'psum_spad' and current_const['type'] == 'temporal':
            # BUG FIX: the 'R=' and 'S=' tokens were concatenated without a
            # separator ('R=3S=3'); factor strings must be space-delimited,
            # as every other factor string in this file is.
            k = f_kernel_size[block]
            const['architecture_constraints']['targets'][i]['factors'] = 'M=1 N=1 P=1 Q=1 C=1 R=' + str(k) + ' S=' + str(k)
    with open(const_path, 'w') as file:
        yaml.dump(const, file)
def fix_block_shape_main(BLOCK_DIR, block):
    """Write the main-computation problem shape for one fusion block."""
    prob_path = BLOCK_DIR + "prob/prob.yaml"
    with open(prob_path, 'r') as fh:
        prob = yaml.safe_load(fh)
    instance = prob['problem']['instance']
    instance['Wstride'] = f_stride[block]
    instance['Hstride'] = f_stride[block]
    instance['C'] = f_in_ch[block]
    instance['M'] = f_out_ch[block]
    instance['P'] = f_pixels[block]
    instance['Q'] = f_pixels[block]
    instance['R'] = f_kernel_size[block]
    instance['S'] = f_kernel_size[block]
    with open(prob_path, 'w') as fh:
        yaml.dump(prob, fh)
def fix_main_mapper(DIR):
    """Apply the main-computation mapper timeout/victory settings."""
    mapper_path = DIR + "mapper/mapper.yaml"
    with open(mapper_path, 'r') as fh:
        mapper = yaml.safe_load(fh)
    mapper['mapper']['timeout'] = main_mapper_pars[0]
    mapper['mapper']['victory-condition'] = main_mapper_pars[1]
    with open(mapper_path, 'w') as fh:
        yaml.dump(mapper, fh)
def fusion_main_sim(DIR):
    """Run Timeloop on the main-computation layer of every fusion block.

    Block 1's directory is the template: each later block directory b<N> is
    recreated from it, then reshaped, re-constrained and simulated.
    """
    MAIN_DIR = DIR + "main_computations/"
    fix_main_mapper(MAIN_DIR)
    for block in range(F_BLOCKS):
        MAIN_BLOCK_DIR = MAIN_DIR + "b" + str(block+1) + "/"
        if block != 0:
            # BUG FIX: b<N> is a directory, so test with isdir - the old
            # isfile check never matched, and on re-runs `cp -r` then nested
            # stale copies inside the existing directory.
            if os.path.isdir(MAIN_DIR + 'b'+str(block+1)):
                subprocess.run(['rm', '-rf', MAIN_DIR + 'b'+str(block+1)])
            subprocess.run(['cp', '-r', MAIN_DIR+ 'b'+str(block), MAIN_DIR + 'b'+str(block+1)])
        fix_block_shape_main(MAIN_BLOCK_DIR, block)
        fix_block_constraints_main(MAIN_BLOCK_DIR, block)
        p = subprocess.Popen(['timeloop-mapper', './../accelerator/fusion/main/arch.yaml',\
            './../accelerator/fusion/main/components/smartbuffer_RF.yaml', './../accelerator/fusion/main/components/smartbuffer_SRAM.yaml',\
            MAIN_DIR+'mapper/mapper.yaml', MAIN_BLOCK_DIR+'prob/prob.yaml', MAIN_BLOCK_DIR+'constraints/constraints.yaml',\
            '-o', MAIN_BLOCK_DIR+'output'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        # Poll until the mapper exits; communicate() writes "a" to stdin and
        # waits for the process, so this loop normally runs once.
        while True:
            time.sleep(3)
            p.communicate(input="a")
            if p.poll() != None:
                break
        print("block ", block+1, "of main computations simulations")
        time.sleep(3)
#****************************************************************************************************
#****************************************************************************************************
def fusion_sim():
    # Run the full fusion pipeline rooted at ./fusion/: prediction layers
    # first, then the main computation layers.
    DIR = "./fusion/"
    fusion_pred_sim(DIR)
    fusion_main_sim(DIR)
def sim(CSV_only, delay, main_mapper, pred_mapper, pred_dw_mapper, pes, xpes, spatial_x, spatial_y, blocks,\
        in_ch, midd_ch, out_ch, pixels, stride, adders, prune_factors, pred1_qtz, pred2_qtz, pred_qtz, q_blocks, kernel_size):
    """Entry point: install the configuration into the module globals,
    optionally run the Timeloop simulations, and always (re)build the
    results CSV.

    CSV_only=True skips simulation and only regenerates the CSV from
    existing Timeloop outputs.  spatial_x/spatial_y are the per-layer
    spatial factor tables for the GLB and DummyBuffer respectively.
    """
    global DELAY, main_mapper_pars, pred_mapper_pars, pred_dw_mapper_pars,\
        F_PEs, X_PEs, PRED_SPATIAL_GLB, PRED_SPATIAL_DUMMY,\
        F_BLOCKS, f_in_ch, f_midd_ch, f_out_ch, f_pixels, f_stride, f_kernel_size
    DELAY = delay
    main_mapper_pars = main_mapper
    pred_mapper_pars = pred_mapper
    pred_dw_mapper_pars = pred_dw_mapper
    F_PEs = pes
    X_PEs = xpes
    PRED_SPATIAL_GLB = spatial_x
    PRED_SPATIAL_DUMMY = spatial_y
    F_BLOCKS = blocks
    f_in_ch = in_ch
    f_midd_ch = midd_ch
    f_out_ch = out_ch
    f_pixels = pixels
    f_stride = stride
    f_kernel_size = kernel_size
    if not CSV_only:
        fusion_sim()
    # adders, prune_factors and the *_qtz arguments are consumed only by the
    # CSV generator, not by the simulations above.
    fusion.make_csv(F_BLOCKS, F_PEs, adders, f_in_ch, f_midd_ch, f_out_ch, f_pixels,\
        f_stride, prune_factors, pred1_qtz=pred1_qtz, pred2_qtz=pred2_qtz, pred_qtz=pred_qtz, q_blocks=q_blocks)
| [
"mohamad.ol95@gmail.com"
] | mohamad.ol95@gmail.com |
13015e14dea26ea82a75341fbe98b78d0ab4190d | 10c46fbb1f8b5229a485b015366f27738c487acd | /Testing_Project.py | 6cc8fc9509897dbc8a4e0ad6ef27961f62e202c2 | [] | no_license | EugeoKirito/Flask_Blueprint_Testing | d238ceae6a50d4ef1562e38ddda82e4c6e35ec2d | f7318580d5e63e30f5b4a3f2f1f11637ff727928 | refs/heads/master | 2020-05-09T22:44:40.409376 | 2019-04-15T12:22:00 | 2019-04-15T12:22:00 | 181,480,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import json
import unittest
from manage import app
class LoginTest(unittest.TestCase):
    """Exercises the /login endpoint of the Flask app."""

    def setUp(self):
        # A fresh test client per test keeps requests isolated.
        self.client = app.test_client()

    def test_empty_username_password(self):
        """POSTing an empty form must come back with business code 1."""
        response = self.client.post('/login', data={})
        body = response.data
        print(body)
        payload = json.loads(body)
        self.assertIn('code', payload)
        self.assertEqual(payload['code'], 1)
if __name__ == '__main__':
unittest.main() | [
"44084753+EugeoKirito@users.noreply.github.com"
] | 44084753+EugeoKirito@users.noreply.github.com |
9482857d3b8deefcb5786bfe6b8a01f54785c5a3 | 98c137031262565b975a1c6673adef47ff82456b | /utils/setting.py | ae74d73e72a65ef815406d9c85001790147006a2 | [] | no_license | zhulinyi422/huxiuwang_blog | 4708beeed37704a9e604914e15ad2eda007e9496 | 6f96f4d234e526582c3bbf09bd0c17d8dbab81eb | refs/heads/master | 2020-03-23T22:11:53.732396 | 2018-07-26T07:28:34 | 2018-07-26T07:28:34 | 142,159,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import os
# Project root: two levels up from this file (utils/ lives under the root).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Uploaded files are stored under <project root>/static/upload.
UPLOAD_DIR = os.path.join(os.path.join(BASE_DIR,'static'),'upload')
| [
"536555895@qq.com"
] | 536555895@qq.com |
63b06ada39b3e650e67c14cb068f5523fdad5075 | cff720e5f1c214b864544dc29e69f97a1341500f | /mysite/settings.py | e3b6074d649704561f13d27322227a948af8821d | [] | no_license | buront11/my-first-blog | 22e005b9cae42432215561abbff49149e3663edd | 9cbef6948c5e246f8d2636ab5ddb4f49bd326b3c | refs/heads/master | 2020-12-23T12:12:21.993978 | 2020-02-06T02:18:39 | 2020-02-06T02:18:39 | 237,147,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,198 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'bt#bjp)5a)50dcnqv=)=z(=onn$p%gwnl(+$e$g(xfu-%qih$8'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True while ALLOWED_HOSTS includes pythonanywhere.com
# suggests this same config serves the deployed site - confirm and split.
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
# Application definition: stock Django contrib apps plus the project's
# single "blog" app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog.apps.BlogConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite development database stored alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
# Japanese UI strings, Tokyo time zone.
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static') | [
"ysk.s_0220@outlook.com"
] | ysk.s_0220@outlook.com |
c01905023c84bc877eca4fa09077170f3c508b6c | 21ca09e275a8c9086e57728213b186b96477b48d | /BkgdSub_v1.py | 0d152d2c127f05c341c33cba58bb2f7d982a0564 | [] | no_license | aflevel/FlyCourtship | 48a110045c86ea61ab07522629b35e948bf6ae88 | 612cd3a7a32ddf39ed5500163e25da8f1a63b0b9 | refs/heads/master | 2020-04-21T10:48:14.769598 | 2019-02-07T00:22:59 | 2019-02-07T00:22:59 | 169,497,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | #!/usr/bin/python
import sys
import os
import numpy as np
import cv2
import datetime
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
#sys.argv=['','359_CTMxCTF_6.mp4',2]
# Motion quantifier: compares the MOG2 foreground mask of each frame with the
# mask of the frame `lapse` seconds earlier and logs the per-frame pixel
# difference to a CSV.  Usage: BkgdSub_v1.py <video.mp4> <lapse_seconds>
cap = cv2.VideoCapture(sys.argv[1])
rec = FFMPEG_VideoReader(sys.argv[1],True)
rec.initialize()
frameWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
frameHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
frameNum=int(cap.get(7))       # CAP_PROP_FRAME_COUNT
fps=int(cap.get(5))            # CAP_PROP_FPS
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = False)
FrameDiff=np.array([])
lapse=int(sys.argv[2])
i=0
while(i<frameNum):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    if i>lapse*fps+1 and i<frameNum-fps:
        ref=rec.get_frame((i-lapse*fps)/fps)
        refmask = fgbg.apply(ref)
        try:
            frameDelta = cv2.absdiff(refmask, fgmask)
        # Was a bare `except:`; narrowed to the errors a failed diff raises.
        except (cv2.error, TypeError):
            frameDelta = np.array([])
            print(str(i) + ' out of ' + str(frameNum))
        entry=[float(i)/fps,np.sum(frameDelta)]
        # Entries are prepended, so after the final reshape the log rows are
        # in reverse frame order.
        FrameDiff=np.insert(FrameDiff,[0],entry,axis=0)
    if i<frameNum-fps:
        try:
            cv2.imshow('frame',fgmask)
        # Was a bare `except:`; e.g. fgmask is None past the end of stream.
        except (cv2.error, TypeError):
            print(str(i) + ' out of ' + str(frameNum))
            break
    i+=1
    k = cv2.waitKey(30) & 0xff
    if k == ord('q'):
        break
LogFile='Log_' + sys.argv[1].replace(".mp4","_" + str(sys.argv[2]) + "sec.csv")
# BUG FIX: use integer division - len(...)/2 is a float under Python 3 and
# np.reshape rejects float dimensions.
FrameDiff=np.reshape(FrameDiff,(len(FrameDiff)//2,2))
np.savetxt(LogFile, FrameDiff, delimiter=",")
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
19f06cd1078d337384ddc3da7c6e980f4f9cebf3 | 2328a25664cd427f2043164ad815698bbb021c34 | /ProfilerApp/ProfilerApp/__init__.py | 304131b26aa01fa05bbc7b96a95f61758190e504 | [] | no_license | Dishan765/Automated-Cybercrime-Profiling | 7f7f017c8d4614ddffd5f662dc7e279a8d40608e | 31a7f89be7a2ed06444bda7cb0ece52854d4e7e7 | refs/heads/master | 2023-07-04T19:35:07.333739 | 2021-08-21T19:44:41 | 2021-08-21T19:44:41 | 347,069,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from ProfilerApp.config import Config
from flask_mail import Mail
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
mail = Mail()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app instance."""
    app = Flask(__name__)
    # BUG FIX: honor the factory argument instead of always loading the
    # default Config (the parameter was previously ignored).
    app.config.from_object(config_class)
    db.init_app(app)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)
    # Blueprints are imported here (not at module top) to avoid circular
    # imports between the app package and its route modules.
    from ProfilerApp.users.routes import users
    from ProfilerApp.posts.routes import posts
    from ProfilerApp.profiles.routes import profile
    from ProfilerApp.admin.routes import admin
    from ProfilerApp.api.routes import api
    app.register_blueprint(users)
    app.register_blueprint(posts)
    app.register_blueprint(profile)
    app.register_blueprint(admin)
    app.register_blueprint(api)
return app | [
"you@example.com"
] | you@example.com |
6e1ca7af00fb94efbfc0f9b99bfc5ebd843edbdf | 52c87bbb67acac57fc2a06b0a53c3b2aa95fad08 | /portal/migrations/0007_data_deduct_limits.py | c25b6e73ad5973c80ee06dadc6133e2ad169bf0f | [] | no_license | gBobCodes/insurance | c6b6a44edde8f9f91304bb64b7433553c92fbb7f | 940d44443026c97c302093aacbe17944f1595988 | refs/heads/master | 2021-06-07T12:16:21.329579 | 2016-10-08T13:26:27 | 2016-10-08T13:26:27 | 70,332,465 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,788 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-17 11:58
from __future__ import unicode_literals
from django.db import migrations
def connect_deductibles_limits(apps, schema_editor):
    '''Connect Deductibles to liability Limits.

    Creates one DeductibleLimit per (deductible, limit) pair:
    - the $15,000 (max) deductible gets multiplier 1.0 for every limit,
      because it does not change the premium calculation;
    - the $5,000 and $0 deductibles get limit-specific multipliers for the
      five lowest limits and one flat multiplier for every higher limit.
    '''
    Deductible = apps.get_model('portal', 'deductible')
    DeductibleLimit = apps.get_model('portal', 'deductiblelimit')
    Limit = apps.get_model('portal', 'limit')

    deduct_0 = Deductible.objects.get(value=0)
    deduct_5 = Deductible.objects.get(value=5000)
    deduct_15 = Deductible.objects.get(value=15000)

    # Max deductible: multiplier 1.0 across the board.
    for limit in Limit.objects.all():
        DeductibleLimit.objects.get_or_create(
            deductible=deduct_15,
            limit=limit,
            multiplier=1.0
        )

    # (min, max) -> (multiplier @ $5,000 deductible, multiplier @ $0).
    specific_multipliers = [
        ((100000, 300000), (1.129, 1.199)),
        ((200000, 600000), (1.093, 1.144)),
        ((250000, 500000), (1.08, 1.122)),
        ((250000, 750000), (1.08, 1.122)),
        ((300000, 900000), (1.07, 1.108)),
    ]
    # All higher limits share a single flat multiplier per deductible.
    flat_limits = [
        (500000, 1000000),
        (500000, 1500000),
        (750000, 1500000),
        (1000000, 1000000),
        (1000000, 2000000),
        (1000000, 3000000),
        (1000000, 4000000),
        (1300000, 3900000),
    ]
    for column, (deductible, flat_multiplier) in enumerate(
            [(deduct_5, 1.06), (deduct_0, 1.093)]):
        for (lo, hi), multipliers in specific_multipliers:
            DeductibleLimit.objects.get_or_create(
                deductible=deductible,
                limit=Limit.objects.get(min=lo, max=hi),
                multiplier=multipliers[column]
            )
        for lo, hi in flat_limits:
            DeductibleLimit.objects.get_or_create(
                deductible=deductible,
                limit=Limit.objects.get(min=lo, max=hi),
                multiplier=flat_multiplier
            )
def delete_deductiblelimits(apps, schema_editor):
    '''Remove all the deductiblelimit objects from the DB.'''
    DeductibleLimit = apps.get_model('portal', 'deductiblelimit')
    DeductibleLimit.objects.all().delete()
class Migration(migrations.Migration):
    dependencies = [
        ('portal', '0006_deductiblelimit'),
    ]
    operations = [
        # Data migration: forward populates the DeductibleLimit rows,
        # reverse simply wipes them all.
        migrations.RunPython(
            code=connect_deductibles_limits,
            reverse_code=delete_deductiblelimits
        ),
    ]
| [
"bcollins@foregroundsecurity.com"
] | bcollins@foregroundsecurity.com |
be94b685e25130e55324861a9920dd3906984a0c | 5285cba52bbcdb34bdd8893bdcd95a6c5628dbd5 | /hillandgertner/pages/views.py | 16d0992f6c6c620fe014697497e1d1049435a5d5 | [] | no_license | chemcnabb/hill-gertner | fae389ad8b12fc6c8d9be6d78a469aa231d6a8ae | 6920971c7fc7d7298dfa91b670d8d085c73c4e73 | refs/heads/master | 2021-09-14T01:10:11.820597 | 2018-01-08T00:42:11 | 2018-01-08T00:42:11 | 109,914,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render
from django.views.generic import TemplateView
from hillandgertner.pages.models import Page
from hillandgertner.protected_pages.models import ProtectedPage
# Flip to True to run without the page_globals app: Globals is then never
# imported, and any code referencing it must tolerate the NameError.
debug=False
if not debug:
    from hillandgertner.page_globals.models import Globals
# Create your views here.
class IndexView(TemplateView):
    """Single-page view: renders every Page plus site-wide layout globals."""
    template_name = 'main.html'
    # Evaluated once at class-definition (import) time; a process restart is
    # needed to pick up new Page or Globals rows.
    pages = Page.objects.order_by('order')
    try:
        # NOTE(review): `if Globals else debug` tests the truthiness of the
        # model class (always True when imported) - presumably an import
        # guard for debug mode; confirm the intent.
        globals = Globals.objects.first() if Globals else debug
    # Was a bare `except:`; Exception still covers the NameError raised when
    # debug=True (Globals never imported) and any DB error.
    except Exception:
        globals = None

    def get_context_data(self, **kwargs):
        """Add the pages and computed layout metrics to the context."""
        context = super(IndexView, self).get_context_data(**kwargs)
        context['pages'] = self.pages
        if self.globals:
            context['content_margin'] = ((len(self.pages)+1)*float(self.globals.header_height))-(len(self.pages)/2)-1
            context['header_height_adjusted'] = float(self.globals.header_height)-1
            context['globals'] = self.globals
        return context
| [
"che.mcnabb@sgsco.com"
] | che.mcnabb@sgsco.com |
50b9260ebbf8a1f583eaf4f101ca5bb2e43e63f0 | 99f9ecdb35c9927698f3a3e8b5864dd7f5b8aef7 | /thingsboard_gateway/connectors/request/request_uplink_converter.py | fd6b5b3a26888c39d5c2e9274c43f6f01eef19bd | [
"Apache-2.0"
] | permissive | luxiaosu/thingsboard-gateway | 43bd4af5f7944c68a403c8bdb125e7536e202c2b | 646bc6bb64a05aac8710c9a3e736db6ec8d5864b | refs/heads/master | 2023-07-30T13:23:18.783011 | 2021-10-07T15:41:20 | 2021-10-07T15:41:20 | 408,509,478 | 0 | 0 | Apache-2.0 | 2021-09-20T16:01:15 | 2021-09-20T16:01:14 | null | UTF-8 | Python | false | false | 810 | py | # Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from thingsboard_gateway.connectors.converter import Converter, abstractmethod
class RequestUplinkConverter(Converter):
@abstractmethod
def convert(self, config, data):
pass
| [
"ibarkov.ua@gmail.com"
] | ibarkov.ua@gmail.com |
bfadc639c8f6aae941ff458f195d340f9719f26e | e9f90d1ba4247c01f59c313179f1ef005885aaba | /khovanhanh/urls.py | f37c115a2c071219be44e80929fecb2c871f9537 | [] | no_license | hangockhue/khovanhanh | aef9ad39822902dea621512b97a485bc7d123bee | d5b3ca103abd4e20a80db8f43768a68d7adab9dc | refs/heads/master | 2022-04-17T18:02:11.526517 | 2020-04-20T08:06:11 | 2020-04-20T08:06:11 | 231,758,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import static
import khovanhanh.settings as settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('khofront.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"khueha@nhaphangmy.com"
] | khueha@nhaphangmy.com |
65cdabf8faee54817569aebc2ce8097e24679139 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s621337044.py | f7964c4a3f01cff6041508b36017d68bb3b4e4ed | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | N=int(input())
*A,=map(int,input().split())
ans=[]
for a in A:
if len(ans)<a-1:
ans=[-1]
break
else:
ans.insert(a-1,a)
for x in ans:
print(x) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
78fbc97608fb1ad31a7ee890841003439b9de511 | 5d717ea633b4d1ffc25fd9c4248e893d05ddc2a9 | /scripts/trimm_model.py | 63b297adbc70f61b19be39707db27704a16dcf8c | [
"MIT"
] | permissive | migp11/csm4cobra | 0ea92af0ebf91d56fe6fcd0c0c953203fb9ee47b | af1b9ed03935180e936d3faa3b2cb0bf77764255 | refs/heads/master | 2020-06-22T03:16:42.780918 | 2019-07-18T16:09:00 | 2019-07-18T16:09:00 | 197,619,124 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | #!/usr/bin/env python3
import argparse
import os
from cobra.flux_analysis import find_blocked_reactions
from cobra.io import read_sbml_model
from cobra.io import write_sbml_model
from csm4cobra.io import read_json
from csm4cobra.manipulate import set_medium
def create_parser():
parser = argparse.ArgumentParser(description="Trim blocked reactions and gap metabolites from \
a genome-scale metabolic model")
parser.add_argument('sbml_fname', action="store", help='SBML file to use a the model reference')
parser.add_argument('--out', action="store", dest="sbml_fname_out", required=True,
help='SBML file name to for the outputed model')
parser.add_argument('--media', action="store", dest="json_exchanges", default=None,
help='JSON file storing the exchange bounds')
parser.add_argument('--open-exchanges', action="store_true", dest="open_exchanges",
help="A flag to indicade wheather to relax exchange fluxes bounds. \
Ignored if --media is also used")
parser.add_argument('--exchange-prefix', action="store", dest="exchange_prefix", default="EX",
help='Prefix for the exchange reaction. Use with open-exhanges')
parser.add_argument('--flux-bound', action="store", dest="flux_bound", default=1000.,
help='Prefix for the exchange reaction. Use with open-exhanges')
parser.add_argument('--usefbc', action="store_true", help='Write SBML files using FBC package')
return parser
def main():
parser = create_parser()
args = parser.parse_args()
assert os.path.isfile(args.sbml_fname)
# Reading SBML genome-scale model
print("Reading SBML Model from %s:" % args.sbml_fname, end=" ")
model = read_sbml_model(args.sbml_fname)
print("OK!")
if args.json_exchanges:
print("Reading exchange fluxes bounds: %s:" % args.json_exchanges, end=" ")
media_dict = read_json(args.json_exchanges)
print("OK!")
print("Setting exchange fluxes bounds")
set_medium(model, media_dict, inplace=True)
print("OK!")
else:
if args.open_exchanges:
for r in model.reactions:
if not r.id.startswith(args.exchange_prefix):
continue
r.lower_bound = -args.flux_bound
r.upper_bound = args.flux_bound
print("Finding blocked reactions and gap metabolites:", end=" ")
blocked = find_blocked_reactions(model)
blocked = set(blocked)
gap_metabolites = [m for m in model.metabolites
if len(set([r.id for r in m.reactions]) - blocked) == 0]
print("OK!")
if len(blocked) > 0:
print("- %i blocked reactions found" % len(blocked))
print("- %i gap metabolites found" % len(gap_metabolites))
print("Trimming model", end=" ")
model.remove_reactions(blocked)
model.remove_metabolites(gap_metabolites)
print("OK!")
print("Writing trimmed model as %s" % args.sbml_fname_out, end=" ")
write_sbml_model(model, args.sbml_fname_out, use_fbc_package=args.usefbc)
print("OK!")
else:
print("NO blocked reactions found, nothing to do")
main()
| [
"miguelponcedeleon@gmail.com"
] | miguelponcedeleon@gmail.com |
f114db5dc6c302ab1e88603c39a5c95c1668865c | 3496588e6cc763a429184b55db6d401095a59229 | /alien_invasion/alien_invasion/scoreboard.py | c99da0b08ec572cb4eea5b6b93dda82e02f4e6bd | [] | no_license | DamonTan/Python-Crash-Course | c1f63c19f5c2c476baec015c39d09facd21977d1 | 49a35426467288ed1dfd823ef409c234ad22675f | refs/heads/master | 2020-03-14T06:12:20.998384 | 2018-05-10T14:23:33 | 2018-05-10T14:23:33 | 130,567,227 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,879 | py | #coding=utf-8
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard():
#显示得分信息的类
def __init__(self, ai_settings, screen, stats):
#初始化显示得分涉及的属性
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
#字体设置
self.text_color = (30,30,30)
self.font = pygame.font.SysFont(False, 48)
#准备初始得分图像
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
def prep_score(self):
#将得分转换为一幅渲染的图像
rounded_score = int(round(self.stats.score, -1))
score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True, self.text_color,
self.ai_settings.bg_color)
#将得分放在右上角
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def show_score(self):
#在屏幕上显示当前得分和最高得分
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
def prep_high_score(self):
#将最高得分转换为一幅渲染的图像
high_score = int(round(self.stats.high_score, -1))
high_score_str = "{:,}".format(high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color,
self.ai_settings.bg_color)
#将最高得分在屏幕顶部中央显示
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.top = self.score_rect.top
self.high_score_rect.centerx = self.screen_rect.centerx
def prep_level(self):
#将等级转换为渲染的图像
self.level_image = self.font.render(str(self.stats.level), True,
self.text_color, self.ai_settings.bg_color)
#将等级放在得分下方
self.level_rect = self.level_image.get_rect()
self.level_rect.top = self.score_rect.bottom + 10
self.level_rect.right = self.score_rect.right
def prep_ships(self):
#显示剩余飞船数量
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_settings, self.screen)
ship.rect.x = 10 + ship_number*ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
| [
"noreply@github.com"
] | noreply@github.com |
c945be5ade7d7d7bd3d244f0f621168994f71939 | be8bdc6bf059f7ca62e0a2fffe936ed481326380 | /shrello_web/shrello_web/urls.py | f0ade18c41bb77eef3e626431116317d9ac62d4e | [] | no_license | dhaivat28/shrello | b9a218fb3b9d0615b3c01a0a0353c1304277644b | 415a300005338e75fb9d047c6f4d51802beadf49 | refs/heads/master | 2021-01-02T09:03:14.383967 | 2017-01-06T19:24:13 | 2017-01-06T19:24:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | """shrello_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^Django-admin/', admin.site.urls),
url(r'', include('web_app.urls')),
]
| [
"siddharth2395@gmail.com"
] | siddharth2395@gmail.com |
9e01e282f678e3fac997ccaed9c5abcb7ebd2853 | b502a61dae00f9fbfed7a89b693ba9352e016756 | /Python/plotly1.py | cef9a52d973d954a1bd86e24eb2e70dc63ce131c | [] | no_license | VIJAYAYERUVA/100DaysOfCode | 4971fadd8a9583a79a3b66723db91d9d0b1cfd2a | 637bfd559e0a50181902cc31cfe062de20615b53 | refs/heads/main | 2023-03-27T06:06:14.725721 | 2021-03-27T22:09:41 | 2021-03-27T22:09:41 | 322,189,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import pandas as pd
import plotly.offline as pyo
import numpy as np
# create fake data:
df = pd.DataFrame(np.random.randn(100, 4), columns='A B C D'.split())
pyo.plot([{
'x': df.index,
'y': df[col],
'name': col
} for col in df.columns], filename='data/output/plotlyPlots/plotly1.html')
| [
"VIJAYAYERUVA@users.noreply.github.com"
] | VIJAYAYERUVA@users.noreply.github.com |
655ae72ebf093d06618bf11f5e378743c4d56783 | 2b8c8272d723f8ddc3fc1299828a72d7fbfe3cbe | /tails/settings/local.py | 3f2559df0d07b6ac3657813b1a6ae56ff95f5748 | [] | no_license | yusufertekin/tails-assignment | 041e0b7d46eac7558dba5cbe92dfca58f956fa25 | 23856026d45536b614d4beb00c577bdf04d642ac | refs/heads/master | 2022-04-29T16:38:07.930888 | 2019-09-24T02:12:54 | 2019-09-24T02:12:54 | 210,486,137 | 0 | 0 | null | 2022-04-22T22:19:37 | 2019-09-24T01:37:44 | Python | UTF-8 | Python | false | false | 251 | py | from tails.settings.base import *
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| [
"yusuf@ertekin.net"
] | yusuf@ertekin.net |
5a6eb0cb2eb972dee48c7e91616bf75ba288e65f | 101d866f8e2f84dc8f76181341180c13b38e0ecf | /utils/tes.py | 1937dc4f93ef482fe7fa346571d89d6792137995 | [] | no_license | cming091/autotest | 1d9a6f5f750c04b043a6bc45efa423f2e730b3aa | 0f6fe31a27de9bcf0697c28574b97555fe36d1e1 | refs/heads/master | 2023-06-02T18:22:24.971786 | 2021-06-21T08:52:47 | 2021-06-21T08:52:47 | 378,858,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,257 | py | import json
import requests
import logging
from utils import cfg
def register(warehouse_name):
    """Register a new warehouse with the TES service.

    Returns a tuple ``(result, warehouseID)`` where ``result`` is 'succ' on
    success and 'fail' otherwise; ``warehouseID`` is '' on failure.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/register'
    result = 'fail'
    warehouseID = ''
    payload = {
        'userID': "111",
        'warehouseName': warehouse_name,
        'length': 1000,
        'width': 1000
    }
    try:
        response = requests.post(url=url, data=payload)
        if response.status_code != 200:
            logging.error(f'register error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'register success, {body}')
                warehouseID = body['data']['warehouseID']
            else:
                logging.error(f'register error, {body}')
    except Exception as e:
        logging.error(f'register error, {e}')
    return result, warehouseID
def register_warebasic(warehouse_name, warehouseID, warehouseCode):
    """Register a warehouse with the warebasic service.

    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':8000/wes/warebasic/warehouse/registerWarehouse'
    result = 'fail'
    payload = {
        'warehouseID': warehouseID,
        'warehouseName': warehouse_name,
        'warehouseCode': warehouseCode,
    }
    try:
        # The warebasic endpoint expects a JSON body.
        response = requests.post(
            url=url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload),
        )
        if response.status_code != 200:
            logging.error(f'register_warebasic error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'register success, {body}')
            else:
                logging.error(f'register_warebasic error, {body}')
    except Exception as e:
        logging.error(f'register_warebasic error, {e}')
    return result
def upload(file_path):
    """Upload a local file to the file server.

    Returns a tuple ``(result, file_url, md5)`` where ``result`` is 'succ' on
    success and 'fail' otherwise; ``file_url`` and ``md5`` are '' on failure.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':81/upload'
    result = 'fail'
    file_url = ''
    md5 = ''
    try:
        # Use a context manager so the file handle is always closed; the
        # previous version opened the file and never released the handle.
        with open(file_path, 'rb') as fp:
            data = {'file': fp}
            r = requests.post(url=url, files=data)
        if r.status_code == 200:
            res_data = r.json()
            if res_data['returnCode'] == 0:
                result = 'succ'
                logging.info(f'upload success, {res_data}')
                file_url = res_data['data']['url']
                md5 = res_data['data']['md5']
            else:
                logging.error(f'upload error,data: {data} res: {res_data}')
        else:
            logging.error(f'upload error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'upload error, {e}')
    return result, file_url, md5
def import_wareservice(md5, fileName, fileURL, warehouseID):
    """Import a warehouse map file into the TES wareservice.

    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/importByURL'
    result = 'fail'
    payload = {
        'clearNodeTypeIndex': 1,
        'clearAllFrame': 1,
        'clearNodeTypeInsulate': 1,
        'md5': md5,
        'fileName': fileName,
        'fileURL': fileURL,
        'importType': 'COVER',
        'userName': 'admin',
        'warehouseID': warehouseID
    }
    try:
        response = requests.post(url=url, data=payload)
        if response.status_code != 200:
            logging.error(f'import wareservice error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'import wareservice success, {body}')
            else:
                logging.error(f'import wareservice error, {body}')
    except Exception as e:
        logging.error(f'import wareservice error, {e}')
    return result
def import_wareservice_915(md5, fileName, fileURL, warehouseID, regionType, regionName):
    """Import a warehouse map into the TES wareservice for a specific region.

    Unlike :func:`import_wareservice`, the clear flags are all 0 and a region
    is named explicitly. Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/importByURL'
    result = 'fail'
    payload = {
        'regionType': regionType,
        'regionName': regionName,
        'clearNodeTypeIndex': 0,
        'clearAllFrame': 0,
        'clearNodeTypeInsulate': 0,
        'md5': md5,
        'fileName': fileName,
        'fileURL': fileURL,
        'importType': 'COVER',
        'userName': 'admin',
        'warehouseID': warehouseID
    }
    try:
        response = requests.post(url=url, data=payload)
        if response.status_code != 200:
            logging.error(f'import wareservice error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'import wareservice success, {body}')
            else:
                logging.error(f'import wareservice error, {body}')
    except Exception as e:
        logging.error(f'import wareservice error, {e}')
    return result
def import_warebase(fileName, fileURL, warehouseID):
    """Initialize a warehouse in the warebase service from a map file URL.

    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/warebase/api/warehouse/initWarehouseByUrl'
    result = 'fail'
    payload = {
        'warehouseName': fileName,
        'fileURL': fileURL,
        'warehouseID': warehouseID
    }
    try:
        response = requests.post(url=url, data=payload)
        if response.status_code != 200:
            logging.error(f'import warebase error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'import warebase success, {body}')
            else:
                logging.error(f'import warebase error, {body}')
    except Exception as e:
        logging.error(f'import warebase error, {e}')
    return result
def import_warebasic(warehouseCode, regionCode, regionName, regionType, fileURL):
    """Import a region map into the warebasic service from a file URL.

    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':8000/wes/warebasic/warehouse/importMapByFileUrl'
    result = 'fail'
    payload = {
        'warehouseCode': warehouseCode,
        'regionCode': regionCode,
        'regionName': regionName,
        'regionType': regionType,
        'fileUrl': fileURL
    }
    try:
        # The warebasic endpoint expects a JSON body.
        response = requests.post(
            url=url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(payload),
        )
        if response.status_code != 200:
            logging.error(f'import warebasic error, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'import warebasic success, {body}')
            else:
                logging.error(f'import warebasic error, {body}')
    except Exception as e:
        logging.error(f'import warebasic error, {e}')
    return result
def set_warehouse_sn(warehouse_id, sn_type, robot_id, sn):
    """Bind an SN to a robot in the given warehouse via the TES API.

    Returns 'succ' on success and 'fail' otherwise.
    """
    # NOTE: the leftover debug print of the URL was removed; every sibling
    # function in this module reports via logging only.
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/setWarehouseSNInfo'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # The body is sent as a pre-encoded form string, matching the header.
    data = f'warehouseID={str(warehouse_id)}&snType={str(sn_type)}&robotID={str(robot_id)}&sn={str(sn)}'
    result = 'fail'
    try:
        r = requests.post(url=url, data=data, headers=headers)
        if r.status_code == 200:
            res_data = r.json()
            if res_data['returnCode'] == 0:
                result = 'succ'
                logging.info(f'set warehouse sn success, {res_data}')
            else:
                logging.error(f'set warehouse sn error, {res_data}')
        else:
            logging.error(f'set warehouse sn error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'set warehouse sn error, {e}')
    return result
def multi_add_pod(warehouse_id, pod_info):
    """Batch-add pods to a warehouse via the TES API.

    ``pod_info`` is expected to be a JSON-encoded string of pod descriptors.
    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/multiAddPod'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # The body is sent as a pre-encoded form string, matching the header.
    body = f'warehouseID={warehouse_id}&podInfo={pod_info}'
    result = 'fail'
    try:
        response = requests.post(url=url, data=body, headers=headers)
        if response.status_code != 200:
            logging.error(f'multi add pod error, http response code is {response.status_code}')
        else:
            parsed = response.json()
            if parsed['returnCode'] == 0:
                result = 'succ'
            else:
                logging.error(f'multi add pod error, {parsed}')
    except Exception as e:
        logging.error(f'multi add pod error, {e}')
    return result
def multi_add_pod_815(warehouse_id, pod_info, request_id, client_code):
    """Batch-add pods to a warehouse, including request/client identifiers.

    Variant of :func:`multi_add_pod` that also sends ``requestID`` and
    ``clientCode``. Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/multiAddPod'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # The body is sent as a pre-encoded form string, matching the header.
    body = f'warehouseID={warehouse_id}&podInfo={pod_info}&requestID={request_id}&clientCode={client_code}'
    result = 'fail'
    try:
        response = requests.post(url=url, data=body, headers=headers)
        if response.status_code != 200:
            logging.error(f'multi add pod error, http response code is {response.status_code}')
        else:
            parsed = response.json()
            if parsed['returnCode'] == 0:
                result = 'succ'
            else:
                logging.error(f'multi add pod error, {parsed}')
    except Exception as e:
        logging.error(f'multi add pod error, {e}')
    return result
def all_resume_robots(warehouse_id):
    """Resume all robots in a warehouse via the TES API.

    Returns 'succ' on success and 'fail' otherwise.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/resumeRobots'
    result = 'fail'
    payload = {
        'warehouseID': warehouse_id,
        'all': 1
    }
    try:
        response = requests.post(url=url, data=payload)
        if response.status_code != 200:
            logging.error(f'all_resume_robots, http response code is {response.status_code}')
        else:
            body = response.json()
            if body['returnCode'] == 0:
                result = 'succ'
                logging.info(f'all_resume_robots success, {body}')
            else:
                logging.error(f'all_resume_robots error, {body}')
    except Exception as e:
        logging.error(f'all_resume_robots, {e}')
    return result
# if __name__ == "__main__":
# import os
# root_path = os.path.dirname(os.path.dirname(__file__))
# cfg_path = os.path.join(root_path, './conf/config.ini')
# cfg.load_cfg(cfg_path)
#
# file_path = '/Users/zhangjinqiang/Downloads/V1.4_big-118-hetu1.4.hetu'
# res = import_map(file_path)
# print('import map res = ', res)
#
# warehouse_id = '268370858668458740'
# sn_type = '0'
# robot_id = '37463339938'
# sn = '850809707888977'
# res = set_warehouse_sn(warehouse_id, sn_type, robot_id, sn)
# print('set_warehouse_sn, res =', res)
#
# pod_info = [
# {"podID": "201", "posID": "1568272772503", "posType": 2, "podFace": 3.14, "podType": 2},
# {"podID": "202", "posID": "1568272772518", "posType": 2, "podFace": 3.14, "podType": 2}
# ]
# res = multi_add_pod(warehouse_id, json.dumps(pod_info))
# print('multi_add_pod, res = ', res)
| [
"349152234@qq.com"
] | 349152234@qq.com |
970b4c6d8c568ca1a1a604bcefbed31891b3cdbc | fd8fd645c93c6ed556f8d1792b4aeaf5a9f740b3 | /carts/migrations/0003_cartitem_lin_item_total.py | 2108370424b7d86f623e6fb89e7cf3e890567790 | [] | no_license | joincs/DjangoEcommerce | 5c921f07c4933c02018bb9bd2e7e8d8864ec58d4 | 06b4aa536001c49beb71293b958c1b52980ebcdb | refs/heads/master | 2022-12-03T23:52:14.490046 | 2020-08-20T17:08:48 | 2020-08-20T17:08:48 | 289,061,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 3.0.8 on 2020-08-17 06:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carts', '0002_auto_20200817_1021'),
]
operations = [
migrations.AddField(
model_name='cartitem',
name='lin_item_total',
field=models.DecimalField(decimal_places=2, default=19.99, max_digits=10),
preserve_default=False,
),
]
| [
"codingspark786@gmail.com"
] | codingspark786@gmail.com |
2bfb6bf038bd261aebbd37bb9bafc0467a2f4652 | f67314456882a5e55e2ee57e0c0571dd36062e8b | /tests/test_players.py | 84099d2e0a4725964c9ec0b28d2c8942d7cb194d | [
"MIT"
] | permissive | gunnardag/spoppy | a571eebdd9aec689696f706a4a361823bcc1aa2d | ebf8df43cb45241c9e674124ce3562cca0522247 | refs/heads/master | 2020-12-11T07:31:31.285062 | 2016-04-27T01:15:44 | 2016-04-27T01:15:44 | 57,172,688 | 0 | 0 | null | 2016-04-27T01:07:20 | 2016-04-27T01:07:20 | null | UTF-8 | Python | false | false | 19,842 | py | import unittest
from collections import namedtuple
from mock import MagicMock, Mock, patch
import spotify
from spoppy import players, responses
from . import utils
class TestPlayer(unittest.TestCase):
    def setUp(self):
        # Build a fresh Player wired to a mocked navigation for each test.
        self.navigation = Mock()
        self.player = players.Player(self.navigation)
        self.player.initialize()
    def tearDown(self):
        # Drop references so every test starts from a clean slate.
        del self.player
        del self.navigation
    def test_has_been_loaded(self):
        # has_been_loaded() is False until the song list is non-empty.
        self.assertFalse(self.player.has_been_loaded())
        self.player.song_list = [
            utils.Track('', '')
        ]
        self.assertTrue(self.player.has_been_loaded())
    def test_shows_playlist_name(self):
        # The playlist name appears in the UI only after a playlist is set,
        # and then exactly once.
        playlist_name = 'Playlist 1'
        ui = self.player.get_ui()
        self.assertEqual(
            len([line for line in ui if playlist_name in line]),
            0
        )
        self.player.playlist = utils.Playlist(playlist_name, [])
        ui = self.player.get_ui()
        self.assertEqual(
            len([line for line in ui if playlist_name in line]),
            1
        )
    @patch('spoppy.players.Player.get_help_ui')
    def test_shows_help(self, patched_show_help):
        # The help UI is rendered only after get_help() has been requested.
        self.player.get_ui()
        self.assertEqual(patched_show_help.call_count, 0)
        self.player.get_help()
        self.player.get_ui()
        self.assertEqual(patched_show_help.call_count, 1)
    def test_shows_all_available_actions_in_help(self):
        # Every registered key action must be listed in the help screen.
        help_items = self.player.get_help_ui()
        actions = []
        for item in help_items:
            # Help lines look like "[key]: action"; extract the action part.
            if '[' in item and ']' in item:
                actions.append(item.split(':')[-1].lstrip(' '))
        for action in self.player.reversed_actions:
            self.assertIn(action, actions)
    @patch('spoppy.players.time')
    def test_get_progress_while_playing(self, patched_time):
        # While playing, progress is seconds_played plus the wall-clock time
        # elapsed since play_timestamp.
        self.player.player = Mock()
        self.player.player.state = 'playing'
        # This would amount to 60 seconds played
        patched_time.time.return_value = 30
        self.player.play_timestamp = 0
        self.player.seconds_played = 30
        self.player.current_track = Mock()
        self.player.current_track.duration = 120 * 1000
        state, mins_played, perc_played, duration = self.player.get_progress()
        self.assertEqual(state, self.player.player.state)
        self.assertEqual(mins_played, '01:00')
        self.assertEqual(perc_played, 0.5)
    def test_get_progress_while_paused(self):
        # When paused, only the stored seconds_played counts toward progress.
        self.player.player = Mock()
        self.player.player.state = 'paused'
        # This would amount to 30 seconds played
        self.player.seconds_played = 30
        self.player.current_track = Mock()
        self.player.current_track.duration = 120 * 1000
        state, mins_played, perc_played, duration = self.player.get_progress()
        self.assertEqual(state, self.player.player.state)
        self.assertEqual(mins_played, '00:30')
        self.assertEqual(perc_played, 0.25)
    @patch('spoppy.players.time')
    def test_seek_backwards(self, patched_time):
        # Seeking back 10s refreshes play_timestamp and seeks the player to
        # (elapsed - 10) seconds, in milliseconds.
        patched_time.time.return_value = 30
        self.player.player = Mock()
        self.player.play_timestamp = 0
        self.assertIsNone(self.player.backward_10s())
        self.assertEqual(self.player.play_timestamp, 30)
        self.assertEqual(self.player.seconds_played, 20)
        self.player.player.seek.assert_called_once_with(20 * 1000)
    def test_seek_backwards_doesnt_seek_negative(self):
        # Seeking back past the start clamps the position at 0.
        self.player.seconds_played = 1
        self.player.backward_10s()
        self.assertEqual(self.player.seconds_played, 0)
        self.player.player.seek.assert_called_once_with(0)
    @patch('spoppy.players.time')
    def test_seek_forwards(self, patched_time):
        # Seeking forward 10s refreshes play_timestamp and seeks the player
        # to (elapsed + 10) seconds, in milliseconds.
        patched_time.time.return_value = 30
        self.player.player = Mock()
        self.player.play_timestamp = 0
        self.assertIsNone(self.player.forward_10s())
        self.assertEqual(self.player.play_timestamp, 30)
        self.assertEqual(self.player.seconds_played, 40)
        self.player.player.seek.assert_called_once_with(40 * 1000)
    def test_seek_doesnt_set_play_timestamp_if_paused(self):
        # Seeking while paused (play_timestamp is None) must not start the clock.
        self.player.play_timestamp = None
        self.player.forward_10s()
        self.assertIsNone(self.player.play_timestamp)
    @patch('spoppy.players.Player.is_playing')
    @patch('spoppy.players.time')
    def test_plays_when_paused(self, patched_time, patched_is_playing):
        # play_pause() resumes playback (and never pauses) when paused.
        self.player.player = Mock()
        patched_is_playing.return_value = False
        patched_time.time.return_value = 100
        self.assertEqual(self.player.play_pause(), responses.NOOP)
        self.player.player.play.assert_called_once_with()
        self.player.player.pause.assert_not_called()
    @patch('spoppy.players.Player.is_playing')
    @patch('spoppy.players.time')
    def test_pauses_when_playing(self, patched_time, patched_is_playing):
        # play_pause() pauses when playing, stores the elapsed seconds and
        # clears play_timestamp.
        self.player.player = Mock()
        self.player.play_timestamp = 0
        patched_is_playing.return_value = True
        patched_time.time.return_value = 100
        self.assertEqual(self.player.play_pause(), responses.NOOP)
        self.player.player.pause.assert_called_once_with()
        self.player.player.play.assert_not_called()
        self.assertEqual(self.player.seconds_played, 100)
        self.assertIsNone(self.player.play_timestamp)
    @patch('spoppy.players.Player.play_current_song')
    @patch('spoppy.players.Player.get_prev_idx')
    def test_play_prev_song(self, patched_get_prev_idx, patched_play_current):
        # previous_song() jumps to the previous index and starts playback.
        patched_get_prev_idx.return_value = 7
        self.assertEqual(self.player.previous_song(), responses.NOOP)
        self.assertEqual(self.player.current_track_idx, 7)
        patched_play_current.assert_called_once_with()
    @patch('spoppy.players.Player.play_current_song')
    @patch('spoppy.players.Player.get_next_idx')
    def test_play_next_song(self, patched_get_next_idx, patched_play_current):
        # next_song() jumps to the next index and starts playback.
        patched_get_next_idx.return_value = 7
        self.assertEqual(self.player.next_song(), responses.NOOP)
        self.assertEqual(self.player.current_track_idx, 7)
        patched_play_current.assert_called_once_with()
    @patch('spoppy.players.Player.play_current_song')
    def test_remove_current_track(self, patched_play_current):
        # Removing the current song drops it from both the song list and the
        # play order, detaches the playlist and replays the current position.
        track_to_remove = utils.Track('foo', ['bar'])
        song_list = [
            utils.Track('A', ['A']),
            utils.Track('B', ['B']),
            track_to_remove,
            utils.Track('C', ['C']),
            utils.Track('D', ['D']),
        ]
        playlist = utils.Playlist('Playlist 1', song_list)
        self.player.load_playlist(playlist)
        self.assertEqual(len(self.player.song_list), len(song_list))
        self.assertEqual(len(self.player.song_order), len(song_list))
        self.assertIsNotNone(self.player.playlist)
        self.player.current_track_idx = song_list.index(track_to_remove)
        self.assertIn(track_to_remove, self.player.song_list)
        self.assertEqual(self.player.remove_current_song(), responses.NOOP)
        self.assertNotIn(track_to_remove, self.player.song_list)
        self.assertEqual(len(self.player.song_list), len(song_list) - 1)
        self.assertEqual(len(self.player.song_order), len(song_list) - 1)
        patched_play_current.assert_called_once_with()
        self.assertIsNone(self.player.playlist)
    @patch('spoppy.players.Player.play_current_song')
    def test_starts_beginning_if_last_song_removed(self, patched_play_current):
        # Removing the final song in the queue wraps the index back to 0.
        track_to_remove = utils.Track('foo', ['bar'])
        song_list = [
            utils.Track('A', ['A']),
            utils.Track('B', ['B']),
            utils.Track('C', ['C']),
            utils.Track('D', ['D']),
            track_to_remove,
        ]
        playlist = utils.Playlist('Playlist 1', song_list)
        self.player.load_playlist(playlist)
        self.player.current_track_idx = song_list.index(track_to_remove)
        self.assertEqual(self.player.remove_current_song(), responses.NOOP)
        patched_play_current.assert_called_once_with()
        self.assertEqual(self.player.current_track_idx, 0)
    @patch('spoppy.players.Player.play_current_song')
    def test_remove_song_doesnt_raise_with_empty_q(self, patched_play_current):
        # Removing from an empty queue is a no-op and must not raise or
        # trigger playback.
        song_list = [
        ]
        playlist = utils.Playlist('Playlist 1', song_list)
        self.player.load_playlist(playlist)
        self.player.current_track_idx = 0
        self.assertEqual(self.player.remove_current_song(), responses.NOOP)
        patched_play_current.assert_not_called()
        self.assertEqual(self.player.current_track_idx, 0)
def test_shuffle(self):
# Testing that shuffle maintains the currently playing song
# is kind of impossible, just testing that the shuffle flag toggles
self.assertEqual(self.player.shuffle, False)
self.assertEqual(self.player.toggle_shuffle(), responses.NOOP)
self.assertEqual(self.player.shuffle, True)
self.assertEqual(self.player.toggle_shuffle(), responses.NOOP)
self.assertEqual(self.player.shuffle, False)
    @patch('spoppy.players.Player.clear')
    def test_stop_and_clear(self, patched_clear):
        # stop_and_clear() unloads the player, clears state and navigates up.
        self.player.player = Mock()
        self.assertEqual(self.player.stop_and_clear(), responses.UP)
        patched_clear.assert_called_once_with()
        self.player.player.unload.assert_called_once_with()
def test_toggle_repeat(self):
seen_repeat_flags = []
for i in range(len(players.Player.REPEAT_OPTIONS)):
self.assertEqual(self.player.toggle_repeat(), responses.NOOP)
seen_repeat_flags.append(self.player.repeat)
self.assertEqual(
sorted(players.Player.REPEAT_OPTIONS),
sorted(seen_repeat_flags)
)
    @patch('spoppy.players.Player.play_current_song')
    def test_add_track_to_queue(self, patched_play_current_song):
        # Adding a single track queues it, detaches the playlist and preloads
        # the current song without starting playback.
        track = MagicMock(spec=spotify.Track)
        self.player.playlist = 'foo'
        self.assertIsNone(self.player.current_track)
        self.assertIsNone(self.player.add_to_queue(track))
        self.assertIn(track, self.player.song_list)
        self.assertIsNone(self.player.playlist)
        patched_play_current_song.assert_called_once_with(start_playing=False)
    @patch('spoppy.players.Player.play_current_song')
    def test_add_playlist_to_queue(self, patched_play_current_song):
        # Adding a playlist queues each of its tracks, detaches the current
        # playlist and preloads (without playing) once per added track.
        tracks = [
            MagicMock(spec=spotify.Track),
            MagicMock(spec=spotify.Track),
            MagicMock(spec=spotify.Track),
        ]
        for track in tracks:
            track.availability = spotify.TrackAvailability.AVAILABLE
        playlist = MagicMock(spec=spotify.Playlist)
        playlist.tracks = tracks
        self.player.playlist = 'foo'
        self.assertIsNone(self.player.add_to_queue(playlist))
        for track in tracks:
            self.assertIn(track, self.player.song_list)
        self.assertIsNone(self.player.playlist)
        self.assertEqual(patched_play_current_song.call_count, 3)
        patched_play_current_song.assert_called_with(start_playing=False)
    @patch('spoppy.players.Player.next_song')
    @patch('spoppy.players.Player.play_current_song')
    def test_check_end_of_track_doesnt_do_anything_if_song_is_playing(
        self, patched_play_current, patched_next_song
    ):
        # While the end-of-track event is unset, check_end_of_track is a no-op.
        self.player.end_of_track = Mock()
        self.player.end_of_track.is_set.return_value = False
        self.player.check_end_of_track()
        patched_play_current.assert_not_called()
        patched_next_song.assert_not_called()
    @patch('spoppy.players.Player.next_song')
    @patch('spoppy.players.Player.play_current_song')
    def test_check_end_of_track_plays_next_song(
        self, patched_play_current, patched_next_song
    ):
        # With repeat 'all', the end-of-track event advances to the next song.
        self.player.end_of_track = Mock()
        self.player.end_of_track.is_set.return_value = True
        self.player.repeat = 'all'
        self.player.check_end_of_track()
        patched_play_current.assert_not_called()
        patched_next_song.assert_called_once_with()
    @patch('spoppy.players.Player.next_song')
    @patch('spoppy.players.Player.play_current_song')
    def test_check_end_of_track_plays_current_song(
        self, patched_play_current, patched_next_song
    ):
        # With repeat 'one', the end-of-track event replays the same song.
        self.player.end_of_track = Mock()
        self.player.end_of_track.is_set.return_value = True
        self.player.repeat = 'one'
        self.player.check_end_of_track()
        patched_play_current.assert_called_once_with()
        patched_next_song.assert_not_called()
    def test_get_next_prev_idx_raises_with_empty_queue(self):
        """Both index helpers raise RuntimeError when the queue is empty."""
        with self.assertRaises(RuntimeError):
            self.player.get_next_idx()
        with self.assertRaises(RuntimeError):
            self.player.get_prev_idx()
    def test_get_next_idx_wraps(self):
        """Advancing past the last position wraps back to index 0."""
        self.player.song_order = [1, 2, 3]
        self.player.current_track_idx = 2
        self.assertEqual(self.player.get_next_idx(), 0)
    def test_get_prev_idx_wraps(self):
        """Stepping back from index 0 wraps to the last position."""
        self.player.song_order = [1, 2, 3]
        self.player.current_track_idx = 0
        self.assertEqual(self.player.get_prev_idx(), 2)
    @patch('spoppy.players.Player.set_song_order_by_shuffle')
    def test_load_playlist(self, patched_set_shuffle):
        """Loading a playlist stores it on the player and copies its tracks
        into ``song_list`` in their original order.
        """
        song_list = [
            utils.Track('A', ['A']),
            utils.Track('B', ['B']),
            utils.Track('C', ['C']),
            utils.Track('D', ['D']),
        ]
        playlist = utils.Playlist('Playlist 1', song_list)
        self.player.load_playlist(playlist)
        self.assertEqual(self.player.playlist, playlist)
        self.assertEqual(len(self.player.song_list), len(song_list))
        for i in range(len(song_list)):
            # Test that order is maintained
            self.assertEqual(song_list[i], self.player.song_list[i])
    def test_load_playlist_sets_shuffle(self):
        """The shuffle flag is applied when passed explicitly and is left
        unchanged when ``load_playlist`` is called without it.
        """
        self.player.load_playlist(utils.Playlist('foo', []), shuffle=True)
        self.assertEqual(self.player.shuffle, True)
        self.player.load_playlist(utils.Playlist('foo', []))
        self.assertEqual(self.player.shuffle, True)
        self.player.load_playlist(utils.Playlist('foo', []), shuffle=False)
        self.assertEqual(self.player.shuffle, False)
        self.player.load_playlist(utils.Playlist('foo', []))
        self.assertEqual(self.player.shuffle, False)
    def test_load_playlist_does_not_load_unplayable_tracks(self):
        """Tracks flagged as unavailable are filtered out of ``song_list``
        while available tracks are kept.
        """
        track_a = utils.Track('A', ['A'])
        track_b = utils.Track('C', ['C'])
        song_list = [
            track_a,
            utils.Track('B', ['B'], available=False),
            track_b,
            utils.Track('D', ['D'], available=False),
        ]
        playlist = utils.Playlist('Playlist 1', song_list)
        self.player.load_playlist(playlist)
        self.assertEqual(self.player.playlist, playlist)
        self.assertEqual(len(self.player.song_list), 2)
        self.assertIn(track_a, self.player.song_list)
        self.assertIn(track_b, self.player.song_list)
    @patch('spoppy.players.thread')
    def test_on_end_of_track(self, patched__thread):
        """The end-of-track callback sets the event flag and interrupts the
        main thread so the player loop can react.
        """
        self.player.end_of_track = Mock()
        self.player.on_end_of_track()
        self.player.end_of_track.set.assert_called_once_with()
        patched__thread.interrupt_main.assert_called_once_with()
    @patch('spoppy.players.threading')
    @patch('spoppy.players.Player.get_track_by_idx')
    @patch('spoppy.players.get_duration_from_s')
    @patch('spoppy.players.Player.play_pause')
    def test_play_current_song(
        self, patched_play_pause, patched_get_duration, patched_get_track,
        patched_threading
    ):
        """Playing the current song unloads the previous one, loads the new
        track, resets the play counter, toggles play/pause, and registers
        the END_OF_TRACK session callback.
        """
        self.player.player = Mock()
        self.player.session = Mock()
        patched_track = Mock()
        # Minimal stand-in for the object returned by Track.load().
        TrackLoaded = namedtuple('TrackLoaded', ('duration', 'name'))
        track_loaded = TrackLoaded(1, 'foo')
        patched_track.load.return_value = track_loaded
        patched_threading.Event.return_value = 'Event'
        patched_get_track.return_value = patched_track
        patched_get_duration.return_value = 'Duration'
        self.assertIsNone(self.player.play_current_song())
        # Unloads previously playing song
        self.player.player.unload.assert_called_once_with()
        self.assertEqual(self.player.end_of_track, 'Event')
        patched_track.load.assert_called_once_with()
        self.assertEqual(self.player.current_track, track_loaded)
        self.assertEqual(self.player.current_track_duration, 'Duration')
        patched_play_pause.assert_called_once_with()
        self.assertEqual(self.player.seconds_played, 0)
        self.player.session.on.assert_called_once_with(
            spotify.SessionEvent.END_OF_TRACK,
            self.player.on_end_of_track
        )
    @patch('spoppy.players.threading')
    @patch('spoppy.players.Player.get_track_by_idx')
    def test_play_current_song_handles_empty_queue(
        self, patched_get_track, patched_threading
    ):
        """With no track available, play_current_song leaves current_track
        unset and never creates an end-of-track event.
        """
        self.player.player = Mock()
        patched_get_track.return_value = None
        self.player.play_current_song()
        self.assertIsNone(self.player.current_track)
        patched_threading.Event.assert_not_called()
    @patch('spoppy.players.random')
    def test_set_song_order_by_shuffle(self, patched_random):
        """song_order always mirrors song_list in length; random.shuffle is
        invoked only when the shuffle flag is on.
        """
        original = [1, 2, 3, 4, 5]
        self.player.song_list = [1, 2, 3, 4, 5]
        # shuffle off: order rebuilt but never randomized
        self.player.shuffle = False
        self.player.set_song_order_by_shuffle()
        self.assertEqual(
            len(self.player.song_list), len(self.player.song_order)
        )
        self.assertEqual(
            len(original), len(self.player.song_order)
        )
        patched_random.shuffle.assert_not_called()
        # shuffle on: same length, but random.shuffle is applied
        self.player.shuffle = True
        self.player.set_song_order_by_shuffle()
        self.assertEqual(
            len(self.player.song_list), len(self.player.song_order)
        )
        self.assertEqual(
            len(original), len(self.player.song_order)
        )
        patched_random.shuffle.assert_called_once_with(self.player.song_order)
    @patch('spoppy.players.Player.play_current_song')
    def test_play_track_by_idx(self, patched_play_current):
        """play_track translates a song index into its position within
        ``song_order`` before playing, and rejects a None index.
        """
        self.player.song_order = [0, 1, 2, 3]
        self.player.play_track(0)
        patched_play_current.assert_called_once_with()
        self.assertEqual(self.player.current_track_idx, 0)
        patched_play_current.reset_mock()
        # With a shuffled order, idx 0 lives at position 3.
        self.player.song_order = [2, 1, 3, 0]
        self.player.play_track(0)
        patched_play_current.assert_called_once_with()
        self.assertEqual(self.player.current_track_idx, 3)
        with self.assertRaises(ValueError):
            self.player.play_track(None)
    @patch('spoppy.players.SavePlaylist')
    def test_save_as_playlist(self, patched_saveplaylist):
        """save_as_playlist returns a SavePlaylist menu wired with the
        current song list and a callback that installs the saved playlist;
        this holds whether or not a playlist is currently loaded.
        """
        SavePlaylist = Mock()
        patched_saveplaylist.return_value = SavePlaylist
        # Case 1: a playlist is already loaded.
        self.player.playlist = 'Something'
        self.assertEqual(self.player.save_as_playlist(), SavePlaylist)
        self.assertEqual(self.player.song_list, SavePlaylist.song_list)
        self.assertTrue(callable(SavePlaylist.callback))
        playlist = Mock()
        playlist.name = 'foobar'
        SavePlaylist.callback(playlist)
        self.assertEqual(self.player.playlist, playlist)
        self.assertEqual(self.player.original_playlist_name, 'foobar')
        # Case 2: no playlist loaded at all.
        self.player.playlist = None
        self.assertEqual(self.player.save_as_playlist(), SavePlaylist)
        self.assertEqual(self.player.song_list, SavePlaylist.song_list)
        self.assertTrue(callable(SavePlaylist.callback))
        SavePlaylist.callback(playlist)
        self.assertEqual(self.player.playlist, playlist)
        self.assertEqual(self.player.original_playlist_name, 'foobar')
| [
"sindrigudmundsson@gmail.com"
] | sindrigudmundsson@gmail.com |
abbe58cde2af999e63f1c1ece5b0ad34b39ee2b7 | 7e6dc15dd2455f0913db62ada1d666a64f35660f | /wxgui.py | fc5ef7cfffd7817115489afcc13a27e8134a820c | [] | no_license | johnpm-12/tunneler | 5cb602695238074a509de3647b69f69bc0d5ba80 | 5f5515f9ddfb088b78862ff625f3f6fda797910e | refs/heads/master | 2022-09-13T16:21:58.809945 | 2018-08-21T22:59:58 | 2018-08-21T22:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,722 | py | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jan 23 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MainFrame
###########################################################################
class MainFrame ( wx.Frame ):
    """Main window generated by wxFormBuilder (do not hand-edit layout code).

    Presents a 5x5 grid of checkboxes representing walls on a maze grid,
    plus a Solve button and a status label.  The bottom-left start cell
    (``check_box_start``) and top-right end cell (``check_box_end``) are
    disabled so they cannot be marked as walls.  The event handlers at the
    bottom are virtual: override them in a derived class.
    """

    def __init__( self, parent ):
        # Frame shell: fixed starting size, default styling.
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Tunneler", pos = wx.DefaultPosition, size = wx.Size( 220,370 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        bSizer1 = wx.BoxSizer( wx.VERTICAL )
        self.panel_main = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        bSizer2 = wx.BoxSizer( wx.VERTICAL )
        # 5x5 checkbox grid; boxes default to checked (= wall) except the
        # disabled start/end cells.
        gSizer1 = wx.GridSizer( 5, 5, 0, 0 )
        self.m_checkBox1 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox1.SetValue(True)
        gSizer1.Add( self.m_checkBox1, 0, wx.ALL, 5 )
        self.m_checkBox2 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox2.SetValue(True)
        gSizer1.Add( self.m_checkBox2, 0, wx.ALL, 5 )
        self.m_checkBox3 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox3.SetValue(True)
        gSizer1.Add( self.m_checkBox3, 0, wx.ALL, 5 )
        self.m_checkBox4 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox4.SetValue(True)
        gSizer1.Add( self.m_checkBox4, 0, wx.ALL, 5 )
        # End cell (top-right): disabled, never a wall.
        self.check_box_end = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.check_box_end.Enable( False )
        gSizer1.Add( self.check_box_end, 0, wx.ALL, 5 )
        self.m_checkBox6 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox6.SetValue(True)
        gSizer1.Add( self.m_checkBox6, 0, wx.ALL, 5 )
        self.m_checkBox7 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox7.SetValue(True)
        gSizer1.Add( self.m_checkBox7, 0, wx.ALL, 5 )
        self.m_checkBox8 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox8.SetValue(True)
        gSizer1.Add( self.m_checkBox8, 0, wx.ALL, 5 )
        self.m_checkBox9 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox9.SetValue(True)
        gSizer1.Add( self.m_checkBox9, 0, wx.ALL, 5 )
        self.m_checkBox10 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox10.SetValue(True)
        gSizer1.Add( self.m_checkBox10, 0, wx.ALL, 5 )
        self.m_checkBox11 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox11.SetValue(True)
        gSizer1.Add( self.m_checkBox11, 0, wx.ALL, 5 )
        self.m_checkBox12 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox12.SetValue(True)
        gSizer1.Add( self.m_checkBox12, 0, wx.ALL, 5 )
        self.m_checkBox13 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox13.SetValue(True)
        gSizer1.Add( self.m_checkBox13, 0, wx.ALL, 5 )
        self.m_checkBox14 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox14.SetValue(True)
        gSizer1.Add( self.m_checkBox14, 0, wx.ALL, 5 )
        self.m_checkBox15 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox15.SetValue(True)
        gSizer1.Add( self.m_checkBox15, 0, wx.ALL, 5 )
        self.m_checkBox16 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox16.SetValue(True)
        gSizer1.Add( self.m_checkBox16, 0, wx.ALL, 5 )
        self.m_checkBox17 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox17.SetValue(True)
        gSizer1.Add( self.m_checkBox17, 0, wx.ALL, 5 )
        self.m_checkBox18 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox18.SetValue(True)
        gSizer1.Add( self.m_checkBox18, 0, wx.ALL, 5 )
        self.m_checkBox19 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox19.SetValue(True)
        gSizer1.Add( self.m_checkBox19, 0, wx.ALL, 5 )
        self.m_checkBox20 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox20.SetValue(True)
        gSizer1.Add( self.m_checkBox20, 0, wx.ALL, 5 )
        # Start cell (bottom-left): disabled, never a wall.
        self.check_box_start = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.check_box_start.Enable( False )
        gSizer1.Add( self.check_box_start, 0, wx.ALL, 5 )
        self.m_checkBox22 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox22.SetValue(True)
        gSizer1.Add( self.m_checkBox22, 0, wx.ALL, 5 )
        self.m_checkBox23 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox23.SetValue(True)
        gSizer1.Add( self.m_checkBox23, 0, wx.ALL, 5 )
        self.m_checkBox24 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox24.SetValue(True)
        gSizer1.Add( self.m_checkBox24, 0, wx.ALL, 5 )
        self.m_checkBox25 = wx.CheckBox( self.panel_main, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_checkBox25.SetValue(True)
        gSizer1.Add( self.m_checkBox25, 0, wx.ALL, 5 )
        bSizer2.Add( gSizer1, 1, wx.EXPAND, 5 )
        # Instructions label plus the Solve button / status row.
        bSizer3 = wx.BoxSizer( wx.VERTICAL )
        self.m_staticText1 = wx.StaticText( self.panel_main, wx.ID_ANY, u"Solves fastest path from bottom left to top right. No diagonals. Checkmarks are walls which take 7 steps to destroy and 1 step to walk to.", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        bSizer3.Add( self.m_staticText1, 1, wx.ALL|wx.EXPAND, 5 )
        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
        self.button_solve = wx.Button( self.panel_main, wx.ID_ANY, u"Solve", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer4.Add( self.button_solve, 0, wx.ALL, 5 )
        self.static_text_status = wx.StaticText( self.panel_main, wx.ID_ANY, u"Ready", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.static_text_status.Wrap( -1 )
        bSizer4.Add( self.static_text_status, 1, wx.ALL|wx.EXPAND, 5 )
        bSizer3.Add( bSizer4, 0, wx.EXPAND, 5 )
        bSizer2.Add( bSizer3, 1, wx.EXPAND, 5 )
        self.panel_main.SetSizer( bSizer2 )
        self.panel_main.Layout()
        bSizer2.Fit( self.panel_main )
        bSizer1.Add( self.panel_main, 1, wx.EXPAND, 5 )
        self.SetSizer( bSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect Events
        self.Bind( wx.EVT_CLOSE, self.app_close )
        self.button_solve.Bind( wx.EVT_BUTTON, self.solve_click )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def app_close( self, event ):
        """Window-close handler stub; override in a subclass."""
        event.Skip()

    def solve_click( self, event ):
        """Solve-button handler stub; override in a subclass."""
        event.Skip()
| [
"39016062+whatsyourgithub@users.noreply.github.com"
] | 39016062+whatsyourgithub@users.noreply.github.com |
40caab98def245cb3c4d05ebd2fc31b31a1ee555 | 8ca52d458dda5b1a557828003240942ed02e19d9 | /4_6_4.py | e5089bcc2e205cbdc7aabdf73f0bfe4462b4cd77 | [
"MIT"
] | permissive | rursvd/pynumerical2 | 48c8a7707c4327bfb88d0b747344cc1d71b80b69 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | refs/heads/master | 2020-04-19T04:15:34.457065 | 2019-12-06T04:12:16 | 2019-12-06T04:12:16 | 167,957,944 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | n = 2
m = 3  # number of columns; the row count ``n`` is assigned just above
# Print every (row, column) index pair of the n-by-m grid, row major.
for i in range(n):
    for j in range(m):
        print(i,j)
| [
"noreply@github.com"
] | noreply@github.com |
16e88bccfc754c5e287e656e2f5a5f3fa71e2a5f | 99f9f92a0e6508d85feabe31e4004772491f9258 | /templates/api/hydrofunctions/hydrofunctions.py | fb6bee0146d9b7880cb34ebfb64e69de35a7ead7 | [
"MIT"
] | permissive | edgewize/flask-dashboard | ab8bf24b983b6a35b50ce0012dc6120d10c347f2 | 55143a467b92f7ca48402907844d11358985d765 | refs/heads/master | 2023-01-08T03:51:07.317555 | 2020-09-27T15:00:30 | 2020-09-27T15:00:30 | 233,483,190 | 0 | 0 | null | 2023-01-06T01:25:28 | 2020-01-13T00:56:47 | Python | UTF-8 | Python | false | false | 27,023 | py | """
hydrofunctions.hydrofunctions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the main functions used in an interactive session.
-----
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import requests
import numpy as np
import pandas as pd
import json
import pyarrow as pa
import pyarrow.parquet as pq
from pandas.tseries.frequencies import to_offset
import logging
# Change to relative import: from . import exceptions
# https://axialcorps.com/2013/08/29/5-simple-rules-for-building-great-python-packages/
from . import exceptions
import warnings
from . import typing
from . import helpers
# Route this module's log records to a local file.  Level is ERROR, so
# the logging.debug() calls sprinkled through calc_freq/extract_nwis_df
# are suppressed unless this level is lowered.
logging.basicConfig(
    filename="hydrofunctions_testing.log",
    level=logging.ERROR,
    format="%(asctime)s:%(levelname)s:%(message)s",
)
def select_data(nwis_df):
"""Create a boolean array of columns that contain data.
Args:
nwis_df:
A pandas dataframe created by ``extract_nwis_df``.
Returns:
an array of Boolean values corresponding to the columns in the
original dataframe.
Example:
>>> my_dataframe[:, select_data(my_dataframe)]
returns a dataframe with only the data columns; the qualifier columns
do not show.
"""
data_regex = r"[0-9]$"
return nwis_df.columns.str.contains(data_regex)
def calc_freq(index):
    """Estimate the sampling frequency of a datetime index.

    Accepts either a DatetimeIndex or a DataFrame (whose index is then
    used).  Five strategies are tried in order until one succeeds:

    1. the index's own ``freq`` attribute, if already set;
    2. ``pd.infer_freq`` on the index;
    3. total time span divided by the number of observations, snapped to
       15/30/60 minutes when the quotient is close to one of those;
    4. the absolute difference between two adjacent index values;
    5. give up: warn the user and return a zero-minute frequency.

    Args:
        index: a pandas DatetimeIndex, or a DataFrame with one.

    Returns:
        pd.Timedelta: the estimated period between observations
        (0 minutes if it could not be determined).

    Warns:
        exceptions.HydroUserWarning: when no frequency can be determined.
    """
    # Method 0: calc_freq() was called, but we haven't done anything yet.
    method = 0
    if isinstance(index, pd.DataFrame):
        index = index.index
    try:
        # Method 1: Try the direct approach first. Maybe freq has already been set.
        freq = index.freq
        method = 1
    except AttributeError:
        # index.freq does not exist, so let's keep trying.
        freq = None
    if freq is None:
        # Method 2: Use the built-in pd.infer_freq(). It raises ValueError
        # when it fails, so catch ValueErrors and keep trying.
        try:
            freq = to_offset(pd.infer_freq(index))
            method = 2
        except ValueError:
            pass
    if freq is None:
        # Method 3: divide the length of time by the number of observations.
        freq = (index.max() - index.min()) / len(index)
        if pd.Timedelta("13 minutes") < freq < pd.Timedelta("17 minutes"):
            freq = to_offset("15min")
        elif pd.Timedelta("27 minutes") < freq < pd.Timedelta("33 minutes"):
            freq = to_offset("30min")
        elif pd.Timedelta("55 minutes") < freq < pd.Timedelta("65 minutes"):
            freq = to_offset("60min")
        else:
            freq = None
        method = 3
    if freq is None:
        # Method 4: Subtract two adjacent values and use the difference!
        if len(index) > 3:
            freq = to_offset(abs(index[2] - index[3]))
            method = 4
            logging.debug(
                "calc_freq4:"
                + str(freq)
                + "= index[2]:"
                + str(index[3])
                + "- index [3]:"
                + str(index[2])
            )
    if freq is None:
        # Method 5: If all else fails, freq is 0 minutes!
        warnings.warn(
            "It is not possible to determine the frequency "
            "for one of the datasets in this request. "
            "This dataset will be set to a frequency of "
            "0 minutes",
            exceptions.HydroUserWarning,
        )
        freq = to_offset("0min")
        method = 5
    debug_msg = "Calc_freq method:" + str(method) + "freq:" + str(freq)
    logging.debug(debug_msg)
    return pd.Timedelta(freq)
def get_nwis(
    site,
    service="dv",
    start_date=None,
    end_date=None,
    stateCd=None,
    countyCd=None,
    bBox=None,
    parameterCd="all",
    period=None,
):
    """Request stream gauge data from the USGS NWIS.

    Args:
        site (str or list of strings):
            a valid site is '01585200' or ['01585200', '01646502']. site
            should be `None` if stateCd or countyCd are not `None`.

        service (str):
            can either be 'iv' or 'dv' for instantaneous or daily data.
                - 'dv'(default): daily values. Mean value for an entire day.
                - 'iv': instantaneous value measured at this time. Also known\
                    as 'Real-time data'. Can be measured as often as every\
                    five minutes by the USGS. 15 minutes is more typical.

        start_date (str):
           should take on the form yyyy-mm-dd

        end_date (str):
            should take on the form yyyy-mm-dd

        stateCd (str):
            a valid two-letter state postal abbreviation. Default is `None`.

        countyCd (str or list of strings):
            a valid county abbreviation. Default is `None`.

        bBox (str, list, or tuple):
            a set of coordinates that defines a bounding box.
                * Coordinates are in decimal degrees
                * Longitude values are negative (west of the prime meridian).
                * Latitude values are positive (north of the equator).
                * comma-delimited, no spaces, if provided as a string.
                * The order of the boundaries should be: "West,South,East,North"
                * Example: "-83.000000,36.500000,-81.000000,38.500000"

        parameterCd (str or list of strings):
            NWIS parameter code. Usually a five digit code. Default is 'all'.\
            A valid code can also be given as a list: ``parameterCd=['00060','00065']``
                * if value of 'all' is submitted, then NWIS will return every \
                    parameter collected at this site. (default option)
                * stage: '00065'
                * discharge: '00060'
                * not all sites collect all parameters!
                * See https://nwis.waterdata.usgs.gov/usa/nwis/pmcodes for full list

        period (str):
            NWIS period code. Default is `None`.
                * Format is "PxxD", where xx is the number of days before today.
                * Either use start_date or period, but not both.

    Returns:
        a response object. This function will always return the response,
            even if the NWIS returns a status_code that indicates a problem.

            * response.url: the url we used to request data
            * response.json: the content translated as json
            * response.status_code: the internet status code
                - '200': is a good request
                - non-200 codes will be reported as a warning.
                - '400': is a 'Bad Request'-- the parameters did not make sense
                - see <https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html> for more codes and meaning.
            * response.ok: `True` when we get a '200' status_code

    Raises:
        ConnectionError: due to connection problems like refused connection
            or DNS Error.

        SyntaxWarning: when NWIS returns a response code that is not 200.

    **Example:**

        >>> import hydrofunctions as hf
        >>> response = hf.get_nwis('01585200', 'dv', '2012-06-01', '2012-07-01')

        >>> response
        <response [200]>

        >>> response.json()
        *JSON ensues*

        >>> hf.extract_nwis_df(response)
        *a Pandas dataframe appears*

    Other Valid Ways to Make a Request::

        >>> sites = ['07180500', '03380475', '06926000'] # Request a list of sites.
        >>> service = 'iv' # Request real-time data
        >>> days = 'P10D' # Request the last 10 days.
        >>> stage = '00065' # Sites that collect discharge usually collect water depth too.
        >>> response2 = hf.get_nwis(sites, service, period=days, parameterCd=stage)

    Request Data By Location::

        >>> # Request the most recent daily data for every site in Maine
        >>> response3 = hf.get_nwis(None, 'dv', stateCd='ME')
        >>> response3
        <Response [200]>

    The specification for the USGS NWIS IV service is located here:
    http://waterservices.usgs.gov/rest/IV-Service.html
    """
    # Validate the service name ('iv'/'dv') before building the request.
    service = typing.check_NWIS_service(service)
    # 'all' means "omit parameterCd entirely" so NWIS returns every parameter.
    if parameterCd == "all":
        parameterCd = None
    header = {"Accept-encoding": "gzip", "max-age": "120"}
    values = {
        # specify version of nwis json. Based on WaterML1.1
        # json,1.1 works; json%2C works; json1.1 DOES NOT WORK
        "format": "json,1.1",
        "sites": typing.check_parameter_string(site, "site"),
        "stateCd": stateCd,
        "countyCd": typing.check_parameter_string(countyCd, "county"),
        "bBox": typing.check_NWIS_bBox(bBox),
        "parameterCd": typing.check_parameter_string(parameterCd, "parameterCd"),
        "period": period,
        "startDT": start_date,
        "endDT": end_date,
    }
    # Check that site selection parameters are exclusive!
    total = helpers.count_number_of_truthy([site, stateCd, countyCd, bBox])
    if total == 1:
        pass
    elif total > 1:
        raise ValueError(
            "Select sites using either site, stateCd, "
            "countyCd, or bBox, but not more than one."
        )
    elif total < 1:
        raise ValueError(
            "Select sites using at least one of the following "
            "arguments: site, stateCd, countyCd or bBox."
        )
    # Check that time parameters are not both set.
    # If neither is set, then NWIS will return the most recent observation.
    if start_date and period:
        raise ValueError(
            "Use either start_date or period, or neither, " "but not both."
        )
    if not (start_date or period):
        # User didn't specify time; must be requesting most recent data.
        # See issue #49.
        pass
    url = "https://waterservices.usgs.gov/nwis/"
    url = url + service + "/?"
    response = requests.get(url, params=values, headers=header)
    print("Requested data from", response.url)
    # requests will raise a 'ConnectionError' if the connection is refused
    # or if we are disconnected from the internet.

    # .get_nwis() will always return the response.

    # Higher-level code that calls get_nwis() may decide to handle or
    # report status codes that indicate something went wrong.

    # Issue warnings for bad status codes
    nwis_custom_status_codes(response)
    if not response.text:
        raise exceptions.HydroNoDataError(
            "The NWIS has returned an empty string for this request."
        )
    return response
def get_nwis_property(nwis_dict, key=None, remove_duplicates=False):
    """Returns a list containing property data from an NWIS response object.

    Args:
        nwis_dict (dict):
            the json returned in a response object as produced by ``get_nwis().json()``.

        key (str):
            a valid NWIS response property key. Default is `None`. The index is \
            returned if key is `None`. Valid keys are:
                * None
                * name - constructed name "provider:site:parameterCd:statistic"
                * siteName
                * siteCode
                * timeZoneInfo
                * geoLocation
                * siteType
                * siteProperty
                * variableCode
                * variableName
                * variableDescription
                * valueType
                * unit
                * options
                * noDataValue
        remove_duplicates (bool):
            a flag used to remove duplicate values in the returned list.

    Returns:
        a list with the data for the passed key string.

    Raises:
        HydroNoDataError
            when the request is valid, but NWIS has no data for \
            the parameters provided in the request.

        ValueError when the key cannot be read from a series.
    """
    # strip header and all metadata. ts is the 'timeSeries' element of the
    # response; it is an array of objects that contain time series data.
    ts = nwis_dict["value"]["timeSeries"]
    msg = "The NWIS reports that it does not have any data for this request."
    if len(ts) < 1:
        raise exceptions.HydroNoDataError(msg)

    # Keys are dispatched to the sub-object of the series that holds them;
    # unrecognized keys (including None) fall through to returning the index.
    sourceInfo = [
        "siteName",
        "siteCode",
        "timeZoneInfo",
        "geoLocation",
        "siteType",
        "siteProperty",
    ]
    variable = [
        "variableCode",
        "variableName",
        "variableDescription",
        "valueType",
        "unit",
        "options",
        "noDataValue",
    ]
    root = ["name"]
    vals = []
    try:
        for idx, tts in enumerate(ts):
            d = tts["values"][0]["value"]
            # skip stations with no data
            if len(d) < 1:
                continue
            if key in variable:
                v = tts["variable"][key]
            elif key in sourceInfo:
                v = tts["sourceInfo"][key]
            elif key in root:
                v = tts[key]
            else:
                v = idx  # just return index
            if remove_duplicates:
                if v not in vals:
                    vals.append(v)
            else:
                vals.append(v)
    # Narrowed from a bare ``except:`` so that programming errors and
    # KeyboardInterrupt are no longer silently converted to ValueError.
    # These are the errors a malformed/missing key can actually produce
    # when indexing into the response dict.
    except (KeyError, IndexError, TypeError) as err:
        msg = 'The selected key "{}" could not be found'.format(key)
        raise ValueError(msg) from err
    return vals
def extract_nwis_df(nwis_dict, interpolate=True):
    """Returns a Pandas dataframe and a metadata dict from the NWIS response
    object or the json dict of the response.

    Args:
        nwis_dict (obj):
            the json from a response object as returned by get_nwis().json().
            Alternatively, you may supply the response object itself.

        interpolate (bool):
            fill missing data values by linear interpolation when True
            (default); leave them as NaN when False.

    Returns:
        a pandas dataframe and a dict of site/parameter metadata.

    Raises:
        HydroNoDataError
            when the request is valid, but NWIS has no data for
            the parameters provided in the request.

        HydroUserWarning
            when one dataset is sampled at a lower frequency than
            another dataset in the same request.
    """
    if type(nwis_dict) is not dict:
        nwis_dict = nwis_dict.json()

    # strip header and all metadata.
    ts = nwis_dict["value"]["timeSeries"]
    if ts == []:
        # raise a HydroNoDataError if NWIS returns an empty set.
        #
        # Ideally, an empty set exception would be raised when the request
        # is first returned, but I do it here so that the data doesn't get
        # extracted twice.
        # TODO: raise this exception earlier??
        #
        # ** Interactive sessions should have an error raised.
        #
        # **Automated systems should catch these errors and deal with them.
        # In this case, if NWIS returns an empty set, then the request
        # needs to be reconsidered. The request was valid somehow, but
        # there is no data being collected.
        raise exceptions.HydroNoDataError(
            "The NWIS reports that it does not " "have any data for this request."
        )

    # create a list of time series;
    # set the index, set the data types, replace NaNs, sort, find the first and last
    collection = []
    starts = []
    ends = []
    freqs = []
    meta = {}
    for series in ts:
        series_name = series["name"]
        temp_name = series_name.split(":")
        agency = str(temp_name[0])
        site_id = agency + ":" + str(temp_name[1])
        parameter_cd = str(temp_name[2])
        stat = str(temp_name[3])
        siteName = series["sourceInfo"]["siteName"]
        siteLatLongSrs = series["sourceInfo"]["geoLocation"]["geogLocation"]
        noDataValues = series["variable"]["noDataValue"]
        variableDescription = series["variable"]["variableDescription"]
        unit = series["variable"]["unit"]["unitCode"]
        data = series["values"][0]["value"]
        if data == []:
            # This parameter has no data. Skip to next series.
            continue
        if len(data) == 1:
            # This parameter only contains the most recent reading.
            # See Issue #49
            pass
        qualifiers = series_name + "_qualifiers"
        DF = pd.DataFrame(data=data)
        DF.index = pd.to_datetime(DF.pop("dateTime"), utc=True)
        DF["value"] = DF["value"].astype(float)
        DF = DF.replace(to_replace=noDataValues, value=np.nan)
        DF["qualifiers"] = DF["qualifiers"].apply(lambda x: ",".join(x))
        DF.rename(
            columns={"qualifiers": qualifiers, "value": series_name}, inplace=True
        )
        DF.sort_index(inplace=True)
        local_start = DF.index.min()
        local_end = DF.index.max()
        starts.append(local_start)
        ends.append(local_end)
        local_freq = calc_freq(DF.index)
        freqs.append(local_freq)
        if not DF.index.is_unique:
            print(
                "Series index for "
                + series_name
                + " is not unique. Attempting to drop identical rows."
            )
            DF = DF.drop_duplicates(keep="first")
            if not DF.index.is_unique:
                print(
                    "Series index for "
                    + series_name
                    + " is STILL not unique. Dropping first rows with duplicated date."
                )
                DF = DF[~DF.index.duplicated(keep="first")]
        if local_freq > to_offset("0min"):
            # Reindex on a complete, regular index so gaps appear as NaNs.
            local_clean_index = pd.date_range(
                start=local_start, end=local_end, freq=local_freq, tz="UTC"
            )
            # if len(local_clean_index) != len(DF):
            # This condition happens quite frequently with missing data.
            # print(str(series_name) + "-- clean index length: "+ str(len(local_clean_index)) + " Series length: " + str(len(DF)))
            DF = DF.reindex(index=local_clean_index, copy=True)
        else:
            # The dataframe DF must contain only the most recent data.
            pass
        qual_cols = DF.columns.str.contains("_qualifiers")
        # https://stackoverflow.com/questions/21998354/pandas-wont-fillna-inplace
        # Instead, create a temporary dataframe, fillna, then copy back into original.
        DFquals = DF.loc[:, qual_cols].fillna("hf.missing")
        DF.loc[:, qual_cols] = DFquals
        if local_freq > pd.Timedelta(to_offset("0min")):
            variableFreq_str = str(to_offset(local_freq))
        else:
            variableFreq_str = str(to_offset("0min"))
        parameter_info = {
            "variableFreq": variableFreq_str,
            "variableUnit": unit,
            "variableDescription": variableDescription,
        }
        site_info = {
            "siteName": siteName,
            "siteLatLongSrs": siteLatLongSrs,
            "timeSeries": {},
        }
        # if site is not in meta keys, add it.
        if site_id not in meta:
            meta[site_id] = site_info
        # Add the variable info to the site dict.
        meta[site_id]["timeSeries"][parameter_cd] = parameter_info
        collection.append(DF)
    if len(collection) < 1:
        # It seems like this condition should not occur. The NWIS trims the
        # response and returns an empty nwis_dict['value']['timeSeries']
        # if none of the parameters requested have data.
        # If at least one of the paramters have data,
        # then the empty series will get delivered, but with no data.
        # Compare these requests:
        # empty: https://nwis.waterservices.usgs.gov/nwis/iv/?format=json&sites=01570500&startDT=2018-06-01&endDT=2018-06-01&parameterCd=00045
        # one empty, one full: https://nwis.waterservices.usgs.gov/nwis/iv/?format=json&sites=01570500&startDT=2018-06-01&endDT=2018-06-01&parameterCd=00045,00060
        raise exceptions.HydroNoDataError(
            "The NWIS does not have any data for"
            " the requested combination of sites"
            ", parameters, and dates."
        )
    startmin = min(starts)
    endmax = max(ends)
    # Remove all frequencies of zero from freqs list.
    zero = to_offset("0min")
    freqs2 = list(filter(lambda x: x > zero, freqs))
    if len(freqs2) > 0:
        # BUGFIX: take min/max over the zero-filtered list (freqs2), not the
        # raw freqs list -- a zero frequency (most-recent-only dataset) would
        # otherwise become freqmin and break pd.date_range below.
        freqmin = min(freqs2)
        freqmax = max(freqs2)
        if freqmin != freqmax:
            warnings.warn(
                "One or more datasets in this request is going to be "
                "'upsampled' to " + str(freqmin) + " because the data "
                "were collected at a lower frequency of " + str(freqmax),
                exceptions.HydroUserWarning,
            )
        clean_index = pd.date_range(start=startmin, end=endmax, freq=freqmin, tz="UTC")
        cleanDF = pd.DataFrame(index=clean_index)
        for dataset in collection:
            cleanDF = pd.concat([cleanDF, dataset], axis=1)
        # Replace lines with missing _qualifier flags with hf.upsampled
        qual_cols = cleanDF.columns.str.contains("_qualifiers")
        cleanDFquals = cleanDF.loc[:, qual_cols].fillna("hf.upsampled")
        cleanDF.loc[:, qual_cols] = cleanDFquals
        if interpolate:
            # TODO: mark interpolated values with 'hf.interp'
            # select data, then replace Nans with interpolated values.
            data_cols = cleanDF.columns.str.contains(r"[0-9]$")
            cleanDFdata = cleanDF.loc[:, data_cols].interpolate()
            cleanDF.loc[:, data_cols] = cleanDFdata
    else:
        # If datasets only contain most recent data, then
        # don't set an index or a freq. Just concat all of the datasets.
        cleanDF = pd.concat(collection, axis=1)
    cleanDF.index.name = "datetimeUTC"
    # BUGFIX: deduplicate/sort the combined frame; the original operated on
    # the per-series loop variable `DF`, whose result was discarded.
    if not cleanDF.index.is_unique:
        cleanDF = cleanDF[~cleanDF.index.duplicated(keep="first")]
    if not cleanDF.index.is_monotonic:
        cleanDF.sort_index(axis=0, inplace=True)
    return cleanDF, meta
def nwis_custom_status_codes(response):
"""
Raise custom warning messages from the NWIS when it returns a
status_code that is not 200.
Args:
response: a response object as returned by get_nwis().
Returns:
* `None`
if response.status_code == 200
* `response.status_code`
for all other status codes.
Raises:
SyntaxWarning: when a non-200 status code is returned.
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
Note:
To raise an exception, call ``response.raise_for_status()``
This will raise `requests.exceptions.HTTPError` with a helpful message
or it will return `None` for status code 200.
From: http://docs.python-requests.org/en/master/user/quickstart/#response-status-codes
NWIS status_code messages come from:
https://waterservices.usgs.gov/docs/portable_code.html
Additional status code documentation:
https://waterservices.usgs.gov/rest/IV-Service.html#Error
"""
nwis_msg = {
"200": "OK",
"400": "400 Bad Request - "
"This often occurs if the URL arguments "
"are inconsistent. For example, if you submit a request using "
"a startDT and an endDT with the period argument. "
"An accompanying error should describe why the request was "
"bad." + "\nError message from NWIS: {}".format(response.reason),
"403": "403 Access Forbidden - "
"This should only occur if for some reason the USGS has "
"blocked your Internet Protocol (IP) address from using "
"the service. This can happen if we believe that your use "
"of the service is so excessive that it is seriously "
"impacting others using the service. To get unblocked, "
"send us the URL you are using along with the IP using "
"this form. We may require changes to your query and "
"frequency of use in order to give you access to the "
"service again.",
"404": "404 Not Found - "
"Returned if and only if the query expresses a combination "
"of elements where data do not exist. For multi-site "
"queries, if any data are found, it is returned for those "
"site/parameters/date ranges where there are data.",
"503": "500 Internal Server Error - "
"If you see this, it means there is a problem with the web "
"service itself. It usually means the application server "
"is down unexpectedly. This could be caused by a host of "
"conditions, but changing your query will not solve this "
"problem. The NWIS application support team has to fix it. Most "
"of these errors are quickly detected and the support team "
"is notified if they occur.",
}
if response.status_code == 200:
return None
# All other status codes will raise a warning.
else:
# Use the status_code as a key, return None if key not in dict
msg = (
"The NWIS returned a code of {}.\n".format(response.status_code)
+ nwis_msg.get(str(response.status_code))
+ "\n\nURL used in this request: {}".format(response.url)
)
# Warnings will not beak the flow. They just print a message.
# However, they are often supressed in some applications.
warnings.warn(msg, SyntaxWarning)
return response.status_code
def read_parquet(filename):
pa_table = pq.read_table(filename)
dataframe = pa_table.to_pandas()
meta_dict = pa_table.schema.metadata
if b"hydrofunctions_meta" in meta_dict:
meta_string = meta_dict[b"hydrofunctions_meta"].decode()
meta = json.loads(meta_string, encoding="utf-8")
else:
meta = None
return dataframe, meta
def save_parquet(filename, dataframe, hf_meta):
table = pa.Table.from_pandas(dataframe, preserve_index=True)
meta_dict = table.schema.metadata
hf_string = json.dumps(hf_meta).encode()
meta_dict[b"hydrofunctions_meta"] = hf_string
table = table.replace_schema_metadata(meta_dict)
pq.write_table(table, filename)
| [
"tayloredginton@localhost.localdomain"
] | tayloredginton@localhost.localdomain |
a5f1cbaaa1e56547b3fe505e75e3e829a2a2c67c | 0ebc71b91c5135eba413ae91df67e378b9642080 | /tests/system/test_base.py | 8af851e2abbf91656d06798f074bff2755b74a28 | [
"Apache-2.0"
] | permissive | yaule/cloudwatchmetricbeat | c1193557c52567b9ea8fee0205c13f9cc3b08f53 | a1162cf17f4b3f61d0f6413518fd4a28435d7234 | refs/heads/master | 2020-04-12T15:54:39.027320 | 2018-06-21T15:25:08 | 2018-06-21T15:25:08 | 162,593,585 | 0 | 0 | NOASSERTION | 2018-12-20T14:55:22 | 2018-12-20T14:55:21 | null | UTF-8 | Python | false | false | 531 | py | from cloudwatchmetricbeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Cloudwatchmetricbeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
cloudwatchmetricbeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("cloudwatchmetricbeat is running"))
exit_code = cloudwatchmetricbeat_proc.kill_and_wait()
assert exit_code == 0
| [
"phillip@narmitech.com"
] | phillip@narmitech.com |
f51c72b4fc63f1560f1e39df2271ba5c5dd65d7a | 91bba081bc796dabb15cf93d8a4e9f15463efe7f | /Models/LSTMClassifier/main.py | 8edddcf353d2ec568272f02d97795893736a2a23 | [] | no_license | duyvuleo/scientific-paper-summarisation | 719b0a9473d66d4465b89bfeedd9861de67cb593 | 60bb9d2300b42c86c42a81b639c48fb90ef4a6c4 | refs/heads/master | 2020-07-12T15:32:04.369169 | 2017-06-12T17:46:23 | 2017-06-12T17:46:23 | 94,280,947 | 1 | 1 | null | 2017-06-14T02:42:59 | 2017-06-14T02:42:58 | null | UTF-8 | Python | false | false | 11,981 | py | from __future__ import print_function, division
import os
import dill
import pickle
import sys
import random
sys.path.insert(0, "/Users/edcollins/Documents/CS/4thYearProject/Code")
from operator import itemgetter
from Dev.DataTools import useful_functions
from Dev.DataTools.useful_functions import wait, printlist, num2onehot, BASE_DIR, PAPER_SOURCE
from Dev.Evaluation.rouge import Rouge
from Dev.DataTools.DataPreprocessing.LSTMPreprocessor import LSTMPreprocessor
from Dev.DataTools.LSTM_preproc.vocab import Vocab
from Dev.DataTools.LSTM_preproc.batch import get_batches, GeneratorWithRestart, get_feed_dicts, get_feed_dicts_old
from Dev.DataTools.LSTM_preproc.map import numpify, tokenize, notokenize, lower, deep_map, deep_seq_map, dynamic_subsample, jtr_map_to_targets
import time
import tensorflow as tf
import numpy as np
MODEL_BASE_DIR = BASE_DIR + "/Trained_Models/LSTM/"
MODEL_SAVE_PATH = BASE_DIR + "/Trained_Models/LSTM/LSTM.ckpt"
VOCAB_DATA_DIR = BASE_DIR + "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/LSTM/"
NUMBER_OF_PAPERS = len([name for name in os.listdir(PAPER_SOURCE) if name.endswith(".txt")])
LOADING_SECTION_SIZE = NUMBER_OF_PAPERS / 30
PRETRAINED = False
# The number of classes a sentence could be classified into
NUM_CLASSES = 2
# How often to display testing loss
DISPLAY_EVERY = 100
# The number of summary sentences to extract from the paper as training data
NUM_SUMMARY = 20
# The name of this model
MODEL_NAME = "LSTM"
# Directory for data
DATA_DIR = BASE_DIR + "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/AbstractNet/abstractnet_data.pkl"
# The location to save the model at
SAVE_PATH = BASE_DIR + "/Trained_Models/" + MODEL_NAME + "/" + MODEL_NAME + "_.ckpt"
# The directory to save the model in
SAVE_DIR = BASE_DIR + "/Trained_Models/" + MODEL_NAME + "/"
def dummy_data(sentences=None):
data = {"sentences": ["in this project we use a bilstm for extractive summarisation", "this not a summary sentence"],
"sentence_labels": [[1, 0], [0, 1]]} # label-length vector - [0, 1] for positive examples, [1, 0] for negative examples
return data
def get_data():
print("Loading Data...")
t = time.time()
data = useful_functions.load_pickled_object(DATA_DIR)
sents = []
labs = []
for item in data:
sentences = item["sentences"]
for sent, sec, y in sentences:
sents.append(sent)
labs.append(num2onehot(y, NUM_CLASSES))
print("Done, took ", time.time() - t, " seconds")
data = {
"sentences": sents,
"labels": labs
}
return data
def create_placeholders():
sentences = tf.placeholder(tf.int32, [None, None], name="sentences") # [batch_size, max_num_tokens]
sentences_lengths = tf.placeholder(tf.int32, [None], name="sentences_lengths") # [batch_size]
sentence_labels = tf.placeholder(tf.int32, [None, None], name="sentence_labels") # [batch_size]
placeholders = {"sentences": sentences, "sentences_lengths": sentences_lengths, "sentence_labels": sentence_labels}
return placeholders
def bilstm_reader(placeholders, vocab_size, emb_dim, drop_keep_prob=1.0):
# [batch_size, max_seq_length]
sentences = placeholders['sentences']
# [batch_size, candidate_size]
targets = tf.to_float(placeholders['sentence_labels'])
with tf.variable_scope("embeddings"):
embeddings = tf.get_variable("word_embeddings", [vocab_size, emb_dim], dtype=tf.float32)
with tf.variable_scope("embedders") as varscope:
sentences_embedded = tf.nn.embedding_lookup(embeddings, sentences)
with tf.variable_scope("bilstm_reader") as varscope1:
# states: (c_fw, h_fw), (c_bw, h_bw)
outputs, states = reader(sentences_embedded, placeholders['sentences_lengths'], emb_dim,
scope=varscope1, drop_keep_prob=drop_keep_prob)
# concat fw and bw outputs
output = tf.concat(1, [states[0][1], states[1][1]])
scores = tf.contrib.layers.linear(output, 2) # we don't strictly need this as we've only got 2 targets
# add non-linearity
scores = tf.nn.tanh(scores)
loss = tf.nn.softmax_cross_entropy_with_logits(scores, targets)
predict = tf.nn.softmax(scores)
predictions = tf.argmax(predict, axis=1)
true_vals = tf.argmax(targets, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, true_vals), tf.float32))
saver = tf.train.Saver()
return scores, loss, predict, accuracy, saver
def reader(inputs, lengths, output_size, contexts=(None, None), scope=None, drop_keep_prob=1.0):
"""Dynamic bi-LSTM reader; can be conditioned with initial state of other rnn.
Args:
inputs (tensor): The inputs into the bi-LSTM
lengths (tensor): The lengths of the sequences
output_size (int): Size of the LSTM state of the reader.
context (tensor=None, tensor=None): Tuple of initial (forward, backward) states
for the LSTM
scope (string): The TensorFlow scope for the reader.
drop_keep_drop (float=1.0): The keep probability for dropout.
Returns:
Outputs (tensor): The outputs from the bi-LSTM.
States (tensor): The cell states from the bi-LSTM.
"""
with tf.variable_scope(scope or "reader") as varscope:
cell = tf.nn.rnn_cell.LSTMCell(
output_size,
state_is_tuple=True,
initializer=tf.contrib.layers.xavier_initializer()
)
if drop_keep_prob != 1.0:
cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=drop_keep_prob)
outputs, states = tf.nn.bidirectional_dynamic_rnn(
cell,
cell,
inputs,
sequence_length=lengths,
initial_state_fw=contexts[0],
initial_state_bw=contexts[1],
dtype=tf.float32
)
# ( (outputs_fw,outputs_bw) , (output_state_fw,output_state_bw) )
# in case LSTMCell: output_state_fw = (c_fw,h_fw), and output_state_bw = (c_bw,h_bw)
# each [batch_size x max_seq_length x output_size]
return outputs, states
def train(placeholders, train_feed_dicts, test_feed_dicts, vocab, max_epochs=1000, emb_dim=64, l2=0.0, clip=None, clip_op=tf.clip_by_value, sess=None):
# create model
logits, loss, preds, accuracy, saver = bilstm_reader(placeholders, len(vocab), emb_dim)
optim = tf.train.AdamOptimizer(learning_rate=0.001)
#optim = tf.train.AdadeltaOptimizer(learning_rate=1.0)
if l2 != 0.0:
loss = loss + tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) * l2
if clip is not None:
gradients = optim.compute_gradients(loss)
if clip_op == tf.clip_by_value:
capped_gradients = [(tf.clip_by_value(grad, clip[0], clip[1]), var)
for grad, var in gradients]
elif clip_op == tf.clip_by_norm:
capped_gradients = [(tf.clip_by_norm(grad, clip), var)
for grad, var in gradients]
min_op = optim.apply_gradients(capped_gradients)
else:
min_op = optim.minimize(loss)
tf.global_variables_initializer().run(session=sess)
if not PRETRAINED:
prev_loss = 1000
steps_since_save = 0
breakout = False
for i in range(1, max_epochs + 1):
if breakout:
break
loss_all = []
avg_acc = 0
count = 0
for j, batch in enumerate(train_feed_dicts):
print("Training iteration: ", j, end="\r")
sys.stdout.flush()
_, current_loss, p, acc = sess.run([min_op, loss, preds, accuracy], feed_dict=batch)
avg_acc += acc
count += 1
loss_all.append(np.mean(current_loss))
if j % DISPLAY_EVERY == 0:
print()
avg_test_acc = 0
avg_test_loss = 0
count = 0
for k, batch in enumerate(test_feed_dicts):
print("Testing iteration: ", k, end="\r")
sys.stdout.flush()
acc, l = sess.run([accuracy, loss], feed_dict=batch)
avg_test_acc += acc
avg_test_loss += np.mean(l)
count += 1
avg_test_loss /= count
avg_test_acc /= count
print("\n\t\t**** EPOCH ", i, " ****")
print("Test Accuracy on Iteration ", j, " is: ", avg_test_acc)
print("Test Loss on Iteration ", j, " is: ", avg_test_loss)
if avg_test_loss < prev_loss:
print(">> New Lowest Loss <<")
saver.save(sess=sess, save_path=MODEL_SAVE_PATH)
print(">> Model Saved <<")
prev_loss = avg_test_loss
steps_since_save = 0
else:
steps_since_save += 1
if steps_since_save > 10:
breakout = True
break
l = np.mean(loss_all)
#print('Epoch %d :' % i, l, " Accuracy: ", avg_acc / count, "\n")
# Restore the model
saver.restore(sess, MODEL_SAVE_PATH)
return logits, loss, preds, accuracy, saver
def load_data(placeholders):
train_data = get_data()
train_data, vocab = prepare_data(train_data)
with open(VOCAB_DATA_DIR + "vocab.pkl", "wb") as f:
pickle.dump(vocab, f)
train_data = numpify(train_data, pad=0) # padding to same length and converting lists to numpy arrays
train_feed_dicts = get_feed_dicts(train_data, placeholders, batch_size=100, inst_length=len(train_data["sentences"]))
return train_feed_dicts, vocab
def prepare_data(data, vocab=None):
data_tokenized = deep_map(data, tokenize, ['sentences'])
data_lower = deep_seq_map(data_tokenized, lower, ['sentences'])
data = deep_seq_map(data_lower, lambda xs: ["<SOS>"] + xs + ["<EOS>"], ["sentences"])
if vocab is None:
vocab = Vocab()
for instance in data["sentences"]:
for token in instance:
vocab(token)
vocab.freeze()
data_ids = deep_map(data, vocab, ["sentences"])
data_ids = deep_seq_map(data_ids, lambda xs: len(xs), keys=['sentences'], fun_name='lengths', expand=True)
return data_ids, vocab
def main():
# Create the TensorFlow placeholders
placeholders = create_placeholders()
# Get the training feed dicts and define the length of the test set.
train_feed_dicts, vocab = load_data(placeholders)
num_test = int(len(train_feed_dicts) * (1 / 5))
print("Number of Feed Dicts: ", len(train_feed_dicts))
print("Number of Test Dicts: ", num_test)
# Slice the dictionary list into training and test sets
final_test_feed_dicts = train_feed_dicts[0:num_test]
test_feed_dicts = train_feed_dicts[0:50]
train_feed_dicts = train_feed_dicts[num_test:]
# Do not take up all the GPU memory, all the time.
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
logits, loss, preds, accuracy, saver = train(placeholders, train_feed_dicts, test_feed_dicts, vocab, sess=sess)
print('============')
# Test on train data - later, test on test data
avg_acc = 0
count = 0
for j, batch in enumerate(final_test_feed_dicts):
acc = sess.run(accuracy, feed_dict=batch)
print("Accuracy on test set is: ", acc)
avg_acc += acc
count += 1
print('-----')
print("Overall Average Accuracy on the Test Set Is: ", avg_acc / count)
if __name__ == "__main__":
main()
| [
"edward.g.collins.1995@gmail.com"
] | edward.g.collins.1995@gmail.com |
a678ce0647f4fcc50b8dfa7d82c5c516efdabcc1 | 53262ee5b8437d208a80de997a8de5074a92426a | /root_numpy/tmva/__init__.py | 8286f5266882d4967b02669008fcb582b4da83cb | [
"BSD-3-Clause"
] | permissive | scikit-hep/root_numpy | bb2c7280a5e9e15df91c86ff3c6d9bfe3464c754 | 049e487879d70dd93c97e323ba6b71c56d4759e8 | refs/heads/master | 2023-04-07T11:25:50.080999 | 2023-01-06T17:57:30 | 2023-01-06T17:57:30 | 3,823,872 | 87 | 25 | BSD-3-Clause | 2021-02-27T10:02:21 | 2012-03-25T11:40:22 | Python | UTF-8 | Python | false | false | 544 | py | try:
from . import _libtmvanumpy
except ImportError: # pragma: no cover
import warnings
warnings.warn(
"root_numpy.tmva requires that you install root_numpy with "
"the tmva interface enabled", ImportWarning)
__all__ = []
else:
from ._data import add_classification_events, add_regression_events
from ._evaluate import evaluate_reader, evaluate_method
__all__ = [
'add_classification_events',
'add_regression_events',
'evaluate_reader',
'evaluate_method',
]
| [
"noel.dawe@gmail.com"
] | noel.dawe@gmail.com |
dc45b52c698d138d42ece2f31f39783f33253a7d | 3f842384062b9280826e83fd973a6d048e061dc2 | /Computer-Networks-Lab/Lab8/udpserverfilewrite.py | b0d639fcea9d1ac72b3c3d32a6414f2802a1b669 | [] | no_license | AbstractXan/Thats-all-codes | 7ad65620a5bf2178a53d7c9b52740b596f774659 | 5108da180b3cebd7ea3a6d6c6bff3198fcaaa46f | refs/heads/master | 2021-04-09T13:30:21.424293 | 2019-09-16T06:21:26 | 2019-09-16T06:21:26 | 125,702,276 | 0 | 0 | null | 2018-04-18T05:57:34 | 2018-03-18T07:31:30 | C++ | UTF-8 | Python | false | false | 500 | py | import socket
import sys
print ('IP entered: ',str(sys.argv[1]),'\nFile name to writeto: ', str(sys.argv[2]))
port = 1234
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((str(sys.argv[1]),port))
print("sock is listening")
while True:
data,addr = sock.recvfrom(1024*1024)
#print(" recieved data is ", data)
binary_file = open(str(sys.argv[2]),"wb")
binary_file.write(data)
binary_file.close()
message="CS16B021 File Written Successfully"
sock.sendto(message.encode(),addr)
| [
"noreply@github.com"
] | noreply@github.com |
f959c6b7d70748d3b06d683b8f1d5192a6878ead | a0df4c9a95fd9546c78c30bb400446097aaa124a | /5620/ex15_17/dc_leapfrog_exper.py | 606cf31e95ddc3de6d1cff1f02afb07fe1bdd1da | [] | no_license | weizhanguio/INF5620 | 22ad4382c3d1b3fd0b56ee8bc2f91cfd0c2cf889 | 178cb0077d1081de8e00f25cf55e57bdc45b1bfd | refs/heads/master | 2016-09-06T04:55:05.554385 | 2013-01-07T18:51:32 | 2013-01-07T18:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | from numpy import *
from matplotlib.pyplot import *
import nose.tools as nt
def solver(I,a,b,T,dt,theta):
dt=float(dt)
N=int(round(T/dt))
T = N*dt
u=zeros(N+1)
t=linspace(0,T,N+1)
u[0]=I
u[1]=dt*(-a(t[0])*I+b(t[0]))+I
for n in range(1,N):
u[n+1]=(u[n-1]+2*dt*( (theta-1)*a(t[n])*u[n]+(1-theta)*b(t[n])+theta*b(t[n+1]) )) /(1+2*dt*theta*a(t[n+1]))
#u[n+1]=u[n-1]+2*dt*( (theta-1)*a(t[n-1])*u[n-1]-theta*a(t[n])*u[n] +(1-theta)*b(t[n-1])+theta*b(t[n]))
return u,t
def test_specialcase(delta_t): # a=1 b=1
def exact_solution(t):
return 1-exp(-t)
def a(t):
return 1.0
def b(t):
return 1.0
theta=0;I=0;dt=delta_t
T=4
N=int(T/dt)
u, t = solver(I=I, a=a, b=b, T=N*dt, dt=dt, theta=theta)
u_e=exact_solution(t)
return u,u_e,t
delta_t=[0.1,0.05,0.03,0.01]
for i in range(len(delta_t)):
u,u_e,t=test_specialcase(delta_t[i])
figure()
plot(t,u,'r')
plot(t,u_e)
legend(['numerical','exact'],loc=4)
xlabel('t')
ylabel('u')
title('dt=%g' % delta_t[i])
savefig('exper_%s.jpg' % delta_t[i])
show()
| [
"weizhang@student.matnat.uio.no"
] | weizhang@student.matnat.uio.no |
ba38f6e77064e01ad0bb128110e9dfe5425b2ef8 | 9a5368dbbcaa9aa81e9aacf94ab4d17f9cc78eae | /dbcsv/csv_seed.py | fbea298bab7d02624d4c21a85c3517176ecec18f | [] | no_license | medtech-proj/alvin | 38a7c98ca5d15e09772c144ad8050c035203716d | 3154e717d0aa7348bd613bbae39e6200e5c792bc | refs/heads/master | 2021-01-23T02:16:14.255843 | 2017-05-17T03:16:01 | 2017-05-17T03:16:01 | 85,980,840 | 0 | 2 | null | 2017-05-17T03:16:02 | 2017-03-23T17:54:14 | CSS | UTF-8 | Python | false | false | 1,254 | py | import psycopg2
from psycopg2.extras import RealDictCursor
import csv
database = 'test'
connection = psycopg2.connect(dbname=database)
#create cursor factory
connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
with open("facilities.csv") as facilities_data:
f = csv.reader(facilities_data)
for row in f:
# print(row)
cursor.execute('''
INSERT INTO
facilities (name, address, image, rating, reviews)
VALUES
(%s,%s,%s,%s,%s);
''', row)
with open("procedure_types.csv") as procedure_types_data:
f = csv.reader(procedure_types_data)
for row in f:
# print(row)
cursor.execute('''
INSERT INTO
procedure_types (cpt_code, description)
VALUES
(%s,%s);
''', row)
with open("procedures.csv") as procedures_data:
f = csv.reader(procedures_data)
for row in f:
# print(row)
cursor.execute('''
INSERT INTO
procedures (id_procedure_types, id_facilities, tot_price)
VALUES
(%s,%s,%s);
''', row)
with open("geolocations.csv") as geolocations_data:
f = csv.reader(geolocations_data)
for row in f:
cursor.execute('''
INSERT INTO
geolocations (id_facilities, latitude, longitude)
VALUES
(%s,%s,%s);
''', row)
connection.close()
| [
"lgibson212@users.noreply.github.com"
] | lgibson212@users.noreply.github.com |
dfe3cae5f71902afac7daead48a6342cb66ce9a0 | 0655da8f317b889c973ffcdd401438b9354677ed | /Python Standard Library - Usage/Random.py | 424f88eb5cddee1c73a60dbda796cde9954ed3ea | [] | no_license | SniperBuddy101/learno | e8efd727a0a45991cdfdac76e445291460d1dd21 | 5944e7c94bbaf1b6d8c5ddc1ade42825a33a76d7 | refs/heads/master | 2021-05-18T02:09:08.821873 | 2020-04-14T20:28:28 | 2020-04-14T20:28:28 | 251,059,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # Randomizing :)
import random
print(f"A random floating point number: {random.random()}")
print(f"A random integer: {random.randint(2, 56)}")
print(f"A random item from a list {random.choice([2, 3, 4, 'Yes'])}")
print(f"2 random items from a list {random.choices([5, 6, 7, 8, 9], k=2)}")
print(f"Joining items in an iterable: {'.'.join('Shreyash')}")
| [
"karnik.shreyash@gmail.com"
] | karnik.shreyash@gmail.com |
258edd999e0db3cc2ad5351c7e9e8cdc1c1ed982 | 5a6cd9da73ad197e6ca29cce0436640797991096 | /bench/app/benchmark/domain/events.py | c8fc479dc237e953e01e35e4401de6e5233af4a9 | [] | no_license | in-void/flask-ddd-ca | 1c944f040f7001318ac2e73a3bfb8b36271424e6 | 201246cdd003c08b89d8bee08790db2afd9f0b72 | refs/heads/master | 2023-05-27T19:39:06.290935 | 2019-08-27T15:51:08 | 2019-08-27T15:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # -*- coding: utf-8 -*-
from bench.app.core.domain.events_dispatcher import DomainEvent
class ComparativeBenchmarkFinished(DomainEvent):
name: str = 'benchmark.comparative_benchmark_finished'
def __init__(self, benchmark_id: str) -> None:
super().__init__()
self.benchmark_id = benchmark_id
| [
"barnard.kano@gmail.com"
] | barnard.kano@gmail.com |
50d49eda3d0f6a9bf8a2664a0489184a0a528b18 | efcd21234f3291e8fc561f49a7c88fc57a63e952 | /tartiflette/execution/nodes/variable_definition.py | d39c40f25262e260c7aa9a9a91e664a5891a9398 | [
"MIT"
] | permissive | tartiflette/tartiflette | 146214a43847d2f423bf74594643c1fdefc746f1 | 421c1e937f553d6a5bf2f30154022c0d77053cfb | refs/heads/master | 2023-09-01T02:40:05.974025 | 2022-01-20T14:55:31 | 2022-01-20T14:55:31 | 119,035,565 | 586 | 39 | MIT | 2023-09-11T07:49:27 | 2018-01-26T09:56:10 | Python | UTF-8 | Python | false | false | 2,799 | py | from functools import partial
from typing import Any, Callable
from tartiflette.coercers.inputs.compute import get_input_coercer
from tartiflette.coercers.literals.compute import get_literal_coercer
from tartiflette.coercers.variables import variable_coercer
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.utils.type_from_ast import schema_type_from_ast
__all__ = ("variable_definition_node_to_executable",)
class ExecutableVariableDefinition:
"""
Node representing a GraphQL executable variable definition.
"""
__slots__ = (
"name",
"graphql_type",
"default_value",
"coercer",
"definition",
)
def __init__(
self,
name: str,
graphql_type: "GraphQLType",
default_value: Any,
coercer: Callable,
definition: "VariableDefinitionNode",
) -> None:
"""
:param name: the name of the variable
:param graphql_type: the GraphQLType expected for the variable value
:param default_value: the default value of the variable
:param coercer: callable to use when coercing the user input value
:param definition: the variable definition AST node
:type name: str
:type graphql_type: GraphQLType
:type default_value: Any
:type coercer: Callable
:type definition: VariableDefinitionNode
"""
self.name = name
self.graphql_type = graphql_type
self.default_value = default_value
self.coercer = partial(coercer, self)
self.definition = definition
def variable_definition_node_to_executable(
schema: "GraphQLSchema", variable_definition_node: "VariableDefinitionNode"
) -> "ExecutableVariableDefinition":
"""
Converts a variable definition AST node into an executable variable
definition.
:param schema: the GraphQLSchema instance linked to the engine
:param variable_definition_node: the variable definition AST node to treat
:type schema: GraphQLSchema
:type variable_definition_node: VariableDefinitionNode
:return: an executable variable definition
:rtype: ExecutableVariableDefinition
"""
graphql_type = schema_type_from_ast(schema, variable_definition_node.type)
return ExecutableVariableDefinition(
name=variable_definition_node.variable.name.value,
graphql_type=graphql_type,
default_value=variable_definition_node.default_value
or UNDEFINED_VALUE,
coercer=partial(
variable_coercer,
input_coercer=partial(
get_input_coercer(graphql_type), variable_definition_node
),
literal_coercer=get_literal_coercer(graphql_type),
),
definition=variable_definition_node,
)
| [
"raulic.maximilien@gmail.com"
] | raulic.maximilien@gmail.com |
073bd0379e046cf8083fef89f310e2b630edf7cf | 7aab493a5289b92f141e568d9029131f6e044638 | /Lesson 1 - Iterations/Python/BinaryGap.py | 8a1a9a310d1e290979a01c7d0a58b146b83fa993 | [] | no_license | domheb/codility-solutions | fad664737cb154bed69177e9e36dec6876c87707 | 5017c401948e41f96effc5e484402b3e4d162265 | refs/heads/master | 2021-08-08T21:10:27.119758 | 2020-04-30T21:44:00 | 2020-04-30T21:44:00 | 168,657,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | """
Written using Python 3.6
Compiled on Linux Manjaro 18
"""
def solution(N):
#Create useful variables
bin_number = []
#Check additional conditions:
if not isinstance(N, int): #N is not an intiger
return(-1)
if N == 1 or N == 0: #N is equal to 0 or 1 so no gap
return(0)
else:
bin_number.append(1) #bin_number always starts with 1, then
reverse_bin_number = [] #all the next 0 and 1
while (N != 1): #have to be reversed first
temp = N % 2
reverse_bin_number.append(temp)
N = N // 2
#Perform reversing
length = len(reverse_bin_number)
for i in range(length-1,-1,-1):
bin_number.append(reverse_bin_number[i]) #here they are reversed
length += 1 #bin_number has 1 more element -> the first one
#Perform operation
binary_gap = 0
binary_gap_max = 0
is_counting = 0
for i in range(0,length):
#1) start counting
if is_counting == 0 and bin_number[i] == 1:
is_counting = 1
continue
#2) continue counting
if is_counting == 1 and bin_number[i] == 0:
binary_gap +=1
continue
#3) stop counting
if is_counting == 1 and bin_number[i] == 1:
if binary_gap > binary_gap_max: #maximize binary_gap_max if possible
binary_gap_max = binary_gap
binary_gap = 0
continue
#Finish the algorithm
return(binary_gap_max)
| [
"noreply@github.com"
] | noreply@github.com |
2de246307e5de016353b9bef9801df50e562fcac | 7ce2cc0df74e24d1bb7c860c689407affe2db856 | /day27/breast_cancer.py | 75e061c4f99f33c4920e3347a8a71fee9910a8da | [] | no_license | puneetb97/Python_course | 6cf1ef98b208959324e6b5bd6316b1a0818a9501 | 238011ccf5443b90948d8877507982dcf78f401c | refs/heads/master | 2020-04-27T00:40:50.975729 | 2019-06-06T09:02:46 | 2019-06-06T09:02:46 | 173,941,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 1 15:46:25 2019
@author: Puneet
"""
import pandas as pd
import numpy as np
#extracting data from csv file
df = pd.read_csv("breast_cancer.csv")
df.isnull().any()
#dealing with null values
df["G"] = df["G"].fillna(method = "ffill")
df.isnull().any()
features = df.iloc[:,1:-1].values
labels = df.iloc[:,-1].values
#spliting data into training and testing data
from sklearn.model_selection import train_test_split
features_train,features_test,labels_train,labels_test = train_test_split(features,labels, test_size=0.1, random_state=0)
#performing svm classification model
from sklearn.svm import SVC
classifier = SVC(kernel = "poly", random_state=0)
classifier.fit(features_train, labels_train)
labels_pred = classifier.predict(features_test).tolist()
result = []
for i in labels_pred:
if i==2:
result.append("non_cancerous")
else:
result.append("cancerous")
result = np.array(result)
print(result)
#calculating score of created model
print("score of the SVC model with train data:",classifier.score(features_train,labels_train))
print("score of the SVC model with test data:",classifier.score(features_test,labels_test))
#prediction for a sample data
x = [6,2,5,3,2,7,9,2,4]
x = np.array(x,ndmin=2)
pred = classifier.predict(x)
if pred==4:
print("malignant tumor")
else:
print("Benign tumor") | [
"puneetb006@gmial.com"
] | puneetb006@gmial.com |
caf42ea5c4f339c679802201ca4b249411d33da5 | 69bb82b7df793da03d2599eb7c3b81e56824234f | /tableau.py | 0c7d9b7c8c67f7321204fe6466830e0db4faace3 | [] | no_license | scottfits/tableau-scraper | c093da0f647755f9895cafab3727a189ff3fc5bb | bf416e53029f649fdec353b1229c3b0262bcb9ff | refs/heads/master | 2022-12-24T10:05:15.454925 | 2020-09-24T20:53:10 | 2020-09-24T20:53:10 | 298,392,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import requests
from bs4 import BeautifulSoup
import json
import re
url = "https://tableau.ons.org.br/vizql/w/COVID-19Deaths/v/Deaths/bootstrapSession/sessions/"
url = "https://tableau.azdhs.gov/views/COVID-19Deaths/Deaths"
r = requests.get(
url,
params= {
":embed":"y",
":showAppBanner":"false",
":showShareOptions":"true",
":display_count":"no",
"showVizHome": "no"
}
)
soup = BeautifulSoup(r.text, "html.parser")
tableauData = json.loads(soup.find("textarea",{"id": "tsConfigContainer"}).text)
print(tableauData["vizql_root"])
print(tableauData["sessionid"])
print(tableauData["sheetId"])
dataUrl = f'https://tableau.azdhs.gov{tableauData["vizql_root"]}/bootstrapSession/sessions/{tableauData["sessionid"]}'
print(dataUrl)
r = requests.post(dataUrl, data= {
"sheet_id": tableauData["sheetId"],
})
print(r)
dataReg = re.search('\d+;({.*})\d+;({.*})', r.text, re.MULTILINE)
info = json.loads(dataReg.group(1))
data = json.loads(dataReg.group(2))
print(data)
print(data["secondaryInfo"]["presModelMap"]["dataDictionary"]["presModelHolder"]["genDataDictionaryPresModel"]["dataSegments"]["0"]["dataColumns"])
| [
"scott@airgara.ge"
] | scott@airgara.ge |
458dc8884ad6649d49359f7b856a3c5baf07039e | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /wicd/rev519-537/right-branch-537/wicd/wicd-client.py | 96cef4b2cc9a14ce6f3fefe19abd026d2c623630 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,485 | py | """ wicd - wireless connection daemon frontend implementation
This module implements a usermode frontend for wicd. It updates connection
information, provides an (optional) tray icon, and allows for launching of
the wicd GUI and Wired Profile Chooser.
class TrayIcon() -- Parent class of TrayIconGUI and IconConnectionInfo.
class TrayConnectionInfo() -- Child class of TrayIcon which provides
and updates connection status.
class TrayIconGUI() -- Child class of TrayIcon which implements the tray.
icon itself. Parent class of StatusTrayIconGUI and EggTrayIconGUI.
class StatusTrayIconGUI() -- Implements the tray icon using a
gtk.StatusIcon.
class EggTrayIconGUI() -- Implements the tray icon using egg.trayicon.
def usage() -- Prints usage information.
def main() -- Runs the wicd frontend main loop.
"""
import sys
import gtk
import gobject
import getopt
import os
import pango
import time
from dbus import DBusException
from dbus import version as dbus_version
from wicd import wpath
from wicd import misc
from wicd import gui
from wicd.dbusmanager import DBusManager
if not (gtk.gtk_version[0] >= 2 and gtk.gtk_version[1] >= 10):
try:
import egg.trayicon
USE_EGG = True
except ImportError:
print 'Unable to load wicd.py: Missing egg.trayicon module.'
sys.exit(1)
else:
USE_EGG = False
if not dbus_version or (dbus_version < (0, 80, 0)):
import dbus.glib
else:
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
misc.RenameProcess("wicd-client")
if __name__ == '__main__':
    wpath.chdir(__file__)
# Module-level handles, filled in by setup_dbus(); None until then.
dbus_manager = None
daemon = None
wireless = None
# NOTE(review): `wired` is assigned twice; the second assignment looks like a
# leftover (possibly meant to be `bus = None`, cf. `global bus` in
# setup_dbus) -- confirm before removing.
wired = None
wired = None
language = misc.get_language_list_tray()
class NetworkMenuItem(gtk.ImageMenuItem):
    """ Menu entry for one network; the active network gets a bold label. """
    def __init__(self, lbl, is_active=False):
        gtk.ImageMenuItem.__init__(self)
        label = gtk.Label(lbl)
        self.label = label
        if is_active:
            # Bold the label (first 50 bytes) to mark the network we are
            # currently connected to.
            attrs = pango.AttrList()
            attrs.insert(pango.AttrWeight(pango.WEIGHT_BOLD, 0, 50))
            label.set_attributes(attrs)
        label.set_justify(gtk.JUSTIFY_LEFT)
        label.set_alignment(0, 0)
        self.add(label)
        label.show()
class TrayIcon:
    """ Base Tray Icon class.
    Base Class for implementing a tray icon to display network status.
    """
    def __init__(self, use_tray, animate):
        # Pick the tray backend: egg.trayicon on GTK < 2.10, gtk.StatusIcon
        # otherwise, then attach the connection-status updater to it.
        if USE_EGG:
            self.tr = self.EggTrayIconGUI(use_tray)
        else:
            self.tr = self.StatusTrayIconGUI(use_tray)
        self.icon_info = self.TrayConnectionInfo(self.tr, use_tray, animate)
    class TrayConnectionInfo:
        """ Class for updating the tray icon status. """
        def __init__(self, tr, use_tray=True, animate=True):
            """ Initialize variables needed for the icon status methods. """
            self.last_strength = -2
            self.still_wired = False
            self.network = ''
            self.tried_reconnect = False
            self.connection_lost_counter = 0
            self.tr = tr  # TrayIconGUI instance used for icon/tooltip output
            self.use_tray = use_tray
            # Byte counters and adaptive gain thresholds used by the
            # network-activity animation (see get_bandwidth_state).
            self.last_sndbytes = -1
            self.last_rcvbytes = -1
            self.max_snd_gain = 10000
            self.max_rcv_gain = 10000
            self.animate = animate
            self.update_tray_icon()
        def wired_profile_chooser(self):
            """ Launch the wired profile chooser. """
            gui.WiredProfileChooser()
            # Clear the daemon-side flag so the chooser is not relaunched.
            daemon.SetNeedWiredProfileChooser(False)
        def set_wired_state(self, info):
            """ Sets the icon info for a wired state.

            info[0] holds the wired interface's IP address.
            """
            wired_ip = info[0]
            self.tr.set_from_file(wpath.images + "wired.png")
            self.tr.set_tooltip(language['connected_to_wired'].replace('$A',
                                                                      wired_ip))
        def set_wireless_state(self, info):
            """ Sets the icon info for a wireless state.

            info = [ip, essid, signal strength, network id].
            """
            lock = ''
            wireless_ip = info[0]
            self.network = info[1]
            strength = info[2]
            cur_net_id = int(info[3])
            sig_string = daemon.FormatSignalForPrinting(str(strength))
            # Use the padlock icon variant for encrypted networks.
            if wireless.GetWirelessProperty(cur_net_id, "encryption"):
                lock = "-lock"
            self.tr.set_tooltip(language['connected_to_wireless']
                                .replace('$A', self.network)
                                .replace('$B', sig_string)
                                .replace('$C', str(wireless_ip)))
            self.set_signal_image(int(strength), lock)
        def set_connecting_state(self, info):
            """ Sets the icon info for a connecting state.

            info[0] == 'wired' (alone) means a wired connection; otherwise
            info[1] holds the essid being connected to.
            """
            if info[0] == 'wired' and len(info) == 1:
                cur_network = language['wired']
            else:
                cur_network = info[1]
            self.tr.set_tooltip(language['connecting'] + " to " +
                                cur_network + "...")
            self.tr.set_from_file(wpath.images + "no-signal.png")
        def set_not_connected_state(self, info):
            """ Set the icon info for the not connected state. """
            self.tr.set_from_file(wpath.images + "no-signal.png")
            # Mention the kill switch in the tooltip when it is what is
            # keeping the radio down.
            if wireless.GetKillSwitchEnabled():
                status = (language['not_connected'] + " (" +
                          language['killswitch_enabled'] + ")")
            else:
                status = language['not_connected']
            self.tr.set_tooltip(status)
        def update_tray_icon(self, state=None, info=None):
            """ Updates the tray icon and current connection status.

            Used both directly and as the handler for the daemon's
            StatusChanged signal; queries the daemon when no state/info
            pair is passed in.  Returns False when nothing was updated.
            """
            if not self.use_tray: return False
            if not state or not info:
                [state, info] = daemon.GetConnectionStatus()
            if state == misc.WIRED:
                self.set_wired_state(info)
            elif state == misc.WIRELESS:
                self.set_wireless_state(info)
            elif state == misc.CONNECTING:
                self.set_connecting_state(info)
            elif state in (misc.SUSPENDED, misc.NOT_CONNECTED):
                self.set_not_connected_state(info)
            else:
                print 'Invalid state returned!!!'
                return False
            return True
def set_signal_image(self, wireless_signal, lock):
""" Sets the tray icon image for an active wireless connection. """
if self.animate:
prefix = self.get_bandwidth_state()
else:
prefix = 'idle-'
if daemon.GetSignalDisplayType() == 0:
if wireless_signal > 75:
signal_img = "high-signal"
elif wireless_signal > 50:
signal_img = "good-signal"
elif wireless_signal > 25:
signal_img = "low-signal"
else:
signal_img = "bad-signal"
else:
if wireless_signal >= -60:
signal_img = "high-signal"
elif wireless_signal >= -70:
signal_img = "good-signal"
elif wireless_signal >= -80:
signal_img = "low-signal"
else:
signal_img = "bad-signal"
img_file = ''.join([wpath.images, prefix, signal_img, lock, ".png"])
self.tr.set_from_file(img_file)
        def get_bandwidth_state(self):
            """ Determines what network activity state we are in.

            Reads the wireless interface's rx/tx byte counters from sysfs
            and returns an icon-name prefix: 'both-', 'transmitting-',
            'receiving-' or 'idle-'.
            """
            transmitting = False
            receiving = False
            dev_dir = '/sys/class/net/'
            wiface = daemon.GetWirelessInterface()
            for fldr in os.listdir(dev_dir):
                if fldr == wiface:
                    dev_dir = dev_dir + fldr + "/statistics/"
                    break
            try:
                rcvbytes = int(open(dev_dir + "rx_bytes", "r").read().strip())
                sndbytes = int(open(dev_dir + "tx_bytes", "r").read().strip())
            except IOError:
                sndbytes = None
                rcvbytes = None
            if not rcvbytes or not sndbytes:
                return 'idle-'
            # Check each direction separately, keeping the adaptive gain
            # state on the instance between calls.
            activity = self.is_network_active(rcvbytes, self.max_rcv_gain,
                                              self.last_rcvbytes)
            receiving = activity[0]
            self.max_rcv_gain = activity[1]
            self.last_rcvbytes = activity[2]
            activity = self.is_network_active(sndbytes, self.max_snd_gain,
                                              self.last_sndbytes)
            transmitting = activity[0]
            self.max_snd_gain = activity[1]
            self.last_sndbytes = activity[2]
            if transmitting and receiving:
                return 'both-'
            elif transmitting:
                return 'transmitting-'
            elif receiving:
                return 'receiving-'
            else:
                return 'idle-'
def is_network_active(self, bytes, max_gain, last_bytes):
""" Determines if a network is active.
Determines if a network is active by looking at the
number of bytes sent since the previous check. This method
is generic, and can be used to determine activity in both
the sending and receiving directions.
Returns:
A tuple containing three elements:
1) a boolean specifying if the network is active.
2) an int specifying the maximum gain the network has had.
3) an int specifying the last recorded number of bytes sent.
"""
active = False
if last_bytes == -1:
last_bytes = bytes
elif bytes > (last_bytes + float(max_gain / 20.0)):
last_bytes = bytes
active = True
gain = bytes - last_bytes
if gain > max_gain:
max_gain = gain
return (active, max_gain, last_bytes)
    class TrayIconGUI(object):
        """ Base Tray Icon UI class.
        Implements methods and variables used by both egg/StatusIcon
        tray icons.
        """
        def __init__(self, use_tray):
            # Right-click menu layout; the "Connect" submenu is filled in
            # dynamically by populate_network_menu().
            menu = """
                    <ui>
                        <menubar name="Menubar">
                            <menu action="Menu">
                                <menu action="Connect">
                                </menu>
                                <separator/>
                                <menuitem action="About"/>
                                <menuitem action="Quit"/>
                            </menu>
                        </menubar>
                    </ui>
            """
            actions = [
                    ('Menu', None, 'Menu'),
                    ('Connect', gtk.STOCK_CONNECT, "Connect"),
                    ('About', gtk.STOCK_ABOUT, '_About...', None,
                     'About wicd-tray-icon', self.on_about),
                    ('Quit',gtk.STOCK_QUIT,'_Quit',None,'Quit wicd-tray-icon',
                     self.on_quit),
                    ]
            actg = gtk.ActionGroup('Actions')
            actg.add_actions(actions)
            self.manager = gtk.UIManager()
            self.manager.insert_action_group(actg, 0)
            self.manager.add_ui_from_string(menu)
            self.menu = (self.manager.get_widget('/Menubar/Menu/About').
                         props.parent)
            self.gui_win = None
            self.current_icon_path = None
            self.use_tray = use_tray
            self._is_scanning = False
            # Kick off a scan when the network submenu is hovered.
            net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
            net_menuitem.connect("activate", self.on_net_menu_activate)
        def tray_scan_started(self):
            """ Callback for when a wireless scan is started. """
            self._is_scanning = True
            # Show the "scanning..." placeholder until results arrive.
            self.init_network_menu()
        def tray_scan_ended(self):
            """ Callback for when a wireless scan finishes. """
            self._is_scanning = False
            # Replace the placeholder with the freshly scanned network list.
            self.populate_network_menu()
        def on_activate(self, data=None):
            """ Opens the wicd GUI (tray icon left-click handler). """
            self.toggle_wicd_gui()
        def on_quit(self, widget=None):
            """ Closes the tray icon by exiting the process. """
            sys.exit(0)
        def on_about(self, data=None):
            """ Opens the About Dialog. """
            dialog = gtk.AboutDialog()
            dialog.set_name('Wicd Tray Icon')
            dialog.set_version('2.0')
            dialog.set_comments('An icon that shows your network connectivity')
            dialog.set_website('http://wicd.net')
            # run() blocks until the dialog is dismissed.
            dialog.run()
            dialog.destroy()
        def _add_item_to_menu(self, net_menu, lbl, type_, n_id, is_connecting,
                              is_active):
            """ Add an item to the network list submenu.

            type_ is "__wired__" for the wired entry, anything else is
            treated as wireless; n_id is the wireless network id.  The
            entry is desensitized while a connection is in progress.
            """
            def network_selected(widget, net_type, net_id):
                """ Callback method for a menu item selection. """
                if net_type == "__wired__":
                    wired.ConnectWired()
                else:
                    wireless.ConnectWireless(net_id)
            item = NetworkMenuItem(lbl, is_active)
            image = gtk.Image()
            if type_ == "__wired__":
                image.set_from_icon_name("network-wired", 2)
            else:
                # Scale the strength icon down to menu size (20x20 px).
                pb = gtk.gdk.pixbuf_new_from_file_at_size(self._get_img(n_id),
                                                          20, 20)
                image.set_from_pixbuf(pb)
                del pb
            item.set_image(image)
            del image
            item.connect("activate", network_selected, type_, n_id)
            net_menu.append(item)
            item.show()
            if is_connecting:
                item.set_sensitive(False)
            del item
def _get_img(self, net_id):
""" Determines which image to use for the wireless entries. """
def fix_strength(val, default):
""" Assigns given strength to a default value if needed. """
return val is not None and int(val) or default
def get_prop(prop):
return wireless.GetWirelessProperty(net_id, prop)
strength = fix_strength(get_prop("quality"), -1)
dbm_strength = fix_strength(get_prop('strength'), -100)
if daemon.GetWPADriver() == 'ralink legacy' or \
daemon.GetSignalDisplayType() == 1:
if dbm_strength >= -60:
signal_img = 'signal-100.png'
elif dbm_strength >= -70:
signal_img = 'signal-75.png'
elif dbm_strength >= -80:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
else:
if strength > 75:
signal_img = 'signal-100.png'
elif strength > 50:
signal_img = 'signal-75.png'
elif strength > 25:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
return wpath.images + signal_img
        def on_net_menu_activate(self, item):
            """ Trigger a background scan to populate the network menu.

            Called when the network submenu is moused over.  We sleep
            briefly, clear pending gtk events, and if we're still being
            moused over we trigger a scan.  This is to prevent scans when
            the user is just mousing past the menu to select another
            menu item.
            """
            def dummy(x=None): pass
            if self._is_scanning:
                return True
            self.init_network_menu()
            # Debounce: give the user 400 ms to mouse away before scanning.
            time.sleep(.4)
            while gtk.events_pending():
                gtk.main_iteration()
            if item.state != gtk.STATE_PRELIGHT:
                return True
            # Fire-and-forget async D-Bus call; results arrive via signals.
            wireless.Scan(reply_handler=dummy, error_handler=dummy)
        def populate_network_menu(self, data=None):
            """ Populates the network list submenu. """
            def get_prop(net_id, prop):
                return wireless.GetWirelessProperty(net_id, prop)
            net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
            submenu = net_menuitem.get_submenu()
            self._clear_menu(submenu)
            is_connecting = daemon.CheckIfConnecting()
            num_networks = wireless.GetNumberOfNetworks()
            [status, info] = daemon.GetConnectionStatus()
            # Wired entry first (when plugged in or configured to always
            # show), separated from the wireless list.
            if daemon.GetAlwaysShowWiredInterface() or \
               wired.CheckPluggedIn():
                if status == misc.WIRED:
                    is_active = True
                else:
                    is_active = False
                self._add_item_to_menu(submenu, "Wired Network", "__wired__", 0,
                                       is_connecting, is_active)
                sep = gtk.SeparatorMenuItem()
                submenu.append(sep)
                sep.show()
            if num_networks > 0:
                for x in range(0, num_networks):
                    essid = get_prop(x, "essid")
                    # The currently-connected network gets a bold entry.
                    if status == misc.WIRELESS and info[1] == essid:
                        is_active = True
                    else:
                        is_active = False
                    self._add_item_to_menu(submenu, essid, "wifi", x,
                                           is_connecting, is_active)
            else:
                no_nets_item = gtk.MenuItem(language['no_wireless_networks_found'])
                no_nets_item.set_sensitive(False)
                no_nets_item.show()
                submenu.append(no_nets_item)
            net_menuitem.show()
        def init_network_menu(self):
            """ Set the right-click menu to the scanning state. """
            net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
            submenu = net_menuitem.get_submenu()
            self._clear_menu(submenu)
            # Insensitive placeholder shown until scan results arrive.
            loading_item = gtk.MenuItem(language['scanning'] + "...")
            loading_item.set_sensitive(False)
            loading_item.show()
            submenu.append(loading_item)
            net_menuitem.show()
        def _clear_menu(self, menu):
            """ Clear the right-click menu. """
            # get_children() returns a fresh list, so removing while
            # iterating is safe here.
            for item in menu.get_children():
                menu.remove(item)
                item.destroy()
        def toggle_wicd_gui(self):
            """ Toggles the wicd GUI. """
            if not self.gui_win:
                # First use: build the GUI and subscribe it to the daemon's
                # scan/status D-Bus signals.
                self.gui_win = gui.appGui(dbus_manager)
                bus = dbus_manager.get_bus()
                bus.add_signal_receiver(self.gui_win.dbus_scan_finished,
                                        'SendEndScanSignal',
                                        'org.wicd.daemon.wireless')
                bus.add_signal_receiver(self.gui_win.dbus_scan_started,
                                        'SendStartScanSignal',
                                        'org.wicd.daemon.wireless')
                bus.add_signal_receiver(self.gui_win.update_connect_buttons,
                                        'StatusChanged', 'org.wicd.daemon')
            elif not self.gui_win.is_visible:
                self.gui_win.show_win()
            else:
                self.gui_win.exit()
            return True
class EggTrayIconGUI(TrayIconGUI):
""" Tray Icon for gtk < 2.10.
Uses the deprecated egg.trayicon module to implement the tray icon.
Since it relies on a deprecated module, this class is only used
for machines running versions of GTK < 2.10.
"""
def __init__(self, use_tray=True):
"""Initializes the tray icon"""
TrayIcon.TrayIconGUI.__init__(self, use_tray)
self.use_tray = use_tray
if not use_tray:
self.toggle_wicd_gui()
return
self.tooltip = gtk.Tooltips()
self.eb = gtk.EventBox()
self.tray = egg.trayicon.TrayIcon("WicdTrayIcon")
self.pic = gtk.Image()
self.tooltip.set_tip(self.eb, "Initializing wicd...")
self.pic.set_from_file("images/no-signal.png")
self.eb.connect('button_press_event', self.tray_clicked)
self.eb.add(self.pic)
self.tray.add(self.eb)
self.tray.show_all()
def tray_clicked(self, widget, event):
""" Handles tray mouse click events. """
if event.button == 1:
self.toggle_wicd_gui()
elif event.button == 3:
self.init_network_menu()
self.menu.popup(None, None, None, event.button, event.time)
def set_from_file(self, val=None):
""" Calls set_from_file on the gtk.Image for the tray icon. """
if not self.use_tray: return
self.pic.set_from_file(val)
def set_tooltip(self, val):
""" Set the tooltip for this tray icon.
Sets the tooltip for the gtk.ToolTips associated with this
tray icon.
"""
if not self.use_tray: return
self.tooltip.set_tip(self.eb, val)
    class StatusTrayIconGUI(gtk.StatusIcon, TrayIconGUI):
        """ Class for creating the wicd tray icon on gtk > 2.10.
        Uses gtk.StatusIcon to implement a tray icon.
        """
        def __init__(self, use_tray=True):
            TrayIcon.TrayIconGUI.__init__(self, use_tray)
            self.use_tray = use_tray
            if not use_tray:
                # No tray requested: open the main GUI instead.
                self.toggle_wicd_gui()
                return
            gtk.StatusIcon.__init__(self)
            self.current_icon_path = ''
            self.set_visible(True)
            self.connect('activate', self.on_activate)
            self.connect('popup-menu', self.on_popup_menu)
            self.set_from_file(wpath.images + "no-signal.png")
            self.set_tooltip("Initializing wicd...")
        def on_popup_menu(self, status, button, timestamp):
            """ Opens the right click menu for the tray icon. """
            self.init_network_menu()
            self.menu.popup(None, None, None, button, timestamp)
        def set_from_file(self, path = None):
            """ Sets a new tray icon picture. """
            if not self.use_tray: return
            # Skip redundant reloads of the image that is already shown.
            if path != self.current_icon_path:
                self.current_icon_path = path
                gtk.StatusIcon.set_from_file(self, path)
def usage():
    """ Print usage information (command-line help text). """
    print """
wicd 1.50
wireless (and wired) connection daemon front-end.
Arguments:
\t-n\t--no-tray\tRun wicd without the tray icon.
\t-h\t--help\t\tPrint this help information.
\t-a\t--no-animate\tRun the tray without network traffic tray animations.
"""
def setup_dbus():
    """ Connect to the wicd daemon over D-Bus.

    Fills in the module-level daemon/wireless/wired interface proxies.
    If the first connection fails, prompts to start the daemon and
    retries once before giving up.
    """
    global bus, daemon, wireless, wired, dbus_manager
    dbus_manager = DBusManager()
    try:
        dbus_manager.connect_to_dbus()
    except DBusException:
        print "Can't connect to the daemon, trying to start it automatically..."
        misc.PromptToStartDaemon()
        try:
            dbus_manager.connect_to_dbus()
        except DBusException:
            gui.error(None, "Could not connect to wicd's D-Bus interface. " +
                      "Make sure the daemon is started.")
            sys.exit(1)
    dbus_ifaces = dbus_manager.get_dbus_ifaces()
    daemon = dbus_ifaces['daemon']
    wireless = dbus_ifaces['wireless']
    wired = dbus_ifaces['wired']
    return True
def main(argv):
    """ The main frontend program.

    Keyword arguments:
    argv -- The arguments passed to the script.
    """
    use_tray = True
    animate = True
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'nha', ['help', 'no-tray',
                                                         'no-animate'])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, a in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif opt in ('-n', '--no-tray'):
            use_tray = False
        elif opt in ('-a', '--no-animate'):
            animate = False
        else:
            usage()
            sys.exit(2)
    print 'Loading...'
    setup_dbus()
    if not use_tray:
        # No tray: run the full GUI as a standalone application.
        the_gui = gui.appGui()
        the_gui.standalone = True
        mainloop = gobject.MainLoop()
        mainloop.run()
        sys.exit(0)
    tray_icon = TrayIcon(use_tray, animate)
    # Launch the wired profile chooser if the daemon requested it while no
    # client was running.
    if daemon.GetNeedWiredProfileChooser():
        daemon.SetNeedWiredProfileChooser(False)
        tray_icon.icon_info.wired_profile_chooser()
    # Keep the icon in sync with the daemon's D-Bus signals.
    bus = dbus_manager.get_bus()
    bus.add_signal_receiver(tray_icon.icon_info.wired_profile_chooser,
                            'LaunchChooser', 'org.wicd.daemon')
    bus.add_signal_receiver(tray_icon.icon_info.update_tray_icon,
                            'StatusChanged', 'org.wicd.daemon')
    bus.add_signal_receiver(tray_icon.tr.tray_scan_ended, 'SendEndScanSignal',
                            'org.wicd.daemon.wireless')
    bus.add_signal_receiver(tray_icon.tr.tray_scan_started,
                            'SendStartScanSignal', 'org.wicd.daemon.wireless')
    print 'Done.'
    mainloop = gobject.MainLoop()
    mainloop.run()
if __name__ == '__main__':
    # Script entry point.
    main(sys.argv)
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
4b673bc37665e0a54a1b1b9e16dbed3ba4276ef8 | bb577626da01bf55398b760ab7079673df92050f | /app/migrations/0003_auto_20200804_1759.py | 57293dfe2e16ddb631e057402d1c9b6d10184484 | [
"MIT"
] | permissive | AngelaGua/group2_CTFLab | d07e481a72c93a47866e7efdbdfdf16da56099ed | 5b492ce46875ea37a57701686897bd9613e2dd13 | refs/heads/master | 2023-01-03T09:19:01.573443 | 2020-10-06T15:15:15 | 2020-10-06T15:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | # Generated by Django 2.1.15 on 2020-08-04 17:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the single `args` field on `lab`
    # with four optional Tag_Args foreign keys (argomento_1..argomento_4).
    dependencies = [
        ('app', '0002_lab_args'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='lab',
            name='args',
        ),
        migrations.AddField(
            model_name='lab',
            name='argomento_1',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='argo1', to='app.Tag_Args'),
        ),
        migrations.AddField(
            model_name='lab',
            name='argomento_2',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='argo2', to='app.Tag_Args'),
        ),
        migrations.AddField(
            model_name='lab',
            name='argomento_3',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='argo3', to='app.Tag_Args'),
        ),
        migrations.AddField(
            model_name='lab',
            name='argomento_4',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='argo4', to='app.Tag_Args'),
        ),
    ]
| [
"msn@mapoetto.net"
] | msn@mapoetto.net |
aab962d480479195e95fbf6f6f89de69c6e05402 | 73aca8a8c9c0a197e99af31bd124681b1b68e2bf | /franka-emppi-data/Simulations/franka-cabinet/vis-sim.py | d082141616db4d124cf1aafc29316c79e4fefabd | [] | no_license | i-abr/EnsembleMPPI | a4f7013fa990f997c6c0ce94647aa733bf78da86 | b3fd5bccf720fd218cdb71880b6661306dbf7a14 | refs/heads/master | 2023-06-24T18:17:40.798344 | 2020-08-20T03:52:01 | 2020-08-20T03:52:01 | 274,959,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python3
import numpy as np
from mujoco_py import load_model_from_path, MjSim, MjViewer
# Door scene: model, simulator and interactive viewer.
model_path = 'assets/franka-door.xml'
model = load_model_from_path(model_path)
sim = MjSim(model)
viewer = MjViewer(sim)
door_bid = model.body_name2id('Door')
# Cabinet scene, simulated side by side with the door scene.
t_model_path = 'assets/franka-cabinet.xml'
t_model = load_model_from_path(t_model_path)
t_sim = MjSim(t_model)
t_viewer = MjViewer(t_sim)
handle_sid = t_model.site_name2id('Handle')
# Drive both arms with small Gaussian control noise and render forever.
while True:
    sim.data.ctrl[:] = np.random.normal(0., 0.1, size=(sim.model.nu,))
    sim.step()
    viewer.render()
    # Size the noise by the cabinet model's own actuator count; the original
    # reused `sim.model.nu` (copy-paste), which only works when both models
    # happen to have the same number of actuators.
    t_sim.data.ctrl[:] = np.random.normal(0., 0.1, size=(t_sim.model.nu,))
    t_sim.step()
    t_viewer.render()
| [
"iabr4073@gmail.com"
] | iabr4073@gmail.com |
b34289eaf185e4d32c68ce971ed745443c0712dd | 9c6837404b15c71ef13b0615701dbde49806ffa3 | /app/app.py | 48f35b56eba471c5966b68c407bbd4fabbf14d2f | [
"MIT"
] | permissive | gladunvv/send-messages-service | d43bd68af892aeb268e2f75b91756eaa5eed1976 | a467f2daab77feb5ad9c72e02d5aa12741fc20b7 | refs/heads/master | 2020-09-17T07:10:48.814024 | 2019-12-09T20:25:37 | 2019-12-09T20:25:37 | 224,031,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import flask
import os
app = flask.Flask(__name__)
app.config["DEBUG"] = True
# Imported for its side effect: `routes` registers view functions on `app`.
# It must come after `app` is created, or the import would be circular.
import routes
if __name__ == "__main__":
    app.run(debug=True)
| [
"bincha.1997@gmail.com"
] | bincha.1997@gmail.com |
0fac912558de9a1141bb62d3223f1aa8fd825e70 | 1b9075ffea7d4b846d42981b41be44238c371202 | /2008/devel/desktop/xfce4/goodies/xfce4-notifyd/actions.py | 0be89f389aad103384a5f9e18a9beb460910be54 | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Configure: install helper binaries into /usr/lib/xfce4, no static libs.
    autotools.configure('--libexecdir=/usr/lib/xfce4 \
                         --disable-static')
def build():
    # Run `make` in the configured source tree.
    autotools.make()
def install():
    # Stage the build into the package directory and ship the docs.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "README")
| [
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
] | MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2 |
33ed125671b5ae7caf921093bc582b018e9e6f48 | 52e2588508fe1161cd393550b275f2eedd85c5a3 | /SixthA(OOP).py | cfa716e3005bede65be3a22192892b35673e9959 | [] | no_license | evgenygamza/MagicWand | ea0caf7706f0a767bf01ec9345d1e57b3c5baad8 | 9b28775ab19bc4b58ad20d2c3341b26defb9ec4c | refs/heads/master | 2023-05-12T06:51:20.458683 | 2021-05-26T18:58:45 | 2021-05-26T18:58:45 | 371,141,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # -*- coding: cp1251 -*-
__Author__ = 'Gamza'
class Hui:
    """Toy model: hairiness grows with days since shaving; odour grows
    with days since bathing, scaled by activity."""
    def __init__(self, days_after_shaving, days_after_bath, active_days):
        # hairiness: two units per unshaved day
        self.volosatost = days_after_shaving * 2
        # odour: three units per unbathed day per active day
        self.aromat = days_after_bath * active_days * 3
class NemutuiHui(Hui):
    # Extends Hui with a curd flag and an appeal score derived from the
    # two base attributes.
    def __init__(self, a, b, c):
        # initialise volosatost/aromat on the base class first
        Hui.__init__(self, a, b, c)
        self.tvorog = True
        # appeal is the product of the two base attributes
        self.privlekatelnost = self.volosatost * self.aromat
# Quick demo of the derived attributes.
vasilii = NemutuiHui(2, 3, 5)
print(vasilii.aromat, vasilii.volosatost, vasilii.tvorog, vasilii.privlekatelnost)
| [
"1i4m5g2@#$"
] | 1i4m5g2@#$ |
e32da698d05d799a03cf900f64916a0071fc5c50 | 7f315bfdd51ad2d2fd1872b0118da7121491e6dd | /shujujiegou/03_select_sort.py | 2f9bb64eabf4ab1358548562a463c3c8629dc3e5 | [
"MIT"
] | permissive | summerliu1024/PythonExercises | 6c868f72e2dffedcfb996ef272ccbd843a3e9100 | 43e07998540a73f0112538646ded37b39e8da88b | refs/heads/master | 2020-06-15T13:38:37.564283 | 2019-07-09T02:56:00 | 2019-07-09T02:56:00 | 195,314,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | def select_sort(alist):
"""选择排序"""
n = len(alist)
for j in range(n-1):
min_index = j
for i in range(j+1, n):
if alist[i] < alist[min_index]:
min_index = i
if j != min_index:
alist[j], alist[min_index] = alist[min_index], alist[j]
if __name__ == '__main__':
    li = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print(li)
    select_sort(li)
    print(li)
    # [17, 20, 26, 31, 44, 54, 55, 77, 93]
    # [54, 26, 77, 17, 77, 31, 44, 55, 20]
# worst-case time complexity: O(n^2)
# best-case time complexity: O(n^2)
# the algorithm is not stable
"948605548@qq.com"
] | 948605548@qq.com |
8850ac4d33cbe4f2fe5c64ab1bbb490703182870 | 18f6e4c3de87a3edabaaca80bff14b0c0f83fe30 | /rabo_converter_qif_v2.py | 288b5b43c180b8c3ef44c07dc8c92cbe4b97dfd4 | [
"MIT"
] | permissive | Deinara/rabotoqif | 54acf2c00a040d2344aadabf7ee8d4744a879335 | 3c54cda47c101c961a78b12f7799e7fd915f11ec | refs/heads/master | 2022-01-11T13:12:43.173012 | 2019-07-20T08:33:09 | 2019-07-20T08:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | #!/usr/bin/env python3
"""Converts Rabobank CSV to QIF"""
import datetime as dt
import os
import pandas as pd
from qifparse import qif
def main():
    """Convert every Rabobank CSV export in the current directory to QIF.

    Looks for CSV_A*.csv / CSV_O*.csv files, builds one QIF account per
    IBAN/BBAN found in the file, and writes an Import_<date>_<file>_.qif
    file next to the input.
    """
    today = str(dt.date.today())
    # format_str required for interpreting the date column by qifparse
    format_str = "%Y-%m-%d"
    # check current directory for matching files by Rabobank
    flist = []
    for entry in os.listdir("."):
        # The original test was `ends(".csv") and starts("CSV_A") or
        # starts("CSV_O")`, which by operator precedence also accepted
        # non-CSV files named CSV_O*; both prefixes must be limited to
        # .csv files.
        if entry.endswith(".csv") and entry.startswith(("CSV_A", "CSV_O")):
            flist.append(entry)
    # iterate csv-files and generate related qif file(s)
    for f in flist:
        df = pd.read_csv(f, thousands=",", encoding="latin1")
        # define list of accounts and rename columns
        alist = df["IBAN/BBAN"].unique().tolist()
        columndict = {
            "Datum": "date",
            "Naam tegenpartij": "payee",
            "Omschrijving-1": "memo",
            "Bedrag": "amount",
        }
        df.rename(columns=columndict, inplace=True)
        # amount appears to be exported in cents (divided by 100) -- confirm
        df.loc[:, "amount"] = df["amount"] / 100
        # establish qif_obj
        qif_obj = qif.Qif()
        for a in alist:
            acc = qif.Account(name=str(a))
            qif_obj.add_account(acc)
            print(acc)
            for index, row in df[df["IBAN/BBAN"] == a].iterrows():
                tr = qif.Transaction()
                tr.amount = row["amount"]
                tr.date = dt.datetime.strptime(row["date"], format_str)
                tr.payee = row["payee"]
                tr.memo = row["memo"]
                # tr.to_account = itag
                acc.add_transaction(tr, header="!Type:Bank")
                print(tr)
        fname = "Import_" + today + "_" + str(f) + "_.qif"
        with open(fname, "w") as output:
            output.write(str(qif_obj))
        # remove original file
        # os.remove(f)
# NOTE(review): runs on import; consider guarding with
# `if __name__ == "__main__":`.
main()
| [
"wdunnes@gmail.com"
] | wdunnes@gmail.com |
89c7405aa647f7ce532b1ebeb9b37e2d8f6f0f5d | 63658f67fcc8d8fe376a4bfe327be6a3bef15a7d | /spark/Entrega2/05/05.old.py | 7099fe50a61ad90d2bc25a4ae83e856e7f510741 | [] | no_license | JonathanLoscalzo/catedra-big-data | 2e7399e7042e08de678dfe8894bebbc2fdee065a | c128bc24282a17c51ef0e6bf9acc8b031baebd72 | refs/heads/master | 2021-07-19T17:47:14.894079 | 2018-12-19T11:30:36 | 2018-12-19T11:30:36 | 146,965,681 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | from pyspark import SparkConf, SparkContext
from pyspark.sql import Row
import sys
conf = SparkConf().setMaster("local").setAppName("cantidad_viajes")
sc = SparkContext(conf=conf)
if len(sys.argv) < 4:
    sys.exit(
        "\nPRIMER PARAMETRO ARCHIVO DE ENTRADA \n"
        + "SEGUNDO PARAMETRO DIRECTORIO DE SALIDA\n"
        + "TERCER PARAMETRO DURACION\n"
    )
arg1 = sys.argv[1] # input traffic file, e.g. /tmp/data/Entrega2/trafico.txt
arg2 = sys.argv[2] # output directory, e.g. /tmp/data/Entrega2/05/salida
duracion = int(sys.argv[3]) # interval length, in the data's time units
lines = sc.textFile(arg1)
# (vehicle, timestamp): column 0 is the vehicle id, column 3 the timestamp
lines = lines.map(lambda line: line.split("\t")).map(lambda x: (x[0], int(x[3])))
# maximum timestamp observed in the data
max_timestamp = lines.map(lambda a: a[1]).max()
def get_interval(timestamp):
    # 1-based index of the window of width `duracion` (module global) that
    # contains `timestamp`; exact multiples fall in the earlier window.
    whole, leftover = divmod(timestamp, duracion)
    return whole + 1 if leftover else whole
# map each timestamp to the interval it belongs to
# (vehicle, interval)
autos_interval = lines.mapValues(get_interval)
# for each interval, count the distinct vehicles seen in it
# rows: (#interval, start, end, vehicle_count)
intervals = [
    (
        interval,
        (interval - 1) * duracion,
        interval * duracion if (interval * duracion < max_timestamp) else max_timestamp,
        autos_interval.filter(lambda a: a[1] == interval)
        .map(lambda a: (a[0]))
        .distinct()
        .count(),
    )
    for interval in range(1, get_interval(max_timestamp) + 1)
] # NOTE(review): one full RDD scan per interval; a single distinct+countByKey pass would likely be cheaper -- confirm intent
sc.parallelize(intervals).saveAsTextFile(arg2)
sc.stop()
| [
"jonathan.r.loscalzo@gmail.com"
] | jonathan.r.loscalzo@gmail.com |
d6ee7fda37973ff33a434afd1575004b50819c0a | 751d837b8a4445877bb2f0d1e97ce41cd39ce1bd | /codegolf/hello-world-rainbow.py | 0e86441c738f717c2150798dc6f368cbd9961c53 | [
"MIT"
] | permissive | qeedquan/challenges | d55146f784a3619caa4541ac6f2b670b0a3dd8ba | 56823e77cf502bdea68cce0e1221f5add3d64d6a | refs/heads/master | 2023-08-11T20:35:09.726571 | 2023-08-11T13:02:43 | 2023-08-11T13:02:43 | 115,886,967 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #!/usr/bin/env python
"""
Dealing with colors in non-markup languages often complicates things. I would like to see some variations of how color is used in different languages.
The object of this competition is to output 'Hello World' in the seven colors of the rainbow.
According to Wikipedia, these are the 7 colors.
Red #FF0000 (RGB: 255, 0, 0)
Orange #FF7F00 (RGB: 255, 127, 0)
Yellow #FFFF00 (RGB: 255, 255, 0)
Green #00FF00 (RGB: 0, 255, 0)
Blue #0000FF (RGB: 0, 0, 255)
Indigo #6600FF (RGB: 111, 0, 255)
Violet #8B00FF (RGB: 143, 0, 255)
The rules
The program must output 'Hello World'. (Doesn't necessarily need to be text, but it must be distiguishable as 'Hello World')
Each letter must be a different color.
The colors can be in any order.
You must use each of the seven colors at least once. (You may use more than the given colors)
No use of markup languages in any case.
The winner is whoever has the lowest amount of characters AND follows the rules
Bonus -1 character if it is written in DART
I will pick the winner on Jan 11 (if I remember ;D).
Good luck
"""
def rainbow(s):
    """Print *s* one character at a time, cycling through the six bold
    ANSI foreground colors (31-36), then reset the terminal attributes."""
    for pos, ch in enumerate(s):
        print("\033[%d;1m%c" % (31 + pos % 6, ch), end='')
    print("\033[0m")
def main():
    # Demo: print the challenge string in rainbow colors.
    rainbow("Hello World!")
main()
| [
"qeed.quan@gmail.com"
] | qeed.quan@gmail.com |
3dd8cd2f2c00f88ab7caac08556912691b878f5a | 50013097521c08e66aa6351ede5f4c46de84f429 | /blog/models/Post.py | 32a37ac2dc97119651d640671af5699617844f34 | [] | no_license | DrAzraelTod/webChao | 5318a55e08e96a3559094071089c0078855890b0 | 3cb348e017edf48118f1e6f054cff6d0667bded1 | refs/heads/master | 2016-09-06T18:32:10.749909 | 2016-02-23T12:29:21 | 2016-02-23T12:29:21 | 6,939,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime

from django.core.urlresolvers import reverse
from django.db import models

from Tag import Tag
from UserProfile import UserProfile
# (value, German label): 1 = deleted, 2 = publish, 0 = draft.
POST_STATUS = (
    (1, 'gelöscht'),
    (2, 'Veröffentlichen'),
    (0, 'Entwurf'),
)
class Post(models.Model):
    """ A blog article: content, author, tags, scheduled publication date
    and a moderation status. """
    text = models.TextField('Inhalt')
    title = models.CharField('Titel', max_length=255)
    author = models.ForeignKey(UserProfile, related_name="posts")
    date = models.DateTimeField('veröffentlicht ab')
    tags = models.ManyToManyField(Tag, symmetrical=False, related_name="posts")
    status = models.IntegerField('Status', choices=POST_STATUS)
    # filter what states (__gte) should be displayed public
    display_states_above = 2
    def created_today(self):
        """ True when the publication date is today's date. """
        # `datetime` was referenced here without ever being imported
        # (NameError at runtime); the import now lives at module top.
        return self.date.date() == datetime.date.today()
    def get_slug(self):
        """ Build a URL-safe slug from the title. """
        slug = self.title.replace(' ', '-') # spaces are bad in urls
        slug = slug.replace('#', '') # because we would want to see things after this in the url
        # the quote-stripping call below was garbled in the source (an
        # unescaped &quot;); restored to stripping double quotes
        slug = slug.replace('"', '') # because i was once migrating from wordpress... dont ask!
        slug = slug.replace('?', '') # questionmark because this should not get pushed into parameters
        slug = slug.replace('\\', '') # dont really know if we will need this...
        slug = slug.replace('/', '') # obvious
        slug = slug.replace('---', '-') # ' - ' gets converted to '---' -> 'foo---bar' ->'foo-bar'
        return slug
    def get_absolute_url(self, byId=False):
        """ URL of this post, addressed by id or by date + slug. """
        if byId:
            return reverse('webchao.blog.views.byId', args=[self.id, self.get_slug()])
        else:
            return reverse('webchao.blog.views.byDate', args=[self.date.date().year, self.date.date().month, self.date.date().day, self.get_slug()])
    def __unicode__(self):
        return self.title
    class Meta:
        db_table = 'blog_post'
        get_latest_by = 'date'
        ordering = ['-date']
        verbose_name = 'Artikel'
        verbose_name_plural = 'Artikel'
| [
"dat-git@g33ky.de"
] | dat-git@g33ky.de |
ab6c34d16beb64feafd2396054dc2b98364402c5 | d1616f1cabd87c64fba9630ad7db070b1e7caa2c | /model.py | c7c0944f7273442e493b96ac1290e74792bb4d7f | [] | no_license | deligentfool/Population_based_training_pytorch | b60e24f509db81e0724820f7714c2476984ac02a | 1b80a79ead0778997cf6393aa7941b6b909ae75b | refs/heads/master | 2022-11-16T14:17:02.215898 | 2020-07-15T02:44:15 | 2020-07-15T02:44:15 | 279,745,126 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,229 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
import gym
from collections import deque
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from net import policy_net, value_net
from buffer import trajectory_buffer
class ppo_clip(object):
def __init__(self, env_id, epoch, learning_rate, gamma, lam, epsilon, capacity, update_iter, model_id=None, update_freq=50):
super(ppo_clip, self).__init__()
self.model_id = model_id
self.env_id = env_id
self.env = gym.make(self.env_id)
self.learning_rate = learning_rate
self.gamma = gamma
self.lam = lam
self.epsilon = epsilon
self.epoch = epoch
self.capacity = capacity
self.update_iter = update_iter
self.update_freq = update_freq
self.observation_dim = self.env.observation_space.shape[0]
self.action_dim = self.env.action_space.n
self.policy_net = policy_net(self.observation_dim, self.action_dim)
self.value_net = value_net(self.observation_dim, 1)
self.value_optimizer = torch.optim.Adam(self.value_net.parameters(), lr=self.learning_rate)
self.policy_optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=self.learning_rate)
self.buffer = trajectory_buffer(capacity=self.capacity)
self.count = 0
self.train_count = 0
def reset(self):
self.count = 0
self.train_count = 0
self.buffer.clear()
def train(self):
obs, next_obs, act, rew, don, val = self.buffer.get()
obs = torch.FloatTensor(obs)
next_obs = torch.FloatTensor(next_obs)
act = torch.LongTensor(act)
rew = torch.FloatTensor(rew)
don = torch.FloatTensor(don)
val = torch.FloatTensor(val)
old_probs = self.policy_net.forward(obs)
old_probs = old_probs.gather(1, act).squeeze(1).detach()
value_loss_buffer = []
policy_loss_buffer = []
for _ in range(self.update_iter):
td_target = rew + self.gamma * self.value_net.forward(next_obs) * (1 - don)
delta = td_target - self.value_net.forward(obs)
delta = delta.detach().numpy()
advantage_lst = []
advantage = 0.0
for delta_t in delta[::-1]:
advantage = self.gamma * self.lam * advantage + delta_t[0]
advantage_lst.append([advantage])
advantage_lst.reverse()
advantage = torch.FloatTensor(advantage_lst)
value = self.value_net.forward(obs)
#value_loss = (ret - value).pow(2).mean()
value_loss = F.smooth_l1_loss(td_target.detach(), value)
value_loss_buffer.append(value_loss.item())
self.value_optimizer.zero_grad()
value_loss.backward()
self.value_optimizer.step()
probs = self.policy_net.forward(obs)
probs = probs.gather(1, act).squeeze(1)
ratio = probs / old_probs
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1. - self.epsilon, 1. + self.epsilon) * advantage
policy_loss = - torch.min(surr1, surr2).mean()
policy_loss_buffer.append(policy_loss.item())
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
def load_weight_hyperparam(self, model_path):
model_ = torch.load(model_path)
self.policy_net.load_state_dict(model_['policy_weight'])
self.value_net.load_state_dict(model_['value_weight'])
hyperparameters = model_['hyperparameters']
self.learning_rate = hyperparameters['learning_rate']
self.gamma = hyperparameters['gamma']
self.lam = hyperparameters['lam']
self.epsilon = hyperparameters['epsilon']
def save_weight_hyperparam(self, model_path):
model_ = {}
model_['policy_weight'] = self.policy_net.state_dict()
model_['value_weight'] = self.value_net.state_dict()
hyperparameters = {}
hyperparameters['learning_rate'] = self.learning_rate
hyperparameters['gamma'] = self.gamma
hyperparameters['lam'] = self.lam
hyperparameters['epsilon'] = self.epsilon
model_['hyperparameters'] = hyperparameters
torch.save(model_, model_path)
def run(self):
while True:
if self.train_count == self.epoch:
break
obs = self.env.reset()
total_reward = 0
while True:
action = self.policy_net.act(torch.FloatTensor(np.expand_dims(obs, 0)))
next_obs, reward, done, _ = self.env.step(action)
value = self.value_net.forward(torch.FloatTensor(np.expand_dims(obs, 0))).detach().item()
self.buffer.store(obs, next_obs, action, reward, done, value)
self.count += 1
total_reward += reward
obs = next_obs
if self.count % self.update_freq == 0:
self.train_count += 1
self.train()
self.buffer.clear()
if self.train_count == self.epoch:
break
if done:
break
def eval(self, num=5):
score_list = []
for _ in range(num):
obs = self.env.reset()
total_reward = 0
while True:
action = self.policy_net.act(torch.FloatTensor(np.expand_dims(obs, 0)))
next_obs, reward, done, _ = self.env.step(action)
value = self.value_net.forward(torch.FloatTensor(np.expand_dims(obs, 0))).detach().item()
total_reward += reward
obs = next_obs
if done:
break
score_list.append(total_reward)
return np.mean(score_list)
if __name__ == '__main__':
env = gym.make('CartPole-v1').unwrapped
| [
"1027660817@qq.com"
] | 1027660817@qq.com |
4e078c68276aaed1c1699174d8b734d478bb44ce | ff85002de8fc3e8d38b96753f7358ea1dc8055af | /Infinite_sequence.py | 105c8cc00705bdc188dbf46bca2fbd0d97a61125 | [] | no_license | xlax007/Collection-of-Algorithms | d0ef8277e4f6dd5a27ed2a67bb720c3d867cbec9 | 4fe4d69f60b3b6f49624be135750f074216aacb9 | refs/heads/master | 2022-12-12T23:15:39.991983 | 2020-09-09T23:36:26 | 2020-09-09T23:36:26 | 294,251,463 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 20:27:27 2020
@author: alexi
"""
#https://codeforces.com/problemset/problem/675/A --- Alexis Galvan
def infinite_sequence():
numbers = list(map(int, input().split()))
if numbers[0] == numbers[1] or (numbers[0]+numbers[2]) == numbers[1]:
return 'YES'
elif numbers[2] == 0 or (numbers[0] < numbers[1] and numbers[2] <= 1) or (numbers[0] > numbers[1]) and numbers[2] > 1:
return 'NO'
else:
actual = numbers[0] + numbers[2]
divisor = numbers[1]-actual
if divisor % numbers[2] == 0:
return 'YES'
return 'NO'
A = infinite_sequence()
print(A) | [
"noreply@github.com"
] | noreply@github.com |
2b9b23cdd7914f0a4c717c566c36b4670d9924ad | f737ab2c0ec0fbecec740b155e005f6c433cb5e2 | /src/main/python/model/display.py | 2310d22e7e94ef2d3019afd49725ad3abe17e729 | [
"MIT"
] | permissive | senilix/pypolarmap | d6c366dbf739d1f484970a0e1d78a8101f880ed6 | 348249fc2a06cce5675b3b0d286f853ab27d4ba0 | refs/heads/master | 2020-08-03T13:56:25.504783 | 2018-09-29T20:28:56 | 2018-09-29T20:28:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,924 | py | from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from model.preferences import DISPLAY_DB_RANGE, DISPLAY_COLOUR_MAP, DISPLAY_POLAR_360
from ui.display import Ui_displayControlsDialog
class DisplayModel:
'''
Parameters to feed into how a chart should be displayed.
'''
def __init__(self, preferences):
self.__preferences = preferences
self.__dBRange = self.__preferences.get(DISPLAY_DB_RANGE)
self.__normalised = False
self.__normalisationAngle = 0
self.__visibleChart = None
self.__colour_map = self.__preferences.get(DISPLAY_COLOUR_MAP)
self.__smoothing_type = None
self.__locked = False
self.__full_polar_range = self.__preferences.get(DISPLAY_POLAR_360)
self.results_charts = []
self.measurement_model = None
def __repr__(self):
return self.__class__.__name__
@property
def colour_map(self):
return self.__colour_map
@colour_map.setter
def colour_map(self, colour_map):
self.__colour_map = colour_map
for chart in self.results_charts:
if hasattr(chart, 'updateColourMap'):
chart.updateColourMap(self.__colour_map, draw=chart is self.__visibleChart)
self.__preferences.set(DISPLAY_COLOUR_MAP, colour_map)
@property
def dBRange(self):
return self.__dBRange
@dBRange.setter
def dBRange(self, dBRange):
self.__dBRange = dBRange
for chart in self.results_charts:
chart.updateDecibelRange(draw=chart is self.__visibleChart)
self.__preferences.set(DISPLAY_DB_RANGE, dBRange)
@property
def smoothing_type(self):
return self.__smoothing_type
@smoothing_type.setter
def smoothing_type(self, smoothing_type):
self.__smoothing_type = smoothing_type
self.measurement_model.smooth(self.__smoothing_type)
@property
def normalised(self):
return self.__normalised
@normalised.setter
def normalised(self, normalised):
self.__normalised = normalised
self.measurement_model.normalisationChanged()
self.redrawVisible()
@property
def normalisationAngle(self):
return self.__normalisationAngle
@normalisationAngle.setter
def normalisationAngle(self, normalisationAngle):
changed = normalisationAngle != self.__normalisationAngle
self.__normalisationAngle = normalisationAngle
if changed and self.__normalised:
self.measurement_model.normalisationChanged()
self.redrawVisible()
@property
def full_polar_range(self):
return self.__full_polar_range
@full_polar_range.setter
def full_polar_range(self, full_polar_range):
changed = full_polar_range != self.__full_polar_range
self.__full_polar_range = full_polar_range
if changed:
self.redrawVisible()
@property
def visibleChart(self):
return self.__visibleChart
@visibleChart.setter
def visibleChart(self, visibleChart):
if self.__visibleChart is not None and getattr(self.__visibleChart, 'hide', None) is not None:
self.__visibleChart.hide()
self.__visibleChart = visibleChart
self.redrawVisible()
def redrawVisible(self):
if self.__visibleChart is not None and self.__locked is not True:
display = getattr(self.__visibleChart, 'display', None)
if display is not None and callable(display):
display()
def lock(self):
''' flags the model as locked so changes do not result in a redraw '''
self.__locked = True
def unlock(self):
''' flags the model as unlocked and redraws '''
self.__locked = False
self.redrawVisible()
class DisplayControlDialog(QDialog, Ui_displayControlsDialog):
'''
Display Parameters dialog
'''
def __init__(self, parent, display_model, measurement_model):
super(DisplayControlDialog, self).__init__(parent)
self.setupUi(self)
self.__display_model = display_model
self.__measurement_model = measurement_model
self.yAxisRange.setValue(self.__display_model.dBRange)
self.normaliseCheckBox.setChecked(self.__display_model.normalised)
self.__select_combo(self.smoothingType, self.__display_model.smoothing_type)
for m in self.__measurement_model:
self.normalisationAngle.addItem(str(m._h))
if not self.__select_combo(self.normalisationAngle, str(self.__display_model.normalisationAngle)):
self.__display_model.normalisationAngle = None
stored_idx = 0
from app import cms_by_name
for idx, (name, cm) in enumerate(cms_by_name.items()):
self.colourMapSelector.addItem(name)
if name == self.__display_model.colour_map:
stored_idx = idx
self.colourMapSelector.setCurrentIndex(stored_idx)
self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)
def __select_combo(self, combo, value):
if value is not None:
idx = combo.findText(value)
if idx != -1:
combo.setCurrentIndex(idx)
return idx
return None
def apply(self):
''' Updates the parameters and reanalyses the model. '''
from app import wait_cursor
with wait_cursor():
self.__display_model.lock()
self.__display_model.smoothing_type = self.smoothingType.currentText()
self.__display_model.dBRange = self.yAxisRange.value()
self.__display_model.normalised = self.normaliseCheckBox.isChecked()
self.__display_model.normalisationAngle = self.normalisationAngle.currentText()
self.__display_model.colour_map = self.colourMapSelector.currentText()
self.__display_model.unlock()
| [
"mattkhan@gmail.com"
] | mattkhan@gmail.com |
2ad1d1e156288780892eb4a16b3b3b4f46fac3bc | 524432657f857970cbd7c3dd506734c75b5878bf | /venv/Scripts/pip3-script.py | 7c9f7a85de8f2a94dd6c3e3d67d7aef63f91e602 | [] | no_license | breno29silva/Metodos-II | a75e7e0aafa44488ec875f77458aebeb24bcd070 | a63b0ea010565f3e64f8e30af25681f161af3a84 | refs/heads/master | 2022-04-13T05:37:22.454670 | 2020-04-02T17:15:52 | 2020-04-02T17:15:52 | 251,729,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!C:\Users\Breno\PycharmProjects\Metodo_numerico_2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"breno.silva.2903@gmail.com"
] | breno.silva.2903@gmail.com |
82c03e966e820471b4b51300b640e67822c1908d | fd133e75a1dfda1e38b5241a33147622fa07eea4 | /peopleFinder/apps.py | 9bfc589f902de3616a048d1f63921d3a3f2b5a2e | [] | no_license | TheWildUnicorn/GrandDrape | 39642c887ef046447a0c03f625fe15a1fd703573 | d7e30fd6732454ead073ca6ecea7b8e8ffdd09e9 | refs/heads/master | 2021-01-19T22:20:58.739069 | 2017-05-01T00:43:57 | 2017-05-01T00:43:57 | 88,796,273 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class PeoplefinderConfig(AppConfig):
name = 'peopleFinder'
| [
"jaydon134@me.com"
] | jaydon134@me.com |
307fe34057a5441ac99c86b9d36f636a49e70675 | 54892b54ebb2f492093c48781d0e25eb5b9ecfc2 | /Aula01/aula01DesviosCondicionais3.py | 04aa044d464de8ec09ded39724b69a034a3f5e13 | [
"MIT"
] | permissive | sartorileonardo/Curso-Intro-Python-Univali | f6c6c3d8379ab375ed469d00fef8bbaaa4eaf897 | 7f2a7c46b8ddf72391e58f22099d3d8ec91cbf7b | refs/heads/master | 2021-08-14T14:38:18.928647 | 2017-11-16T01:41:27 | 2017-11-16T01:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | print("Test IF/ELSE")
grade1 = input("Entre com sua nota:")
grade1 = int(grade1)
if grade1 >= 7:
print("Você passou!")
if(grade1 >= 9):
print("Sua nota é A")
elif grade1 >= 8:
print("Sua nota é B")
elif grade1 >= 7:
print("Sua nota é C")
else:
print("Não passou!")
| [
"noreply@github.com"
] | noreply@github.com |
dbf95929d8d6ee23c4ba280b0087426af2f2d6a7 | f966c891c666db846d86406cb9c08a530902d032 | /algorithms/implementation/larrys_array.py | 463216acec541b8c6a7c8847fad3576cde14e85c | [] | no_license | rickharris-dev/hacker-rank | 36620a16894571e324422c83bd553440cf5bbeb1 | 2ad0fe4b496198bec1b900d2e396a0704bd0c6d4 | refs/heads/master | 2020-12-25T14:33:20.118325 | 2016-09-06T01:10:43 | 2016-09-06T01:10:43 | 67,264,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/python
t = int(raw_input().strip())
for i in range(0,t):
n = int(raw_input().strip())
a = map(int,raw_input().strip().split(' '))
inversions = 0
for j in range(0,n):
inversions += abs(a[j] - (j + 1))
while j > 0:
if a[j - 1] > a[j]:
swap = a[j]
a[j] = a[j - 1]
a[j - 1] = swap
inversions -= 1
j -= 1
else:
break
if inversions % 2 == 0:
print "YES"
else:
print "NO"
| [
"rickharris724@gmail.com"
] | rickharris724@gmail.com |
911428d5455577a205d978c0b62f024af2f59acb | c33d1754fca5079113023c5e94323fce080f3bb4 | /webapp/webapp/webapp/urls.py | f159223d03888d4eb66c063593774258c0486233 | [] | no_license | arpitgupta1906/scientific_chart_reader | 12a45643d6182f2b69f4dbc330232a2e11d05147 | fcdee1b6a24ac1ef87014e276f6d11d24183c7a2 | refs/heads/master | 2022-12-11T01:55:07.533245 | 2021-01-12T19:05:24 | 2021-01-12T19:05:24 | 237,563,534 | 3 | 1 | null | 2022-12-08T11:20:30 | 2020-02-01T04:49:09 | Python | UTF-8 | Python | false | false | 971 | py | """webapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('core.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"gupta.25.iitj.ac.in"
] | gupta.25.iitj.ac.in |
00100d269f830789446f2c2dec2b09e8f48e9b1a | 7823d31688879b2d4dcfd2e3c11fb2c862f35a23 | /image_retrieval/server/algorithm/__init__.py | 54615a50ab3d115940cbce7402700f464f4a7c66 | [] | no_license | FMsunyh/dlfive | 7637631f54520673e4ec417b3c02b5334ecdf026 | ffae48aac5ece4de5ff9afccc69b093a72e09637 | refs/heads/master | 2021-09-19T05:59:51.040214 | 2018-07-24T06:29:40 | 2018-07-24T06:29:40 | 108,929,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/7/2017 9:32 AM
# @Author : sunyonghai
# @File : __init__.py.py
# @Software: BG_AI
# ========================================================= | [
"fmsunyh@gmail.com"
] | fmsunyh@gmail.com |
b7f3405dd102d34eed117d70ad67c2746d477d49 | 66af5573eef648ba76fcf0156de41b411ca38c6c | /sikuli-ide/test-scripts/linux-basic.sikuli/linux-basic.py | 181ef6d214d253f05603b2aa890850cefc3a0f8c | [
"MIT"
] | permissive | sikuli/sikuli | 5221f35a5fb9114bcaaab12d75fcf67ae341966b | 4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab | refs/heads/develop | 2023-08-28T09:39:58.135194 | 2019-10-27T08:34:31 | 2019-10-27T08:34:31 | 2,393,437 | 1,486 | 302 | MIT | 2018-10-04T11:47:59 | 2011-09-15T15:47:51 | HTML | UTF-8 | Python | false | false | 101 | py | find("1265075160887.png")
d = VDict()
d["1265075226698.png"] = "OK"
print d["1265075226698.png"][0]
| [
"vgod@mit.edu"
] | vgod@mit.edu |
65980ffd9b7eac7b38bc414af33e2b19415c581d | c4789b87d3a86795be92f9d328aad00ddc0da2a7 | /web/biassite/biassite/wsgi.py | 4b6a46823bd4028965619450ef54161afbc93085 | [] | no_license | eroberts20/bias_crawler | e0ce3cc5b2c76fb64ba8249be838f3aa0908f38f | 774388604d70e561a29621a75134981f9a5f9afb | refs/heads/master | 2021-01-19T11:20:52.341771 | 2017-05-16T11:07:45 | 2017-05-16T11:07:45 | 82,237,482 | 4 | 2 | null | 2020-07-23T12:35:41 | 2017-02-16T23:50:34 | Python | UTF-8 | Python | false | false | 394 | py | """
WSGI config for biassite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "biassite.settings")
application = get_wsgi_application()
| [
"eroberts20@mail.csuchico.edu"
] | eroberts20@mail.csuchico.edu |
bdb0a54a4124c2fb7c3bf8a4609ace0492123ef6 | 8e9d48d5a085334fff34d3c841617dc2b2dd1cff | /TF1_Project/simple_resnet.py | c231ee82f52306ff5eff227711b7b27bf5761ce3 | [] | no_license | OTapio/demo_projects | 4a74cf229192fcf1357c143dfda83bbb9ecdd1e0 | c184fba9f9130a9cdcc5facddf9adbea3db3ab0f | refs/heads/master | 2022-05-21T05:22:11.877561 | 2022-05-01T16:23:55 | 2022-05-01T16:23:55 | 228,912,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,057 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
#import tensorflow as tf
import time
from datetime import datetime
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import math
import errno
import shutil
import sys
sys.path.append(os.path.dirname(os.getcwd()))
############### RESNET
# Script preamble: record the wall-clock start time and define the
# batch-normalization hyperparameters shared by every batch_norm() call below.
print("\n PROGRAM BEGINS \n")
starttime = time.time()
# Standard ResNet values for tf.layers.batch_normalization momentum/epsilon.
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
def batch_norm(inputs, training, data_format):
    """Apply fused batch normalization along the channel axis.

    ``training`` switches between batch statistics and moving averages.
    fused=True is kept for a significant performance boost, see
    https://www.tensorflow.org/performance/performance_guide#common_fused_ops
    """
    channel_axis = 1 if data_format == 'channels_first' else 3
    return tf.layers.batch_normalization(
        inputs=inputs,
        axis=channel_axis,
        momentum=_BATCH_NORM_DECAY,
        epsilon=_BATCH_NORM_EPSILON,
        center=True,
        scale=True,
        training=training,
        fused=True)
def fixed_padding(inputs, kernel_size, data_format):
    """Zero-pad the spatial dimensions independently of the input size.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
        Should be a positive integer.
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      A tensor with the same format as the input with the data either intact
      (if kernel_size == 1) or padded (if kernel_size > 1).
    """
    total = kernel_size - 1
    front = total // 2
    back = total - front
    if data_format == 'channels_first':
        # NCHW: pad only the last two (spatial) axes.
        paddings = [[0, 0], [0, 0], [front, back], [front, back]]
    else:
        # NHWC: pad the middle two (spatial) axes.
        paddings = [[0, 0], [front, back], [front, back], [0, 0]]
    return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
    """Strided 2-D convolution with explicit, input-size-independent padding.

    For strides > 1 the input is padded manually (fixed_padding) and the
    convolution uses VALID padding, so the padding amount never depends on
    the spatial size of the input; stride-1 convolutions simply use SAME.
    No bias is added (batch norm follows every convolution in this model).
    """
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)
        conv_padding = 'VALID'
    else:
        conv_padding = 'SAME'
    return tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=conv_padding,
        use_bias=False,
        kernel_initializer=tf.variance_scaling_initializer(),
        data_format=data_format)
def sub_block(inputs, training, filters, kernel_size, strides, data_format, name):
    """Pre-activation residual unit: batch-norm -> ReLU -> conv, scoped by ``name``."""
    with tf.name_scope(name):
        x = batch_norm(inputs, training, data_format)
        x = tf.nn.relu(x)
        x = conv2d_fixed_padding(x, filters, kernel_size, strides, data_format)
    return x
class Model(object):
    """ResNet-18-style classifier with a hard-coded layer configuration.

    Architecture: a 3x3/64 stem convolution, then four residual stages of
    two pre-activation basic blocks each (64, 128, 256, 512 filters; the
    last three stages downsample by stride 2), global average pooling and
    a final dense layer producing ``num_classes`` logits.  Intermediate
    tensors are kept as instance attributes (``self.conv2_1`` etc.) so they
    can be inspected after the graph is built.
    """
    def __init__(self, num_classes, num_filters, kernel_size,
                 conv_stride, first_pool_size, first_pool_stride,
                 data_format=None):
        # NOTE(review): apart from num_classes and data_format, the
        # constructor arguments are stored but never used -- __call__
        # hard-codes its own filter counts, kernel sizes and strides.
        if not data_format:
            # channels_first is the faster layout on CUDA GPUs,
            # channels_last on CPU.
            data_format = (
                'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
        self.data_format = data_format
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.conv_stride = conv_stride
        self.first_pool_size = first_pool_size
        self.first_pool_stride = first_pool_stride

    def __call__(self, inputs, training):
        """Add operations to classify a batch of input images.
        Args:
          inputs: A Tensor representing a batch of input images.
          training: A boolean. Set to True to add operations required only when
            training the classifier.
        Returns:
          A logits Tensor with shape [<batch_size>, self.num_classes].
        """
        with tf.variable_scope('resnet_model', reuse=tf.AUTO_REUSE):
            data_format=self.data_format
            # Stem: one 3x3 convolution with 64 filters, stride 1.
            with tf.name_scope('Conv1'):
                self.output_Conv1 = conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=3, strides=1, data_format=data_format)
            # Stage 2: two basic blocks, 64 filters, no downsampling.
            # Each stage below follows the same pattern: a 1x1 projection
            # shortcut, two BN-ReLU-conv sub_blocks, a residual add, then
            # two more sub_blocks and a second residual add.
            with tf.name_scope('Conv2-x'):
                self.shortcut2_1 = conv2d_fixed_padding(inputs=self.output_Conv1, filters=64, kernel_size=1, strides=1,data_format=data_format)
                self.conv2_1 = sub_block(self.output_Conv1, training, filters=64, kernel_size=3, strides=1, data_format=data_format, name='conv2-1')
                self.conv2_2 = sub_block(self.conv2_1, training, filters=64, kernel_size=3, strides=1, data_format=data_format, name='conv2-2')
                self.shortcut2_2 = self.shortcut2_1 + self.conv2_2
                self.conv2_3 = sub_block(self.shortcut2_2, training, filters=64, kernel_size=3, strides=1, data_format=data_format, name='conv2-3')
                self.conv2_4 = sub_block(self.conv2_3, training, filters=64, kernel_size=3, strides=1, data_format=data_format, name='conv2-4')
                self.outputs_block_1 = self.conv2_4 + self.shortcut2_2
            # Stage 3: 128 filters, first conv and shortcut downsample (stride 2).
            with tf.name_scope('Conv3-x'):
                self.shortcut3_1 = conv2d_fixed_padding(inputs=self.outputs_block_1, filters=128, kernel_size=1, strides=2, data_format=data_format)
                self.conv3_1 = sub_block(self.outputs_block_1, training, filters=128, kernel_size=3, strides=2, data_format=data_format, name='conv3-1')
                self.conv3_2 = sub_block(self.conv3_1, training, filters=128, kernel_size=3, strides=1, data_format=data_format, name='conv3-2')
                self.shortcut3_2 = self.shortcut3_1 + self.conv3_2
                self.conv3_3 = sub_block(self.shortcut3_2, training, filters=128, kernel_size=3, strides=1, data_format=data_format, name='conv3-3')
                self.conv3_4 = sub_block(self.conv3_3, training, filters=128, kernel_size=3, strides=1, data_format=data_format, name='conv3-4')
                self.outputs_block_2 = self.conv3_4 + self.shortcut3_2
            # Stage 4: 256 filters, downsampling as in stage 3.
            with tf.name_scope('Conv4-x'):
                self.shortcut4_1 = conv2d_fixed_padding(inputs=self.outputs_block_2, filters=256, kernel_size=1, strides=2, data_format=data_format)
                self.conv4_1 = sub_block(self.outputs_block_2, training, filters=256, kernel_size=3, strides=2, data_format=data_format, name='conv4-1')
                self.conv4_2 = sub_block(self.conv4_1, training, filters=256, kernel_size=3, strides=1, data_format=data_format, name='conv4-2')
                self.shortcut4_2 = self.shortcut4_1 + self.conv4_2
                self.conv4_3 = sub_block(self.shortcut4_2, training, filters=256, kernel_size=3, strides=1, data_format=data_format, name='conv4-3')
                self.conv4_4 = sub_block(self.conv4_3, training, filters=256, kernel_size=3, strides=1, data_format=data_format, name='conv4-4')
                self.outputs_block_3 = self.conv4_4 + self.shortcut4_2
            # Stage 5: 512 filters, downsampling as in stage 3.
            with tf.name_scope('Conv5-x'):
                self.shortcut5_1 = conv2d_fixed_padding(inputs=self.outputs_block_3, filters=512, kernel_size=1, strides=2,data_format=data_format)
                self.conv5_1 = sub_block(self.outputs_block_3, training, filters=512, kernel_size=3, strides=2, data_format=data_format, name='conv5-1')
                self.conv5_2 = sub_block(self.conv5_1, training, filters=512, kernel_size=3, strides=1, data_format=data_format, name='conv5-2')
                self.shortcut5_2 = self.shortcut5_1 + self.conv5_2
                self.conv5_3 = sub_block(self.shortcut5_2, training, filters=512, kernel_size=3, strides=1, data_format=data_format, name='conv5-3')
                self.conv5_4 = sub_block(self.conv5_3, training, filters=512, kernel_size=3, strides=1, data_format=data_format, name='conv5-4')
                self.outputs_block_4 = self.conv5_4 + self.shortcut5_2
            # Head: final BN + ReLU (pre-activation style), global average
            # pooling over the spatial axes, then a dense layer producing
            # the per-class logits.
            inputs = batch_norm(self.outputs_block_4, training, self.data_format)
            inputs = tf.nn.relu(inputs)
            # Spatial axes depend on the data layout (NCHW vs NHWC).
            axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
            inputs = tf.reduce_mean(inputs, axes, keepdims=True)
            inputs = tf.identity(inputs, 'final_reduce_mean')
            inputs = tf.squeeze(inputs, axes)
            inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
            inputs = tf.identity(inputs, 'final_dense')
            return inputs
############### UTILS
def _parse_function(example_proto):
features = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
"label": tf.FixedLenFeature((), tf.int64, default_value=0)}
parsed_features = tf.parse_single_example(example_proto, features)
images = parsed_features["image"]
images = tf.decode_raw(images, tf.uint8)
# channel first
images = tf.reshape(images, [3, 32, 32])
images = tf.cast(images, tf.float32)
images = (images - 127) / 128.0 * 4
return images, parsed_features["label"]
def get_data(data_dir, mode, batch_size):
    """Build a one-shot (images, labels) input pipeline from TFRecords.

    ``mode`` selects which record file under ``data_dir`` to read; training
    data is shuffled and repeated indefinitely, while validation/eval data
    is read exactly once.
    """
    filenames = {
        'train': 'train.tfrecords',
        'validation': 'validation.tfrecords',
        'eval': 'eval.tfrecords',
    }
    if mode not in filenames:
        raise ValueError('mode should be %s or %s or %s' % ('train', 'validation', 'eval'))
    dataset = tf.data.TFRecordDataset(os.path.join(data_dir, filenames[mode]))
    dataset = dataset.map(_parse_function)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=10000).repeat()
    dataset = dataset.batch(batch_size)
    images, labels = dataset.make_one_shot_iterator().get_next()
    return images, labels
def configure_learning_rate(global_step, num_samples, FLAGS):
    """Build a staircase exponential-decay learning-rate schedule.

    The decay period is expressed in optimizer steps, derived from the
    dataset size, batch size and FLAGS.num_epochs_per_decay.
    """
    steps_per_decay = int(num_samples * FLAGS.num_epochs_per_decay / FLAGS.batch_size)
    return tf.train.exponential_decay(
        FLAGS.learning_rate,
        global_step,
        steps_per_decay,
        FLAGS.learning_rate_decay_factor,
        staircase=True,
        name='exponential_decay_learning_rate')
def get_cross_entropy(logits, labels):
    """Mean sparse softmax cross-entropy between float32 logits and int labels."""
    return tf.losses.sparse_softmax_cross_entropy(
        logits=tf.cast(logits, tf.float32), labels=labels)
def get_accuracy(logits, labels):
    """Return the streaming-accuracy update op for argmax predictions.

    tf.metrics.accuracy yields (value, update_op); running the update op
    both refreshes the running totals and returns the current accuracy.
    """
    predictions = tf.argmax(tf.cast(logits, tf.float32), axis=1)
    _, update_op = tf.metrics.accuracy(labels, predictions)
    return update_op
def get_reg_loss(weight_decay):
    """L2 weight-decay penalty summed over every trainable variable."""
    l2_terms = [tf.nn.l2_loss(tf.cast(v, tf.float32))
                for v in tf.trainable_variables()]
    return weight_decay * tf.add_n(l2_terms)
def validate(sess, accuracy_val, batch_size, val_samples):
    """Run the streaming-accuracy update op over one pass of the validation set.

    Args:
        sess: active session; each sess.run(accuracy_val) consumes one batch
            and returns the accuracy accumulated so far.
        accuracy_val: streaming-accuracy update op (see get_accuracy).
        batch_size: number of samples consumed per sess.run call.
        val_samples: total number of validation samples to cover.

    Returns:
        The accuracy value once at least val_samples samples were processed.
    """
    # Fix: the counter used to start at 1, so the progress line over-reported
    # by one sample (e.g. "processed 10049" for a 10000-sample set). Starting
    # at 0 and testing >= runs the exact same number of batches as before.
    processed = 0
    while True:
        acc_value = sess.run(accuracy_val)
        processed += batch_size
        # '\r' keeps the progress report on one console line.
        print('Calculating accuracy on validation set: processed %d samples'
              % processed, end='\r')
        if processed >= val_samples:
            return acc_value
# Dataset split sizes; presumably CIFAR-10 (50k train / 10k validation) -- TODO confirm.
TRAIN_SAMPLES = 50000
VAL_SAMPLES = 10000
##############################
# Flags most related to you #
##############################
tf.app.flags.DEFINE_integer(
    'batch_size', 64, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
    'epoch_number', 10,
    'Number of epoches')
tf.app.flags.DEFINE_string(
    'data_dir', None,
    'Directory of dataset.')
tf.app.flags.DEFINE_string(
    'train_dir', None,
    'Directory where checkpoints and event logs are written to.')
##############################
# Flags for learning rate #
##############################
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum for MomentumOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
    'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
    'num_epochs_per_decay', 2.0,
    'Number of epochs after which learning rate decays. Note: this flag counts '
    'epochs per clone but aggregates per sync replicas. So 1.0 means that '
    'each clone will go over full epoch individually, but replicas will go '
    'once across all replicas.')
##############################
# Flags for log and summary #
##############################
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 30,
    'The frequency with which logs are print.')
# NOTE(review): the help text below says "in seconds" but this flag counts
# steps (see its use in the training loop); help strings left untouched.
tf.app.flags.DEFINE_integer(
    'summary_every_n_steps', 30,
    'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
    'save_interval_secs', 300,
    'The frequency with which the model is saved, in seconds.')
# FLAGS resolves the command-line values for everything defined above.
FLAGS = tf.app.flags.FLAGS
##############################
# Build ResNet #
##############################
# Training input pipeline and the model (Model is defined earlier in this file).
images, labels = get_data(FLAGS.data_dir, 'train', FLAGS.batch_size)
resnet = Model(
    num_classes=10, num_filters=64, kernel_size=3, conv_stride=1,
    first_pool_size=None, first_pool_stride=None,
    data_format='channels_first')
############################################
# Loss, Accuracy, Train, Summary and Saver #
############################################
weight_decay = 2e-4
logits = resnet(images, training=True)
cross_entropy = get_cross_entropy(logits, labels)
# Streaming-accuracy update op: each sess.run folds one batch into the metric.
accuracy = get_accuracy(logits, labels)
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('accuracy', accuracy)
# Total loss = data term + L2 penalty over all trainable variables.
reg_loss = get_reg_loss(weight_decay)
tf.summary.scalar('reg_loss', reg_loss)
total_loss = cross_entropy + reg_loss
tf.summary.scalar('total_loss', total_loss)
global_step = tf.train.create_global_step()
learning_rate = configure_learning_rate(global_step, TRAIN_SAMPLES, FLAGS)
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(
    learning_rate=learning_rate,
    momentum=FLAGS.momentum)
grads = optimizer.compute_gradients(total_loss)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
summary_op = tf.summary.merge_all()
# Checkpoints save only trainable variables (optimizer slots are not saved).
saver = tf.train.Saver(tf.trainable_variables())
############################################
# For validation #
############################################
# Snapshot local-variable names BEFORE the validation metric is built, so the
# new metric's accumulators can be singled out for re-initialization below.
var_exclude = [v.name for v in tf.local_variables()]
images_val, labels_val = get_data(FLAGS.data_dir, 'validation', FLAGS.batch_size)
# Reuses the same model weights; training=False (e.g. for batch-norm behavior).
logits_val = resnet(images_val, training=False)
accuracy_val = get_accuracy(logits_val, labels_val)
# clear former accuracy information for validation
var_to_refresh = [v for v in tf.local_variables() if v.name not in var_exclude]
init_local_val = tf.variables_initializer(var_to_refresh)
#### HYPER PARAMETERS
# Echo the effective configuration so each run's log is self-describing.
# print(label, value) reproduces the original two-argument print output.
print("\nHyper parameters: ")
for label, value in [
        ("TRAIN_SAMPLES: ", TRAIN_SAMPLES),
        ("VAL_SAMPLES: ", VAL_SAMPLES),
        ("batch_size: ", FLAGS.batch_size),
        ("epoch_number: ", FLAGS.epoch_number),
        ("data_dir: ", FLAGS.data_dir),
        ("train_dir: ", FLAGS.train_dir),
        ("momentum: ", FLAGS.momentum),
        ("learning_rate: ", FLAGS.learning_rate),
        ("learning_rate_decay_factor: ", FLAGS.learning_rate_decay_factor),
        ("num_epochs_per_decay: ", FLAGS.num_epochs_per_decay),
        ("log_every_n_steps: ", FLAGS.log_every_n_steps),
        ("summary_every_n_steps: ", FLAGS.summary_every_n_steps),
        ("save_interval_secs: ", FLAGS.save_interval_secs),
]:
    print(label, value)
print("\n")
sess = tf.Session()
init_global = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()
# Event-file writer; FLAGS.train_dir must be set (string concat with '/log').
train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/log', sess.graph)
# update trainable variables in the graph
# Couple UPDATE_OPS (e.g. batch-norm moving averages) to every train step.
# NOTE(review): update_ops is a Python list passed as a single argument to
# tf.group; the common TF1 idiom is tf.group(train_op, *update_ops) -- confirm
# tf.group flattens list inputs in the TF version used here.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(train_op, update_ops)
sess.run(init_global)
sess.run(init_local)
############################################
# Let's start running #
############################################
# Main training loop: runs FLAGS.epoch_number epochs, validating at the start
# of each epoch, logging / summarizing every N steps, and checkpointing on a
# wall-clock schedule.
epoch_steps = int(TRAIN_SAMPLES / FLAGS.batch_size)
print('number of steps each epoch: ', epoch_steps)
epoch_index = 0
max_steps = FLAGS.epoch_number * epoch_steps
ori_time = time.time()
next_save_time = FLAGS.save_interval_secs
for step in range(max_steps):
    start_time = time.time()
    if step % epoch_steps == 0:
        epoch_index += 1
        # epoch_index is always > 0 here (it was just incremented), so a
        # validation pass also runs at step 0 -- reported as "after epoch 00"
        # on the freshly initialized model.
        if epoch_index > 0:
            # Reset only the validation metric's accumulators, then stream
            # one full pass over the validation set.
            sess.run(init_local_val)
            accuracy_val_value = validate(sess, accuracy_val, FLAGS.batch_size, VAL_SAMPLES)
            duration = time.time() - start_time
            duration = float(duration) / 60.0
            val_format = 'Time of validation after epoch %02d: %.2f mins, val accuracy: %.4f'
            print(val_format % (epoch_index - 1, duration, accuracy_val_value))
    # One optimization step; fetch the losses/metric alongside train_op.
    [_, total_l_value, entropy_l_value, reg_l_value, acc_value] = \
        sess.run([train_op, total_loss, cross_entropy, reg_loss, accuracy])
    total_duration = time.time() - ori_time
    total_duration = float(total_duration)
    assert not np.isnan(total_l_value), 'Model diverged with loss = NaN'
    if step % FLAGS.log_every_n_steps == 0:
        format_str = ('Epoch %02d/%2d time=%.2f mins: step %d total loss=%.4f loss=%.4f reg loss=%.4f accuracy=%.4f')
        print(format_str % (epoch_index, FLAGS.epoch_number, total_duration / 60.0, step, total_l_value, entropy_l_value, reg_l_value, acc_value))
    if step % FLAGS.summary_every_n_steps == 0:
        # NOTE: this runs summary_op in a separate sess.run, i.e. on a fresh
        # input batch, not the one used for the train step above.
        summary_str = sess.run(summary_op)
        train_writer.add_summary(summary_str, step)
    # Wall-clock-based checkpointing every FLAGS.save_interval_secs seconds.
    if total_duration > next_save_time:
        next_save_time += FLAGS.save_interval_secs
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        save_path = saver.save(sess, checkpoint_path, global_step=global_step)
        print('saved model to %s' % save_path)
# Final checkpoint and one last validation pass after training completes.
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
save_path = saver.save(sess, checkpoint_path, global_step=global_step)
print('saved the final model to %s' % save_path)
sess.run(init_local_val)
accuracy_val_value = validate(sess, accuracy_val, FLAGS.batch_size, VAL_SAMPLES)
print("accuracy_val_value: ", accuracy_val_value)
print("\n")
endtime = time.time()
# NOTE(review): starttime is not defined in this chunk -- presumably set near
# the top of the file; confirm it exists before the script reaches here.
print("\nTime: ",endtime - starttime)
print("\n PROGRAM ENDS \n")
# Time of validation after epoch 14: 0.07 mins, val accuracy: 0.7847
# Epoch 15/15 time=17.49 mins: step 10950 total loss=0.5596 loss=0.1032 reg loss=0.4564 accuracy=0.8354
# Epoch 15/15 time=17.54 mins: step 10980 total loss=0.5243 loss=0.0687 reg loss=0.4556 accuracy=0.8357
# Epoch 15/15 time=17.58 mins: step 11010 total loss=0.5165 loss=0.0616 reg loss=0.4549 accuracy=0.8361
# Epoch 15/15 time=17.63 mins: step 11040 total loss=0.6055 loss=0.1517 reg loss=0.4539 accuracy=0.8364
# Epoch 15/15 time=17.67 mins: step 11070 total loss=0.5989 loss=0.1455 reg loss=0.4534 accuracy=0.8367
# Epoch 15/15 time=17.72 mins: step 11100 total loss=0.5310 loss=0.0780 reg loss=0.4530 accuracy=0.8371
# Epoch 15/15 time=17.76 mins: step 11130 total loss=0.5101 loss=0.0573 reg loss=0.4527 accuracy=0.8374
# Epoch 15/15 time=17.81 mins: step 11160 total loss=0.5274 loss=0.0752 reg loss=0.4523 accuracy=0.8377
# Epoch 15/15 time=17.85 mins: step 11190 total loss=0.6009 loss=0.1489 reg loss=0.4520 accuracy=0.8380
# Epoch 15/15 time=17.89 mins: step 11220 total loss=0.5793 loss=0.1273 reg loss=0.4520 accuracy=0.8383
# Epoch 15/15 time=17.94 mins: step 11250 total loss=0.5895 loss=0.1372 reg loss=0.4523 accuracy=0.8386
# Epoch 15/15 time=17.98 mins: step 11280 total loss=0.8106 loss=0.3584 reg loss=0.4522 accuracy=0.8389
# Epoch 15/15 time=18.03 mins: step 11310 total loss=0.6082 loss=0.1561 reg loss=0.4521 accuracy=0.8392
# Epoch 15/15 time=18.07 mins: step 11340 total loss=0.5757 loss=0.1239 reg loss=0.4518 accuracy=0.8395
# Epoch 15/15 time=18.11 mins: step 11370 total loss=0.5238 loss=0.0723 reg loss=0.4515 accuracy=0.8398
# Epoch 15/15 time=18.15 mins: step 11400 total loss=0.7250 loss=0.2735 reg loss=0.4515 accuracy=0.8401
# Epoch 15/15 time=18.20 mins: step 11430 total loss=0.5691 loss=0.1169 reg loss=0.4521 accuracy=0.8404
# Epoch 15/15 time=18.24 mins: step 11460 total loss=0.5734 loss=0.1213 reg loss=0.4521 accuracy=0.8407
# Epoch 15/15 time=18.28 mins: step 11490 total loss=0.6202 loss=0.1681 reg loss=0.4522 accuracy=0.8409
# Epoch 15/15 time=18.34 mins: step 11520 total loss=0.5869 loss=0.1354 reg loss=0.4515 accuracy=0.8412
# Epoch 15/15 time=18.38 mins: step 11550 total loss=0.5337 loss=0.0830 reg loss=0.4507 accuracy=0.8415
# Epoch 15/15 time=18.42 mins: step 11580 total loss=0.5174 loss=0.0671 reg loss=0.4502 accuracy=0.8418
# Epoch 15/15 time=18.47 mins: step 11610 total loss=0.5217 loss=0.0718 reg loss=0.4499 accuracy=0.8422
# Epoch 15/15 time=18.51 mins: step 11640 total loss=0.5087 loss=0.0591 reg loss=0.4496 accuracy=0.8424
# Epoch 15/15 time=18.56 mins: step 11670 total loss=0.5671 loss=0.1181 reg loss=0.4490 accuracy=0.8428
# Epoch 15/15 time=18.60 mins: step 11700 total loss=0.4862 loss=0.0375 reg loss=0.4487 accuracy=0.8430
# saved the final model to testi_1_Final\model.ckpt-11715
# accuracy_val_value:  0.75865847
# (the captured console log above overlapped the '\r' progress line
# "...processed 10049 samples" with this final value)