seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
#Dependencies
"""
!pip install -U sklearn
!pip install pmdarima
!pip install river
!pip install tslearn
!pip install arch
!pip install skorch
"""
#Imports
import json
import math
import calendar
from datetime import timedelta
from datetime import datetime as dt
import numpy as np
import pandas as pd
import warnings
import os
import sys
warnings.simplefilter('ignore')
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import mlflow
import mlflow.statsmodels
from mlflow import log_metric, log_param, log_metrics, log_params, log_artifact, log_artifacts
import optuna
from urllib.parse import urlparse
from pmdarima.arima import auto_arima as pmautoarima
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX as SARIMA
import statsmodels.api as sm
import statsmodels.graphics as smg
import statsmodels.stats as sm_stats
import statsmodels.tsa.api as tsa
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import pickle
import joblib
import sklearn
from sklearn import preprocessing
from sklearn.svm import SVR
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, mean_absolute_percentage_error
import torch
import torch.nn as nn
import torch.optim as optim
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras import layers
from river import drift
#visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
import logging
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def get_data(folder, filename):
    """Load a time-series CSV from ./data/<folder>/<filename>.

    Parameters
    ----------
    folder : str
        Dataset sub-directory; only 'covid' is currently supported.
    filename : str
        CSV file name; its first column becomes the index.

    Returns
    -------
    pandas.DataFrame
        At most the first 710 rows (truncates the covid series to the
        fixed study period).

    Raises
    ------
    ValueError
        For an unknown `folder` (the original silently fell through and
        raised UnboundLocalError on `return df`).
    """
    if folder == 'covid':
        df = pd.read_csv('./data/'+folder+'/'+filename, index_col=0)
        #df = df.loc[df.index < '2022-01-01']
        df = df.iloc[:710]
        return df
    raise ValueError(f"unknown data folder: {folder!r}")
def fix_outliers(df, q=.99, zero=True):
    """Clean a case-count series: trim the flat lead-in, then clamp outliers.

    Parameters
    ----------
    df : pandas.Series
        Daily values (element-wise operations; a Series is expected).
    q : float, default 0.99
        Quantile above which values are treated as outliers.
    zero : bool, default True
        When False, zeros are also treated as missing and back-filled.

    Returns
    -------
    pandas.Series
        Series starting at the first increase, with extreme values
        back-filled and negatives (or trailing NaNs) replaced by 0.
    """
    # cut the initial period with no cases: keep from the first rise
    for k, d in enumerate(df):
        if k > 0 and d > df.iloc[k-1]:
            break
    df = df.iloc[k:]
    # optionally treat zeros as missing and back-fill them
    # (.bfill() replaces the deprecated fillna(method='bfill'))
    if zero == False:
        df = df.mask(df == 0, np.nan).bfill()
    # convert extreme values to NaN and replace with the next valid value
    df = df.mask(df > df.quantile(q), np.nan).bfill()
    # negative counts are impossible: zero them out; this fillna(0) also
    # fills any trailing NaN left by the back-fill above
    df = df.mask(df < 0, np.nan).fillna(0)
    return df
def train_val_test_split(data, train_size:float, val_size:float):
train_data = data.iloc[:int(len(data)*train_size)]
val_data = data.iloc[int(len(data)*train_size):int(len(data)*(train_size+val_size))]
test_data = data.iloc[-int(len(data)*(1-train_size-val_size)):]
return train_data, val_data, test_data
def draw_data(country, train, val, test):
    """Plot the train/val/test partitions in shades of gray and save the
    figure to outputs/<country>/data.png; always returns True."""
    fig, ax = plt.subplots()
    for series, shade in ((train, 'black'), (val, 'darkgray'), (test, 'lightgray')):
        ax.plot(series, color=shade)
    plt.savefig('outputs/'+country+'/data.png')
    # Log an artifact (output file)
    return True
def get_drifts(data, col, detector='adwin'):
    """Detect concept-drift points in column `col` of `data` and split the
    series into per-regime segments.

    Parameters
    ----------
    data : pandas.DataFrame
        Series data; sliced by label at the detected drift points.
    col : hashable
        Column whose values are fed to the drift detector.
    detector : str, default 'adwin'
        Only 'adwin' (river's ADWIN with delta=0.001) is implemented.

    Returns
    -------
    (drifts, drift_data) : (list, dict)
        `drifts` holds the index labels where a drift fired; `drift_data`
        maps segment number -> slice of `data` between consecutive drifts.

    NOTE(review): when exactly ONE drift is detected, only the `key == 0`
    branch runs, so the data after that single drift point is never added
    to `drift_data` — confirm whether the tail should be kept.
    """
    if (detector == 'adwin'):
        drift_detector = drift.ADWIN(delta=0.001)
    data_diff = data.to_dict()[col]
    drifts =[]
    drift_data = {}
    # feed values one by one; ADWIN flags the index where the stream's
    # distribution changes
    for k in data_diff:
        #print(k)
        in_drift, in_warning = drift_detector.update(data_diff[k])
        if in_drift:
            drifts.append(k)
            #print(f"Change detected at index {k}, input value: {data[k]}")
    # partition the series at the detected drift points
    for key, drift_point in enumerate(drifts):
        if key == 0:
            drift_data[key] = data[:drift_point]
        elif key == len(drifts)-1:
            drift_data[key] = data[drifts[key-1]:drift_point]
            drift_data[key+1] = data[drift_point:]
        else:
            drift_data[key] = data[drifts[key-1]:drift_point]
    return drifts, drift_data
def draw_drifts(country, drifts, drift_data, train_data):
    """Plot the drift segments (top axis) with red bars at the drift
    points, plus the cumulative training series (bottom axis).

    Saves the figure to outputs/<country>/drifts.png and returns True.
    `drifts` / `drift_data` are the outputs of get_drifts.
    """
    fig, (ax1, ax2) = plt.subplots(2,1,figsize=(18,8), sharex=True)
    # one line per detected regime segment; back-fill NaNs so each
    # segment draws contiguously
    for d in range(len(drift_data)):
        ax1.plot(drift_data[d].fillna(method='bfill'))
    #print(drifts)
    #print(train_data.loc[drifts].values[0])
    #print(train_data.loc[drifts].values[0,:])
    #print(train_data.loc[drifts].values[:,0])
    # red bars mark each drift point at the height of the observed value
    ax1.bar(x=train_data.loc[drifts].index, height=train_data.loc[drifts].values[:,0], width=2, color='r')
    #ax1.annotate(train_data.loc[drifts].index, xy=(10, -100), xycoords='axes points', xytext=(train_data.loc[drifts].index, -150), textcoords='data')
    """
    for drift_point in pd.to_datetime(drifts, format="%m/%d/%y"):
        print(drift_point)
        print(drift_point.date())
        #ax1.annotate(k, xy=(10, 100), xycoords='axes points', xytext=(drift_point, -10), textcoords='data')
        #ax1.annotate(drift_point.date().strftime('%Y-%m-%d'), xy=(10, -100), xycoords='axes points', xytext=(drift_point-delta(days=10), -150), textcoords='data', rotation=90)
        ax1.annotate(drift_point.date(), xy=(10, -100), xycoords='axes points', xytext=(train_data.loc[drifts].index, -150), textcoords='data')
        #ax1.annotate(k, xy=(10, -5000), xycoords='axes points', xytext=(dt.strptime(drift_point, "%m/%d/%y")-timedelta(days=20), -5500), textcoords='data')
    """
    ax2.plot(train_data.cumsum())
    plt.savefig('outputs/'+country+'/drifts.png')
    # Log an artifact (output file)
    return True
def random_walk(data):
    """Naive one-step-ahead forecast: yesterday's value predicts today.

    Shifts the series forward by one step and drops the leading NaN so
    the returned values align with the dates they predict.
    """
    shifted = data.shift(periods=1)
    return shifted.dropna()
def train_arima(train_data, sarima=False):
    """Fit an (S)ARIMA model whose order is selected by pmdarima.

    Parameters
    ----------
    train_data : series of training observations.
    sarima : bool, default False
        When True, search with a weekly (m=7) seasonal component and pass
        the seasonal order to the statsmodels ARIMA.

    Returns
    -------
    A fitted statsmodels ARIMA results object.
    """
    seasonal = (sarima == True)
    # stepwise AIC-driven order search (max_p/max_q capped at 7)
    order_search = pmautoarima(train_data, max_p=7, d=1, max_q=7, m=7,
                               seasonal=seasonal, trace=True,
                               information_criterion='aic',
                               suppress_warnings=True, maxiter=50,
                               stepwise=True)
    if seasonal:
        arima_base = ARIMA(train_data, order=order_search.order,
                           seasonal_order=order_search.seasonal_order)
        #log_param("order", order_search.order)
        #log_param("seasonal_order", order_search.seasonal_order)
    else:
        arima_base = ARIMA(train_data, order=order_search.order)
        #log_param("order", order_search.order)
    return arima_base.fit()
def update_arima(model, test_data):
    """Carry a fitted statsmodels result over to new observations.

    `apply` re-uses the estimated parameters on `test_data` (no refit),
    returning a results object for predicting on the new sample.
    """
    return model.apply(test_data)
class ELM():
    """Extreme Learning Machine regressor (single random hidden layer).

    The input-to-hidden weights (`_alpha`) and the bias are fixed at
    construction; `fit` solves the hidden-to-output weights (`_beta`) in
    closed form with a Moore-Penrose pseudo-inverse.

    Parameters
    ----------
    input_size : int   number of input features (lag count)
    h_size : int       hidden-layer width
    activation : str   'tanh', 'relu' or 'sigmoid'
    device : torch device or None
    """
    def __init__(self, input_size, h_size, activation, device=None):
        self._input_size = input_size
        self._h_size = h_size
        self._output_size = 1
        self._device = device
        self.activation_name = activation
        # random fixed input weights in [-1, 1); beta is overwritten by fit
        self._alpha = nn.init.uniform_(
            torch.empty(self._input_size, self._h_size, device=self._device),
            a=-1., b=1.)
        self._beta = nn.init.uniform_(
            torch.empty(self._h_size, self._output_size, device=self._device),
            a=-1., b=1.)
        self._bias = torch.zeros(self._h_size, device=self._device)
        known = {'tanh': torch.tanh, 'relu': torch.relu,
                 'sigmoid': torch.sigmoid}
        # unknown names leave _activation unset (AttributeError on use,
        # matching the original behaviour)
        if activation in known:
            self._activation = known[activation]

    def _hidden(self, x):
        # hidden-layer response for a batch of inputs
        return self._activation(torch.add(x.mm(self._alpha), self._bias))

    def predict(self, x):
        """Return model outputs for an (n, input_size) float tensor."""
        return self._hidden(x).mm(self._beta)

    def fit(self, x, t):
        """Solve beta so that hidden(x) @ beta ≈ t (least squares)."""
        H = self._hidden(x)
        self._beta = torch.pinverse(H).mm(t)
def splitter(data, lags):
    """Build a supervised-learning frame from a series.

    Uses statsmodels' add_lag: column 0 (the current value) is popped off
    as the target `y`; the remaining `lags` columns form the features `X`.
    Returns (X, y).
    """
    lagged = pd.DataFrame(tsa.add_lag(data, lags=lags))
    target = lagged.pop(0)
    return lagged, target
def normalizer(input, fit=True, transform=1):
    """Min-max scale `input` to [-1, 1], or apply an existing scaler.

    Overloaded behaviour (kept as-is for existing callers):
    - fit is True          -> fit a fresh MinMaxScaler on `input` and
                              return (scaler, scaled_data).
    - fit is a fitted scaler -> return only the transformed data;
                              transform == -1 applies the INVERSE
                              transform instead of the forward one.
    """
    if fit == True:
        scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
        return scaler, scaler.fit_transform(input)
    if transform == -1:
        return fit.inverse_transform(input)
    return fit.transform(input)
def torcher(data):
    """Convert a NumPy array into a float32 torch tensor."""
    tensor = torch.from_numpy(data)
    return tensor.float()
def torch_data(train_data, val_data, test_data, lags):
    """Turn the three series into lagged torch tensors.

    Each split is expanded into `lags` lag columns via tsa.add_lag, with
    column 0 popped off as the target. Features are min-max scaled using
    the range of the TRAINING data only, so validation/test stay on the
    training scale; targets are left unscaled.

    Returns X_train, y_train, X_val, y_val, X_test, y_test with X_* as
    scaled float tensors and y_* as float column tensors.
    """
    def lag_frame(series):
        # supervised frame: remaining columns are the lag features
        frame = pd.DataFrame(tsa.add_lag(series, lags=lags))
        target = torch.from_numpy(frame.pop(0).values.reshape(-1, 1)).float()
        return frame, target

    X_train, y_train = lag_frame(train_data)
    X_val, y_val = lag_frame(val_data)
    X_test, y_test = lag_frame(test_data)

    # min-max normalisation anchored on the training range (ELM inputs)
    low = float(train_data.min())
    span = float(train_data.max()) - float(train_data.min())

    def scale(frame):
        return torch.from_numpy(((frame - low) / span).to_numpy()).float()

    return scale(X_train), y_train, scale(X_val), y_val, scale(X_test), y_test
def objective_elm(trial):
    """Optuna objective for the ELM.

    Since ELM input weights are random, each configuration is trained 10
    times and the mean of the negated validation MSE is returned (the
    study maximises, so less error is better). Relies on module-level
    globals X_train_mm, y_train, X_val_mm, y_val set up in main().
    """
    h_size = trial.suggest_categorical('h_size', [8, 16, 32, 64, 100, 200])
    activation = trial.suggest_categorical("activation", ["sigmoid", "tanh", "relu"])
    scores = np.zeros(10)
    for rep in range(10):
        candidate = ELM(input_size=14, h_size=h_size,
                        activation=activation, device=device)  # variable input-size (FIX)
        candidate.fit(X_train_mm, y_train)
        preds = candidate.predict(X_val_mm)
        scores[rep] = -mean_squared_error(y_val, preds)
    return scores.mean()
def train_elm(x, y):
    """Tune the ELM hyper-parameters with Optuna (50 trials) and fit the
    best configuration on (x, y); returns the fitted ELM."""
    study = optuna.create_study(direction="maximize", study_name='elm_study')
    study.optimize(objective_elm, n_trials=50, show_progress_bar=True)
    best = study.best_params
    model = ELM(input_size=14, h_size=best['h_size'],
                activation=best['activation'], device=device)  # variable input-size (FIX)
    model.fit(x, y)
    return model
def objective_svm(trial):
    """Optuna objective for the SVR pool member.

    Scores a candidate with negated RMSE from a 2-fold time-series
    cross-validation over the concatenated train+val data. Uses the
    module-level globals X_train_mm, X_val_mm, y_train, y_val.
    """
    params = {
        'kernel': trial.suggest_categorical('kernel',['linear', 'rbf']),
        'gamma': trial.suggest_categorical("gamma", [1, 0.1, 0.01, 0.001]),  # alternatives: auto, scale
        'C': trial.suggest_categorical("C", [0.1, 1, 100, 1000, 10000]),
        'epsilon': trial.suggest_categorical("epsilon", [0.1, 0.01, 0.001]),
        'tol': trial.suggest_categorical("tolerance", [0.01, 0.001, 0.0001]),
    }
    candidate = sklearn.svm.SVR(**params)
    X_all = np.concatenate((X_train_mm, X_val_mm))
    y_all = np.concatenate((y_train, y_val))
    scores = sklearn.model_selection.cross_val_score(
        candidate, X_all, y_all,
        n_jobs=-1, cv=TimeSeriesSplit(2, test_size=len(y_val)//2),
        scoring='neg_root_mean_squared_error')
    return scores.mean()
def train_svm(x, y):
    """Tune SVR hyper-parameters with Optuna (20 trials) and refit the
    best configuration on (x, y); returns the fitted SVR.

    NOTE: the tuned gamma is not passed to the final model (the sklearn
    default applies), matching the original behaviour.
    """
    study = optuna.create_study(direction="maximize", study_name='svr_study')
    study.optimize(objective_svm, n_trials=20, show_progress_bar=True)
    best = study.best_params
    model = SVR(kernel=best['kernel'], C=best['C'],
                epsilon=best['epsilon'], tol=best['tolerance'])
    model.fit(x, y)
    return model
def objective_lstm(trial):
    """Optuna objective for the LSTM: negated mean validation loss over
    three training repetitions.

    Builds a single-layer LSTM (tuned width) with a one-unit Dense head
    (tuned activation), then compiles and fits it three times on the
    module-level globals X_train_mm / y_train with early stopping on
    val_loss, scoring each repetition on (X_val_mm, y_val).

    NOTE(review): the SAME model instance is re-compiled and re-fit in
    every repetition, so later repetitions continue from the previous
    weights instead of restarting from scratch — confirm intended.
    """
    model = Sequential()
    model.add(layers.LSTM(units= trial.suggest_categorical('units', [8, 16, 32, 64, 100, 200]), input_shape=(14, 1)))
    #model.add(layers.LSTM(100, input_shape=(14, 1), dropout=0.2, return_sequences=True))
    #model.add(layers.Dropout(0.2))
    #model.add(layers.LSTM(100, dropout=0.2))
    #model.add(layers.Dropout(0.2))
    model.add(layers.Dense(1, activation=trial.suggest_categorical('activation', ['relu', 'linear', 'tanh']),) )
    #model.add(layers.Dense(1, activation='linear') )
    score = np.zeros(3)
    for i in range(3):
        # We compile our model with a sampled learning rate.
        model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='mse')
        model.fit(
            X_train_mm,
            y_train,
            validation_data=(X_val_mm, y_val),
            shuffle=False,
            batch_size=32,
            epochs=50,
            verbose=False,
            callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)]
        )
        # Evaluate the model accuracy on the validation set.
        score[i] = model.evaluate(X_val_mm, y_val, verbose=0)
    return -score.mean()
def lstm(input_size, units, activation):
    """Build and compile a single-layer LSTM regressor.

    Architecture: LSTM(units) over (input_size, 1) sequences feeding a
    one-unit Dense head; compiled with Adam(lr=0.001) and MSE loss.
    """
    net = Sequential()
    net.add(layers.LSTM(units=units, input_shape=(input_size, 1)))
    net.add(layers.Dense(1, activation=activation))
    net.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='mse')
    return net
def train_lstm(x, y):
    """Tune the LSTM with Optuna (20 trials), log the winning parameters
    to MLflow, then train the best architecture with early stopping.

    Uses the module-level globals X_val_mm / y_val as the validation
    split; returns the trained Keras model.
    """
    study = optuna.create_study(direction="maximize", study_name='lstm_study')
    study.optimize(objective_lstm, n_trials=20, show_progress_bar=True)
    best = study.best_params
    log_params(best)
    model = lstm(input_size=14, units=best['units'],
                 activation=best['activation'])  # variable input-size (FIX)
    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=3, restore_best_weights=True)
    model.fit(x, y,
              validation_data=(X_val_mm, y_val),
              shuffle=False,
              batch_size=32,
              epochs=100,
              verbose=False,
              callbacks=[early_stop])
    return model
def eval_metrics(actual, pred):
    """Compute RMSE, MAE, MAPE and R2 for a forecast.

    MAPE is computed only on rows where the first column of `actual` is
    strictly positive (zero actuals make percentage error undefined),
    with `pred` aligned to those rows by index.

    Returns a dict with keys 'rmse', 'mae', 'mape', 'r2'.
    """
    rmse_val = mean_squared_error(actual, pred, squared=False)
    mae_val = mean_absolute_error(actual, pred)
    r2_val = r2_score(actual, pred)
    # drop zero-valued actuals before MAPE (division by zero otherwise)
    positive = actual.loc[actual[actual.columns[0]] > 0]
    aligned = pred.loc[positive.index]
    mape_val = mean_absolute_percentage_error(positive, aligned)
    return {'rmse': rmse_val, 'mae': mae_val, 'mape': mape_val, 'r2': r2_val}
def draw_predictions(country, predictions, true):
    """Plot forecast vs. observed values (indices reset so the two line
    up positionally) and save to outputs/<country>/prediction.png."""
    fig, ax = plt.subplots(figsize=(10,5))
    for series, tag in ((predictions, 'pred'), (true, 'true')):
        ax.plot(series.reset_index(drop=True), label=tag)
    plt.legend()
    plt.savefig('outputs/'+country+'/prediction.png')
def get_oracle(predictions, true):
    """Build the 'oracle' forecast: at every step pick, with hindsight,
    the pool member whose prediction was closest to the truth.

    Parameters
    ----------
    predictions : pandas.DataFrame
        One column per pool model; the first row is skipped.
    true : observed values aligned with `predictions`.

    Returns
    -------
    (selection, oracle) : (list, pandas.Series)
        `selection` lists the winning column label per step; `oracle`
        holds the corresponding predicted values.
    """
    abs_errors = predictions.iloc[1:].rsub(np.array(true.iloc[1:]), axis=0).abs()
    winners = abs_errors.rank(axis=1).idxmin(axis=1)
    oracle = {}
    selection = []
    for step, column in winners.items():
        oracle[step] = predictions.at[step, column]
        selection.append(column)
    return selection, pd.Series(oracle)
def post_forecast(preds):
    """Clamp forecasts at zero — negative case counts are impossible.
    NaNs pass through unchanged (same as the original mask-based form)."""
    return preds.clip(lower=0)
def log_arts(country, model):
    """Push the standard per-run artifacts (data directory, per-model
    prediction directory and the prediction plot) to the active MLflow
    run; always returns True."""
    base = "outputs/" + country
    log_artifacts(base + "/data")
    log_artifacts(base + "/preds/" + model)
    log_artifact(base + '/prediction.png')
    return True
def main():
exp = "codrift_220518"
mlflow.set_experiment(exp)
if sys.argv[1] == 'help':
print('models: rw, arima, sarima, AS, ASDS, ASO...')
country = sys.argv[1]
model = sys.argv[2]
split = sys.argv[3]
lags = int(sys.argv[4])
size = int(sys.argv[5])
if (model == 'ASDS' or model == 'AEDS' or model == 'ASVDS'):
K = int(sys.argv[6])
rname = country+'.'+model+'.'+split+'.'+str(lags)+'.'+str(size)+'.'+str(K)
else:
rname = country+'.'+model+'.'+split
print(exp)
print(rname)
with mlflow.start_run(run_name=rname):
if not os.path.exists("outputs/"+country+"/data"):
os.makedirs("outputs/"+country+"/data")
if not os.path.exists("outputs/"+country+"/preds/"+model):
os.makedirs("outputs/"+country+"/preds/"+model)
if not os.path.exists("models/"+country):
os.makedirs("models/"+country)
data = get_data('covid', country+'_daily.csv')
data = fix_outliers(data)
train_data, val_data, test_data = train_val_test_split(data, 0.7, 0.2)
#print('train: ', train_data.head(1), train_data.tail(1), len(train_data) )
#print('val: ', val_data.head(1), val_data.tail(1), len(val_data) )
#print('test: ', test_data.head(1), test_data.tail(1), len(test_data) )
train_data.to_csv("outputs/"+country+"/data/train_data.csv")
val_data.to_csv("outputs/"+country+"/data/val_data.csv")
test_data.to_csv("outputs/"+country+"/data/test_data.csv")
draw_data(country, train_data, val_data, test_data)
log_artifact('outputs/'+country+'/data.png')
global X_train_mm
global y_train
global X_val_mm
global y_val
global X_test_mm
global y_test
X_train_mm = 0
y_train = 0
X_val_mm = 0
y_val = 0
X_test_mm = 0
y_test = 0
X, y = splitter(data, lags)
sz = len(X)
X_train, y_train = X[:int(sz*0.7)], y[:int(sz*0.7)]
X_val, y_val = X[int(sz*0.7):int(sz*0.9)], y[int(sz*0.7):int(sz*0.9)]
X_test, y_test = X[int(sz*0.9)-1:], y[int(sz*0.9)-1:]
if not os.path.exists("models/"+country+"/normx.pkl"):
normx, X_train_mm = normalizer(X_train)
normy, y_train_mm = normalizer(y_train.values.reshape(-1,1))
joblib.dump(normx, "models/"+country+"/normx.pkl")
joblib.dump(normy, "models/"+country+"/normy.pkl")
else:
normx = joblib.load("models/"+country+"/normx.pkl")
normy = joblib.load("models/"+country+"/normy.pkl")
X_val_mm = normalizer(X_val, normx)
X_test_mm = normalizer(X_test, normx)
y_val_mm = normalizer(y_val.values.reshape(-1,1), normy)
y_test_mm = normalizer(y_test.values.reshape(-1,1), normy)
if model == 'rw':
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
#Baseline
if split == 'val':
y_pred = post_forecast(random_walk(val_data))
metrics = eval_metrics(val_data[1:], y_pred)
draw_predictions(country, y_pred, val_data)
elif split == 'test':
y_pred = post_forecast(random_walk(test_data))
metrics = eval_metrics(test_data.iloc[size+1:], y_pred.iloc[size:])
draw_predictions(country, y_pred, test_data)
mlflow.set_tags({'data': country, 'split': split, 'model': model})
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_metrics(metrics)
log_arts(country,model)
mlflow.end_run()
if model == 'arima':
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
if not os.path.exists("models/"+country+"/arima.pkl"):
arima = train_arima(train_data)
arima.save("models/"+country+"/arima.pkl")
else:
arima = sm.load("models/"+country+"/arima.pkl")
log_params(arima.specification)
with open("outputs/"+country+"/arima.txt", "w") as f:
f.write(arima.summary().as_text())
log_artifact("outputs/"+country+"/arima.txt")
if split == 'val':
arima = update_arima(arima, val_data)
y_pred = post_forecast(arima.predict())
metrics = eval_metrics(val_data, y_pred)
draw_predictions(country, y_pred, val_data)
elif split == 'test':
arima = update_arima(arima, test_data)
y_pred = post_forecast(arima.predict())
#metrics = eval_metrics(y_pred, test_data)
test_data.index = y_pred.index
print('mape', mean_absolute_percentage_error(test_data, y_pred))
metrics = eval_metrics(test_data.iloc[7:], y_pred.iloc[7:])
draw_predictions(country, y_pred, test_data)
log_metrics(metrics)
log_params(arima.specification)
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'size': size})
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_arts(country,model)
mlflow.end_run()
if model == 'sarima':
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
if not os.path.exists("models/"+country+"/sarima.pkl"):
sarima = train_arima(train_data, sarima=True)
sarima.save("models/"+country+"/sarima.pkl")
else:
sarima = sm.load("models/"+country+"/sarima.pkl")
log_params(sarima.specification)
if split == 'val':
sarima = update_arima(sarima, val_data)
y_pred = post_forecast(sarima.predict())
metrics = eval_metrics(val_data, y_pred)
draw_predictions(country, y_pred, val_data)
elif split == 'test':
sarima = update_arima(sarima, test_data)
y_pred = post_forecast(sarima.predict())
metrics = eval_metrics(test_data.iloc[7:], y_pred.iloc[7:])
draw_predictions(country, y_pred, test_data)
log_metrics(metrics)
log_params(sarima.specification)
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'size': size})
with open("outputs/"+country+"/sarima.txt", "w") as f:
f.write(sarima.summary().as_text())
log_artifact("outputs/"+country+"/sarima.txt")
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_arts(country,model)
mlflow.end_run()
if model == 'svm':
#norm, X_train_mm = normalizer(X_train)
#X_val_mm = normalizer(X_val, norm)
X_train_mm = normalizer(X_train, normx)
y_train = normalizer(y_train.values.reshape(-1,1), normy).ravel()
X_val_mm = normalizer(X_val, normx)
y_val = normalizer(y_val.values.reshape(-1,1), normy).ravel()
X_test_mm = normalizer(X_test, normx)
#print(X_train_mm)
#print(y_train)
#print(np.concatenate((X_train_mm, X_val_mm), axis=0), np.concatenate((y_train, y_val), axis=0))
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
if not os.path.exists("models/"+country+"/svm.pkl"):
svm = train_svm(X_train_mm, y_train)
joblib.dump(svm, "models/"+country+"/svm.pkl")
else:
svm = joblib.load("models/"+country+"/svm.pkl")
if split == 'test':
#y_pred = normalizer(svm.predict(X_test_mm).reshape(-1,1), normy, -1).flatten()
y_pred = svm.predict(X_test_mm).reshape(-1,1).flatten()
y_pred = post_forecast(pd.DataFrame(y_pred))
#y_pred = post_forecast(pd.DataFrame(svm.predict(X_test_mm)))
y_test = pd.DataFrame(y_test.reset_index(drop=True))
metrics = eval_metrics(y_test.iloc[7:], y_pred.iloc[7:])
draw_predictions(country, y_pred, y_test)
log_metrics(metrics)
log_params(svm.get_params())
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'size': size})
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_arts(country,model)
mlflow.end_run()
if model == 'elm':
#X_train_mm, y_train, X_val_mm, y_val, X_test_mm, y_test = torch_data(train_data, val_data, test_data, lags)
#norm, X_train_mm = normalizer(X_train)
#X_val_mm = normalizer(X_val, norm)
X_train_mm = normalizer(X_train, normx)
y_train_mm = normalizer(y_train.values.reshape(-1,1), normy)
X_val_mm = normalizer(X_val, normx)
y_val_mm = normalizer(y_val.values.reshape(-1,1), normy)
X_test_mm = normalizer(X_test, normx)
X_train_mm, y_train, X_val_mm, y_val = torcher(X_train_mm), torcher(y_train_mm.reshape(-1,1)), torcher(X_val_mm), torcher(y_val_mm.reshape(-1,1))
#X_train_mm, y_train, X_val_mm, y_val = torcher(X_train_mm), torcher(y_train.values.reshape(-1,1)), torcher(X_val_mm), torcher(y_val.values.reshape(-1,1))
X_test_mm, y_test= torcher(X_test_mm), torcher(y_test.values.reshape(-1,1))
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
if not os.path.exists("models/"+country+"/elm.pkl"):
elm = train_elm(X_train_mm, y_train)
torch.save(elm ,"models/"+country+"/elm.pkl")
else:
elm = torch.load("models/"+country+"/elm.pkl")
if split == 'test':
y_pred = normalizer(elm.predict(X_test_mm).numpy().reshape(-1,1), normy, -1).flatten()
y_pred = post_forecast(pd.Series(y_pred)).reset_index(drop=True)
#y_pred = post_forecast(pd.Series(elm.predict(X_test_mm).numpy().flatten()))
y_test = pd.DataFrame(y_test.numpy()).reset_index(drop=True)
metrics = eval_metrics(y_test.iloc[7:], y_pred.iloc[7:])
draw_predictions(country, y_pred, y_test)
log_metrics(metrics)
log_params({'h_size': elm._h_size})
#log_params({'h_size': elm._h_size, 'activation' :elm.activation_name})
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'size': size})
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_arts(country,model)
mlflow.end_run()
if model == 'lstm':
#X_train_mm, y_train, X_val_mm, y_val, X_test_mm, y_test = torch_data(train_data, val_data, test_data, lags)
#norm, X_train_mm = normalizer(X_train)
#X_val_mm = normalizer(X_val, norm)
X_train_mm = normalizer(X_train, normx)
y_train = normalizer(y_train.values.reshape(-1,1), normy)
X_val_mm = normalizer(X_val, normx)
y_val = normalizer(y_val.values.reshape(-1,1), normy)
pd.DataFrame(X_train_mm).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_input_train.csv')
pd.DataFrame(y_train).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_output_train.csv')
X_test_mm = normalizer(X_test, normx)
#X_train_mm, y_train, X_val_mm, y_val = torcher(X_train_mm), torcher(y_train_mm.reshape(-1,1)), torcher(X_val_mm), torcher(y_val_mm.reshape(-1,1))
#X_train_mm, y_train, X_val_mm, y_val = torcher(X_train_mm), torcher(y_train.values.reshape(-1,1)), torcher(X_val_mm), torcher(y_val.values.reshape(-1,1))
#X_test_mm, y_test= torcher(X_test_mm), torcher(y_test.values.reshape(-1,1))
#with mlflow.start_run(run_name=country+'.'+model+'.'+split):
if not os.path.exists("models/"+country+"/lstm"):
lstm = train_lstm(X_train_mm, y_train)
lstm.save("models/"+country+"/lstm")
else:
lstm = keras.models.load_model("models/"+country+"/lstm")
if split == 'test':
pd.DataFrame(X_test_mm).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_input_test.csv')
y_pred = lstm.predict(X_test_mm)
pd.DataFrame(lstm.predict(X_test_mm)).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_output_test.csv')
y_pred = normalizer(y_pred.reshape(-1,1), normy, -1).flatten()
y_pred = post_forecast(pd.Series(y_pred)).reset_index(drop=True)
#y_pred = post_forecast(pd.Series(elm.predict(X_test_mm).numpy().flatten()))
y_test = pd.DataFrame(y_test).reset_index(drop=True)
metrics = eval_metrics(y_test.iloc[7:], y_pred.iloc[7:])
draw_predictions(country, y_pred, y_test)
log_metrics(metrics)
#log_params()
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'size': size})
pd.DataFrame(y_pred).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_arts(country,model)
mlflow.end_run()
#ADWIN-SARIMA (AS)
if model == "AS":
detector = 'adwin'
drifts, drift_data = get_drifts(train_data, country, detector=detector)
draw_drifts(country, drifts, drift_data, train_data)
log_artifact('outputs/'+country+'/drifts.png')
if not os.path.exists("models/"+country+"/sarimas"):
os.makedirs("models/"+country+"/sarimas")
sarimas = {}
for k, dft in drift_data.items():
try:
sarimas[k] = sm.load("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
except:
try:
sarima = train_arima(dft, sarima=True)
sarima.save("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
sarimas[k] = sm.load("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
except:
k -= 1
if split == 'val':
sarimas[k] = update_arima(sarimas[k], val_data)
elif split == 'test':
sarimas[k] = update_arima(sarimas[k], test_data)
preds = {}
for k, m in sarimas.items():
with mlflow.start_run(run_name=country+'.'+model+'.'+split+'.'+str(k), nested=True):
preds[k] = m.predict()
log_params(m.specification)
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'submodel': k, 'drift': detector})
if split == 'val':
metrics = eval_metrics(val_data, preds[k])
elif split == 'test':
metrics = eval_metrics(test_data.iloc[size:], preds[k].iloc[size:])
log_metrics(metrics)
pd.DataFrame(preds).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_artifacts("outputs/"+country+"/data")
log_artifacts("outputs/"+country+"/preds/"+model)
mlflow.end_run()
#ADWIN-SARIMA-ORACLE (ASO)
if model == "ASO":
submodel = "AS"
detector = 'adwin'
if not os.path.exists("outputs/"+country+"/preds/"+submodel):
print('execute o modelo AS antes e depois tente novamente')
else:
preds = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
if split == 'val':
true = pd.read_csv("outputs/"+country+"/data/val_data.csv", index_col=0, parse_dates=True)
elif split == 'test':
true = pd.read_csv("outputs/"+country+"/data/test_data.csv", index_col=0, parse_dates=True)
#print(preds)
#print(true)
#oracle = get_oracle(preds, true)
print(preds.shape)
print(true.shape)
best, oracle = get_oracle(preds, true)
pd.Series(best).to_csv("outputs/"+country+"/preds/"+model+"/best.csv")
oracle.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
metrics = eval_metrics(true.iloc[size+1:], oracle.iloc[size:])
draw_predictions(country, oracle, true)
log_metrics(metrics)
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size})
log_arts(country,model)
mlflow.end_run()
#ADWIN-SARIMA-DYNAMIC-SELECTION (ASDS)
if model == "ASDS":
submodel = "AS"
size = int(sys.argv[5])
K = int(sys.argv[6])
lags = int(sys.argv[4])
if not os.path.exists("outputs/"+country+"/preds/"+submodel):
print('execute o modelo AS antes, e tente novamente')
else:
sarimas = {}
detector = 'adwin'
drifts, drift_data = get_drifts(train_data, country, detector=detector)
draw_drifts(country, drifts, drift_data, train_data)
log_artifact('outputs/'+country+'/drifts.png')
for k, dft in drift_data.items():
try:
sarimas[k] = sm.load("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
except:
try:
sarima = train_arima(dft, sarima=True)
sarima.save("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
sarimas[k] = sm.load("models/"+country+"/sarimas/sarima"+str(k)+".pkl")
except:
k -= 1
if split == 'val':
sarimas[k] = update_arima(sarimas[k], val_data)
data = val_data
elif split == 'test':
sarimas[k] = update_arima(sarimas[k], test_data)
data = test_data
data.index = pd.to_datetime(data.index)
preds = {}
errors = {}
selection = {}
for w in data.rolling(window=size):
if len(w) == size:
#print(w)
first = w.index[0]
last = w.index[-1]
#print(first)
#print(last)
preds[last] = {}
errors[last] = {}
selection[last] = {}
for k, m in sarimas.items():
preds[last][k] = m.predict(start=first, end=last)
errors[last][k] = mean_squared_error(preds[last][k], w)
#print(preds)
#print(errors[last])
df_error = pd.Series(errors[last]).rank()
#print(df_error)
for i in range(K):
try:
selection[last][i] = df_error.loc[df_error == i+1].index.values[0]
except:
#print(['*']*1000)
#print(df_error.idxmin()) # solucao para ranks malucos 1.5, 2 sem 1...
selection[last][i] = df_error.idxmin()
# #print(selection[last])
#selection[last] = df_error.loc[df_error < K+1].index.values[:K]
df_selection = pd.DataFrame(selection).T
df_selection.index = pd.to_datetime(df_selection.index)
preds_all = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
preds_selection = {}
#print(preds_all)
for row in df_selection.iterrows():
preds_selection[row[0]] = preds_all.loc[row[0]].iloc[row[1]].mean()
#print(row[0])
#print(row[1])
#print(preds_all.loc[row[0]].iloc[row[1]])
preds_selection = pd.Series(preds_selection).T
#print(preds_selection)
#print(data)
#print(data.align(preds_selection, join='right', axis=0))
#metrics = eval_metrics(preds_selection, data.reindex_like(preds_selection))
metrics = eval_metrics(data.iloc[size:], preds_selection.iloc[1:])
draw_predictions(country, preds_selection, data)
log_metrics(metrics)
df_selection.to_csv("outputs/"+country+"/preds/"+model+"/selection.csv")
preds_selection.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
log_params({'pool': 'sarimas', 'window_size': size ,'K':K, 'metric':'mse', 'distance': None})
mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size, 'k': k})
log_arts(country,model)
mlflow.end_run()
# ADWIN-SVM (ASV): detect concept drifts in the training series with ADWIN,
# train one SVR per drift window, and let every pool member forecast the
# whole test horizon in its own nested MLflow run.
if model == "ASV":
    detector = 'adwin'
    # Segment the training series at the ADWIN-detected change points.
    drifts, drift_data = get_drifts(train_data, country, detector=detector)
    draw_drifts(country, drifts, drift_data, train_data)
    log_artifact('outputs/'+country+'/drifts.png')
    if not os.path.exists("models/"+country+"/svms"):
        os.makedirs("models/"+country+"/svms")
    # One SVR per drift window, cached on disk and reloaded when available.
    svms = {}
    for k, dft in drift_data.items():
        try:
            svms[k] = joblib.load("models/"+country+"/svms/svms"+str(k)+".pkl")
        except:
            try:
                X_train, y_train = splitter(dft, lags)
                X_val, y_val = splitter(dft, lags) # workaround ("gambiarra"): the validation set should be carved out of the drift window -- TODO
                X_train_mm = normalizer(X_train, normx)
                y_train = normalizer(y_train.values.reshape(-1,1), normy).ravel()
                X_val_mm = normalizer(X_val, normx)
                y_val = normalizer(y_val.values.reshape(-1,1), normy).ravel()
                svm = train_svm(X_train_mm, y_train)
                joblib.dump(svm, "models/"+country+"/svms/svms"+str(k)+".pkl")
                svms[k] = joblib.load("models/"+country+"/svms/svms"+str(k)+".pkl")
            except:
                # NOTE(review): reassigning the loop variable has no effect on
                # the iteration; a window whose training fails is just skipped.
                k -= 1
    # Each pool member forecasts the full test set in a nested MLflow run.
    # assumes X_test_mm / y_test were prepared earlier in main -- not visible here.
    preds = {}
    for k, m in svms.items():
        with mlflow.start_run(run_name=country+'.'+model+'.'+split+'.'+str(k), nested=True):
            # Predict on normalized features, then invert the target scaling.
            y_pred = normalizer(m.predict(X_test_mm).reshape(-1,1), normy, -1).flatten()
            #y_pred = m.predict(X_test_mm).reshape(-1,1).flatten()
            y_pred = post_forecast(pd.Series(y_pred)).reset_index(drop=True)
            preds[k] = y_pred
            y_test = pd.DataFrame(y_test).reset_index(drop=True)
            # First 7 points excluded from scoring (presumably a warm-up period -- confirm).
            metrics = eval_metrics(y_test.iloc[7:], y_pred.iloc[7:])
            log_params(m.get_params())
            mlflow.set_tags({'data': country, 'split': split, 'model': model, 'submodel': k, 'drift': detector})
            #metrics = eval_metrics(test_data.iloc[size:], preds[k].iloc[size:])
            log_metrics(metrics)
    # Persist the pooled per-member predictions and log artifacts on the parent run.
    pd.DataFrame(preds).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
    log_artifacts("outputs/"+country+"/data")
    log_artifacts("outputs/"+country+"/preds/"+model)
    mlflow.end_run()
# ADWIN-SVM-DYNAMIC-SELECTION (ASVDS): at each test step, rank the ASV pool
# members by their MSE over a sliding window of recent points and average the
# predictions of the K best members.
if model == "ASVDS":
    submodel = "ASV"
    size = int(sys.argv[5])   # sliding-window length used to score the pool
    K = int(sys.argv[6])      # number of members selected per step
    lags = int(sys.argv[4])   # autoregressive lag count
    if not os.path.exists("outputs/"+country+"/preds/"+submodel):
        # Requires the per-member prediction file produced by an ASV run.
        print('execute o modelo ASV antes, e tente novamente')
    else:
        detector = 'adwin'
        drifts, drift_data = get_drifts(train_data, country, detector=detector)
        draw_drifts(country, drifts, drift_data, train_data)
        log_artifact('outputs/'+country+'/drifts.png')
        # Reload the SVR pool trained by the ASV run (one model per drift window).
        svms = {}
        for k, dft in drift_data.items():
            try:
                svms[k] = joblib.load("models/"+country+"/svms/svms"+str(k)+".pkl")
            except:
                print('erro carrega', k)
        X_test_mm = normalizer(X_test, normx)
        preds = {}       # window-end -> {member -> window predictions}
        errors = {}      # window-end -> {member -> window MSE}
        selection = {}   # window-end -> {rank position -> selected member}
        for w in range(size,len(y_test)):
            last = w
            first = w - size
            preds[last] = {}
            errors[last] = {}
            selection[last] = {}
            # Score every member on the trailing window [first, last).
            for k, m in svms.items():
                y_pred = normalizer(m.predict(X_test_mm[first:last]).reshape(-1,1), normy, -1).flatten()
                preds[last][k] = pd.Series(y_pred).reset_index(drop=True)
                errors[last][k] = mean_squared_error(y_test.iloc[first:last], preds[last][k])
            # Rank members by window MSE (1 = best) and keep the K best.
            df_error = pd.Series(errors[last]).rank()
            for i in range(K):
                try:
                    selection[last][i] = df_error.loc[df_error == i+1].index.values[0]
                except:
                    # Tied ranks (e.g. 1.5, 1.5, 3) leave gaps in the integer
                    # ranks; fall back to the overall best member.
                    selection[last][i] = df_error.idxmin()
        df_selection = pd.DataFrame(selection).T
        # Average, per step, the full-horizon predictions of the selected members.
        preds_all = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
        preds_selection = {}
        for row in df_selection.iterrows():
            preds_selection[row[0]] = preds_all.loc[row[0]].iloc[row[1]].mean()
        preds_selection = pd.Series(preds_selection).T
        # Score only the part of the test set the selector could cover.
        metrics = eval_metrics(pd.DataFrame(y_test).reset_index(drop=True).iloc[size:], preds_selection)
        draw_predictions(country, preds_selection, data)
        log_metrics(metrics)
        pd.DataFrame(errors).to_csv("outputs/"+country+"/preds/"+model+"/errors.csv")
        pd.DataFrame(preds).to_csv("outputs/"+country+"/preds/"+model+"/preds.csv")
        df_selection.to_csv("outputs/"+country+"/preds/"+model+"/selection.csv")
        preds_selection.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
        # Fixed mislabeled pool parameter: this branch's pool is SVMs, not ELMs
        # (the 'elms' value was copy-pasted from the AEDS branch).
        log_params({'pool': 'svms', 'window_size': size ,'K':K, 'metric':'mse', 'distance': None})
        mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size, 'k': k})
        log_arts(country,model)
        mlflow.end_run()
# ADWIN-SVM-ORACLE (ASVO): hindsight upper bound for the ASV pool -- at every
# step the member that happens to be closest to the truth is selected.
if model == "ASVO":
    submodel = "ASV"
    detector = 'adwin'
    if not os.path.exists("outputs/"+country+"/preds/"+submodel):
        # Requires the per-member prediction file produced by an ASV run first.
        # NOTE(review): the message says "AS" but the prerequisite model is ASV.
        print('execute o modelo AS antes e depois tente novamente')
    else:
        # Per-member predictions and ground truth on a plain integer index.
        preds = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
        true = pd.read_csv("outputs/"+country+"/data/test_data.csv", index_col=0, parse_dates=True).reset_index(drop=True)
        #print(preds.shape)
        #print(true.shape)
        #oracle = get_oracle(preds, true)
        # best: winning member per step; oracle: the corresponding predictions.
        best, oracle = get_oracle(preds, true)
        pd.Series(best).to_csv("outputs/"+country+"/preds/"+model+"/best.csv")
        oracle.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
        # assumes `size` is still set from an earlier branch; the size+1 vs size
        # offsets look intentional (warm-up alignment) -- TODO confirm.
        metrics = eval_metrics(true.iloc[size+1:], oracle.iloc[size:])
        draw_predictions(country, oracle, true)
        log_metrics(metrics)
        mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size})
        log_arts(country,model)
        mlflow.end_run()
# ADWIN-ELM (AE): same scheme as ASV but with Extreme Learning Machines
# (torch models) as pool members, one per ADWIN drift window.
if model == "AE":
    detector = 'adwin'
    drifts, drift_data = get_drifts(train_data, country, detector=detector)
    draw_drifts(country, drifts, drift_data, train_data)
    log_artifact('outputs/'+country+'/drifts.png')
    if not os.path.exists("models/"+country+"/elms"):
        os.makedirs("models/"+country+"/elms")
    if not os.path.exists("models/"+country+"/norms"):
        os.makedirs("models/"+country+"/norms")
    # One ELM per drift window, cached on disk via torch serialization.
    elms = {}
    for k, dft in drift_data.items():
        try:
            elms[k] = torch.load("models/"+country+"/elms/elm"+str(k)+".pkl")
        except:
            try:
                X_train, y_train = splitter(dft, lags)
                X_val, y_val = splitter(dft, lags) # workaround ("gambiarra"): the validation set should be carved out of the drift window -- TODO
                X_train_mm = normalizer(X_train, normx)
                y_train_mm = normalizer(y_train.values.reshape(-1,1), normy)
                X_val_mm = normalizer(X_val, normx)
                y_val_mm = normalizer(y_val.values.reshape(-1,1), normy)
                X_train_mm, y_train, X_val_mm, y_val = torcher(X_train_mm), torcher(y_train_mm.reshape(-1,1)), torcher(X_val_mm), torcher(y_val_mm.reshape(-1,1))
                elm = train_elm(X_train_mm, y_train)
                torch.save(elm ,"models/"+country+"/elms/elm"+str(k)+".pkl")
                elms[k] = torch.load("models/"+country+"/elms/elm"+str(k)+".pkl")
            except:
                # A window whose training fails is skipped silently.
                pass
                #k -= 1
    #joblib.dump(norm, "models/"+country+"/norms/norm"+str(k)+".pkl")
    # Each pool member forecasts the full test set in a nested MLflow run.
    preds = {}
    for k, m in elms.items():
        with mlflow.start_run(run_name=country+'.'+model+'.'+split+'.'+str(k), nested=True):
            X_test_mm = normalizer(X_test, normx)
            X_test_mm = torcher(X_test_mm)
            log_params({'h_size': m._h_size, 'activation' :m.activation_name})
            mlflow.set_tags({'data': country, 'split': split, 'model': model, 'submodel': k, 'drift': detector})
            # Predict on normalized inputs, then invert the target scaling.
            y_pred = normalizer(m.predict(X_test_mm).numpy().reshape(-1,1), normy, -1).flatten()
            y_pred = post_forecast(pd.Series(y_pred)).reset_index(drop=True)
            #y_pred = post_forecast(pd.Series(m.predict(X_test_mm).numpy().flatten())).reset_index(drop=True)
            y_test = pd.DataFrame(y_test).reset_index(drop=True)
            #y_test = pd.DataFrame(y_test.numpy())
            # First 7 points excluded from scoring (presumably warm-up -- confirm).
            metrics = eval_metrics(y_test.iloc[7:], y_pred.iloc[7:])
            log_metrics(metrics)
            preds[k] = y_pred
    # Persist the pooled predictions and log artifacts on the parent run.
    pd.DataFrame(preds).to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
    log_artifacts("outputs/"+country+"/data")
    log_artifacts("outputs/"+country+"/preds/"+model)
    mlflow.end_run()
# ADWIN-ELM-DYNAMIC-SELECTION (AEDS): at each test step, rank the AE pool
# members by their MSE over a sliding window of recent points and average the
# predictions of the K best members.
if model == "AEDS":
    submodel = "AE"
    size = int(sys.argv[5])   # sliding-window length used to score the pool
    K = int(sys.argv[6])      # number of members selected per step
    lags = int(sys.argv[4])   # autoregressive lag count
    if not os.path.exists("outputs/"+country+"/preds/"+submodel):
        # Requires the per-member prediction file produced by an AE run.
        print('execute o modelo AE antes, e tente novamente')
    else:
        detector = 'adwin'
        drifts, drift_data = get_drifts(train_data, country, detector=detector)
        draw_drifts(country, drifts, drift_data, train_data)
        log_artifact('outputs/'+country+'/drifts.png')
        # Reload the ELM pool trained by the AE run (one model per drift window).
        elms = {}
        for k, dft in drift_data.items():
            try:
                #print('carrega',k)
                elms[k] = torch.load("models/"+country+"/elms/elm"+str(k)+".pkl")
            except:
                print('erro carrega', k)
        #X_test_mm = normalizer(X_test, norms[k])
        #X_test_mm, y_test = torcher(X_test_mm), torcher(y_test.values.reshape(-1,1))
        X_test_mm = normalizer(X_test, normx)
        X_test_mm = torcher(X_test_mm)
        preds = {}       # window-end -> {member -> window predictions}
        errors = {}      # window-end -> {member -> window MSE}
        selection = {}   # window-end -> {rank position -> selected member}
        for w in range(size,len(y_test)):
            last = w
            first = w - size
            preds[last] = {}
            errors[last] = {}
            selection[last] = {}
            # Score every member on the trailing window [first, last).
            for k, m in elms.items():
                y_pred = normalizer(m.predict(X_test_mm[first:last]).numpy().reshape(-1,1), normy, -1).flatten()
                #y_pred = post_forecast(pd.Series(y_pred)).reset_index(drop=True)
                preds[last][k] = pd.Series(y_pred).reset_index(drop=True)
                errors[last][k] = mean_squared_error(y_test.iloc[first:last], preds[last][k])
            # Rank members by window MSE (1 = best) and keep the K best.
            df_error = pd.Series(errors[last]).rank()
            for i in range(K):
                try:
                    selection[last][i] = df_error.loc[df_error == i+1].index.values[0]
                except:
                    # Tied ranks (e.g. 1.5, 1.5, 3) leave gaps in the integer
                    # ranks; fall back to the overall best member.
                    selection[last][i] = df_error.idxmin()
            #selection[last] = df_error.loc[df_error < K+1].index.values[:K]
        df_selection = pd.DataFrame(selection).T
        #df_selection.index = pd.to_datetime(df_selection.index)
        # Average, per step, the full-horizon predictions of the selected members.
        preds_all = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
        preds_selection = {}
        for row in df_selection.iterrows():
            preds_selection[row[0]] = preds_all.loc[row[0]].iloc[row[1]].mean()
        preds_selection = pd.Series(preds_selection).T
        #metrics = eval_metrics(preds_selection, data.reindex_like(preds_selection))
        # Score only the part of the test set the selector could cover.
        metrics = eval_metrics(pd.DataFrame(y_test).reset_index(drop=True).iloc[size:], preds_selection)
        draw_predictions(country, preds_selection, data)
        log_metrics(metrics)
        pd.DataFrame(errors).to_csv("outputs/"+country+"/preds/"+model+"/errors.csv")
        pd.DataFrame(preds).to_csv("outputs/"+country+"/preds/"+model+"/preds.csv")
        df_selection.to_csv("outputs/"+country+"/preds/"+model+"/selection.csv")
        preds_selection.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
        log_params({'pool': 'elms', 'window_size': size ,'K':K, 'metric':'mse', 'distance': None})
        mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size, 'k': k})
        log_arts(country,model)
        mlflow.end_run()
# ADWIN-ELM-ORACLE (AEO): hindsight upper bound for the AE pool -- at every
# step the member that happens to be closest to the truth is selected.
if model == "AEO":
    submodel = "AE"
    detector = 'adwin'
    if not os.path.exists("outputs/"+country+"/preds/"+submodel):
        # Requires the per-member prediction file produced by an AE run first.
        # NOTE(review): the message says "AS" but the prerequisite model is AE.
        print('execute o modelo AS antes e depois tente novamente')
    else:
        # Per-member predictions and ground truth on a plain integer index.
        preds = pd.read_csv("outputs/"+country+"/preds/"+submodel+"/"+submodel+'_'+split+'.csv', index_col=0, parse_dates=True)
        true = pd.read_csv("outputs/"+country+"/data/test_data.csv", index_col=0, parse_dates=True).reset_index(drop=True)
        #print(preds.shape)
        #print(true.shape)
        #oracle = get_oracle(preds, true)
        # best: winning member per step; oracle: the corresponding predictions.
        best, oracle = get_oracle(preds, true)
        pd.Series(best).to_csv("outputs/"+country+"/preds/"+model+"/best.csv")
        oracle.to_csv("outputs/"+country+"/preds/"+model+"/"+model+'_'+split+'.csv')
        # assumes `size` is still set from an earlier branch -- TODO confirm.
        metrics = eval_metrics(true.iloc[size+1:], oracle.iloc[size:])
        draw_predictions(country, oracle, true)
        log_metrics(metrics)
        mlflow.set_tags({'data': country, 'split': split, 'model': model, 'drift': detector, 'size': size})
        log_arts(country,model)
        mlflow.end_run()
"""
tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
# Model registry does not work with file store
if tracking_url_type_store != "file":
# Register the model
# There are other ways to use the Model Registry, which depends on the use case,
# please refer to the doc for more information:
# https://mlflow.org/docs/latest/model-registry.html#api-workflow
mlflow.statsmodels.log_model(arima, "model", registered_model_name="arimao")
mlflow.statsmodels.log_model(sarima, "model", registered_model_name="sarimao")
else:
mlflow.statsmodels.log_model(arima, "model")
mlflow.statsmodels.log_model(sarima, "model")
"""
if __name__ == "__main__":
main() | amarabuco/codrift | codrift3.py | codrift3.py | py | 60,743 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.simplefilter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "seaborn.set_theme",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.W... |
35947650994 | # -*- coding: utf-8 -*-
import os
import csv
import ast
from itertools import islice
import matplotlib.pyplot as plt
from sklearn import preprocessing
import numpy
"""
This script is plotting all of bad sample data
"""
# Entry point: read per-sample (force/RMSE, TC error) pairs from
# result/RMSEdata.csv and scatter-plot all bad-sample groups (bs1..bs8)
# into a single figure saved under result/grpplot/.
if __name__ == '__main__':
    root=os.getcwd()
    bsxs=["bs1","bs2","bs3","bs4","bs5","bs6","bs7","bs8"]
    rstfile=root+"/result/RMSEdata.csv"
    plotdir=root+"/result/grpplot/"
    # NOTE(review): 'labels' is defined but never used below.
    labels=('nrm>0.9','0.9>nrm>0.6','0.6>nrm>0.4','0.4>nrm>0.2','0.2>nrm>0.1','0.1>nrm')
    colors=("red","pink","orange","yellow", "green","cyan","c","b")
    # Each CSV row: columns 1 and 2 hold force/RMSE and TC error (as floats).
    with open(rstfile, 'r') as rslt:
        readcsv = csv.reader(rslt)
        RMSETC = []
        for row in readcsv:
            RMSETC.append([float(row[1]),float(row[2])])
    #Plotting all data of correlation of force/RMSE and TC error
    dtn=0
    allplotfile=plotdir+"alldata-bs.png"
    fig = plt.figure()
    ax3 = fig.add_subplot(111)
    plt.title("All data")
    ax3.set_xlabel("TC Err (fm 112.1/300K)")
    ax3.set_ylabel("force/RMSE (meV/A)")
    ax3.grid(True)
    plt.rcParams["legend.edgecolor"] ='green'
    # Assumes 10 consecutive CSV rows per group; dtn walks the flat list.
    for i, bsx in enumerate(bsxs):
        for j in range(0, 10):
            ax3.scatter(RMSETC[dtn][1],RMSETC[dtn][0],c=colors[i],marker='.')
            dtn=dtn+1
    left, right = ax3.get_xlim()
    ax3.set_xlim(-0.1, right*1.2)
    #ax4 is only for plotting legend of all kind of data
    # The twin axis exists solely to collect one labeled handle per group;
    # it is deleted again after the legend is built.
    ax4 = ax3.twinx()
    for i, bsx in enumerate(bsxs):
        ax4.scatter(RMSETC[0][1],RMSETC[0][0],c=colors[i],marker='.',label=bsx)
    handler4, label4 = ax4.get_legend_handles_labels()
    ax3.legend(handler4, label4,loc='upper right',title='Sample Grp',bbox_to_anchor=(0.97, 0.85, 0.14, .100),borderaxespad=0.,)
    fig.delaxes(ax4)
    plt.savefig(allplotfile)
    plt.close()
| s-okugawa/HDNNP-tools | tools/bad-sample/plot-allbs.py | plot-allbs.py | py | 1,785 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
38069299722 | import os
import glob
import shutil
import re
import gi
gi.require_version('Limba', '1.0')
gi.require_version('AppStream', '1.0')
from gi.repository import Limba
from gi.repository import AppStream
from hashlib import sha256
from ..utils import build_cpt_path
from ..repository.models import *
from ..user import User
from ..extensions import db
from ..utils import get_current_time
from .dscfile import DSCFile
class IPKImporter():
    """Imports signed Limba package uploads (.dsc + .ipk) into a repository.

    Scans a directory for .dsc upload descriptions, verifies the uploader's
    GPG signature and the per-file SHA-256 checksums, then registers each
    package and its AppStream metadata in the database and copies the .ipk
    into the repository pool.
    """

    def __init__(self, search_dir):
        # Directory scanned for incoming *.dsc files.
        self._import_dir = search_dir
        self._asmdata = AppStream.Metadata()
        self._asmdata.set_locale("C")
        # Map XDG desktop-entry categories to this hub's own category rows.
        self._xdg_cat_map = { 'AudioVideo': self._category_by_id("multimedia"),
                    'Audio': self._category_by_id("multimedia"),
                    'Video': self._category_by_id("multimedia"),
                    'Development': self._category_by_id("development"),
                    'Education': self._category_by_id("education"),
                    'Game': self._category_by_id("games"),
                    'Graphics': self._category_by_id("graphics"),
                    'Network': self._category_by_id("network"),
                    'Office': self._category_by_id("customization"),
                    'Science': self._category_by_id("science"),
                    'Settings': self._category_by_id("tools"),
                    'System': self._category_by_id("system"),
                    'Utility': self._category_by_id("tools"),
                    'Arcade': self._category_by_id("arcade")
                  }

    def _category_by_id(self, cat_name):
        # Look up a Category row by its internal id name; raises if missing.
        return Category.query.filter_by(idname=cat_name).one()

    def _map_categories(self, cpt):
        """Translate an AppStream component's XDG categories into DB categories."""
        xdg_cats = cpt.get_categories()
        if cpt.get_kind() != AppStream.ComponentKind.DESKTOP:
            # Non-desktop components all land in the generic "components" bucket.
            return [Category.query.filter_by(idname="components").one()]
        if not xdg_cats:
            return [Category.query.filter_by(idname="other").one()]
        cats = list()
        for xcat in xdg_cats:
            cat = self._xdg_cat_map.get(xcat)
            if cat:
                cats.append(cat)
        if len(cats) == 0:
            # None of the XDG categories mapped: fall back to "other".
            return [Category.query.filter_by(idname="other").one()]
        return cats

    def _import_package(self, pkg_fname, sha256sum, dsc):
        """Register one verified .ipk in the DB and copy it into the repo pool."""
        pkg = Limba.Package()
        pkg.open_file(pkg_fname)
        if pkg.has_embedded_packages():
            self._reject_dsc("Package contains embedded packages. This is not allowed in repositories.", dsc)
            return
        pki = pkg.get_info()
        # Parse the embedded AppStream XML into a component object.
        cpt_xml = pkg.get_appstream_data()
        self._asmdata.clear_components()
        self._asmdata.parse_data(cpt_xml)
        cpt = self._asmdata.get_component()

        pkgid = pkg.get_id()
        cptname = pki.get_name()
        arch = pki.get_architecture()
        dest_pkgfname = "%s_%s.ipk" % (pkgid.replace("/", "-"), arch)

        cpt_desc = cpt.get_description()
        if not cpt_desc:
            cpt_desc = "<p>A software component</p>"

        # we just accept packages for the master repository for now
        repo_name = dsc.get_val('Target')
        repo = None
        try:
            repo = Repository.query.filter_by(name=repo_name).one()
        except:
            self._reject_dsc("Could not find target repository: %s" % (repo_name), dsc)
            return

        # Destination paths inside the repository tree.
        repo_pool_path = os.path.join(repo.root_dir, "pool", build_cpt_path (cptname))
        repo_icons_path = os.path.join(repo.root_dir, "assets", build_cpt_path (cptname), pki.get_version(), "icons")
        pkg_dest = os.path.join(repo_pool_path, dest_pkgfname)
        repo_location = os.path.join(build_cpt_path (cptname), dest_pkgfname)

        # Database rows: one Component (AppStream metadata) ...
        dbcpt = Component(
                cid=cpt.get_id(),
                kind=AppStream.ComponentKind.to_string(cpt.get_kind()),
                sdk=True if pki.get_kind() == Limba.PackageKind.DEVEL else False,
                name=cpt.get_name(),
                summary=cpt.get_summary(),
                description=cpt_desc,
                developer_name=cpt.get_developer_name(),
                url=cpt.get_url(AppStream.UrlKind.HOMEPAGE),
                xml=cpt_xml,
                repository=repo
            )
        dbcpt.categories = self._map_categories(cpt)
        db.session.add(dbcpt)

        # ... and one Package (the actual file and its checksum).
        dbpkg = Package(
                name=pki.get_name(),
                version=pki.get_version(),
                kind=PackageKind.SDK if pki.get_kind() == Limba.PackageKind.DEVEL else PackageKind.COMMON,
                fname=repo_location,
                architecture=arch,
                sha256sum=sha256sum,
                dependencies=pki.get_dependencies(),
                component=dbcpt,
                repository=repo
            )
        db.session.add(dbpkg)

        pkg.extract_appstream_icons(repo_icons_path)
        if not os.path.exists(repo_pool_path):
            os.makedirs(repo_pool_path)
        shutil.copyfile(pkg_fname, pkg_dest)

    def _reject_dsc(self, reason, dsc):
        # Rejection currently only logs the reason.
        print("REJECT: %s => %s" % (reason, str(dsc)))
        # TODO: Actually reject the package and move it to the morgue

    def _process_dsc(self, dscfile):
        """Validate one .dsc upload and import its packages on success."""
        dsc = DSCFile()
        dsc.open(dscfile)

        uploader = dsc.get_val('Uploader')
        if not uploader:
            self._reject_dsc("Uploader field was not set.", dsc)
            return
        # Uploader field looks like "Name <email>"; extract the address.
        m = re.findall(r'<(.*?)>', uploader)
        if not m:
            self._reject_dsc("Unable to get uploader email address.", dsc)
            return
        user = None
        try:
            user = User.query.filter_by(email=m[0]).one()
        except:
            self._reject_dsc("Could not find user '%s'" % (uploader), dsc)
            return

        # Verify the GPG signature and compare the signing key's fingerprint
        # against the one registered for the user.
        key = None
        try:
            key = dsc.validate(user.gpghome)
            key = key.replace(' ', '')
        except Exception as e:
            self._reject_dsc("Validation failed: %s" % (str(e)), dsc)
            return
        if key != user.pgpfpr:
            self._reject_dsc("Validation failed: Fingerprint does not match user", dsc)
            return

        # if we are here, everything is fine - we can import the packages if their checksums match
        for sha256sum, fname in dsc.get_files().items():
            real_sha256 = None
            fname_full = os.path.join(self._import_dir, fname)
            with open(fname_full, 'rb') as f:
                real_sha256 = sha256(f.read()).hexdigest()
            if real_sha256 != sha256sum:
                self._reject_dsc("Validation failed: Checksum mismatch for '%s'" % (fname), dsc)
                return
            self._import_package(fname_full, sha256sum, dsc)

    def import_packages(self):
        """Process every .dsc in the import directory and commit the session."""
        for fname in glob.glob(self._import_dir+"/*.dsc"):
            self._process_dsc(fname)
        db.session.commit()
| limbahq/limba-hub | lihub/maintain/ipkimport.py | ipkimport.py | py | 6,824 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "gi.require_version",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gi.require_version",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gi.repository.AppStream.Metadata",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
5310216277 | from collections import deque
from typing import List, Tuple
def shortest_distance(maze: List[List[int]], src: Tuple[int, int], dest: Tuple[int, int]) -> int:
    """Return the length of the shortest 4-directional path from src to dest.

    Cells with value 1 are passable, cells with value 0 are blocked.  The
    source cell itself is treated as enterable regardless of its value
    (matching the original behavior).

    :param maze: rectangular grid of 0/1 ints
    :param src: (row, col) start cell
    :param dest: (row, col) target cell
    :return: number of steps on a shortest path, or -1 if unreachable

    On an unweighted grid a plain BFS already assigns the minimal distance
    on first visit, so no Dijkstra-style relaxation/re-queueing is needed.
    """
    rows, cols = len(maze), len(maze[0])
    if src == dest:
        return 0
    # distance doubles as the visited set; only reachable cells appear in it.
    distance = {src: 0}
    queue = deque([src])
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((-1, 0), (0, 1), (1, 0), (0, -1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < rows and 0 <= nc < cols
                    and maze[nr][nc] == 1 and (nr, nc) not in distance):
                distance[(nr, nc)] = distance[(r, c)] + 1
                if (nr, nc) == dest:
                    # Early exit: BFS guarantees this is the shortest distance.
                    return distance[(nr, nc)]
                queue.append((nr, nc))
    return -1
# Demo: 4x4 grid; the shortest path from (3, 0) to (0, 2) has 5 steps.
maze = [[1, 0, 1, 1],
        [1, 1, 1, 0],
        [0, 1, 0, 1],
        [1, 1, 1, 1]]
src = (3, 0)
dest = (0, 2)
print(shortest_distance(maze, src, dest))
| Sourolio10/Leetcode-Practice | Graph/Shortest_distance/shortest_dist_in_matrix_using_djitras_queue.py | shortest_dist_in_matrix_using_djitras_queue.py | py | 1,376 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
16954723069 | # Link For Problem: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
import functools
class Solution:
    """
    Memoized DFS over the grid.

    Each cell's answer is the longest strictly-increasing path starting
    there; it depends only on larger neighbors, so results are cached.
    TC : O(n)
    SC : O(n)
    """
    def longestIncreasingPath(self, matrix: list[list[int]]) -> int:
        rows, cols = len(matrix), len(matrix[0])

        @functools.lru_cache(None)
        def longest_from(r: int, c: int) -> int:
            # 1 for the cell itself, plus the best continuation into any
            # in-bounds neighbor holding a strictly larger value.
            best = 1
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] > matrix[r][c]:
                    best = max(best, 1 + longest_from(nr, nc))
            return best

        return max(longest_from(r, c) for r in range(rows) for c in range(cols))
| loopclub2022/MonthLongChallenge | Anurag_19_CSE/Python/day19.py | day19.py | py | 857 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "functools.lru_cache",
"line_number": 30,
"usage_type": "call"
}
] |
1585593679 | from typing import Optional
from decimal import Decimal
from validator_collection import validators
from highcharts_core import errors
from highcharts_core.metaclasses import HighchartsMeta
from highcharts_core.utility_classes.gradients import Gradient
from highcharts_core.utility_classes.patterns import Pattern
class DataClass(HighchartsMeta):
    """Definition of ranges for use in a choropleth map."""

    def __init__(self, **kwargs):
        self._color = None
        self._from_ = None
        self._name = None
        self._to = None

        self.color = kwargs.get('color', None)
        self.from_ = kwargs.get('from_', None)
        self.name = kwargs.get('name', None)
        self.to = kwargs.get('to', None)

    @property
    def color(self) -> Optional[str | Gradient | Pattern]:
        """The color of the data class. Defaults to :obj:`None <python:None>`.

        If :obj:`None <python:None>`, the color is pulled from the global or
        chart-specific colors array.

        :rtype: :class:`str <python:str>`, :class:`Gradient`, :class:`Pattern``, or
          :obj:`None <python:None>`
        """
        return self._color

    @color.setter
    def color(self, value):
        # Deferred import avoids a circular dependency at module load time.
        from highcharts_core import utility_functions
        self._color = utility_functions.validate_color(value)

    @property
    def from_(self) -> Optional[int | float | Decimal]:
        """The point value at which the data class' value range begins. Defaults to
        :obj:`None <python:None>`.

        .. note::

          The range of each :class:`DataClass` is closed at both ends, but can be
          overridden by a subsequent :class:`DataClass` instance included in the
          :meth:`ColorAxis.data_classes` collection.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._from_

    @from_.setter
    def from_(self, value):
        # Coerce to a numeric value; None clears the bound.
        self._from_ = validators.numeric(value, allow_empty = True)

    @property
    def name(self) -> Optional[str]:
        """The name of the data class as it should apear in the legend. Defaults to
        :obj:`None <python:None>`.

        If :obj:`None <python:None>`, it is automatically created based on the
        :meth:`from_ <DataClass.from_>` and :meth:`to <DataClass.to>` values.

        .. hint::

          For full programmatic control, :meth:`Legend.label_formatter` can be used. In
          the formatter (JavaScript) function, ``this.from`` and ``this.to`` can be
          accessed.

        :rtype: :class:`str <python:str>` or :obj:`None <python:None>`
        """
        return self._name

    @name.setter
    def name(self, value):
        self._name = validators.string(value, allow_empty = True)

    @property
    def to(self) -> Optional[int | float | Decimal]:
        """The point value at which the data class' value range ends. Defaults to
        :obj:`None <python:None>`.

        .. note::

          The range of each :class:`DataClass` is closed at both ends, but can be
          overridden by a subsequent :class:`DataClass` instance included in the
          :meth:`ColorAxis.data_classes` collection.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._to

    @to.setter
    def to(self, value):
        self._to = validators.numeric(value, allow_empty = True)

    @classmethod
    def _get_kwargs_from_dict(cls, as_dict):
        # Map Highcharts JS keys (e.g. 'from') to constructor kwargs ('from_').
        kwargs = {
            'color': as_dict.get('color', None),
            'from_': as_dict.get('from', None),
            'name': as_dict.get('name', None),
            'to': as_dict.get('to', None)
        }

        return kwargs

    def _to_untrimmed_dict(self, in_cls = None) -> dict:
        # Serialize back to Highcharts JS keys; trimming of Nones happens upstream.
        untrimmed = {
            'color': self.color,
            'from': self.from_,
            'name': self.name,
            'to': self.to
        }

        return untrimmed
| highcharts-for-python/highcharts-core | highcharts_core/options/axes/data_classes.py | data_classes.py | py | 3,874 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "highcharts_core.metaclasses.HighchartsMeta",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "highcharts_core.utility_classes.gradients.Gradient",
"line_number": 27,
"usage... |
6584194716 | from typing import Dict, Optional, Tuple, TYPE_CHECKING
from UM.Logger import Logger
from UM.Version import Version
from cura.Backups.Backup import Backup
if TYPE_CHECKING:
from cura.CuraApplication import CuraApplication
class BackupsManager:
    """
    The BackupsManager is responsible for managing the creating and restoring of
    back-ups.

    Back-ups themselves are represented in a different class.
    """

    def __init__(self, application: "CuraApplication") -> None:
        self._application = application

    def createBackup(self) -> Tuple[Optional[bytes], Optional[Dict[str, str]]]:
        """
        Get a back-up of the current configuration.

        :return: A tuple containing a ZipFile (the actual back-up) and a dict containing some metadata (like version).
        """
        # Auto-save is paused so it does not write while we snapshot the config.
        self._disableAutoSave()
        backup = Backup(self._application)
        backup.makeFromCurrent()
        self._enableAutoSave()
        # We don't return a Backup here because we want plugins only to interact with our API and not full objects.
        return backup.zip_file, backup.meta_data

    def restoreBackup(self, zip_file: bytes, meta_data: Dict[str, str]) -> None:
        """
        Restore a back-up from a given ZipFile.

        :param zip_file: A bytes object containing the actual back-up.
        :param meta_data: A dict containing some metadata that is needed to restore the back-up correctly.
        """
        if not meta_data.get("cura_release", None):
            # If there is no "cura_release" specified in the meta data, we don't execute a backup restore.
            Logger.log("w", "Tried to restore a backup without specifying a Cura version number.")
            return

        # Note: auto-save is deliberately NOT re-enabled after a restore (see below).
        self._disableAutoSave()

        backup = Backup(self._application, zip_file = zip_file, meta_data = meta_data)
        restored = backup.restore()
        if restored:
            # At this point, Cura will need to restart for the changes to take effect.
            # We don't want to store the data at this point as that would override the just-restored backup.
            self._application.windowClosed(save_data = False)

    def _disableAutoSave(self) -> None:
        """Here we (try to) disable the saving as it might interfere with restoring a back-up."""

        self._application.enableSave(False)
        auto_save = self._application.getAutoSave()
        # The auto save is only not created if the application has not yet started.
        if auto_save:
            auto_save.setEnabled(False)
        else:
            Logger.log("e", "Unable to disable the autosave as application init has not been completed")

    def _enableAutoSave(self) -> None:
        """Re-enable auto-save and other saving after we're done."""

        self._application.enableSave(True)
        auto_save = self._application.getAutoSave()
        # The auto save is only not created if the application has not yet started.
        if auto_save:
            auto_save.setEnabled(True)
        else:
            Logger.log("e", "Unable to enable the autosave as application init has not been completed")
| Ultimaker/Cura | cura/Backups/BackupsManager.py | BackupsManager.py | py | 3,130 | python | en | code | 5,387 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "cura.Backups.Backup.Backup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Op... |
30644851301 | from ..math_basics import is_RealIntervalFieldElement # type: ignore
from ..exceptions import InsufficientPrecisionError # type: ignore
from typing import Sequence
# Fixed typo ('floor_as_intergers'): the misspelled name does not exist in the
# module, so `from ... import *` would raise AttributeError.
__all__ = ['floor_as_integers', 'SpatialDict']
def floor_as_integers(x) -> Sequence[int]:
    """
    Computes floor of a number or interval, returning a list of integers
    if evaluating floor is ambiguous.

        sage: floor_as_integers(RIF(1.8, 1.9))
        [1]
        sage: floor_as_integers(RIF(1.9, 2.1))
        [1, 2]

        >>> floor_as_integers(1.4)
        [1]
        >>> floor_as_integers(2.01)
        [1, 2]
        >>> floor_as_integers(1.99)
        [1, 2]
    """
    if is_RealIntervalFieldElement(x):
        # Interval case: every integer the floored interval might contain.
        floor_interval = x.floor()
        low = floor_interval.lower().round()
        high = floor_interval.upper().round() + 1
        count = high - low
        if count > 5:
            raise InsufficientPrecisionError(
                "Too many integers (%d) in given interval to compute floor. "
                "Increasing precision should fix it." % count)
        return list(range(low, high))

    # Number case: also return the neighboring integer when x lies within
    # 1/8 of an integer boundary, since floor is then ambiguous.
    floor_value = x.floor()
    candidate = int(floor_value)
    fraction = x - floor_value
    if fraction < 0.125:
        return [candidate - 1, candidate]
    if fraction > 0.875:
        return [candidate, candidate + 1]
    return [candidate]
class _Entry:
    """
    A helper for SpatialDict.

    The implementation of SpatialDict has the same instance of _Entry
    stored for multiple keys so that updating the value for all keys
    can be done by assigning the new value to _Entry.value only once.
    """
    def __init__(self, value):
        # The stored payload; shared by every key that maps to this entry.
        self.value = value
class SpatialDict:
    """
    A python dict-like object appropriate for using numerical points (e.g.,
    in the hyperboloid model) as keys. That is, look-ups return
    the same entry for points that are almost but not exactly the
    same due to rounding-errors.

    To achieve this, the points are asumed to be in some lattice
    and the minimal distance between any two points in the lattice
    must be given.
    """

    # Bucket resolution: float hashes are scaled by this before being floored
    # into integer bucket keys.
    _scale = 1024

    def __init__(self, min_distance, verified):
        RF = min_distance.parent()
        self._min_distance = min_distance
        self._RF_scale = RF(self._scale)
        if verified:
            # Interval arithmetic: d < min_distance is itself a verified test,
            # and any non-negative d >= 0 passes the "could be equal" check.
            self._right_distance_value = min_distance
            self._left_distance_value = 0
        else:
            # Plain floats: leave a safety band between "same" (< 1/8 of the
            # lattice distance) and "distinct" (> 1/2); anything in between
            # means the precision is insufficient to decide.
            self._right_distance_value = min_distance * RF(0.125)
            self._left_distance_value = min_distance * RF(0.5)
        self._data = { }

    def setdefault(self, point, default):
        """Like dict.setdefault, but keyed on spatial proximity.

        Returns the value stored for a point close to ``point``; otherwise
        stores ``default`` for it and returns ``default``.
        """
        reps_and_ikeys = self._representatives_and_ikeys(point)
        for rep, ikey in reps_and_ikeys:
            for other_rep, entry in self._data.get(ikey, []):
                d = self.distance(rep, other_rep)
                if d < self._right_distance_value:
                    return entry.value
                if not (self._left_distance_value < d):
                    # Neither "same" nor "distinct" could be certified.
                    raise InsufficientPrecisionError(
                        "Could neither verify that the two given tiles are "
                        "the same nor that they are distinct. "
                        "Distance between basepoint translates is: %r. "
                        "Injectivty diameter about basepoint is: %r." % (
                            d, self._min_distance))
        # Not found: register the same entry under every representative/bucket
        # so later look-ups from any of them hit the shared _Entry.
        entry = _Entry(default)
        for rep, ikey in reps_and_ikeys:
            self._data.setdefault(ikey, []).append((rep, entry))
        return default

    def distance(self, point_0, point_1):
        # Subclasses implement the metric used to compare points.
        raise NotImplementedError()

    def representatives(self, point):
        # Applies, e.g., translation by geodesic matrix
        return [ point ]

    def float_hash(self, point):
        # Subclasses provide a real-valued hash; nearby points must hash nearby.
        raise NotImplementedError()

    def _representatives_and_ikeys(self, point):
        # All (representative, integer bucket key) pairs; floor ambiguity near
        # bucket boundaries yields multiple keys per representative.
        return [
            (rep, ikey)
            for rep in self.representatives(point)
            for ikey in floor_as_integers(self._RF_scale * self.float_hash(rep)) ]
| ekim1919/SnapPy | python/drilling/spatial_dict.py | spatial_dict.py | py | 3,952 | python | en | code | null | github-code | 1 | [
{
"api_name": "math_basics.is_RealIntervalFieldElement",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "exceptions.InsufficientPrecisionError",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "typing.Sequence",
"line_number": 8,
"usage_type": "name"
... |
20068242046 | from tkinter import Tk, Label, Button, Entry, Text, messagebox
from os import environ
from source.email import Email
from dotenv import load_dotenv
# Build the main window.
venatana = Tk()
# Window title, size and background colour.
venatana.title("Semana 12")
venatana.geometry("500x400")
venatana.config(bg="#5F6A6A")
# Widgets: labels, entry fields and the message body.
share = Label(venatana, text="Correo: ", font=14, bg="#5F6A6A")
user = Label(venatana, text="Usuario: ", font=14, bg="#5F6A6A")
ce = Entry(venatana, font=14, bg="white")
user2 = Entry(venatana, font=14, bg="white")
message1 = Text(venatana, font=14, bg="white")
# HTML template for the e-mail body.
# Fix: the doctype was misspelled "<!DOCYPE html>", which can push mail
# clients/browsers into quirks-mode rendering.
html = """
<!DOCTYPE html>
<html>
<body>
<h1>Estimad@ {}</h1>
<p>{}</p>
</body>
</html>
"""
load_dotenv()
# Send the e-mail using the values currently typed into the form.
def sendemail():
    correo = Email()
    correo.mandar_email([ce.get()], "Semana 12 Programacion", message_format=html.format(user2.get(), message1.get('1.0', 'end-1c')), format="html")
    messagebox.showinfo("Aviso", "El correo se ha enviado de manera sastifactoria.")
# Send button.
btnshare = Button(venatana, text="Enviar", command=sendemail, bg="#283747")
# Lay the widgets out.
share.place(x=50, y=10)
ce.place(x=150, y=10, width=290)
user.place(x=50, y=60)
user2.place(x=150, y=60, width=290)
message1.place(x=60, y=110, width=380, height=225)
btnshare.place(x=60, y=350, width=380, height=40)
venatana.mainloop()
| DanCastSV/S12 | S12/main.py | main.py | py | 1,511 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Tk",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_numbe... |
73740570595 | # Face Recognition/Identification
import cv2 as cv
import face_recognition as fr
import os
# Training data: one reference photo per known person. The "code" slot is
# filled in below with the face encoding computed by train().
knownFaces = [
    {
        "file": "JoeBiden.jpg",
        "name": "Joe Biden",
        "code": None,
    },
    {
        "file": "FumioKishida.jpg",
        "name": "Fumio Kishida",
        "code": None,
    },
    {
        "file": "XiJinPing.jpg",
        "name": "Xi Dada",
        "code": None,
    },
    {
        "file": "JustinTrudeau.jpg",
        "name": "Justin Trudeau",
        "code": None,
    },
]
def train(face):
    """Return the face encoding for a known-face record, or None when the
    reference image yields no encoding. Also displays the detected face box."""
    # face_recognition loads images in RGB channel order.
    image = fr.load_image_file("face-images/known/" + face["file"])
    locations = fr.face_locations(image)
    encodings = fr.face_encodings(image, locations)
    if locations:
        top, right, bottom, left = locations[0]
        # OpenCV expects BGR, so convert before drawing/showing.
        preview = cv.cvtColor(image, cv.COLOR_RGB2BGR)
        cv.rectangle(preview, (left, top), (right, bottom), (255, 0, 0), 2)
        cv.imshow(face['name'], preview)
    return encodings[0] if encodings else None
def recognize(filePath):
    """Detect faces in the image at filePath, label the known ones, and
    display the annotated frame."""
    img = fr.load_image_file(filePath)
    frame = cv.cvtColor(img, cv.COLOR_RGB2BGR)
    locations = fr.face_locations(img)
    codes = fr.face_encodings(img, locations)
    # Skip known faces whose training failed (code is None): passing None
    # into compare_faces would crash, and filtering both lists in parallel
    # keeps names aligned with encodings.
    knownNames = [face['name'] for face in knownFaces if face['code'] is not None]
    knownCodes = [face['code'] for face in knownFaces if face['code'] is not None]
    for loc, code in zip(locations, codes):
        top, right, bottom, left = loc
        # draw face border
        cv.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
        # label with the first matching known face, if any
        matches = fr.compare_faces(knownCodes, code, tolerance=0.5)
        name = "unknown"
        for i, matched in enumerate(matches):
            if matched:
                name = knownNames[i]
                break
        cv.putText(frame, name, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    cv.imshow('face recog', frame)
# Compute and store the reference encoding for every known face.
for face in knownFaces:
    face['code'] = train(face)
    #print(face)
# Recognize the faces found in an unknown photograph.
recognize("face-images/unknown/u003.jpg")
cv.waitKey(0)
{
"api_name": "face_recognition.load_image_file",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_locations",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_encodings",
"line_number": 35,
"usage_type": "call... |
33397514940 | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
startPoint = 5

# Each *_state file holds a (201, 2) trajectory; only the first state
# component is kept, reshaped to (batch=1, time=201, features=1).
# Per trajectory: the first `startPoint` steps prime the encoder; the rest,
# shifted by one, form the teacher-forcing decoder inputs and the targets.
state1 = np.loadtxt('1.0_2.0_state')[:,0].reshape(1,201,1)
encoder_input1 = state1[:,:startPoint,:]
decoder_input1 = state1[:,startPoint:-1,:]
output1 = state1[:,startPoint:,:]
state2 = np.loadtxt('2.0_0.5_state')[:,0].reshape(1,201,1)
encoder_input2 = state2[:,:startPoint,:]
decoder_input2 = state2[:,startPoint:-1,:]
output2 = state2[:,startPoint:,:]
state3 = np.loadtxt('-2.0_1.0_state')[:,0].reshape(1,201,1)
encoder_input3 = state3[:,:startPoint,:]
decoder_input3 = state3[:,startPoint:-1,:]
output3 = state3[:,startPoint:]
state4 = np.loadtxt('-1.0_-1.0_state')[:,0].reshape(1,201,1)
encoder_input4 = state4[:,:startPoint,:]
decoder_input4 = state4[:,startPoint:-1,:]
output4 = state4[:,startPoint:,:]

# Stack the four trajectories into a training batch of size 4.
train_en_input = np.concatenate((encoder_input1,encoder_input2,encoder_input3,encoder_input4), axis=0)
train_de_input = np.concatenate((decoder_input1,decoder_input2,decoder_input3,decoder_input4), axis=0)
train_output = np.concatenate((output1,output2,output3,output4), axis=0)
train_en_input = torch.Tensor(train_en_input)
train_de_input = torch.Tensor(train_de_input)
train_output = torch.Tensor(train_output)
# print(train_en_input.shape,train_de_input.shape,train_output.shape)

# A fifth, unseen trajectory serves as the validation set.
stateV = np.loadtxt('0.0_1.0_state')[:,0].reshape(1,201,1)
encoder_inputV = stateV[:,:startPoint,:]
decoder_inputV = stateV[:,startPoint:-1,:]
outputV = stateV[:,startPoint:]
val_en_input = torch.Tensor(encoder_inputV)
val_de_input = torch.Tensor(decoder_inputV)
val_output = torch.Tensor(outputV)
class RNN(nn.Module):
    """Encoder/decoder LSTM for one-dimensional sequence forecasting.

    With ``predict_length == 0`` it runs teacher-forced: the decoder sees the
    ground-truth inputs. Otherwise it rolls out autoregressively, feeding its
    own prediction back in for ``predict_length`` steps.
    """

    def __init__(self, input_size=1, hidden_size=48, output_size=1, n_layers=1):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.input_size = input_size
        self.n_layers = n_layers
        # Encoder and decoder are separate LSTMs with independent parameters;
        # a single linear head maps hidden states back to observations.
        self.encoder = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
        self.decoder = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, en_input: torch.Tensor, de_input=None, predict_length=0):
        state = None
        if predict_length == 0:
            # Teacher forcing: the first predicted step comes from the
            # encoder's last output, the rest from decoding de_input.
            enc_seq, state = self.encoder(en_input, state)
            first_step = self.out(enc_seq)[:, -1, :].reshape(-1, 1, 1)
            dec_seq, state = self.decoder(de_input, state)
            dec_seq = self.out(dec_seq)
            return torch.cat((first_step, dec_seq), dim=1)
        # Free-running generation: feed each prediction back as the next
        # decoder input, collecting exactly predict_length steps.
        enc_seq, state = self.encoder(en_input, state)
        step = self.out(enc_seq[:, -1, :].reshape(-1, 1, self.hidden_size))
        generated = [step]
        for _ in range(predict_length - 1):
            dec_out, state = self.decoder(step, state)
            step = self.out(dec_out)
            generated.append(step)
        return torch.cat(generated, dim=1)
# Fresh model (uncomment the torch.load line to resume from a checkpoint).
LSTM_model = RNN()
# LSTM_model = torch.load("model1")
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(LSTM_model.parameters(),lr=0.01)
def train(epochs):
    """Train LSTM_model with teacher forcing for *epochs* epochs.

    Uses the module-level model/optimizer/criterion and training tensors;
    every 20 epochs it records the training loss, evaluates on the
    validation split and prints both. The model is saved to "model2" at the
    end. Returns the list of recorded training losses.
    """
    train_loss = []
    for epoch in range(epochs):
        LSTM_model.train()
        output = LSTM_model(en_input=train_en_input, de_input=train_de_input, predict_length=0)
        loss = criterion(output, train_output)
        # Clear gradients, backpropagate, step. (The original also called
        # zero_grad() again after step(), which is redundant because the
        # gradients are cleared before every backward pass anyway.)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch % 20 == 0:
            LSTM_model.eval()
            # NOTE(review): no torch.no_grad() here, so the validation pass
            # still builds an autograd graph; results are unaffected.
            val_predict = LSTM_model(en_input=val_en_input, de_input=val_de_input, predict_length=0)
            val_loss = criterion(val_predict, val_output)
            train_loss.append(loss.tolist())
            print(f"epoch:{epoch},train_loss:{loss},val_loss:{val_loss}")
    torch.save(LSTM_model, "model2")
    return train_loss
trainLoss = train(801)

# Pick one held-out trajectory for the final test.
testName = '0.5_0.5_'
# testName = '-0.5_0.5_'
# testName = '1.0_0.0_'
test_state = torch.Tensor(np.loadtxt(testName+'state').reshape(201,2))
test_input = torch.Tensor(np.loadtxt(testName+'state')[:startPoint,0].reshape(1,startPoint,1))
test_result = np.loadtxt(testName+'state')[startPoint:,0].reshape(201-startPoint,1)
# Free-running rollout: only the first 5 steps are given; the remaining 196
# are generated autoregressively.
predict_result = LSTM_model(en_input=test_input,predict_length = 201-startPoint)
predict_result = predict_result.detach().numpy()
fig,axes = plt.subplots(2,1)
ax1=axes[0]
ax1.plot(test_result[:,0],label = "real")
ax1.plot(predict_result[:,:,:].reshape(-1),label = "predict")
ax1.legend()
plt.show()
# Report test MSE between ground truth and the rollout.
print(np.mean(np.square(test_result-predict_result)))
| Bonbon-Tang/DL-for-dynamical-systems | MSSPartial/noiseFree/Train_RNNS2.py | Train_RNNS2.py | py | 5,234 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_... |
15103185485 | import calendar
import pandas as pd
import numpy as np
def remove_high_corr(df, target='Israelis_Count', threshold=0.5):
    '''
    Drop features that are highly correlated with another feature, keeping
    from each correlated group the one most correlated with the target.

    args:
        df = dataframe (note: mutated — the target column is moved to the end)
        target = name of the target column
        threshold = absolute-correlation cutoff, default 0.5
    '''
    # Move the target column to the end so the feature loops below can
    # simply skip the last column.
    target_col = df.pop(target)
    df.insert(len(df.columns), target, target_col)
    cor_matrix = df.corr().abs()
    # Keep only the strict upper triangle. Fix: np.bool was removed in
    # NumPy 1.24; the builtin bool is the correct mask dtype.
    corr_df = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
    cols = corr_df.columns.to_list()
    list_corr_not_empty = []
    for i in range(len(cols) - 1):          # exclude the target (last column)
        tmp = []
        for j in range(len(cols) - 1):
            # Lower-triangle/diagonal entries are NaN and compare False, so
            # only genuine upper-triangle correlations pass the test.
            if abs(corr_df.iloc[i, j]) >= threshold and cols[i] is not cols[j]:
                tmp.append(cols[j])
        if len(tmp) > 0:
            tmp.append(cols[i])
            list_corr_not_empty.append(tmp)

    def Key(p):
        # Correlation of feature p with the target; the target is the last
        # column, so its upper-triangle entry exists for every feature row.
        return corr_df[target][p]

    stay = [max(sub, key=Key) for sub in list_corr_not_empty]
    drops = [c for sub in list_corr_not_empty for c in sub if c not in stay]
    return df.drop(list(set(drops)), axis=1)
def remove_outliers(df, target_name='Israelis_Count'):
    '''
    Return df without rows whose target value lies outside the boxplot
    whiskers.

    The whisker positions are read back from a matplotlib boxplot of the
    target column (its 'caps' artists), so the outlier definition is exactly
    matplotlib's default whisker rule.

    args:
        df = dataframe
        target_name = name of the target column
    '''
    import matplotlib.pyplot as plt
    plt.cla()  # clear the current axes so a previous plot cannot leak in
    bp = plt.boxplot(df[target_name])
    # 'caps' alternates lower/upper cap lines; collect their y-positions.
    minimums = [round(item.get_ydata()[0], 4) for item in bp['caps']][::2]
    maximums = [round(item.get_ydata()[0], 4) for item in bp['caps']][1::2]
    # Drop every row beyond the first box's caps.
    return df.drop(df[(df[target_name] > maximums[0]) | (df[target_name] < minimums[0])].index)
def plot_line(prediction, actual, title='', path_save=None, file_name=None, fig_size_tuple=(18, 8), xlim=None, ylim=None,
              alpha_prediction=1, alpha_actual=1):
    '''Draw predicted vs. actual visitor counts as two line plots.

    Optionally saves the figure: pass path_save (a directory, created if
    missing) together with file_name, e.g.
        path_save = 'folder1/save_here_folder/'
        file_name = 'file_name.png'

    arguments: prediction, actual, title='', path_save=None, file_name=None,
    fig_size_tuple=(18, 8), xlim=None, ylim=None, alpha_prediction=1,
    alpha_actual=1
    '''
    import os
    from pylab import rcParams
    rcParams['figure.figsize'] = fig_size_tuple[0], fig_size_tuple[1]
    import matplotlib.pyplot as plt
    import pandas as pd
    frame = pd.DataFrame({'Predictions': prediction, 'Actual': actual})
    plt.plot(frame.index, frame['Predictions'], color='r', label='Predicted Visitors', alpha=alpha_prediction)
    plt.plot(frame.index, frame['Actual'], color='b', label='Actual Visitors', alpha=alpha_actual)
    plt.grid(which='major', color='#cccccc', alpha=0.5)
    plt.legend(shadow=True)
    plt.title(title, family='Arial', fontsize=26)
    plt.ylabel('Visitors', family='Arial', fontsize=22)
    plt.xticks(rotation=45, fontsize=16)
    plt.yticks(rotation=45, fontsize=16)
    plt.xlim(xlim)
    plt.ylim(ylim)
    if path_save is not None:
        os.makedirs(path_save, exist_ok=True)
        plt.savefig(path_save + file_name)
    plt.show()
def plot_residuals(prediction, actual, title='', path_save=None, file_name=None, fig_size_tuple=(18, 8), xlim=None, ylim=None):
    '''Scatter the residuals (prediction - actual) against the predictions,
    with a horizontal zero reference line.

    Optionally saves the figure: pass path_save (a directory, created if
    missing) together with file_name, e.g.
        path_save = 'folder1/save_here_folder/'
        file_name = 'file_name.png'

    arguments: prediction, actual, title='', path_save=None, file_name=None,
    fig_size_tuple=(18, 8), xlim=None, ylim=None
    '''
    import os
    from pylab import rcParams
    rcParams['figure.figsize'] = fig_size_tuple[0], fig_size_tuple[1]
    import matplotlib.pyplot as plt
    import pandas as pd
    frame = pd.DataFrame({'Predictions': prediction, 'Actual': actual})
    frame['residuals'] = frame['Predictions'] - frame['Actual']
    plt.plot(frame.Predictions, frame.residuals, color='r', marker='.', linestyle='None')
    plt.xlabel('Visitors', family='Arial', fontsize=22)
    plt.ylabel('Residuals', family='Arial', fontsize=22)
    plt.plot(frame.Predictions, frame.residuals * 0, color='b')
    plt.title(title, family='Arial', fontsize=26)
    plt.grid(which='major', color='#cccccc', alpha=0.5)
    plt.legend(shadow=True)
    plt.yticks(rotation=45, fontsize=16)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.xticks(rotation=45, fontsize=16)
    if path_save is not None:
        os.makedirs(path_save, exist_ok=True)
        plt.savefig(path_save + file_name)
    plt.show()
def split_date(dataframe):
    '''
    Add 'day', 'month' and 'year' columns extracted from the 'Date' column.

    Returns the dataframe with 'Date' as the first column, followed by the
    original columns and the three new ones.
    '''
    # Round-trip through the index so the datetime accessors are available;
    # reset_index then restores 'Date' as the first column.
    # (Fix: removed an unused local `import pandas as pd`.)
    dataframe = dataframe.set_index("Date")
    dataframe['day'] = dataframe.index.day
    dataframe['month'] = dataframe.index.month
    dataframe['year'] = dataframe.index.year
    dataframe.reset_index(drop=False, inplace=True)
    return dataframe
def get_rmse(x, y):
    """Root-mean-squared error between two equal-length numeric sequences.

    Computed directly with numpy, which avoids importing sklearn on every
    call for what is a one-line formula.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    return float(np.sqrt(np.mean((x - y) ** 2)))
def remove_unique_one(df):
    '''
    Drop every column that holds a single distinct (non-null) value.
    Returns the reduced dataframe.
    '''
    single_valued = [col for col in df.columns if df[col].nunique() == 1]
    return df.drop(single_valued, axis=1)
def remove_pollution_site(dataset):
    '''
    Drop the raw pollution measurements and their exceedance flags:
    'nox', 'pm10', 'pm2.5', 'so2', 'is_Site_exceeded_pm10',
    'is_Site_exceeded_pm2.5', 'is_Site_exceeded_nox', 'is_Site_exceeded_so2'
    '''
    pollution_columns = ['nox', 'pm10', 'pm2.5', 'so2',
                         'is_Site_exceeded_pm10', 'is_Site_exceeded_pm2.5',
                         'is_Site_exceeded_nox', 'is_Site_exceeded_so2']
    print('remove pollution site Successfully')
    return dataset.drop(pollution_columns, axis=1)
def move_target_to_last(dataset, target='Israelis_Count'):
    """Move the target column to the last position (in place) and return the frame."""
    target_values = dataset.pop(target)
    dataset[target] = target_values
    return dataset
def remove(df, to_remove):
    """Drop column *to_remove* from df in place, if present; return df."""
    if to_remove in df.columns:
        df.drop(columns=to_remove, inplace=True)
    return df
def get_weekday(dataset):
    """One-hot encode the weekday name of each row's 'Date' value and append
    the dummy columns; the temporary 'week_Day' column is dropped again."""
    dataset['week_Day'] = dataset.Date.apply(lambda d: calendar.day_name[d.weekday()])
    dummies = pd.get_dummies(dataset['week_Day'])
    dataset = pd.concat([dataset, dummies], axis=1)
    return remove(dataset, 'week_Day')
def add_last_visitors_for_all_sites_in_df(df, target='Israelis_Count'):
    '''
    Apply last_year_entries_info to every site in the dataframe, filling a
    new 'Last_year_visitors_IL' column. Returns a new dataframe.
    '''
    dataset = df.copy()
    dataset['Last_year_visitors_IL'] = 0
    sites = dataset.Site_Name.unique()
    # Fix: the helpers were addressed through an undefined `function.`
    # module prefix, which raised NameError at runtime; they live in this
    # module, so call them directly. (assumes last_year_entries_info is
    # defined elsewhere in this module — TODO confirm)
    dataset = split_date(dataset)
    dataset = move_target_to_last(dataset, target)
    dataset = dataset.sort_values(['year', 'month', 'day'])
    for site in sites:
        print(site)
        site_dataset = dataset.loc[dataset.Site_Name == site]
        site_dataset = last_year_entries_info(site_dataset, target)
        # print(site_dataset.Last_year_visitors)
        dataset.loc[dataset.Site_Name == site, 'Last_year_visitors_IL'] = site_dataset.Last_year_visitors
    print('**********************************************')
    print('Add All Sites Last year visitors Successfully')
    print('**********************************************')
    return dataset
from datetime import date
def date_diff(a, b, target=1):
    """True when the calendar dates of a and b are exactly *target* days
    apart, in either direction."""
    d0 = date(a.year, a.month, a.day)
    d1 = date(b.year, b.month, b.day)
    return abs((d1 - d0).days) == target
def outputLimeAsDf(exp):
    '''
    Convert a lime explanation into a one-column DataFrame indexed by a
    cleaned feature name.

    Cleaning removes comparison operators, spaces, dots and digits from the
    lime condition string (e.g. "Temperature > 25.0" -> "Temperature");
    digit stripping also mangles 'pm10' into 'pm', which is restored.
    Dummy conditions containing "=0" flip the weight's sign.
    '''
    # One translate pass replaces the original chain of per-character
    # .replace() calls (which also contained a stray no-op `str.replace`
    # expression statement).
    strip_table = str.maketrans('', '', '<>= .0123456789')
    weights = {}
    for feature_desc, weight in exp.as_list():
        sign = -1 if '=0' in feature_desc else 1
        name = feature_desc.translate(strip_table)
        name = name.replace('pm', 'pm10')
        name = name.replace('_', ' ')
        weights[name] = weight * sign
    return pd.DataFrame.from_dict(weights, orient='index')
def printOutputeCoef(coef):
    """Turn (value, name) coefficient tuples into a one-column DataFrame
    indexed by the feature name."""
    as_dict = {tup[-1]: tup[0] for tup in coef}
    return pd.DataFrame.from_dict(as_dict, orient='index')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn import linear_model
def mlrModelResTrainTestCoeff(dataframe, shaffle=False):
    '''
    Fit a multiple linear regression on min-max scaled features and return
    3 dataframes: train results, test results, coefficient table.

    Expects a dataframe with a 'Date' column and an 'Israelis_Count' target.
    '''
    dataframe.dropna(inplace=True)
    # Fix: the sort result was previously discarded (sort_values is not in
    # place), so the chronological ordering never took effect.
    dataframe = dataframe.sort_values('Date')
    y = dataframe[['Date', 'Israelis_Count']]
    X = dataframe.drop('Israelis_Count', axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=shaffle, test_size=0.2)
    # Fix: fit the scaler on the training data only and reuse it for the
    # test data; fitting a second scaler on the test set leaks test-set
    # statistics into the evaluation.
    scaler = MinMaxScaler()
    X_train_scaled = scaler.fit_transform(X_train.drop('Date', axis=1))
    X_test_scaled = scaler.transform(X_test.drop('Date', axis=1))
    mlr = linear_model.LinearRegression()
    mlr.fit(X_train_scaled, y_train.Israelis_Count)
    resTrain = pd.DataFrame(
        data={'Prediction': mlr.predict(X_train_scaled),
              'Actual': y_train.Israelis_Count.values},
        index=y_train.Date)
    resTest = pd.DataFrame(
        data={'Prediction': mlr.predict(X_test_scaled),
              'Actual': y_test.Israelis_Count.values},
        index=y_test.Date)
    coef = sorted(list(zip(np.round(mlr.coef_, 5).T, X_train.drop("Date", axis=1).columns)))
    coefDF = pd.DataFrame.from_dict({name: value for value, name in coef}, orient='index')
    return resTrain, resTest, coefDF
def printRes(res, plotLine=True, plotResiduals=False, n=10):
    '''
    Print RMSE and std for a results dataframe with Prediction/Actual
    columns, optionally plot the lines and/or residuals, then show n
    randomly sampled rows.
    '''
    ordered = res.sort_index()
    print('rmse', get_rmse(ordered.Prediction, ordered.Actual))
    print('std', np.std(ordered.Actual))
    if plotResiduals:
        plot_residuals(actual=ordered.Actual, prediction=ordered.Prediction)
    if plotLine:
        plot_line(actual=ordered.Actual, prediction=ordered.Prediction)
    print('Sample rows:')
    print(ordered.sample(n))
from lime_stability.stability import LimeTabularExplainerOvr
def getLimeAvg(X_test_scaled, X_train_scaled, X_train, model, numRunLimeForSameIndex=20):
    '''
    Average lime feature attributions over every test row, running lime
    numRunLimeForSameIndex times per row to smooth its sampling noise.

    Returns a one-column DataFrame of mean attributions indexed by cleaned
    feature name.
    '''
    class_names = ['Israelis_Count']
    # Treat binary columns (<= 2 distinct values) as categorical for lime.
    categorical_features = np.argwhere(
        np.array([len(set(X_train_scaled[:, x]))
                  for x in range(X_train_scaled.shape[1])]) <= 2).flatten()
    print(X_train_scaled.shape)
    print(categorical_features.shape)
    print(X_train_scaled.shape)
    explainer = LimeTabularExplainerOvr(np.array(X_train_scaled),
                                        feature_names=X_train.drop('Date', axis=1).columns,
                                        class_names=class_names,
                                        categorical_features=categorical_features,
                                        verbose=True,
                                        mode='regression')
    # Fix: the original seeded a frame with four hard-coded features and
    # accumulated with `+`, which turns every non-seeded feature into NaN
    # (pandas `+` aligns on the index without filling missing labels).
    # Sum with .add(fill_value=0) instead so all features survive.
    totals = None
    avgCount = 0
    for i in range(0, len(X_test_scaled)):
        for j in range(numRunLimeForSameIndex):
            exp = explainer.explain_instance((X_test_scaled[i]), model.predict, num_features=100)
            contribution = outputLimeAsDf(exp)
            totals = contribution if totals is None else totals.add(contribution, fill_value=0)
            avgCount += 1
    if avgCount == 0:
        # No test rows: return an empty frame instead of dividing by zero.
        return pd.DataFrame()
    return totals / avgCount
{
"api_name": "numpy.triu",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.bool",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.cla",
"li... |
39473584807 | import numpy as np
from keras.models import load_model
# Load the held-out MNIST arrays and the trained Keras model from disk.
test_images = np.load("mnist_test_images.npy")
test_labels = np.load("mnist_test_labels.npy")
network = load_model("mnist_model.h5")

# Evaluate the model on the test set and report loss/accuracy.
print("Evaluating...")
test_loss, test_acc = network.evaluate(test_images, test_labels)
print()
print("Test accuracy:", test_acc)
print("Test loss:", test_loss)
| byelipk/deep-mnist | mnist_eval.py | mnist_eval.py | py | 373 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 7,
"usage_type": "call"
}
] |
15143059359 | #!/usr/bin/env python
# coding: utf-8
# # COURSE: Master statistics and machine learning: Intuition, Math, code
# ##### COURSE URL: udemy.com/course/statsml_x/?couponCode=202006
# ## SECTION: The t-test family
# ### VIDEO: Mann-Whitney U test
# #### TEACHER: Mike X Cohen, sincxpress.com
# In[ ]:
# import libraries
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# In[ ]:
## generate the data

# the data (note the different sample sizes)
N1 = 30
N2 = 35

# Two independent Poisson samples with different rates (2 vs 1).
data1 = np.random.poisson(2,N1)
data2 = np.random.poisson(1,N2)

# Jittered x positions so individual points are visible per group.
plt.plot(1+np.random.randn(N1)/10,data1,'ks',markerfacecolor='w')
plt.plot(2+np.random.randn(N2)/10,data2,'ro',markerfacecolor='w')
plt.xlim([0,3])
plt.xticks([1,2],labels=('data1','data2'))
plt.xlabel('Data group')
plt.ylabel('Data value')
plt.show()

# In[ ]:

## now for the test
# Mann-Whitney U: nonparametric comparison of two independent samples.
U,p = stats.mannwhitneyu(data1,data2)
print(U,p)
| mikexcohen/Statistics_course | Python/ttest/stats_ttest_MannWhitneyU.py | stats_ttest_MannWhitneyU.py | py | 900 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "numpy.random.poisson",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.poisson",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.ran... |
70599120354 | from dask.distributed import Client
import dask.dataframe as dd
from fastapi import FastAPI
client = Client()
app = FastAPI()

# Import data
# NOTE(review): hard-coded absolute local path — parameterize before
# deploying anywhere else.
opioids_data = dd.read_csv(
    "/Users/lorna/Documents/MIDS 2022/First Semester/720 Practicing Data Science/dask/arcos_2011_2012.tsv",
    sep="\t",
    usecols=["BUYER_STATE","TRANSACTION_DATE","Combined_Labeler_Name", "MME_Conversion_Factor", "CALC_BASE_WT_IN_GM"],
    dtype={"TRANSACTION_DATE": "object"}
)
# Morphine-equivalent amount = base weight * MME conversion factor.
opioids_data["morphine_equivalent"] = (opioids_data["CALC_BASE_WT_IN_GM"]) * (opioids_data["MME_Conversion_Factor"])
# Transaction dates arrive as MMDDYYYY strings.
opioids_data["date"] = dd.to_datetime(opioids_data.TRANSACTION_DATE, format = "%m%d%Y")
opioids_data["year"] = opioids_data.date.dt.year
# Column subsets used by the endpoints below.
opioids_data1 = opioids_data[["BUYER_STATE","Combined_Labeler_Name","morphine_equivalent"]]
opioids_data2 = opioids_data[["BUYER_STATE","year","Combined_Labeler_Name","morphine_equivalent"]]
# Root endpoint: simple liveness check.
@app.get("/")
def read_root():
    """Return a fixed string so callers can verify the service is up."""
    return "testing root"
# Every buyer state with its total morphine-equivalent amount.
@app.get("/states")
def read_state():
    """Total morphine-equivalent purchases grouped by buyer state."""
    per_state = opioids_data1.groupby(["BUYER_STATE"]).morphine_equivalent.sum()
    return per_state.compute()
# Yearly morphine-equivalent totals for one state.
@app.get("/states/{state_id}")
def read_state_purchases(state_id: str):
    """Per-year morphine-equivalent totals for the given buyer state."""
    yearly = opioids_data2.groupby(["BUYER_STATE", "year"]).morphine_equivalent.sum().reset_index()
    return yearly.loc[yearly["BUYER_STATE"] == state_id].compute()
# Every company with its total morphine-equivalent amount sold.
@app.get("/companies")
def read_companies():
    """Total morphine-equivalent sales grouped by labeler/company."""
    per_company = opioids_data1.groupby(["Combined_Labeler_Name"]).morphine_equivalent.sum()
    return per_company.compute()
# Yearly morphine-equivalent totals for one company.
@app.get("/companies/{company_name}")
def read_companies_sales(company_name: str):
    """Per-year morphine-equivalent sales for the given company."""
    yearly = opioids_data2.groupby(["Combined_Labeler_Name", "year"]).morphine_equivalent.sum().reset_index()
    return yearly.loc[yearly["Combined_Labeler_Name"] == company_name].compute()
# Distinct states a given company sold into.
@app.get("/companies/states/{company_name}")
def read_companies_states(company_name: str):
    """Unique buyer states appearing in the given company's sales."""
    grouped = opioids_data2.groupby(["year", "Combined_Labeler_Name", "BUYER_STATE"]).morphine_equivalent.sum().reset_index()
    return grouped.loc[grouped["Combined_Labeler_Name"] == company_name, "BUYER_STATE"].unique().compute()
| lornamariak/dask-api | app/main.py | main.py | py | 2,589 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dask.distributed.Client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dask.dataframe.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dask.dat... |
19752788443 | import tempfile
import os
import torch
from kaner.common import save_text, save_json
from kaner.adapter.tokenizer.char import CharTokenizer
def test_chartokenizer():
    """Test the class `CharTokenizer`."""
    # Write a tiny vocabulary, matching 2-d embeddings and a config file to
    # a temporary folder, then check lookups, tokenization and embeddings.
    tokens = ["[UNK]", "[PAD]", "南", "京", "市", "长", "江", "大", "桥"]
    token_embeddings = [
        [0.0, 0.0],
        [0.0, 0.0],
        [0.9, 0.3],
        [0.7, 0.8],
        [0.21, 0.78],
        [0.51, 0.82],
        [0.23, 0.91],
        [0.39, 0.61],
        [0.98, 0.45]
    ]
    workdir = tempfile.TemporaryDirectory()
    folder = workdir.name
    save_text("\n".join(tokens), folder, "tokens.txt")
    torch.save(torch.tensor(token_embeddings), os.path.join(folder, "token_embeddings.checkpoints"))
    config = {"n_tokens": 9, "token_dim": 2}
    save_json(config, folder, "token_configs.json")
    tokenizer = CharTokenizer(folder)
    assert len(tokenizer) == 9
    config["pad_id"] = 1
    assert tokenizer.configs() == config
    assert tokenizer.unk_token == "[UNK]"
    assert tokenizer.pad_token == "[PAD]"
    assert tokenizer["南"] == 2
    assert tokenizer[3] == "京"
    assert tokenizer.tokenize("南京是好朋友") == ["南", "京", "是", "好", "朋", "友"]
    assert tokenizer.convert_tokens_to_ids(["南", "京", "是", "好", "朋", "友"]) == [2, 3, 0, 0, 0, 0]
    assert [[round(e, 2) for e in em] for em in tokenizer.embeddings().tolist()] == token_embeddings
    workdir.cleanup()
| knowledgeresearch/kaner | tests/adapter/tokenizer/test_char.py | test_char.py | py | 1,483 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "kaner.common.save_text",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.t... |
1041291189 | import logging.config
import logging.handlers
def set_logging(project_name, logging_path='mylog.log'):
    """Configure and return the logger named *project_name*.

    Attaches a daily-rotating file handler (INFO+, keeps 7 backups, writes
    to *logging_path*) and a console handler (DEBUG+), both using a
    timestamped format, then logs a startup message.

    Returns the configured logger so callers can use it directly
    (previously the function returned None and callers had to re-fetch it
    via logging.getLogger).
    """
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(project_name)
    logger.setLevel(logging.DEBUG)
    filehandler = logging.handlers.TimedRotatingFileHandler(filename=logging_path, when='D', backupCount=7)
    filehandler.setLevel(logging.INFO)
    filehandler.setFormatter(formatter)
    logger.addHandler(filehandler)
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.DEBUG)
    streamhandler.setFormatter(formatter)
    logger.addHandler(streamhandler)
    logger.info("Logger started")
    return logger
| Maxsparrow/StatMouse2 | statmouse/logger.py | logger.py | py | 700 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.config.Formatter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "logging.config.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging... |
35911465282 | import torch
from torch.nn.utils.rnn import pad_sequence
class Translator:
    """Greedy seq2seq translation helper around a trained model and a
    torchtext-style dataset (src/trg fields carrying vocabs)."""

    def __init__(self, model, dataset):
        self.model = model
        self.dataset = dataset

    def translate(self, src_strs, device="cuda"):
        """Tokenize, encode and pad *src_strs*, greedily decode them, and
        map the generated ids back to target-vocabulary tokens."""
        src_field, trg_field = self.dataset.src, self.dataset.trg
        src_vocab, trg_vocab = src_field.vocab, trg_field.vocab
        unk_idx, pad_idx = src_vocab["<unk>"], src_vocab["<pad>"]
        sos_idx, eos_idx = trg_vocab["<sos>"], trg_vocab["<eos>"]

        def src_str_to_id(token):
            # Fall back to <unk> for out-of-vocabulary source tokens.
            id = src_vocab.stoi.get(token)
            if id is None:
                id = unk_idx
            return id

        def trg_id_to_str(id):
            return trg_vocab.itos[id]

        src_tokens = list(map(src_field.tokenize, src_strs))
        src_idxs = [torch.tensor(list(map(src_str_to_id, tokens)), dtype=torch.int64)
                    for tokens in src_tokens]
        # Pad the batch to a rectangular (batch, max_len) tensor.
        src_idxs = pad_sequence(src_idxs, batch_first=True, padding_value=pad_idx).to(device)
        trg_ids = self.generate_trg_idxs(src_idxs, sos_idx, eos_idx)
        trg_tokens = [list(map(trg_id_to_str, ids)) for ids in trg_ids]
        return trg_tokens

    @torch.no_grad()
    def generate_trg_idxs(self, src, sos_idx, eos_idx, stopping_len=64):
        """Greedy decoding: repeatedly run the model on the growing target
        prefix and append the argmax token. A sequence leaves the active
        batch as soon as it emits <eos>; any survivors are truncated after
        *stopping_len* steps.

        Returns, per original input row, the generated ids without
        <sos>/<eos> (truncated rows keep all generated tokens, no <eos>).
        """
        batch_size, _ = src.shape
        trg = sos_idx * torch.ones(batch_size, 1, dtype=torch.int64).to(src.device)
        # idxs maps positions in the (shrinking) active batch back to the
        # original row index; trg_idxs collects finished sequences.
        idxs = [i for i in range(batch_size)]
        trg_idxs = [None for _ in range(batch_size)]
        len_counter = 0
        while True:
            logits = self.model(src, trg)
            pred_idxs = torch.argmax(logits, dim=-1)
            next_idxs = pred_idxs[:, -1].unsqueeze(-1)
            trg = torch.cat((trg, next_idxs), dim=-1)
            eos_mask = (next_idxs == eos_idx).squeeze(-1)
            # Iterate in reverse so deleting from idxs does not shift the
            # positions still to be visited.
            for i, stop in reversed(list(enumerate(eos_mask))):
                if not stop:
                    continue
                idx = idxs[i]
                trg_idxs[idx] = trg[i, 1:-1].tolist()  # strip <sos>/<eos>
                del idxs[i]
            # Keep only the still-unfinished rows in the active batch.
            src = src[eos_mask == 0]
            trg = trg[eos_mask == 0]
            if len(src) == 0:
                break
            len_counter += 1
            if len_counter >= stopping_len:
                # Force-stop: flush whatever has been generated so far.
                for i in range(len(trg)):
                    idx = idxs[i]
                    trg_idxs[idx] = trg[i, 1:].tolist()
                break
        return trg_idxs
{
"api_name": "torch.tensor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.rnn.pad_sequence",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.o... |
27783022168 | import pandas as pd
from pathlib import Path
from configs import Config, configs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from typing import Union, List
import pickle
import logging
class Dataset:
def __init__(self, path_to_dataset: Union[Path, str],
output_dir: Union[Path, str],
val_size: float = 0.2,
config_name: str = 'default',
path_to_label_encoder: Union[Path, str] = None):
self.path_to_dataset = Path(path_to_dataset)
self.output_dir = Path(output_dir)
self.val_size = val_size
if not self.output_dir.exists():
self.output_dir.mkdir(parents=True)
self.dataframe = pd.read_csv(self.path_to_dataset)
self.X_train = None
self.y_train = None
self.X_val = None
self.y_val = None
self.X = None
self.X_seq_id = None
self.y = None
self.config = Config(**configs[config_name])
self.label_encoder = (self._load_label_encoder(path_to_label_encoder=path_to_label_encoder)
if path_to_label_encoder is not None
else None)
    def apply_training_pipeline(self):
        """Clean, preprocess, split into train/val and export the training
        arrays (train_val_split/export_training are defined elsewhere)."""
        self.clean()
        self.process()
        self.train_val_split(val_size=self.val_size)
        self.export_training()
    def apply_test_pipeline(self):
        """Preprocess and export the test arrays — no cleaning or split."""
        self.process()
        self.export_test()
def clean(self) -> pd.DataFrame():
df_clean = self.dataframe.copy()
cols_to_keep = ['protein_sequence', 'tm', 'seq_id']
df_clean = df_clean[cols_to_keep]
nb_elements_before = len(df_clean)
logging.info(f'> Raw dataset length: {nb_elements_before}')
df_clean.dropna(inplace=True)
nb_elements_after = len(df_clean)
logging.info(f'> {nb_elements_before - nb_elements_after} rows dropped due to null values')
logging.info(f'> Upper bound for sequence length: {self.config.seq_len_upper_bound_drop}')
nb_elements_before = len(df_clean)
if self.config.seq_len_upper_bound_drop is not None:
df_clean.loc[:, 'sequence_length'] = pd.Series([len(seq) for seq in df_clean.protein_sequence])
df_clean.drop(df_clean[df_clean.sequence_length < self.config.seq_len_upper_bound_drop].index, inplace=True)
df_clean.drop(columns='sequence_length', inplace=True)
nb_elements_after = len(df_clean)
logging.info(f'> {nb_elements_before - nb_elements_after} rows dropped due to seq_len_upper_bound')
self.dataframe = df_clean
logging.info('> Dataset cleaned')
def process(self) -> None:
df = self.dataframe.copy()
max_seq_length_in_df = df.protein_sequence.str.len().max()
if 'tm' in df.columns:
self.y = df.tm.to_numpy()
if self.label_encoder is None:
# Label encode
unique_amino_acids = []
df.protein_sequence.apply(lambda sequence: Dataset.compute_unique_amino_acids(sequence, unique_amino_acids))
self.label_encoder = LabelEncoder()
self.label_encoder.fit(unique_amino_acids)
encoded_sequences = df.protein_sequence.transform(lambda sequence: self.label_encode_sequences(sequence))
# Padding
sequence_length = self.config.max_seq_len if self.config.max_seq_len is not None else max_seq_length_in_df
encoded_sequences = pad_sequences(encoded_sequences, maxlen=sequence_length,
padding='post', truncating='post', value=-1)
# One-hot encoding
self.X = to_categorical(encoded_sequences)
self.X_seq_id = df.seq_id.to_numpy()
logging.info('> Dataset preprocessed')
@staticmethod
def compute_unique_amino_acids(seq: str, accumulator: List[str]) -> None:
accumulator += [amino_acid for amino_acid in seq if amino_acid not in accumulator]
def label_encode_sequences(self, seq: str) -> List[int]:
return self.label_encoder.transform([amino_acid for amino_acid in seq])
def train_val_split(self, val_size: float, random_state=42):
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(
self.X,
self.y,
test_size=val_size,
random_state=random_state
)
def export_training(self):
with open(self.output_dir / 'label_encoder.pkl', 'wb') as f:
pickle.dump(self.label_encoder, f)
with open(self.output_dir / 'X_train.pkl', 'wb') as f:
pickle.dump(self.X_train, f)
with open(self.output_dir / 'y_train.pkl', 'wb') as f:
pickle.dump(self.y_train, f)
with open(self.output_dir / 'X_val.pkl', 'wb') as f:
pickle.dump(self.X_val, f)
with open(self.output_dir / 'y_val.pkl', 'wb') as f:
pickle.dump(self.y_val, f)
logging.info(f'> Dataset exported to {self.output_dir}')
def export_test(self):
with open(self.output_dir / 'label_encoder.pkl', 'wb') as f:
pickle.dump(self.label_encoder, f)
with open(self.output_dir / 'X_test.pkl', 'wb') as f:
pickle.dump(self.X, f)
with open(self.output_dir / 'X_seq_id.pkl', 'wb') as f:
pickle.dump(self.X_seq_id, f)
logging.info(f'> Dataset exported to {self.output_dir}')
def _load_label_encoder(self, path_to_label_encoder: Union[Path, str]):
with open(path_to_label_encoder, 'rb') as f:
return pickle.load(f)
| ClementJu/kaggle-novozymes-enzyme-stability-prediction | src/data_preparation/dataset.py | dataset.py | py | 5,745 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_numbe... |
20275942333 | import openpyxl
# Compare expected plate numbers (column C) against detected values
# (last whitespace-separated token of column D); record the 1-based row
# positions where they disagree, then copy those lines from the
# detection log into out.txt.
wb = openpyxl.load_workbook('NUMBER_PLATE_LIST (Autosaved)1.xlsx')
ws = wb.active

mismatch_rows = []
row_no = 0
for cell, cell_1 in zip(ws['C'], ws['D']):
    # The original incremented the counter in both the == and != branch,
    # i.e. exactly once per row — made explicit here.
    row_no += 1
    if str(cell.value) != str(cell_1.value).split()[-1]:
        mismatch_rows.append(row_no)

# Context managers guarantee out.txt is flushed and closed
# (the original never closed either file, risking lost writes).
with open("vehicle_list_day_footage.txt", "r") as f, \
        open("out.txt", "w") as f1:
    i = 0
    line_no = 0
    for line in f:
        line_no += 1
        # Bounds guard: the original indexed a[i] unconditionally and
        # raised IndexError once every mismatch had been consumed.
        if i < len(mismatch_rows) and line_no == mismatch_rows[i]:
            print(line)
            i += 1
            f1.write(line)
| drishtiramesh/Python_Scripts | Extract_Wrong_Detections.py | Extract_Wrong_Detections.py | py | 647 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 2,
"usage_type": "call"
}
] |
1498623289 | #!/usr/bin/env python3
# dpw@Darryls-iMac.localdomain
# 2023-03-10 21:45:37
#
# @see https://schedule.readthedocs.io/
import threading
import time
import schedule
# no input args; with args, use functools.partial to define
def job():
    """Print a timestamped heartbeat identifying the executing thread."""
    now = time.time()
    worker = threading.current_thread()
    print(f"working at {now} on thread {worker}", flush=True)
def run_threaded(func):
    """Run *func* on a newly started worker thread (fire-and-forget)."""
    threading.Thread(target=func).start()
# Fire the heartbeat every 5 seconds; run_threaded keeps the scheduler
# loop responsive by running the job on its own thread.
schedule.every(5).seconds.do(run_threaded, job)  # or job, *args, **kwargs

# Poll the scheduler once per second for ~60 seconds, then fall through
# and exit (worker threads started meanwhile are not joined).
n = 60
while n > 0:
    schedule.run_pending()
    time.sleep(1)
    n -= 1
| darrylwest/python-play | concurrency/run-at.py | run-at.py | py | 588 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.current_thread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "schedule.every",
... |
25015396838 | from typing import Optional, List, Dict
from src.db_models import LabelGroup, LabelFormat, FieldScope, CalculatedField, Driver, DriverChange
from src.data_models import LabelFormatApi, LabelFormatApiList
from src.data_models import CalculatedFieldApi, CalculatedFieldApiList
from src.data_models import DriverApi, DriverApiList
from src.data_models import DriverChangeApi, DriverChangeApiList
from src import db
def get_calculated_fields(field_scope_code: str) -> CalculatedFieldApiList:
    """Return the scope's calculated fields ordered by order_key.

    Raises ValueError when no FieldScope row matches *field_scope_code*.
    """
    scope: Optional[FieldScope] = FieldScope.query.filter_by(code=field_scope_code).first()
    if not scope:
        raise ValueError(f"invalid scope code: {field_scope_code}")
    records = (CalculatedField.query
               .filter_by(scope_id=scope.id)
               .order_by(CalculatedField.order_key)
               .all())
    return CalculatedFieldApiList(
        title=scope.title,
        items=[CalculatedFieldApi.from_orm(rec) for rec in records],
    )
def save_calculated_fields(field_scope_code: str, req_data: CalculatedFieldApiList) -> int:
    """Replace all calculated fields of a scope with *req_data*.

    Creates the FieldScope row when missing (otherwise updates its title),
    deletes the scope's existing CalculatedField rows, then inserts the new
    ones with a fresh 1-based order_key. Rolls back and re-raises on any
    database error. Returns the number of fields written.
    """
    from src import db
    field_scope: Optional[FieldScope] = FieldScope.query.filter_by(code=field_scope_code).first()
    if not field_scope:
        field_scope = FieldScope(code=field_scope_code, title=req_data.title)
        db.session.add(field_scope)
    else:
        field_scope.title = req_data.title
    db.session.commit()
    order_key = 1
    db_obj_list = []
    # transform to objects, add order key
    for item in req_data.items:
        db_obj = CalculatedField(**item.dict(), order_key=order_key, scope_id=field_scope.id)
        db_obj_list.append(db_obj)
        order_key += 1
    try:
        calculated_fields = CalculatedField.query.filter_by(scope_id=field_scope.id).all()
        # cleanup old records
        for cf in calculated_fields:
            db.session.delete(cf)
        db.session.commit()
        # write new records
        for obj in db_obj_list:
            db.session.add(obj)
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        raise ex
    return len(db_obj_list)
def get_calculated_fields_all() -> Dict[str, CalculatedFieldApiList]:
    """Collect every scope's calculated fields, keyed by scope code."""
    scopes: List[FieldScope] = FieldScope.query.order_by(FieldScope.code).all()
    return {scope.code: get_calculated_fields(scope.code) for scope in scopes}
def save_calculated_fields_all(d: Dict[str, CalculatedFieldApiList]) -> int:
    """Persist every scope's field list; return the total rows written."""
    return sum(save_calculated_fields(code, values) for code, values in d.items())
def get_label_formats(label_group_code: str) -> LabelFormatApiList:
    """Return the group's label formats ordered by order_key.

    Raises:
        ValueError: if no LabelGroup row matches *label_group_code*.
            (Narrowed from bare ``Exception`` for consistency with
            ``get_calculated_fields``; ValueError is still caught by any
            caller using ``except Exception``.)
    """
    label_group: Optional[LabelGroup] = LabelGroup.query.filter_by(code=label_group_code).first()
    if not label_group:
        raise ValueError(f"invalid label group: {label_group_code}")
    labels = LabelFormat.query.filter_by(group_id=label_group.id).order_by(LabelFormat.order_key).all()
    wrapper = LabelFormatApiList(title=label_group.title, items=[])
    for label in labels:
        wrapper.items.append(LabelFormatApi.from_orm(label))
    return wrapper
def save_label_formats(label_group_code: str, req_data: LabelFormatApiList) -> int:
    """Replace all label formats of a group with *req_data*.

    Creates the LabelGroup row when missing (otherwise updates its title),
    deletes the group's existing LabelFormat rows, then inserts the new
    ones with a fresh 1-based order_key. Rolls back and re-raises on any
    database error. Returns the number of formats written.
    """
    from src import db
    label_group: Optional[LabelGroup] = LabelGroup.query.filter_by(code=label_group_code).first()
    if not label_group:
        label_group = LabelGroup(code=label_group_code, title=req_data.title)
        db.session.add(label_group)
    else:
        label_group.title = req_data.title
    db.session.commit()
    order_key = 1
    db_obj_list = []
    # transform to objects, add order key
    for item in req_data.items:
        db_obj = LabelFormat(**item.dict(), order_key=order_key, group_id=label_group.id)
        db_obj_list.append(db_obj)
        order_key += 1
    try:
        label_formats = LabelFormat.query.filter_by(group_id=label_group.id).all()
        # cleanup old records
        for lf in label_formats:
            db.session.delete(lf)
        db.session.commit()
        # add new records
        for obj in db_obj_list:
            db.session.add(obj)
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        raise ex
    return len(db_obj_list)
def get_label_formats_all() -> Dict[str, LabelFormatApiList]:
    """Collect every group's label formats, keyed by group code."""
    groups: List[LabelGroup] = LabelGroup.query.order_by(LabelGroup.code).all()
    return {group.code: get_label_formats(group.code) for group in groups}
def save_label_formats_all(d: Dict[str, LabelFormatApiList]):
    """Persist every group's label formats; return the total rows written."""
    return sum(save_label_formats(code, values) for code, values in d.items())
def get_drivers() -> DriverApiList:
    """Return every driver as an API list, sorted by name."""
    from src.db_models import Driver
    records = Driver.query.order_by(Driver.name).all()
    return DriverApiList(__root__=[DriverApi.from_orm(rec) for rec in records])
def save_drivers(req_data: DriverApiList) -> int:
    """Replace the full driver table with *req_data*.

    Deletes all existing Driver rows, then inserts the new ones. Rolls back
    and re-raises on any database error. Returns the number of rows written.
    """
    from src import db
    db_obj_list = []
    # transform to objects, add order key
    for item in req_data.__root__:
        db_obj = Driver(**item.dict())
        db_obj_list.append(db_obj)
    try:
        drivers = Driver.query.all()
        # cleanup old records
        for d in drivers:
            db.session.delete(d)
        db.session.commit()
        # add new records
        for obj in db_obj_list:
            db.session.add(obj)
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        raise ex
    return len(db_obj_list)
def get_driver_changes() -> DriverChangeApiList:
    """Return every driver-change record, oldest first (by valid_from)."""
    from src.db_models import DriverChange
    records = DriverChange.query.order_by(DriverChange.valid_from).all()
    return DriverChangeApiList(__root__=[DriverChangeApi.from_orm(rec) for rec in records])
def save_driver_changes(req_data: DriverChangeApiList) -> int:
    """Replace the full driver-change table with *req_data*.

    Deletes all existing DriverChange rows, then inserts the new ones.
    Rolls back and re-raises on any database error. Returns the number of
    rows written.
    """
    from src import db
    db_obj_list = []
    # transform to objects, add order key
    for item in req_data.__root__:
        db_obj = DriverChange(**item.dict())
        db_obj_list.append(db_obj)
    try:
        driver_changes = DriverChange.query.all()
        # cleanup old records
        for dch in driver_changes:
            db.session.delete(dch)
        # NOTE(review): unlike save_drivers/save_label_formats there is no
        # commit after the delete loop; here deletes and inserts land in a
        # single transaction — confirm which pattern is intended.
        # add new records
        for obj in db_obj_list:
            db.session.add(obj)
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        raise ex
    return len(db_obj_list)
| krezac/tesla-race-analyzer | src/backup.py | backup.py | py | 6,723 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "src.db_models.FieldScope",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "src.db_models.FieldScope.query.filter_by",
"line_number": 13,
"usage_type": "call"
},
{
... |
10541671700 | from collections import deque
# Adjacency list per node (2-D list) from stdin:
# first line "n m" (people, friendships), then m undirected edges "a b".
n, m = map(int, input().split(' '))
graph = [[] for _ in range(n+1)]
for i in range(m):
    a,b = map(int,input().split(' '))
    graph[a].append(b) # friendship is mutual, so record the edge both ways
    graph[b].append(a)
def bfs(start) :
    # FIFO queue for breadth-first search (deque gives O(1) popleft)
    queue = deque([start])
    # Mark the start node visited; distances are stored offset by +1
    visited[start] = 1
    while queue: # run until the queue is exhausted
        # Take one node out of the queue
        v = queue.popleft()
        # Enqueue every adjacent node not yet visited
        for i in graph[v]:
            if not visited[i] :
                visited[i] = visited[v] + 1
                queue.append(i)
result = []
for i in range(1, n+1) :
    visited = [0] * (n+1) # visited[x] = BFS distance from i to x, plus 1
    bfs(i)
    result.append(sum(visited))
# Print the 1-based index of the person with the smallest Kevin Bacon
# score (the uniform +1 offsets do not change which index is minimal).
print(result.index(min(result)) + 1)
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
}
] |
25015394148 | from flask import url_for, flash, Markup, redirect, Response
from flask_admin import expose
from flask_jwt_extended import create_access_token
import pendulum
from src.admin.admin_forms import TestLabelFormatForm, TestCalculatedFieldForm, DriverChangeForm, \
ConfigRestoreForm, ConfigBackupForm, GenerateJwtTokenForm, CreateNewUserForm, CustomPageForm
from src.data_processor.data_processor import data_processor
from src.parent_views import MyRoleRequiredCustomView
from src.data_models import ConfigBackupData
class MyTestCalculatedFieldView(MyRoleRequiredCustomView):
    """Admin page to evaluate a custom calculated-field function against live
    data and optionally persist it as a new CalculatedField row."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        """Render the test form; on POST evaluate the code and (optionally) save it."""
        form = TestCalculatedFieldForm()
        from src import configuration, db
        from src.db_models import FieldScope, CalculatedField
        form.field_scope.choices = [(g.code, g) for g in FieldScope.query.all()]
        if form.validate_on_submit():
            try:
                return_value = data_processor.test_custom_calculated_field('__test_field__', form.field_scope.data, form.fn_code.data, "") # TODO the return type is not needed for now
                flash(Markup(f"Return value is <b>{return_value['__test_field__']}</b>"), "info")
                if form.add.data:
                    scope = FieldScope.query.filter_by(code=form.field_scope.data).first()
                    # New field goes to the end: next order_key after the current max.
                    cf_ok = CalculatedField.query.filter_by(scope_id=scope.id).order_by(CalculatedField.order_key.desc()).first()
                    order_key = 1
                    if cf_ok:
                        order_key = cf_ok.order_key + 1
                    cf = CalculatedField(
                        name=form.name.data,
                        description=form.description.data,
                        return_type=form.return_type.data,
                        calc_fn=form.fn_code.data,
                        scope_id=scope.id,
                        order_key=order_key
                    )
                    try:
                        db.session.add(cf)
                        db.session.commit()
                        flash(f"Calculated field {form.name.data} stored to database for code {form.field_scope.data}", "info")
                    except Exception as ex:
                        db.session.rollback()
                        flash(f"Can't add {form.name.data}: {ex}", "error")
            except Exception as ex:
                flash(f"{type(ex).__name__}: {ex}", "error")
        # Sample context for the template: prefer the second-to-last lap when
        # there is more than one; keep only scalar lap values.
        laps = data_processor.get_laps_raw()
        lap = laps[-2 if len(laps) > 1 else -1]
        lap = {k: v for k, v in lap.items() if not isinstance(v, dict) and not isinstance(v, list)}
        return self.render('admin/test_calculated_field.html', form=form,
                           current_status=data_processor.get_status_raw(),
                           position=data_processor.get_positions_raw()[-1],
                           lap=lap,
                           charging=data_processor.get_charging_process_list_raw()[-1],
                           total=data_processor.get_total_raw(),
                           forecast=data_processor.get_forecast_raw(),
                           configuration=configuration.dict(),
                           post_url=url_for('test_calculated_field.index'), with_categories=True)
class MyTestLabelFormatTestView(MyRoleRequiredCustomView):
    """Admin page to test a label formatting definition against live data
    and optionally persist it as a new LabelFormat row."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        """Render the test form; on POST format a test label and (optionally) save it."""
        form = TestLabelFormatForm()
        from src.db_models import LabelGroup, LabelFormat
        from src import db
        form.label_group.choices = [(g.code, g) for g in LabelGroup.query.all()]
        from src.data_processor.labels import get_calc_functions
        form.format_fn.choices = get_calc_functions()
        if form.validate_on_submit():
            try:
                return_value = data_processor.test_custom_label_format(form.label_group.data, form.field_name.data,
                                                                       '__test_label__', form.format_fn.data,
                                                                       form.format.data, form.unit.data, form.default.data
                                                                       )
                flash(Markup(f"Return value for label formatting is <b>{return_value[0].value}</b>"), "info")
                if form.add.data:
                    group = LabelGroup.query.filter_by(code=form.label_group.data).first()
                    # New format goes to the end: next order_key after the current max.
                    lf_ok = LabelFormat.query.filter_by(group_id=group.id).order_by(LabelFormat.order_key.desc()).first()
                    order_key = 1
                    if lf_ok:
                        order_key = lf_ok.order_key + 1
                    lf = LabelFormat(
                        label=form.label.data,
                        field=form.field_name.data,
                        format_function=form.format_fn.data,
                        format=form.format.data,
                        unit=form.unit.data,
                        default=form.default.data,
                        group_id=group.id,
                        order_key=order_key
                    )
                    try:
                        db.session.add(lf)
                        db.session.commit()
                        flash(f"Label format {form.field_name.data} stored to database for code {form.label_group.data}", "info")
                    except Exception as ex:
                        db.session.rollback()
                        flash(f"Can't add {form.field_name.data}: {ex}", "error")
            except Exception as ex:
                flash(f"{type(ex).__name__}: {ex}", "error")
                # NOTE(review): re-raising here aborts the request, so the
                # flashed error is never rendered by this view — the sibling
                # calculated-field view does not re-raise; confirm intent.
                raise ex
        api_token = create_access_token(identity='api')
        return self.render('admin/test_label_format.html', form=form, post_url=url_for('test_label_format.index'), with_categories=True, field_list_url=url_for('api_bp.get_list_of_fields'), api_token=api_token)
class DriverChangeView(MyRoleRequiredCustomView):
    """Admin page to record a driver/copilot change.

    Saving closes every currently-open DriverChange record (valid_to=None)
    and opens a new one starting now (UTC)."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        form = DriverChangeForm()
        from src.db_models import Driver, DriverChange
        from src import db
        # Same choice list for driver and copilot; "---" means unset.
        drivers = [("", "---")] + [(g.name, g.name) for g in Driver.query.all()]
        form.driver.choices = drivers
        form.copilot.choices = drivers
        if form.validate_on_submit():
            now = pendulum.now(tz='utc')
            # Close all still-open change records before opening the new one.
            active_records = DriverChange.query.filter_by(valid_to=None).all()
            for rec in active_records:
                rec.valid_to = now
            db.session.add(DriverChange(driver=form.driver.data, copilot=form.copilot.data, valid_from=now))
            db.session.commit()
            return redirect(url_for("admin.index"))
        return self.render("admin/driver_change.html", form=form)
class ConfigBackupView(MyRoleRequiredCustomView):
    """Admin page that serializes configuration, calculated fields, label
    formats, drivers and driver changes into a downloadable JSON backup."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        form = ConfigBackupForm()
        from src import db, configuration
        if form.validate_on_submit():
            now = pendulum.now(tz='utc')
            from src.backup import get_calculated_fields_all, get_label_formats_all, get_drivers, get_driver_changes
            backup = ConfigBackupData(
                configuration=configuration,
                calculated_fields=get_calculated_fields_all(),
                label_formats=get_label_formats_all(),
                drivers=get_drivers(),
                driver_changes=get_driver_changes()
            )
            # Serve as an attachment with a UTC timestamp in the filename.
            return Response(backup.json(indent=2),
                            mimetype='application/json',
                            headers={'Content-Disposition':
                                         f"attachment;filename=tran_backup-{now.format('YYYY-MM-DD-HH-mm-ss')}.json"})
        return self.render("admin/config_backup.html", form=form)
class ConfigRestoreView(MyRoleRequiredCustomView):
    """Admin page that restores a previously downloaded JSON backup.

    Each section (config, calculated fields, label formats, drivers,
    driver changes) is restored only when its checkbox is selected."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        form = ConfigRestoreForm()
        if form.validate_on_submit():
            backup_file = form.backup_file.data
            backup_data = backup_file.read()
            backup = ConfigBackupData.parse_raw(backup_data)
            overwrite_config_file = form.overwrite_config_file.data
            from src import load_config
            if form.restore_config.data:
                # Optionally also persist the restored config to disk.
                load_config(backup.configuration, overwrite_config_file)
                flash("Configuration restored", "info")
                if overwrite_config_file:
                    flash("Configuration file replaced", "info")
            from src.backup import save_calculated_fields_all, save_label_formats_all, save_drivers, save_driver_changes
            if form.restore_calculated_fields.data:
                cnt = save_calculated_fields_all(backup.calculated_fields)
                flash(f" {cnt} calculated fields restored", "info")
            if form.restore_label_formats.data:
                cnt = save_label_formats_all(backup.label_formats)
                flash(f" {cnt} label formats restored", "info")
            if form.restore_drivers.data:
                cnt = save_drivers(backup.drivers)
                flash(f" {cnt} drivers restored", "info")
            if form.restore_driver_changes.data:
                cnt = save_driver_changes(backup.driver_changes)
                flash(f" {cnt} driver changes restored", "info")
        return self.render("admin/config_restore.html", form=form, with_categories=True)
class GenerateJwtTokenView(MyRoleRequiredCustomView):
    """Admin page that mints a JWT API token for the logged-in user with a
    user-chosen validity period in hours."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        api_token = None
        form = GenerateJwtTokenForm()
        if form.validate_on_submit():
            hours = form.hours.data
            from flask_security import current_user
            from datetime import timedelta
            duration = timedelta(hours=hours)
            # Token identity encodes the requesting user's email.
            api_token = create_access_token(identity="api::" + current_user.email, expires_delta=duration)
        else:
            if form.errors:
                for err, err_value in form.errors.items():
                    flash(f"{err}: {err_value}", "error")
        return self.render("admin/generate_jwt_token.html", form=form, with_categories=True, api_token=api_token)
class CreateNewUserView(MyRoleRequiredCustomView):
    """Admin page that creates a new user with a hashed password and
    assigns the selected roles."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        form = CreateNewUserForm()
        from src.db_models import Role
        from src import user_datastore, db
        from flask_security import hash_password
        db_roles = Role.query.order_by(Role.name).all()
        roles = [(r.name, r.name) for r in db_roles]
        form.roles.choices = roles
        if form.validate_on_submit():
            try:
                user_datastore.create_user(email=form.email.data, password=hash_password(form.password.data))
                # Commit the user first so the role assignments can reference it.
                db.session.commit()
                for r in form.roles.data:
                    user_datastore.add_role_to_user(form.email.data, r)
                db.session.commit()
                flash(f"User {form.email.data} created", "info")
            except Exception as ex:
                flash(f"Creating user failed: {ex}", "error")
        return self.render("admin/create_new_user.html", form=form, with_categories=True)
class CustomPageView(MyRoleRequiredCustomView):
    """Admin editor for custom template pages: load, save, or soft-delete.

    Deleting renames the page (timestamp suffix) and flags it deleted, so
    the name becomes available again without losing the row."""
    @expose('/', methods=['GET', 'POST'])
    def index(self):
        form = CustomPageForm()
        from src import db
        from src.db_models import CustomPage
        form.pages.choices = [("", "---")] + [(p.name, p.name) for p in CustomPage.query.filter_by(deleted=False).order_by(CustomPage.name).all()]
        if form.validate_on_submit():
            try:
                if form.delete.data:
                    page = CustomPage.query.filter_by(name=form.pages.data).first()
                    if page:
                        # Keep the content in the form so it can be re-saved.
                        form.name.data = page.name
                        form.template.data = page.template
                        # Soft delete: timestamped rename frees the name.
                        page.name = page.name + "_" + pendulum.now(tz='utc').to_iso8601_string()
                        page.deleted = True
                        db.session.commit()
                        flash(f"Page {form.name.data} deleted (you can save it again)", "info")
                    else:
                        flash(f"Page {form.name.data} does not exist", "error")
                # NOTE(review): `if` (not `elif`) means a delete request falls
                # through to this load branch as well — confirm intended.
                if form.load.data:
                    page = CustomPage.query.filter_by(name=form.pages.data).first()
                    if page:
                        form.name.data = page.name
                        form.template.data = page.template
                        flash(f"Page {form.name.data} loaded", "info")
                    else:
                        flash(f"Page {form.name.data} does not exist", "error")
                elif form.save.data:
                    # Upsert by name.
                    page = CustomPage.query.filter_by(name=form.name.data).first()
                    if page:
                        page.template = form.template.data
                    else:
                        page = CustomPage(name=form.name.data, template=form.template.data)
                        db.session.add(page)
                    db.session.commit()
                    flash(f"Page {form.name.data} saved", "info")
            except Exception as ex:
                flash(f"Creating page failed: {ex}", "error")
                db.session.rollback()
        # Refresh choices in case a page was created/renamed above.
        form.pages.choices = [("", "---")] + [(p.name, p.name) for p in
                                              CustomPage.query.filter_by(deleted=False).order_by(CustomPage.name).all()]
        return self.render("admin/custom_page.html", form=form, with_categories=True)
| krezac/tesla-race-analyzer | src/admin/admin_views.py | admin_views.py | py | 13,674 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "src.parent_views.MyRoleRequiredCustomView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "src.admin.admin_forms.TestCalculatedFieldForm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.db_models.FieldScope.query.all",
"line_number": 2... |
32894121704 | import os
from PyQt5.QtCore import QObject,pyqtSignal,pyqtSlot
import pyautogui
from time import sleep
class Worker(QObject):
    """Background object that validates and replays recorded GUI actions
    (mouse, keyboard, screenshots, image search) via pyautogui.

    Signals:
        finished: emitted when a run ends or is stopped.
        sbMessage: emitted with [text] or [text, timeout_ms] for the status bar.
    """
    finished = pyqtSignal()
    sbMessage = pyqtSignal(list)

    def __init__(self):
        super(Worker, self).__init__()

    def control(self, action):
        """Validate one action's settings.

        *action* is a flat list: [name, key1, value1, key2, value2, ...].
        Returns [True] when every (key, value) pair validates, or
        [False, info] identifying the first invalid pair.
        NOTE(review): an action with no settings pairs returns None —
        callers index the result, so confirm such actions cannot occur.
        """
        # Expected type (or allowed-values list) per setting key.
        cd = {"X Coordinate":"int","Y Coordinate":"int","Relative X":"int","Relative Y":"int","Clicks":"int","Times":"int",
              "Button":["left","middle","right"], "Overwrite":["yes","no"], "Search Until Find":["yes","no"], "Press After Found":["yes","no"],"Fast Find":["yes","no"],
              "Duration":"float","Interval":"float", "Presses":"int","Wait":"float","Miliseconds":"int",
              "Text":"str", "Key":"key", "Hotkey":"multiKey","Dir Path":"str","File Name":"str","File Path":"str"}
        keys = pyautogui.KEYBOARD_KEYS
        j = 0
        # Walk (key, value) pairs: pair i occupies indices i+j .. i+j+1.
        for i in range(1, len(action)):
            setting_ = action[i+j:i+j+2]
            if cd[setting_[0]] == "int":
                try: int(setting_[1])
                except: return [False, i]
            if setting_[0] == "Button":
                if not setting_[1] in cd[setting_[0]]: return [False, i]
            if cd[setting_[0]] == "float":
                try: float(setting_[1])
                except: return [False, i]
            if cd[setting_[0]] == "key":
                if not setting_[1] in keys: return [False, i]
            if cd[setting_[0]] == "multiKey":
                for key in setting_[1].split():
                    if not key in keys: return [False, i]
            if setting_[0] == "Dir Path":
                # bug fix: was '.fotmat(i)' (typo) -> AttributeError at runtime
                if not os.path.isdir(setting_[1]): return [False, "{} , no directory.".format(i)]
            if setting_[0] == "File Path":
                # bug fix: same '.fotmat' typo
                if not os.path.isfile(setting_[1]): return [False, "{} , no file.".format(i)]
            if setting_[0] == "File Name":
                # bug fix: was `== "" or None`, which only ever tested ""
                if setting_[1] in ("", None): return [False, "{} , no file name.".format(i)]
            if cd[setting_[0]] == ["yes","no"]:
                if not setting_[1] in ["yes","no"]: return [False, i]
            if i+j+2 == len(action):
                return [True]
            j += 1

    def run(self):
        """Execute the configured action list once, dispatching on action name."""
        for action in self._actions:
            if not self._running:
                self.sbMessage.emit(["Program stopped.", 4000])
                self.finished.emit()
                break
            if action[0] == "Delay":
                delay = int(action[2])  # milliseconds
                sleep(delay/1000)
            # MOUSE
            if action[0] == "Move To":
                x,y,duration = int(action[2]),int(action[4]),float(action[6])
                pyautogui.moveTo(x,y,duration)
            if action[0] == "Move Relative":
                x,y,duration = int(action[2]),int(action[4]),float(action[6])
                pyautogui.moveRel(x,y,duration)
            if action[0] == "Drag To":
                x,y,duration = int(action[2]),int(action[4]),float(action[6])
                pyautogui.dragTo(x,y,duration)
            if action[0] == "Drag Relative":
                x,y,duration = int(action[2]),int(action[4]),float(action[6])
                pyautogui.dragRel(x,y,duration)
            if action[0] == "Click To":
                x,y,button,clicks,interval,duration = int(action[2]),int(action[4]),action[6],int(action[8]),float(action[10]),float(action[12])
                pyautogui.click(x=x,y=y,button=button,clicks=clicks,interval=interval,duration=duration)
            if action[0] == "Click":
                button,clicks,interval,duration = action[2],int(action[4]),float(action[6]),float(action[8])
                pyautogui.click(button=button,clicks=clicks,interval=interval,duration=duration)
            if action[0] == "Scroll":
                clicks = int(action[2])
                pyautogui.scroll(clicks=clicks)
            # KEYBOARD
            if action[0] == "Write":
                interval,text = float(action[2]),action[4]
                pyautogui.typewrite(text,interval=interval)
            if action[0] == "Press":
                presses,interval,text = int(action[2]),float(action[4]),action[6]
                pyautogui.press(text,interval=interval,presses=presses)
            if action[0] == "Hotkey":
                hotkey = action[2].split()
                pyautogui.hotkey(*hotkey)
            if action[0] == "Key Down":
                hotkey = action[2]
                pyautogui.keyDown(hotkey)
            if action[0] == "Key Up":
                hotkey = action[2]
                pyautogui.keyUp(hotkey)
            # SCREEN
            if action[0] == "Screenshot":
                overwrite,dir_path,file_name = action[2],action[4],action[6]
                if overwrite == "yes":
                    path = os.path.join(dir_path,file_name)+".png"
                    pyautogui.screenshot(path)
                if overwrite == "no":
                    path = os.path.join(dir_path,file_name)
                    path = self.notOverwrite(path)
                    pyautogui.screenshot(path)
            if action[0] == "Find Image":
                cb1,cb2,cb3,wait,path = action[2],action[4],action[6],float(action[8]),action[10]
                # bug fix: cb3 is the string "yes"/"no"; `if cb3:` was always
                # truthy, so grayscale ("fast find") was forced on.
                grayscale = (cb3 == "yes")
                if cb1 == "yes":
                    # Search repeatedly until found (or the run is stopped).
                    a = None  # bug fix: defined even if the loop never runs
                    while self._running:
                        a = None
                        try:
                            a = pyautogui.locateCenterOnScreen(path, grayscale=grayscale)
                        except: pass
                        if a:
                            break
                        sleep(wait)
                    if cb2 == "yes" and a:
                        pyautogui.click(a[0], a[1])
                if cb1 == "no":
                    # Single search attempt.
                    a = None
                    try:
                        a = pyautogui.locateCenterOnScreen(path, grayscale=grayscale)
                    except: pass
                    if cb2 == "yes" and a:
                        pyautogui.click(a[0], a[1])

    @pyqtSlot()
    def start(self, steps, actions, loop):
        """Validate *actions*, then run them *steps* times (or forever if *loop*)."""
        self._step = 0
        self._running = True
        self._maxSteps = steps
        self._actions = actions
        for i, action in enumerate(self._actions):
            c = self.control(action)
            if c[0] == False:
                self.sbMessage.emit(["Error at {},{}".format(i+1, c[1]), 5000])
                # bug fix: was `self._running == False` (a no-op comparison),
                # so invalid action lists were executed anyway.
                self._running = False
        if loop == False:
            while self._step < self._maxSteps and self._running == True:
                self._step += 1
                self.sbMessage.emit(["Running... {}".format(self._step)])
                self.run()
        if loop == True:
            while self._running == True:
                self._step += 1
                self.sbMessage.emit(["Running... {}".format(self._step)])
                self.run()
        if self._running == True:
            self.sbMessage.emit(["Successfully end.", 4000])
            self.finished.emit()
        else:
            self.sbMessage.emit(["Program stopped.", 4000])
            self.finished.emit()

    def stop(self):
        """Request the running loop to stop after the current action."""
        self.finished.emit()
        self._running = False

    def notOverwrite(self, path):
        """Return *path*.png, or *path*(k).png for the first free k,
        so screenshots never overwrite an existing file."""
        if not os.path.isfile(path+".png"):
            return path+".png"
        for i in range(1,100000):
            path2 = path+"({})".format(i)
            if not os.path.isfile(path2+".png"):
                return path2+".png"
| konexis/Automate | worker.py | worker.py | py | 8,191 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtCore.QObject",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "py... |
39309134173 | import tornado.ioloop
import tornado.web
import requests
import argparse
class MainHandler(tornado.web.RequestHandler):
    """Proxy /api/<slug> requests to the official osu! API."""

    def get(self, slug):
        base = "http://osu.ppy.sh/api/{}?".format(slug)
        # Forward every incoming query parameter to the upstream URL.
        params = "".join("{}={}&".format(name, self.get_argument(name))
                         for name in self.request.arguments)
        # Strip the trailing '&' (or the '?' when there were no params).
        url = (base + params)[:-1]
        try:
            body = requests.get(url).text
            print("API request: api/{}".format(slug))
            self.write(body)
        except:
            self.send_error()
            print("Error while getting osu!api response")
            raise
class GeneralHandler(tornado.web.RequestHandler):
    """Proxy any /<what>/<slug> request straight to the osu! website."""

    def get(self, what, slug):
        target = "http://osu.ppy.sh/{}/{}".format(what, slug)
        try:
            body = requests.get(target).text
            self.write(body)
            print("General request: {}/{}".format(what, slug))
        except:
            self.send_error()
            print("Error while getting web response")
            raise
if __name__ == "__main__":
    # CLI stuff
    __author__ = "Nyo"
    parser = argparse.ArgumentParser(description="osu!api proxy for local ripple servers")
    # type=int so listen() receives a numeric port: the original passed the
    # raw string through, which breaks socket binding when -p is used.
    parser.add_argument('-p','--port', type=int, help="osu!api proxy server port", required=False)
    args = parser.parse_args()
    # Get port from arguments, defaulting to 5003
    serverPort = args.port if args.port is not None else 5003
    # Start server
    print("osu!api proxy listening on 127.0.0.1:{}...".format(serverPort))
    app = tornado.web.Application([(r"/api/(.*)", MainHandler), (r"/(.*)/(.*)", GeneralHandler)])
    app.listen(serverPort)
    tornado.ioloop.IOLoop.current().start()
| osuripple/osuapiproxy | osuapiproxy.py | osuapiproxy.py | py | 1,617 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tornado.ioloop.web",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.web"... |
5812455236 | '''
Bitwise operations done on the files
'''
import cv2
import numpy as np
img1 = np.zeros((250, 500, 3),np.uint8)
img1 = cv2.rectangle(img1, (200,0), (300, 100), (255,255,255), -1)
img2 = np.zeros((250, 500, 3),np.uint8)
img2 = cv2.rectangle(img2, (0, 0), (250, 250), (255,255,255), -1)
cv2.imshow('image1',img1)
cv2.imshow('image2',img2)
bitAnd = cv2.bitwise_and(img2,img1)
#cv2.imshow('bit-AND',bitAnd)
bitOr = cv2.bitwise_or(img2, img1)
#cv2.imshow('bit-or',bitOr)
bitnot = cv2.bitwise_not(img2)
cv2.imshow('bitnot',bitnot)
cv2.waitKey(0)
cv2.destroyAllWindows() | Utkichaps/opencv-tutorial-practice | g.bitwise_ops.py | g.bitwise_ops.py | py | 598 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
8765523962 | #!/usr/bin/env python
__author__ = 'MidnightInAPythonWorld'
# Guard: this script only supports Python 3.
import sys
if sys.version_info[0] != 3:
    print("[-] This script requires Python 3")
    print("[-] Exiting script")
    sys.exit()
# stdlib
import json
import os
import requests
import argparse
# Pandas is a hard requirement (used for the CSV export); bail out early if absent.
try:
    import pandas as pd
except ImportError:
    # BUG FIX: only catch the import failure -- the previous bare `except:`
    # would also swallow unrelated errors (e.g. KeyboardInterrupt).
    print("[-] This script requires Pandas to be installed.")
    print("[-] Exiting script")
    sys.exit()
def query_grey_noise(api_key,api_params):
    """
    Query the GreyNoise GNQL endpoint and return the decoded JSON.

    api_key    -- GreyNoise API key, sent in the 'key' header.
    api_params -- dict with at least 'query' (GNQL string) and 'size'.

    Returns the parsed JSON dict when the query matched at least one
    record, otherwise None (including on any network or decode failure).
    Documentation for this API is located here:
    https://docs.greynoise.io/#greynoise-api-gnql
    """
    api_headers = {
        'Accept' : 'application/json' ,
        'Accept-Language' : 'en-US' ,
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko' ,
        'Accept-Encoding' : 'gzip, deflate' ,
        'Connection' : 'Keep-Alive' ,
        'key' : api_key ,
    }
    url = 'https://api.greynoise.io/v2/experimental/gnql'
    # BUG FIX: api_json was previously unassigned when the request failed,
    # which made the 'count' check below raise NameError.
    api_json = None
    try:
        print('[*] Attempting GreyNoise API request for query: ' , api_params['query'] )
        api_requests = requests.get(url, headers = api_headers, params=api_params, timeout=15.000, verify=True)
        api_json = api_requests.json()
        print('[*] Successfully queried GreyNoise API.')
    except Exception:
        # Best-effort: report the failure and fall through to return None.
        print("[!] Failed to fetch GreyNoise API with base URL of: ", url)
    if api_json is not None and api_json.get('count', 0) >= 1:
        return api_json
    else:
        return None
def write_results_to_csv(api_data,filename):
    '''Flatten the GreyNoise API response and append it to a CSV file.

    api_data -- JSON dict from query_grey_noise(); every record in its
                'data' list is written out with the bulky 'raw_data'
                field dropped.
    filename -- destination CSV path; the header row is only written
                when the file does not exist yet.
    '''
    for item in api_data['data']:
        # BUG FIX: pop() instead of del so records without a 'raw_data'
        # key no longer raise KeyError.
        item.pop('raw_data', None)
    df = pd.DataFrame(api_data['data'])
    # if file does not exist write header
    if not os.path.isfile(filename):
        df.to_csv(filename)
    else: # else it exists so append without writing the header
        df.to_csv(filename, mode='a', header=False)
    print('[*] Successfully created file: ', filename)
def main():
    """Collect CLI/interactive input, run the GNQL query and export hits to CSV."""
    parser = argparse.ArgumentParser()
    parser.add_argument('api_key', help='The API key used to query Grey Noise API.')
    args = parser.parse_args()
    # Interactive prompts for the query itself.
    api_params = {
        'query': input("Enter query string in GNQL format: "),
        'size': int(input("Enter Size (max is 10000): ")),
    }
    filename = input("Enter filename to write results to: ")
    api_data = query_grey_noise(args.api_key, api_params)
    if not api_data:
        print("[!] Search had 0 matches.")
        return
    print("[!] Search had " + str(api_data['count']) + " matches.")
    write_results_to_csv(api_data, filename)
if __name__== "__main__":
    main()
    exit()
| MidnightInAPythonWorld/API-GreyNoise | api_grey_noise_gnql.py | api_grey_noise_gnql.py | py | 2,808 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
... |
13430467993 | """ Usage:
Start the flask server by running:
$ python tests/test_map_flask.py
And then head to http://127.0.0.1:5000/ in your browser to see the map displayed
"""
import base64
from flask import Flask
import folium
from folium import IFrame
from mongoengine import connect
from PhotoManagement.db import Address
def createMap():
    """Build a folium map with one marker per geocoded Address.

    Each marker's popup is an <object> tag embedding the base64 thumbnail
    of the address' first photo.  Addresses without photos or without a
    stored miniature are skipped.  The map is centred on, and fitted to,
    the bounding box of all plotted coordinates.
    """
    lat = []
    lon = []
    info = []
    # Running bounding box of the plotted addresses.
    lat_min = None
    lat_max = None
    lon_min = None
    lon_max = None
    for a in Address.objects():
        if len(a.photos) == 0:
            continue
        photo = a.photos[0]
        # BUG FIX: skip addresses without a miniature *before* recording
        # their coordinates; previously lat/lon were appended but no popup
        # was, so zip(lat, lon, info) paired markers with the wrong popups.
        if not hasattr(photo, "miniature"):
            print("No miniature photo id=%s" % photo.id)
            continue
        lat.append(a.latitude)
        lon.append(a.longitude)
        if lat_min is None or a.latitude < lat_min:
            lat_min = a.latitude
        if lat_max is None or a.latitude > lat_max:
            lat_max = a.latitude
        if lon_min is None or a.longitude < lon_min:
            lon_min = a.longitude
        if lon_max is None or a.longitude > lon_max:
            lon_max = a.longitude
        mini = photo.miniature.read()
        encoded = base64.b64encode(mini)
        # BUG FIX: the height attribute was missing its closing quote,
        # producing broken HTML inside the popup.
        svg = """
        <object data="data:image/jpg;base64,{}" width="{}" height="{}" type="image/svg+xml">
        </object>""".format
        width, height, fat_wh = 78, 78, 1.25
        iframe = IFrame(
            svg(encoded.decode("UTF-8"), width, height),
            width=width * fat_wh,
            height=height * fat_wh,
        )
        popup = folium.Popup(iframe, max_width=2650)
        info.append(popup)
    # NOTE(review): if no address produced a marker, lat_min etc. stay None
    # and folium.Map below fails -- confirm the DB is never empty here.
    m = folium.Map(
        location=[lat_min / 2 + lat_max / 2, lon_min / 2 + lon_max / 2],
        tiles="Stamen Terrain",
    )
    for xlat, xlon, xinfo in zip(lat, lon, info):
        folium.Marker([xlat, xlon], popup=xinfo).add_to(m)
    m.fit_bounds([[lat_min, lon_min], [lat_max, lon_max]])
    return m
# Module-level setup: open the MongoDB connection and create the Flask app.
connect("photo_mgt")
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the folium map as the page body."""
    return createMap()._repr_html_()
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
| ydethe/photomanagement | tests/test_map_flask.py | test_map_flask.py | py | 2,130 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PhotoManagement.db.Address.objects",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PhotoManagement.db.Address",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "base64.b64encode",
"line_number": 50,
"usage_type": "call"
},
{
"ap... |
15592539446 | #!/usr/bin/env python
import sys
import string
import xml.dom.minidom
class NamedElement:
    """Mixin that extracts the stripped text of the first <name> child."""

    def __init__(self, xml_element):
        # A <name> child is required; IndexError here means malformed input.
        self.name = xml_element.getElementsByTagName('name')[0].firstChild.data.strip()
class DocumentedElement:
    """Mixin that extracts the stripped text of the first <documentation> child."""

    def __init__(self, xml_element):
        doc_node = xml_element.getElementsByTagName('documentation')[0]
        self.documentation = doc_node.firstChild.data.strip()
class TypedElement:
    """Mixin that records a type as the chain of first element children of <type>.

    Each step contributes its tag name to self.type; an <enum-value> node
    contributes the tuple ('enum-value', <name attribute>) instead.
    """

    def __init__(self, xml_element):
        self.type = []
        type_element = xml_element.getElementsByTagName('type')[0]

        def _first_child_element(parent):
            # First ELEMENT_NODE child, or None when there is none.
            for child in parent.childNodes:
                if child.nodeType == child.ELEMENT_NODE:
                    return child
            return None

        # The <type> element must have at least one element child.
        node = [e for e in type_element.childNodes
                if e.nodeType == e.ELEMENT_NODE][0]
        while node is not None:
            if node.nodeName == 'enum-value':
                self.type.append(('enum-value', node.getAttribute('name')))
            else:
                self.type.append(node.nodeName)
            node = _first_child_element(node)
class IpcFoo:
    """Root of the parsed IPC description: constants, enums and objects."""

    def __init__(self, xml_element):
        self.version = int(xml_element.getAttribute('version'))
        self.objects = []
        self.constants = {}
        self.enums = {}
        # Constants first: enums below may reference them by name.
        for constant_element in xml_element.getElementsByTagName('constant'):
            constant = IpcConstant(constant_element)
            self.constants[constant.name] = constant
        for enum_element in xml_element.getElementsByTagName('enum'):
            enum = IpcEnum(enum_element, self.constants)
            self.enums[enum.name] = enum
        # Object IDs start at 1; ID 0 is reserved for signal voodoo.
        object_elements = xml_element.getElementsByTagName('object')
        for object_id, object_element in enumerate(object_elements, start=1):
            obj = IpcObject(object_element)
            obj.id = object_id
            self.objects.append(obj)
class IpcEnum(NamedElement):
    """An <enum>: maps member names to integer values.

    A member may reset the running value via its ref-value attribute,
    pointing either at a named constant (ref-type="constant") or at an
    earlier member.  Without a reference the value is the previous one
    plus one, starting from 0.
    """

    def __init__(self, xml_element, constants):
        NamedElement.__init__(self, xml_element)
        members = {}
        next_value = 0
        for member_element in xml_element.getElementsByTagName('member'):
            member_name = member_element.firstChild.data.strip()
            ref_val = member_element.getAttribute('ref-value')
            ref_type = member_element.getAttribute('ref-type')
            if ref_type == 'constant' and ref_val != '':
                next_value = constants[ref_val].value
            elif ref_val != '':
                next_value = members[ref_val]
            members[member_name] = next_value
            next_value += 1
        self.members = members
class IpcConstant(NamedElement):
    """A <constant>: its <value> text is parsed as int when typed 'integer'."""

    def __init__(self, xml_element):
        NamedElement.__init__(self, xml_element)
        value_element = xml_element.getElementsByTagName('value')[0]
        raw_value = value_element.firstChild.data.strip()
        if value_element.getAttribute('type') == 'integer':
            self.value = int(raw_value)
        else:
            # Non-integer constants keep their raw string form.
            self.value = raw_value
class IpcObject(NamedElement):
    """An <object>: owns its methods, signals and broadcasts.

    Method IDs are assigned sequentially from 32 upwards (IDs 0..31 are
    reserved for voodoo use).  The object's own id is filled in later by
    IpcFoo.
    """

    def __init__(self, xml_element):
        NamedElement.__init__(self, xml_element)
        self.id = 0
        self.methods = []
        self.broadcasts = []
        self.signals = []
        method_elements = xml_element.getElementsByTagName('method')
        for method_id, method_element in enumerate(method_elements, start=32):
            method = IpcMethod(method_element)
            method.id = method_id
            self.methods.append(method)
        for signal_element in xml_element.getElementsByTagName('signal'):
            self.signals.append(IpcSignalOrBroadcast(signal_element))
        for broadcast_element in xml_element.getElementsByTagName('broadcast'):
            self.broadcasts.append(IpcSignalOrBroadcast(broadcast_element))
class IpcMethod(NamedElement, DocumentedElement):
    """A <method>: named, documented, with arguments and an optional return value."""

    def __init__(self, xml_element):
        NamedElement.__init__(self, xml_element)
        DocumentedElement.__init__(self, xml_element)
        self.id = 0  # assigned later by IpcObject
        self.arguments = [IpcMethodArgument(e)
                          for e in xml_element.getElementsByTagName('argument')]
        return_value_elements = xml_element.getElementsByTagName('return_value')
        self.return_value = (IpcReturnValue(return_value_elements[0])
                             if return_value_elements else None)
class IpcMethodArgument(NamedElement, DocumentedElement, TypedElement):
    """An <argument> of a method: named, documented and typed."""
    def __init__(self, xml_element):
        NamedElement.__init__(self, xml_element)
        DocumentedElement.__init__(self, xml_element)
        TypedElement.__init__(self, xml_element)
class IpcReturnValue(DocumentedElement, TypedElement):
    """A <return_value>: documented and typed, but nameless."""
    def __init__(self, xml_element):
        DocumentedElement.__init__(self, xml_element)
        TypedElement.__init__(self, xml_element)
class IpcSignalOrBroadcast(NamedElement, DocumentedElement):
    """A <signal> or <broadcast>: named, documented, with a mandatory return value."""

    def __init__(self, xml_element):
        NamedElement.__init__(self, xml_element)
        DocumentedElement.__init__(self, xml_element)
        self.id = 0
        rv_elements = xml_element.getElementsByTagName('return_value')
        self.return_value = IpcReturnValue(rv_elements[0])
def parse_xml(file):
    """Parse an IPC description file and return the populated IpcFoo tree."""
    document = xml.dom.minidom.parse(file)
    return IpcFoo(document.getElementsByTagName('ipc')[0])
| niahoo/exmms2 | priv/genipc/genipc.py | genipc.py | py | 5,278 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "xml.dom.minidom.dom.minidom.parse",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 171,
"usage_type": "name"
}
] |
12476247076 | from keras.models import load_model
import keras
import numpy as np
from keras import backend as K
from sklearn.metrics import accuracy_score
import sys
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
def cal_adversis_epsilon(data,gradient,epsilon):
    """FGSM step: perturb data by epsilon along the sign of the gradient."""
    return data + epsilon * np.sign(gradient)
def get_gradient(data, model):
    """Evaluate d(model.output)/d(model.input) at `data` via TF1-style graph mode.

    NOTE(review): a new InteractiveSession is created per call and never
    closed, and global_variables_initializer() re-initialises the graph's
    variables before evaluation -- confirm this does not clobber the model's
    trained weights before reusing this helper.
    """
    tf.compat.v1.disable_eager_execution()
    # Symbolic gradient of the model output w.r.t. its input.
    gradients = K.gradients(model.output, model.input)[0]
    sess = tf.compat.v1.InteractiveSession()
    sess.run(tf.compat.v1.global_variables_initializer())
    eval_gradients = sess.run(gradients, feed_dict={model.input:data})
    return eval_gradients
'''
def get_gradient(data, model):
gradients = K.gradients(model.output,model.input)[0]
iterate = K.function(model.input, gradients)
grad = iterate([data])
return grad[0]
'''
def predicted_labels(data,model):
    """Run the model on `data` and return one-hot (2-class) predicted labels."""
    raw_scores = model.predict(data)
    hard_labels = np.array([np.argmax(row) for row in raw_scores])
    return keras.utils.to_categorical(hard_labels, 2)
# CLI: <test_data.npy> <target_model.h5> <blackbox_model.h5>
test_data_file = sys.argv[1]
target_model_file = sys.argv[2]
bb_model_file = sys.argv[3]
test_label_file = 'test_labels.npy'
test_data = np.load(test_data_file)
test_labels = np.load(test_label_file)
# The target model is attacked; the black-box (substitute) model supplies gradients.
m_model = load_model(target_model_file)
b_model = load_model(bb_model_file)
# Baseline accuracy of the target model on clean data.
predicted_test_minus_labels = predicted_labels(test_data,m_model)
orignal_acc = accuracy_score(test_labels,predicted_test_minus_labels)
print('Orignal_accuracy: {}'.format(orignal_acc))
# Transfer FGSM attack: gradients from the substitute model, epsilon = 1/16.
gradient = get_gradient(test_data, b_model)
test_adv = cal_adversis_epsilon(test_data, gradient, 0.0625)
# Accuracy of the target model on the adversarial examples.
predicted_new_test_labels = predicted_labels(test_adv,m_model)
final_acc = accuracy_score(test_labels,predicted_new_test_labels)
print('final accuracy: {}'.format(final_acc))
print("Accuracy Dropped a Total By :", orignal_acc - final_acc)
| Suraj-Jha1508/Deep_Learning_CS677 | assignment_10/test.py | test.py | py | 1,919 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tensorflow.compat.v1.disable_eager_execution",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.sign",
"line_number": 13,
"usage_type": "call"
},
{
"api_... |
23003384613 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Feb 20, 2013
@author: Maribel Acosta
@author: Fabian Floeck
@author: Michael Ruster
'''
from mw.xml_dump import Iterator as mwIterator
from mw.xml_dump.functions import EXTENSIONS as mwExtensions
from mw.xml_dump.functions import open_file
from functions.print import *
import functions.PageProcessing as PageProcessing
import functions.DumpConditions as Conditions
import os
from sys import argv
def extractFileNamesFromPath(path):
    """ Returns a list of file names that are identified under path.
    If path is a directory, its contents will be listed non-recursively and
    every entry whose suffix matches a supported dump datatype is returned,
    sorted alphabetically in ascending manner.
    If path identifies a file, it is returned as a one-element list.
    When path is neither, a FileNotFoundError is raised.
    """
    if os.path.isfile(path):
        return [path]
    if not os.path.isdir(path):
        raise FileNotFoundError('No file or directory could be found in "%s"' % path)
    directoryContents = os.listdir(path)
    fileNames = []
    # Keep only entries whose extension marks a dump type mwxml can open.
    for supportedFiletype in mwExtensions.keys():
        suffix = "." + supportedFiletype
        fileNames.extend(os.path.join(path, entry)
                         for entry in directoryContents
                         if entry.lower().endswith(suffix))
    fileNames.sort()
    return fileNames
def analyseDumpsAndOutputWriteToDisk(path, blockLog, condition):
    """ Load dump file(s) from path and iterate over their pages and their
    revisions. All revisions will be matched against the blockLog to calculate
    how many seconds after the creation of the revision, the author was blocked
    (if s/he was blocked at all afterwards).
    """
    # Sanity check: a block log is required exactly when (and only when)
    # deletion discussions are being processed.
    assert ((condition == Conditions.isRegisteredUserTalk and not blockLog) or
            (condition == Conditions.isDeletionDiscussion and blockLog)), '[E] Blocks may not be empty when processing deletion discussions.'
    if condition == Conditions.isDeletionDiscussion:
        print("[I] Loading blocked users and the associated blocking timestamps into memory.", end=' ')
        # Imported lazily: only needed on the deletion-discussion path.
        import BlockTimeCalculation
        blocks = BlockTimeCalculation.createBlockedUsersDict(blockLog)
        print("Done.")
    else:
        blocks = None
    for fileName in extractFileNamesFromPath(path):
        print('[I] Now processing the file "%s".' % fileName)
        # Access the file.
        dumpIterator = mwIterator.from_file(open_file(fileName))
        # Iterate over the pages.
        for page in dumpIterator:
            shouldDeletionDiscussionsBeProcessed = condition == Conditions.isDeletionDiscussion
            if condition(page):
                (revisions_order, revisions) = PageProcessing.process(page, shouldDeletionDiscussionsBeProcessed)
                if shouldDeletionDiscussionsBeProcessed:
                    assert blocks, '[E] Blocks was empty.'
                    writeAllRevisions(revisions_order, revisions, blocks)
                else:
                    # User-talk pages are keyed by their title in the output.
                    assert page.title, '[E] The page title was empty.'
                    writeAllRevisions(revisions_order, revisions, blocks, page.title)
if __name__ == '__main__':
    import argparse
    # CLI definition; the actual work is delegated to
    # analyseDumpsAndOutputWriteToDisk() above.
    parser = argparse.ArgumentParser(description="WikiWho DiscussionParser: An algorithm for extracting posts on Wikipedia page deletion discussions and checking when the post's author has last been blocked.",
                                     epilog="""
WikiWho DiscussionParser, Copyright (C) 2015 Fabian Flöck, Maribel Acosta, Michael Ruster (based on wikiwho by Fabian Flöck, Maribel Acosta).
WikiWho DiscussionParser comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions. For more information, see the LICENSE and README.md files this program should have been distributed with.
""")
    parser.add_argument('-i', dest='pageDumpPath', required=True,
                        help='Path to the Wikipedia page(s) dump (XML, 7z, bz2…).')
    # NOTE(review): the trailing comma below turns this statement into a
    # one-element tuple -- harmless at runtime but likely unintended.
    parser.add_argument('-b', dest='blockLog', type=argparse.FileType('r'),
                        default=None, nargs='?',
                        help='Path to the block log file produced wit 0nse/WikiParser (CSV).'),
    parser.add_argument('-c', dest='condition', nargs='?',
                        default='isDeletionDiscussion', type=str,
                        help='Decide whether you want to process deletion discussions or user warnings. It must identify a boolean method returning True or False on a Page object. Available options are "isDeletionDiscussion" and "isRegisteredUserTalk". The default is "isDeletionDiscussion".')
    args = parser.parse_args()
    # Resolve the condition name to the actual predicate function.
    condition = Conditions.parse(args.condition)
    analyseDumpsAndOutputWriteToDisk(args.pageDumpPath, args.blockLog, condition)
| 0nse/WikiWho | WikiWho.py | WikiWho.py | py | 5,085 | python | en | code | null | github-code | 1 | [
{
"api_name": "os.path.isdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "mw.xml_dump.functions.EXTENSION... |
22817131703 | # -*- coding: utf-8 -*-
# @Author: Jeremiah
# @Date: 2017-05-26 23:27:33
# @Last Modified by: jemarks
# @Last Modified time: 2017-05-31 20:13:15
#This file exists in order to provider some basic objects for SRs, SPs, Stores, and backstop reports.
import datetime
import os
import pandas
import collections
from petm_teams import teams
from emails import team_email
import file_man
# File-name timestamp pattern of backstop reports dropped on the SFTP server.
sftp_format_string = "Petsmart_Backstop_%m-%d-%Y_at_%H.%M.xlsx"
# Canonical column ordering used when emitting report tables.
colsInOrder = ["SR #", "Date Opened", "Days Since Open", "Priority", "SR Substatus", "SR Short Description", "LOS", "Site Area", "City", "State", "Site #", "SC Name", "TL", "SR/Activity Last Update "]
# Result of diffing two reports: SR numbers that are new, still open, or closed.
Stats = collections.namedtuple('Stats', ['new', 'continued', 'closed'])
class ServiceRequest(object):
    """A single service request (SR) row from a backstop report.

    Wraps one spreadsheet record (a dict keyed by column header) and
    exposes the commonly used fields as attributes.  Equality is by SR
    number; an SR also compares equal to its bare SR-number string.
    """
    def __init__(self, line_of_data, bs_ts = None):
        """line_of_data -- one spreadsheet row as a dict keyed by column header.
        bs_ts -- timestamp of the backstop report this row came from.
        """
        super(ServiceRequest, self).__init__()
        self.line_of_data = line_of_data
        self.sr = line_of_data['SR #']
        self.site = line_of_data['Site #']
        self.sp = line_of_data['SC Number']
        self.sp_name = line_of_data['SC Name']
        self.date_opened = line_of_data['Date Opened']
        self.time_open = line_of_data["Days Since Open"]
        self.priority = line_of_data["Priority"]
        self.tl = line_of_data["TL"]
        self.los = line_of_data["LOS"]
        self.short_description = line_of_data["SR Short Description"]
        self.status = line_of_data["SR Substatus"]
        self.nte = line_of_data["NTE"]
        self.ot = line_of_data['Overtime Allowed ']
        self.backstop_timestamp = bs_ts
    def __eq__(self, otherobject):
        # BUG FIX (idiom): isinstance() instead of comparing type objects;
        # this also accepts str subclasses.  A plain string matches on the
        # SR number, anything else is compared by its .sr attribute.
        if isinstance(otherobject, str):
            return otherobject == self.sr
        return otherobject.sr == self.sr
class BackStopReport(object):
    """One backstop Excel report: its SRs keyed by SR number, plus diffing helpers."""
    def __init__(self, path):
        # path -- report file whose basename encodes the report timestamp
        # (see sftp_format_string).
        super(BackStopReport, self).__init__()
        self.path = path
        self.srs = {}
        self.backstop_timestamp = datetime.datetime.strptime(os.path.basename(self.path), sftp_format_string)
        self.jdate = file_man.get_julian_stamp(self.path)
        self.load()
    def load(self):
        # Parse sheet index 1 of the workbook into ServiceRequest objects.
        # NOTE(review): `sheetname=` is the legacy pandas keyword (renamed to
        # `sheet_name=` in pandas 0.21) -- this pins the code to old pandas.
        self.raw_file = pandas.read_excel(self.path, sheetname=1).to_dict(orient='records')
        for record in self.raw_file:
            self.srs[record['SR #']] = ServiceRequest(record, self.backstop_timestamp)
    def compare(self, previous_backstop, further_backstops=[]):
        # Diff this report against the previous one, overall and per slice.
        # Returns (all SRs, critical SRs, per-team-lead SRs, per-team-lead
        # criticals); the latter two are dicts keyed by team lead.
        # NOTE(review): `further_backstops=[]` is a mutable default and the
        # parameter is unused -- confirm before removing.
        mysrs = list(self.srs.keys())
        previous_srs = list(previous_backstop.srs.keys())
        stats_all = self.compare_subset(mysrs, previous_srs)
        mycrits = [x.sr for x in self.srs.values() if x.priority == "Critical"]
        previous_crits = [x.sr for x in previous_backstop.srs.values() if x.priority == "Critical"]
        stats_crits = self.compare_subset(mycrits, previous_crits)
        stats_tl = collections.defaultdict(list)
        crits_tl = collections.defaultdict(list)
        for teamlead in teams:
            # Same diff, restricted to one team lead's SRs (all / critical).
            this_tl = [x.sr for x in self.srs.values() if x.tl == teamlead]
            previous_tl = [x.sr for x in previous_backstop.srs.values() if x.tl == teamlead]
            this_tl_crits = [x.sr for x in self.srs.values() if x.tl == teamlead and x.priority == "Critical"]
            previous_tl_crits = [x.sr for x in previous_backstop.srs.values() if x.tl == teamlead and x.priority == "Critical"]
            stats_tl[teamlead] = self.compare_subset(this_tl, previous_tl)
            crits_tl[teamlead] = self.compare_subset(this_tl_crits, previous_tl_crits)
        # print(team_email.overviewTable(stats_all, stats_crits, stats_tl, crits_tl))
        return stats_all, stats_crits, stats_tl, crits_tl
    def compare_subset(self, subset1, subset2):
        # Set-style diff of two SR-number lists: subset1 is "current",
        # subset2 is "previous".  Returns a Stats namedtuple.
        srs_continued=[]
        srs_closed=[]
        srs_new=[]
        for each_sr in subset1:
            if each_sr in subset2:
                srs_continued.append(each_sr)
            else:
                srs_new.append(each_sr)
        for each_sr in subset2:
            if each_sr not in subset1:
                srs_closed.append(each_sr)
        return Stats(new=srs_new, continued=srs_continued, closed=srs_closed)
| jeremiahmarks/vcde | backstop_automation/backstop_objects.py | backstop_objects.py | py | 3,841 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name... |
12889471327 | import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from torchvision import transforms
from functions import*
import os
'''
bbvi without Rao_Blackwellization and Control Variates
'''
num_epochs=1
batchSize=500
num_S=5  # number of Monte-Carlo samples during training
dim=1000000+1
eta=0.05  # AdaGrad base step size
num_St=100  # number of samples for ELBO evaluation
interval=20  # evaluate/print every `interval` steps
# Load the data (one line per record; parsed lazily per batch).
train_index=np.linspace(0,999999,1000000)
with open('./dataset/criteo-train-sub1000000.txt','r') as f:
    train_datas=f.readlines()
train_loader=DataLoader(train_index,batch_size=batchSize,shuffle=True)
# Variational distribution parameters (means then log-scales, 2*dim total).
para=torch.zeros(dim*2,requires_grad=True)
#para[dim:]=torch.ones(dim)*(-0.5)
# Other state: dataset/batch scale and the AdaGrad accumulator.
scale=1000000/batchSize
G=torch.zeros(dim*2)
# Results to keep: ELBO trace and parameter snapshots.
elbo_list=[]
para_list=[]
# Main optimisation loop (plain BBVI: score-function gradient, no
# Rao-Blackwellization or control variates).
for epoch in range(num_epochs):
    for i ,data_index in enumerate(train_loader):
        labels,images=data_preprocess(train_datas,data_index,dim)
        revise=batchSize/len(images)
        # Per-sample score-function gradients for this batch.
        gradients=torch.zeros((num_S,dim*2))
        #ELBO evaluate & record para
        if i==len(train_loader)-1:
            para_list.append(para.clone().detach().numpy())
        if (epoch*len(train_loader)+i)%interval==0:
            elbo_list.append(elbo_evaluate(images,labels,para,dim,scale,revise,num_St).item())
        # BBVI step: draw z, weight each score by its ELBO term, average.
        z_samples=sampleZ(para,dim,num_S)
        log_qs=ng_log_Qs(para,z_samples,dim)
        log_priors=ng_log_Priors(z_samples,dim)
        log_likelihoods=ng_log_Likelihoods(images,labels,z_samples,dim)
        for s in range(len(z_samples)):
            gradients[s]=grad_log_Q(para,z_samples[s],dim)
        elbo_temp=log_likelihoods*revise+log_priors/scale-log_qs/scale
        grad_temp=torch.matmul(torch.diag(elbo_temp),gradients)
        grad_avg=torch.mean(grad_temp,0)
        # AdaGrad update of the variational parameters.
        G+=grad_avg*grad_avg
        rho=eta/torch.sqrt(G)
        update=rho*grad_avg
        para.data+=update
        # Progress information.
        print(torch.median(update.abs()),torch.max(update.abs()))
        if (epoch*len(train_loader)+i)%interval==0:
            print('Epoch[{}/{}], step[{}/{}]'.format(\
                epoch+1,
                num_epochs,
                i+1,len(train_loader)))
            print('ELBO: {:.3f}\n'.format(\
                elbo_list[len(elbo_list)-1]))
# Persist the ELBO trace and parameter snapshots.
if not os.path.exists('./result_elbo'):
    os.makedirs('./result_elbo')
result=np.array(elbo_list)
np.save('./result_elbo/bbvi_basic.npy',result)
if not os.path.exists('./result_para'):
    os.makedirs('./result_para')
result=np.array(para_list)
np.save('./result_para/bbvi_basic.npy',result) | allenzhangzju/Black_Box_Variational_Inference | bbvi_criteo4000000/bbvi_basic.py | bbvi_basic.py | py | 2,620 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.dataloader.DataLoader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tor... |
40365228173 | #encoding=utf-8
import sktensor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
import scipy.io as scio
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
from random import random
from scipy.sparse import rand as sprand
from compt_methods import *
from road_similar import *
import seaborn as sns
def get_tensor(mat_file,ori_file,size_=0):
    """Load the 'Speed' tensor from mat_file, optionally crop it to `size_`
    (a (p, q, r) triple), save the result to ori_file and return it."""
    data = scio.loadmat(mat_file)['Speed']
    if size_:
        # Crop each mode to the requested extent.
        data = data[tuple(slice(0, n) for n in size_)]
    scio.savemat(ori_file, {'Speed': data})
    return data
def gene_rand_sparse(ori_data, miss_ratio,miss_file):
    """Knock out entries of ori_data uniformly at random with probability
    miss_ratio (set them to 0), save the sparse tensor to miss_file
    (.mat, key 'Speed') and return it.  ori_data itself is not modified."""
    keep_mask = np.random.random_sample(np.shape(ori_data)) > miss_ratio
    sparse = ori_data * keep_mask
    scio.savemat(miss_file, {'Speed': sparse})
    return sparse
def gene_cont_sparse(ori_data,miss_ratio,miss_file):
    """Knock out whole fibres along the last axis: each (i, j) position is
    dropped for every k with probability miss_ratio.  The sparse tensor is
    saved to miss_file (.mat, key 'Speed') and returned; ori_data itself
    is not modified."""
    shape = ori_data.shape
    # 0/1 keep-indicator per (i, j) fibre, broadcast along the last mode.
    keep = np.rint(np.random.random_sample(shape[:-1]) + 0.5 - miss_ratio)
    sparse = ori_data * keep[:, :, np.newaxis]
    scio.savemat(miss_file, {'Speed': sparse})
    return sparse
# Dispatch table: sparsity-pattern name -> generator function.
gene_sparse = {'rand':gene_rand_sparse,'cont':gene_cont_sparse}
def get_sparsedata(miss_file):
    """Load a sparse 'Speed' tensor from miss_file and return
    (data, missing-mask, observed missing ratio); the mask is True at
    entries equal to zero."""
    data = scio.loadmat(miss_file)['Speed']
    missing = (data == 0)
    ratio = missing.sum() / data.size
    return data, missing, ratio
# Clean the raw data's native missing entries (values <= 1 are treated as missing).
def deal_orimiss(ori_data,shorten = False):
    """Zero out entries <= 1 and either fill them from diagonal neighbours
    (shorten=False) or delete the slices with the fewest missing indices
    (shorten=True).

    Returns (cleaned tensor, observation mask W), where W is True at
    observed (> 0) entries.
    """
    p,q,r = ori_data.shape
    zero_mat = np.zeros((p,q,r))
    # Keep values > 1, zero out everything else.
    sparse_data = (ori_data>1)*ori_data+(ori_data<=1)*zero_mat
    W = sparse_data>0
    W_miss = np.where(sparse_data<=1)
    # NOTE(review): np.where() applied to a tuple of index arrays yields the
    # nonzero positions of that stacked array, not tensor coordinates --
    # confirm the shorten branch below behaves as intended.
    M_pos = np.where(W_miss)
    if not shorten:
        # Fill each missing cell with the mean of its (up to four) diagonal
        # neighbours in the (axis1, axis2) plane, when any is observed.
        # NOTE(review): a negative index (pos2-1 or pos3-1 at 0) silently
        # wraps around instead of raising -- verify this is acceptable.
        for i in range(len(W_miss[0])):
            pos1,pos2,pos3 = W_miss[0][i],W_miss[1][i],W_miss[2][i]
            neigh_info = []
            for n2 in (1,-1):
                for n3 in (1,-1):
                    try:
                        temp = sparse_data[pos1,pos2+n2,pos3+n3]
                        neigh_info.append(temp)
                    except:
                        pass
            if sum(neigh_info)>0:
                sparse_data[pos1,pos2,pos3] = sum(neigh_info)/(np.array(neigh_info)>0).sum()
        return sparse_data,W
    # shorten=True: delete, along the axis with the fewest distinct missing
    # indices, every slice containing missing data.
    Arr = [set(arr.tolist()) for arr in M_pos]
    Arr_len = [len(arr) for arr in Arr]
    Arr_short = Arr_len.index(min(Arr_len))
    sparse_data = np.delete(sparse_data,list(Arr[Arr_short]),Arr_short)
    return sparse_data,W
def tk_rank(data,thre = 0.9):
    """Choose a Tucker rank per mode: for each unfolding, the smallest
    number of leading singular values whose cumulative share exceeds
    `thre` of the total.

    data -- 3-way tensor (array-like accepted by sktensor's dtensor).
    thre -- cumulative singular-value energy threshold.
    Returns a 3-element list of mode ranks.
    """
    rank_set = [0,0,0]
    for i in range(3):
        # Abort on degenerate data: SVD on NaN/Inf input is meaningless.
        if np.isnan(data).all():
            print(np.isnan(data).sum())
            sys.exit()
        # BUG FIX: this branch previously re-tested isnan (copy-paste) while
        # printing the isinf count; it now tests isinf as the print intended.
        if np.isinf(data).all():
            print(np.isinf(data).sum())
            sys.exit()
        U,sigma,VT = scipy.linalg.svd(dtensor(data).unfold(i),0)
        for r in range(len(sigma)):
            # First r with > thre of the spectrum's mass.
            if sum(sigma[:r])/sum(sigma) > thre:
                rank_set[i] = r
                break
    return rank_set
# Pre-imputation: fill missing cells with the mean over days of the same
# detector/time slot (or, optionally, with a learned global+bias model).
def pre_impute(sparse_data,W,day_axis=1,bias_bool = False):
    """Fill the missing entries of sparse_data (where W is False) in place.

    bias_bool=False: each missing cell gets the mean of the observed values
    along day_axis at the same position, falling back to a per-slice mean.
    bias_bool=True: fit a mean-plus-three-bias model by gradient descent and
    fill missing cells from it.  Returns the (mutated) sparse_data.
    """
    if not bias_bool:
        pos = np.where(W==False)
        for p in range(len(pos[0])):
            i,j,k = pos[0][p],pos[1][p],pos[2][p]
            if day_axis == 0:
                # Mean over axis 0 at (j, k) when anything is observed there,
                # otherwise the mean of the whole slice i.
                if (sparse_data[:,j,k]>0).sum()>0:
                    sparse_data[i,j,k] = np.sum(sparse_data[:,j,k])/(sparse_data[:,j,k]>0).sum()
                else:
                    sparse_data[i,j,k] = np.sum(sparse_data[i,:,:])/(sparse_data[i,:,:]>0).sum()
            elif day_axis == 1:
                # Same idea with axis 1 as the "day" axis.
                if (sparse_data[i,:,k]>0).sum()>0:
                    sparse_data[i,j,k] = np.sum(sparse_data[i,:,k])/(sparse_data[i,:,k]>0).sum()
                else:
                    sparse_data[i,j,k] = np.sum(sparse_data[:,j,:])/(sparse_data[:,j,:]>0).sum()
        return sparse_data
    # Bias model: value(i,j,k) ~ mean + b0[i] + b1[j] + b2[k], fitted by
    # L2-regularised gradient descent on the observed entries.
    b,b1 = {},{}
    sp = np.shape(sparse_data)
    impute_data = sparse_data.copy()
    mean = np.sum(impute_data)/(impute_data>0).sum()
    for n in range(3):
        b[n] = np.random.uniform(0,0.1,sp[n])
        b1[n] = np.zeros(sp[n])
    # seta: learning rate; miu: L2 regularisation weight.
    seta,miu = 1e-4,800
    sum_F = sum([np.sum(b[n]**2) for n in range(3)])
    J = 1/2*np.sum(W*(sparse_data-impute_data)**2)+miu/2*sum_F
    ite = 0
    while ite < 100:
        ite += 1
        J_pre = J
        # Gradient step for each bias vector on the observed residuals.
        for i in range(sp[0]):
            b1[0][i] = (1-seta*miu)*b[0][i]+seta*np.sum(W[i,:,:]*(sparse_data-impute_data)[i,:,:])
        for j in range(sp[1]):
            b1[1][j] = (1-seta*miu)*b[1][j]+seta*np.sum(W[:,j,:]*(sparse_data-impute_data)[:,j,:])
        for k in range(sp[2]):
            b1[2][k] = (1-seta*miu)*b[2][k]+seta*np.sum(W[:,:,k]*(sparse_data-impute_data)[:,:,k])
        # NOTE(review): this convergence test deliberately(?) does nothing
        # (`pass`); the loop only stops on the |J - J_pre| check below.
        if sum([np.sum((b1[n]-b[n])**2)**0.5 for n in range(3)]) < 0.001:
            pass
        for n in range(3):
            b[n] = b1[n].copy()
        # Rebuild the model prediction for every cell.
        for i in range(sp[0]):
            for j in range(sp[1]):
                for k in range(sp[2]):
                    impute_data[i,j,k] = mean+b[0][i]+b[1][j]+b[2][k]
        sum_F = sum([np.sum(b[n]**2) for n in range(3)])
        J = 1/2*np.sum(W*(sparse_data-impute_data)**2)+miu/2*sum_F
        if abs(J-J_pre) < 1:
            break
    print(np.sum(W*(sparse_data-impute_data)**2))
    # NOTE(review): this fills *all* missing cells with a single scalar built
    # from the final loop indices (i, j, k) rather than per-cell predictions
    # from impute_data -- looks like a bug; confirm intent.
    sparse_data[W==False] = mean+b[0][i]+b[1][j]+b[2][k]
    return sparse_data
def rmse_mape_rse(est_data,ori_data,W):
    """Error metrics computed over the *missing* entries only (W True = observed).

    Returns (RMSE, MAPE in percent, RSE, MAE) rounded to (4, 2, 4, 4) decimals.
    """
    missing = (W == False)
    n_missing = missing.sum()
    # Residuals, zeroed at observed positions.
    err = np.zeros(np.shape(est_data)) + missing * (est_data - ori_data)
    rmse = (np.sum(err ** 2) / n_missing) ** 0.5
    rel_err = np.zeros_like(est_data)
    rel_err[missing] = np.abs((est_data[missing] - ori_data[missing]) / ori_data[missing])
    mape = np.sum(rel_err) / n_missing * 100
    rse = np.sum(err ** 2) ** 0.5 / np.sum(ori_data[missing] ** 2) ** 0.5
    mae = np.sum(np.abs(err)) / n_missing
    return round(float(rmse), 4), round(float(mape), 2), round(float(rse), 4), round(float(mae), 4)
def show_img(X,RMSE_list,MAE_list,name_list):
    """Save one RMSE curve and one MAE curve (PNG) per method in name_list.

    NOTE(review): `img_dir` is a global not defined in this module as shown
    -- presumably injected via the star imports above; confirm.
    """
    for i in range(len(name_list)):
        plt.plot(X,RMSE_list[i],'--o')
        plt.savefig(img_dir+'rmse_'+name_list[i])
        plt.close()
        plt.plot(X,MAE_list[i],'--o')
        plt.savefig(img_dir+'mae_'+name_list[i])
        plt.close()
    return 0
def compare_iter(ori_speeddata,miss_data,miss_pos,W):
    """Sweep the convergence threshold and plot RMSE/MAE curves for the
    LRTC, SiLRTC and HaLRTC completion methods.

    ori_speeddata -- ground-truth tensor; miss_data -- tensor with missing
    entries; miss_pos -- unused here; W -- observation mask (True = observed).
    Saves two comparison plots under img_dir and returns 0.
    """
    sp = np.shape(miss_data)
    rank_set = [0,0,0]
    main_rate = 0.9
    # Shared completion hyper-parameters.
    alpha = [1/3,1/3,1/3]
    beta = [0.1,0.1,0.1]
    beta1 = beta.copy()
    gama = [2,2,2]
    lou = 1e-3
    K = 100
    conv = 1e-4
    conv_list = np.arange(1e-4,1e-3,5e-5)
    K_list = [50+10*count for count in range(16)]
    RMSE_lrtc_list,MAE_lrtc_list = [],[]
    RMSE_silrtc_list,MAE_silrtc_list = [],[]
    RMSE_halrtc_list,MAE_halrtc_list = [],[]
    range_list = conv_list
    for conv in conv_list:
        # One completion run per method at this convergence threshold.
        est_lrtc = lrtc_cpt(miss_data,beta,alpha,gama,conv,K,W)
        RMSE_lrtc,MAPE_lrtc,RSE_lrtc,MAE_lrtc = rmse_mape_rse(est_lrtc,ori_speeddata,W)
        RMSE_lrtc_list.append(RMSE_lrtc)
        MAE_lrtc_list.append(MAE_lrtc)
        est_silrtc = silrtc_cpt(miss_data,alpha,beta1,conv,K)
        RMSE_silrtc,MAPE_silrtc,RSE_silrtc,MAE_silrtc = rmse_mape_rse(est_silrtc,ori_speeddata,W)
        RMSE_silrtc_list.append(RMSE_silrtc)
        MAE_silrtc_list.append(MAE_silrtc)
        est_halrtc = halrtc_cpt(miss_data,lou,conv,K,W)
        RMSE_halrtc,MAPE_halrtc,RSE_halrtc,MAE_halrtc = rmse_mape_rse(est_halrtc,ori_speeddata,W)
        RMSE_halrtc_list.append(RMSE_halrtc)
        MAE_halrtc_list.append(MAE_halrtc)
    # RMSE comparison plot.  NOTE(review): img_dir comes from the star
    # imports above -- confirm it is defined there.
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot(range_list,RMSE_lrtc_list,'r--^',label='lrtc')
    ax.plot(range_list,RMSE_silrtc_list,'r--s',label='silrtc')
    ax.plot(range_list,RMSE_halrtc_list,'r--D',label='halrtc')
    ax.legend(loc='best')
    plt.savefig(img_dir+'compare_conv_rmse.png')
    plt.close()
    # MAE comparison plot.
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot(range_list,MAE_lrtc_list,'r--^',label='lrtc')
    ax.plot(range_list,MAE_silrtc_list,'r--s',label='silrtc')
    ax.plot(range_list,MAE_halrtc_list,'r--D',label='halrtc')
    ax.legend(loc='best')
    plt.savefig(img_dir+'compare_conv_mae.png')
    plt.close()
    #show_img(K_list,RMSE_tc,MAE_tc,['lrtc','silrtc','halrtc'])
    return 0
def compare_methods(ori_speeddata,ori_W):
    """Benchmark imputation methods across synthetic missing rates 10%..80%.

    For each rate a sparse copy of the speed tensor is generated (or loaded),
    each enabled method is run, and RMSE/MAE/MRE plus run time are recorded.
    Results are written to compare_methods.txt and plotted one figure per
    metric into img_dir.

    NOTE(review): relies on module-level globals (data_dir, img_dir,
    data_size, time) and helpers defined elsewhere in this file.
    """
    RM_dict,MA_dict,RS_dict,MP_dict = {},{},{},{}
    rt_dict = {}
    miss_list = []  # realized missing rate (%) of each experiment
    # NOTE: this eva_dict is overwritten after the loop; kept as in original.
    eva_dict = {'rmse':RM_dict,'mae':MA_dict,'mape':MP_dict}
    for i in range(8):
        print('----'+str(i)+'----')
        miss_ratio = round(0.1*(i+1),2)
        miss_path = data_dir+'miss_'+str(miss_ratio)+''.join(['_'+str(ch) for ch in data_size])+'.mat'
        if not os.path.exists(miss_path):
            gene_rand_sparse(ori_speeddata,miss_ratio,miss_path)
        miss_data,W_miss,tm_ratio = get_sparsedata(miss_path)
        W = (W_miss==False)  # True where an entry is observed
        rW = W | (ori_W == False)  # mask covering synthetic AND original gaps
        miss_list.append(round(tm_ratio * 100,1))
        # pre-imputation step (translated from: 预填充)
        time_s = time.time()
        miss_data = pre_impute(miss_data, W)
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(miss_data, ori_speeddata, rW)
        km = 'pre-impute'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s, 1))
        # The remaining methods are disabled via this bare string literal:
        """
        #参数
        p = 0.7
        K = 100 #iterations
        F_thre = 1e-4 #F_norm convergence threshold
        #halrtc
        time_s = time.time()
        lou = 1 / T_SVD(miss_data, p)[0][0]
        est_halrtc = halrtc_cpt(miss_data,lou,F_thre,K,W,alpha=[1/3,1/3,1/3])
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(est_halrtc,ori_speeddata,rW)
        km = 'HaLRTC'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
        #Kmeans+halrtc
        time_s = time.time()
        K_n = 4 #cluster_num
        est_kmeans = Kmeans_ha(miss_data, W, K_n, K, F_thre, p)
        time_e = time.time()
        rm,mp,rs,ma = rmse_mape_rse(est_kmeans,ori_speeddata,rW)
        km = 'HaLRTC-CSP'
        if km not in RM_dict:
            RM_dict[km],MP_dict[km],RS_dict[km],MA_dict[km] = [],[],[],[]
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
        '''
        #STD
        time_s = time.time()
        ap,lm,thre = 2e-10,0.05,0.1
        est_STD = STD_cpt(miss_data, W, thre, ap, lm, p)
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(est_STD, ori_speeddata, rW)
        km = 'STD'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
        '''
        #BPCA
        time_s = time.time()
        est_BPCA = BPCA_cpt(miss_data, p)
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(est_BPCA, ori_speeddata, rW)
        km = 'BPCA'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
        """
    # Plot and dump one figure/section per metric.
    eva_dict = {'RMSE':RM_dict,'MAE':MA_dict,'MRE':MP_dict,'Run_Time':rt_dict}
    metric_dict = {'RMSE':'km/h', 'MAE':'km/h', 'MRE':'%', 'Run_Time':'s'}
    eva_Ylim = {'RMSE':[2,10],'MAE':[0,5],'MRE':[5,20],'Run_Time':[0,5000]}  # currently unused
    shape = ['r--o','r--*','r--x','r--^','r--s','r--D']  # currently unused
    MK = ['o','o','*','*','x','x']  # marker per method, by insertion order
    CR = ['r','b','y','r','b','y']  # color per method, by insertion order
    fw = open('compare_methods' + '.txt', 'w')
    fw.write('methods:'+','.join(list(eva_dict['RMSE'].keys()))+'\n')
    fw.write('Missing Rate (%):' + ','.join(list(map(str, miss_list))) + '\n')
    for eva in eva_dict:
        #fig = plt.figure()
        #ax = fig.add_subplot(1,1,1)
        plt.xlabel('Missing Rate (%)')
        plt.ylabel(eva+' ('+metric_dict[eva]+')')
        # xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi],
        #   [r'$-pi$', r'$-pi/2$', r'$0$', r'$+pi/2$', r'$+pi$'])
        nl = 0
        fw.write(eva+':\n')
        for method in eva_dict[eva]:
            plt.plot(miss_list,eva_dict[eva][method],color=CR[nl],marker=MK[nl],label='$'+method+'$')
            fw.write(','.join(list(map(str, eva_dict[eva][method]))) + '\n')
            nl += 1
        plt.legend(loc='best')
        plt.savefig(img_dir+'compare_mr_'+'_'+eva+'.png')
        plt.close()
    fw.close()
    return 0
def svd_vary(sparse_data):
    """Plot the per-row variance of each mode-unfolding of *sparse_data*.

    One curve per tensor mode is drawn and saved to img_dir/svd_vary.png.
    (An earlier revision plotted singular values instead — hence the name.)
    """
    dims = sparse_data.shape
    longest = max(dims)
    variances = np.zeros((longest, 3))
    plt.xlabel('Missing Rate (%)')
    for mode in range(len(dims)):
        unfolded = dtensor(sparse_data).unfold(mode)
        for row in range(dims[mode]):
            variances[row, mode] = np.var(unfolded[row])
        plt.plot(list(range(longest)), variances[:, mode], 'r--o', label='$' + str(mode) + '$')
    plt.legend(loc=1)
    plt.savefig(img_dir + 'svd_vary.png')
    plt.close()
    return
# Compare the imputation results of the 3-D versus 4-D tensor layouts.
def compare_3d_4d(ori_speeddata,miss_data):
    """Run HaLRTC on the 3-D tensor and print its error metrics and runtime.

    NOTE(review): W and ori_W are not parameters — this function depends on
    the module-level globals assigned in the __main__ block; confirm before
    reusing it from other call sites.
    """
    time1 = time.time()
    # presumably a step size derived from T_SVD's leading singular value — TODO confirm
    lou = 1 / T_SVD(miss_data, 0.7)[0][0]
    #lou = 0.1
    print(lou)
    est_halrtc = halrtc_cpt(miss_data, lou, 1e-4, 100, W)
    time2 = time.time()
    print('3d_halrtc:', rmse_mape_rse(est_halrtc, ori_speeddata, (W | (ori_W == False))))
    print('3d_time', str(time2 - time1) + 's')
    return
def compare_PI(ori_speeddata,ori_W):
    """Ablation study of the pre-imputation (PI) step.

    For each missing rate, HaLRTC runs once on the raw sparse tensor
    ('Without-PI') and once after pre_impute ('With-PI'); metrics and run
    times are written to compare_PI_.txt and plotted one figure per metric.

    NOTE(review): relies on module-level globals data_dir, img_dir,
    data_size and time, as in compare_methods.
    """
    RM_dict, MA_dict, RS_dict, MP_dict = {}, {}, {}, {}
    rt_dict = {}
    miss_list = []
    # parameters (translated from: 参数); halrtc_cpt below uses literals instead
    p = 0.7
    K = 100  # iterations
    F_thre = 1e-4  # F_norm convergence threshold
    for i in range(8):
        print('----'+str(i)+'----')
        miss_ratio = round(0.1*(i+1),2)
        miss_path = data_dir+'miss_'+str(miss_ratio)+''.join(['_'+str(ch) for ch in data_size])+'.mat'
        if not os.path.exists(miss_path):
            gene_rand_sparse(ori_speeddata,miss_ratio,miss_path)
        miss_data,W_miss,tm_ratio = get_sparsedata(miss_path)
        W = (W_miss==False)  # True where an entry is observed
        miss_list.append(round(tm_ratio * 100,1))
        # run WITHOUT pre-imputation (translated from: 预填充)
        #miss_data = pre_impute(miss_data,W,False)
        rW = W|(ori_W==False)
        time_s = time.time()
        K_n = 4  # cluster_num
        #est_kmeans = Kmeans_ha(miss_data, W, K_n, K, F_thre, p)
        est_halrtc = halrtc_cpt(miss_data, 1e-3, 1e-4, 100, W,[1/3,1/3,1/3])
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(est_halrtc, ori_speeddata, rW)
        km = 'Without-PI'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
        # run WITH pre-imputation
        miss_data = pre_impute(miss_data, W, False)
        rW = W | (ori_W == False)
        time_s = time.time()
        K_n = 4  # cluster_num
        #est_kmeans = Kmeans_ha(miss_data, W, K_n, K, F_thre, p)
        est_halrtc = halrtc_cpt(miss_data, 1e-3, 1e-4, 100, W,[1/3,1/3,1/3])
        time_e = time.time()
        rm, mp, rs, ma = rmse_mape_rse(est_halrtc, ori_speeddata, rW)
        km = 'With-PI'
        if km not in RM_dict:
            RM_dict[km], MP_dict[km], RS_dict[km], MA_dict[km] = [], [], [], []
            rt_dict[km] = []
        RM_dict[km].append(rm)
        MA_dict[km].append(ma)
        MP_dict[km].append(mp)
        RS_dict[km].append(rs)
        rt_dict[km].append(round(time_e - time_s,1))
    # Plot and dump one figure/section per metric.
    eva_dict = {'RMSE': RM_dict, 'MAE': MA_dict, 'MRE': MP_dict, 'Run_Time': rt_dict}
    metric_dict = {'RMSE': 'km/h', 'MAE': 'km/h', 'MRE': '%', 'Run_Time': 's'}
    eva_Ylim = {'RMSE': [2, 10], 'MAE': [0, 5], 'MRE': [5, 20], 'Run_Time': [0, 5000]}  # currently unused
    shape = ['r--o', 'r--*', 'r--x', 'r--^', 'r--s', 'r--D']  # currently unused
    MK = ['o', 'o', '*', '*', 'x', 'x']  # marker per method, by insertion order
    CR = ['r', 'b', 'y', 'r', 'b', 'y']  # color per method, by insertion order
    fw = open('compare_PI_' + '.txt', 'w')
    fw.write('Missing Rate (%):' + ','.join(list(map(str, miss_list))) + '\n')
    for eva in eva_dict:
        # fig = plt.figure()
        # ax = fig.add_subplot(1,1,1)
        plt.xlabel('Missing Rate (%)')
        plt.ylabel(eva + ' (' + metric_dict[eva] + ')')
        # xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi],
        #   [r'$-pi$', r'$-pi/2$', r'$0$', r'$+pi/2$', r'$+pi$'])
        nl = 0
        fw.write(eva + ':' + '\n')
        for method in eva_dict[eva]:
            plt.plot(miss_list, eva_dict[eva][method], color=CR[nl], marker=MK[nl], label='$' + method + '$')
            fw.write(','.join(list(map(str, eva_dict[eva][method]))) + '\n')
            nl += 1
        plt.legend(loc='best')
        plt.savefig(img_dir + 'compare_PI_' + '_' + eva + '.png')
        plt.close()
    fw.close()
    return 0
def ori_imputation(miss_data,W,ori_speeddata,ori_W):
    """Scatter imputed values against ground truth for one fixed slice.

    Points on the blue diagonal were recovered perfectly.  Only positions
    missing in miss_data but observed in the original tensor are drawn.
    """
    dims = miss_data.shape
    known_mask = W | (ori_W == False)  # True where no comparison is possible/needed
    estimated = Kmeans_ha(miss_data, W, 4, 100, 1e-4, 0.7)
    plt.xlim(0, 50)
    plt.ylim(0, 50)
    plt.plot([0, 50], [0, 50], color='b', linewidth=2)
    fixed = 1  # index of the inspected slice along the first tensor axis
    for j in range(dims[1]):
        for k in range(dims[2]):
            if known_mask[fixed, j, k]:
                continue
            plt.scatter(ori_speeddata[fixed, j, k], estimated[fixed, j, k], s=10, c='r')
    plt.savefig('填充真实对比图.png')
    plt.close()
    return
if __name__ == '__main__':
    data_dir = './data/'
    img_dir = './img_test/'
    mat_path = '/home/qiushye/2013_east/2013_east_speed.mat'
    # data layout: (number of days, number of loop detectors, number of time intervals)
    #data_size = (60,80,144)
    #data_size = (15,35,288)
    data_size = (30,20,72)
    # Guangzhou dataset
    ori_speeddata = scio.loadmat('../GZ_data/60days_tensor.mat')['tensor']
    #train_sim(ori_speeddata[:,:30,:])
    #assign_group(ori_speeddata[:,:30,:])
    #simMat = multi_sim(ori_speeddata[:,:30,:])
    #scio.savemat('road_sim.mat',{'sim':simMat})
    #sys.exit()
    #print(np.var(ori_speeddata))
    shorten = False
    # fraction of entries already missing in the raw data
    print((ori_speeddata==0).sum()/ori_speeddata.size)
    ori_speeddata,ori_W = deal_orimiss(ori_speeddata,shorten)
    data_size = np.shape(ori_speeddata)
    print(data_size)
    # compare_methods(ori_speeddata,ori_W)
    # compare_PI(ori_speeddata,ori_W)
    #sys.exit()
    miss_ratio = 0.2
    miss_path = data_dir+'miss_'+str(round(miss_ratio,1))+'_'+'_'.join([str(ch) for ch in data_size])+'.mat'
    #miss_path = data_dir+'cont_miss_'+'_'.join([str(ch) for ch in data_size])+'.mat'
    if not os.path.exists(miss_path):
        gene_rand_sparse(ori_speeddata,miss_ratio,miss_path)
    miss_data,W_miss,tm_ratio = get_sparsedata(miss_path)
    # f, ax = plt.subplots()
    # sns.heatmap(miss_data[0], cmap='RdBu', linewidths=0.05, ax=ax)
    # ax.invert_yaxis()
    # ax.set_xlabel('time interval')
    # ax.set_ylabel('days')
    # plt.savefig(img_dir+'miss_heatmap.png')
    # plt.close()
    # sys.exit()
    W = miss_data>0  # observation mask: True where a value is present
    #W1 = miss_data==0
    data_shape = np.shape(ori_speeddata)
    miss_data = pre_impute(miss_data,W)
    print('pre_impute:', rmse_mape_rse(miss_data, ori_speeddata, W | (ori_W == False)))
    #compare_3d_4d(ori_speeddata,miss_data)
    ori_imputation(miss_data, W, ori_speeddata, ori_W)
    #svd_vary(miss_data)
    sys.exit()  # everything below is currently disabled
    #est_partmiss(ori_speeddata, ori_W, miss_data, W, 0.04)
    #u,sigma,vt = np.linalg.svd(dtensor(miss_data).unfold(1),0)
    #print(sigma[0],sigma[0]/sum(sigma))
    #sys.exit()
    lou = 1e-3
    K = 100
    conv = 1e-4
    halrtc_para = [3e-3,100,1e-4]  # [lou, iterations, convergence threshold]
    [lou,K,conv_thre] = halrtc_para
    time0 = time.time()
    #est_halrtc = halrtc_cpt(miss_data, 1.3e-3, 1e-4, 100, W, 0)
    time1 = time.time()
    #print('ori_halrtc:', rmse_mape_rse(est_halrtc, ori_speeddata, (W | (ori_W == False))))
    print('ori_time', str(time1- time0) + 's')
    K_n = 2
    labels = SC_1(miss_data, 6, K_n, axis=0)
    est_SC = cluster_ha(labels, miss_data, W, K_n, halrtc_para, axis=0)
    time_e = time.time()
    print('sc_est:',rmse_mape_rse(est_SC, ori_speeddata, W|(ori_W==False)))
    clr_assign,K_n = road_Kmeans(miss_data,ori_W,K_n,W,axis=0,method='cos')
    est_kmeans = cluster_ha(clr_assign,miss_data,W,K_n,halrtc_para,axis=0)
    print('kmeans_est:',rmse_mape_rse(est_kmeans,ori_speeddata,W|(ori_W==False)))
    time2 = time.time()
    print('kmeans_time:',time2-time1,'s')
    sys.exit()
    cr = range(30)
| qiushye/ITS_217 | tensor_cpt.py | tensor_cpt.py | py | 21,683 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.seterr",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_numb... |
3415780911 | import os
from reportlab.pdfgen import canvas
from reportlab.lib.utils import ImageReader
def img2pdf(filepath):
    """Merge every image in directory *filepath* into '<filepath>.pdf'.

    Each image becomes one PDF page sized to the image's pixel dimensions;
    page order is the lexicographic order of the file paths.
    Returns True on success.
    """
    name = filepath + '.pdf'
    raw_file = os.listdir(filepath)
    ''' get img file'''
    # Fix: compare extensions case-insensitively so '.JPG'/'.PNG' etc. are
    # no longer silently skipped.
    img_list = []
    for i in raw_file:
        end = i.split('.')[-1].lower()
        if end in ('jpg', 'jpeg', 'png'):
            img_list.append(filepath + '/' + i)
    img_list.sort()
    ''' write to file '''
    x = canvas.Canvas(name)
    for img in img_list:
        img_r = ImageReader(img)
        img_size = img_r.getSize()
        x.setPageSize(img_size)
        x.drawImage(img, 0, 0)
        x.showPage()
    x.save()
    return True
if __name__ == '__main__':
    path = 'kf'  # directory (relative to cwd) containing the images
    print('loading...')
    if img2pdf(path):
        print('{} ok.'.format(path))
| ll104567/image2pdf | image2pdf.py | image2pdf.py | py | 792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "repo... |
73742107553 | # detect jolly jumpers
import sys
from functools import reduce
def is_jolly(nums):
    """Return True when the absolute differences of successive elements
    cover every value 1..len(nums)-1 (a "jolly jumper" sequence).

    A single-element sequence is jolly by definition.
    """
    n = len(nums)
    if n == 1:
        return True
    seen = [False] * n
    seen[0] = True  # a zero difference can never contribute; slot 0 is ignored
    for left, right in zip(nums, nums[1:]):
        gap = abs(right - left)
        if gap < n:
            seen[gap] = True
    return all(seen)
if __name__ == '__main__':
    # Each stdin line is "<count> <n integers>"; the leading count is skipped
    # when testing, since is_jolly infers the length itself.
    lines = []
    for line in sys.stdin:
        lines.append(line)
    for line in lines:
        parts = line.replace("\n","").split(" ")
        parts = list(map(lambda k: int(k), parts))
        jolly = is_jolly(parts[1:])
        if jolly:
            print("Jolly")
        else:
            print("Not jolly")
| DylanMeeus/CompetitiveProgramming3 | UVa/chapter2/10038.py | 10038.py | py | 863 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 28,
"usage_type": "attribute"
}
] |
75205683873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
files_to_database.py
Create a database with all files in selected directory
"""
import os
import sys
import sqlite3
from pathlib import Path
import hashlib
class Database:
    """Thin wrapper around a SQLite database holding one ``files`` table."""

    def __init__(self, name):
        """Open (or create) the database *name* and ensure the table exists.

        *name* may be a file path or ':memory:'.
        """
        self.conn = sqlite3.connect(name)
        self.cur = self.conn.cursor()
        self.create_table()

    def create_table(self):
        """Create the ``files`` table if it does not exist yet."""
        query = """CREATE TABLE IF NOT EXISTS files (
                    id INTEGER PRIMARY KEY,
                    path TEXT NOT NULL UNIQUE,
                    md5 TEXT,
                    mtime TEXT,
                    ctime TEXT,
                    size TEXT
                    );"""
        self.cur.execute(query)
        self.conn.commit()

    def insert(self, path, md5, mtime, ctime, size):
        """Insert (or replace) one file record.

        Fix: uses parameterized SQL.  The previous f-string interpolation
        broke on any value containing a quote and allowed SQL injection.
        """
        query = ("INSERT OR REPLACE INTO files (path, md5, mtime, ctime, size) "
                 "VALUES (?, ?, ?, ?, ?);")
        self.cur.execute(query, (path, md5, mtime, ctime, size))
        self.conn.commit()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): there is no matching __enter__, so this only runs if
        # one is added; kept unchanged for backward compatibility.
        self.cur.close()
def main(path):
    """Walk *path* recursively and record every file's hash and metadata
    in a SQLite database named '<dirname>_files.db' in the current directory.
    """
    root = Path(path)
    db = Database(f'{root.stem}_files.db')
    for entry in root.rglob('*'):
        if not entry.is_file():
            continue
        info = entry.stat()
        db.insert(str(entry.resolve()),                           # absolute path
                  hashlib.md5(entry.read_bytes()).hexdigest(),    # md5 checksum
                  info.st_mtime,                                  # modification time
                  info.st_ctime,                                  # creation time
                  info.st_size)                                   # size in bytes
        print(entry)
# for root, dirs, files in os.walk(path):
# for f in files:
# f_path = os.path.join(root, f)
# print(f_path)
# db.insert(f_path)
if __name__ == '__main__':
    # CLI: the first argument is the directory to index.
    main(sys.argv[1])
| thejoltjoker/python | files/files_to_database.py | files_to_database.py | py | 1,842 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number"... |
32738209475 | import argparse
from colorama import init
from .common import Fore, Messages, Path
from .. import Havok
def parse_args():
    """Build the command-line interface and parse sys.argv."""
    cli = argparse.ArgumentParser(
        description="Compare Havok packfiles.\nAlso works between Wii U and Switch packfiles."
    )
    cli.add_argument(
        "hkFiles", type=Path, nargs="+", help="Paths to Havok packfiles for comparison"
    )
    return cli.parse_args()
def main():
    """Load and deserialize every given packfile, then compare consecutive pairs."""
    init(autoreset=True)

    args = parse_args()
    if not args.hkFiles or len(args.hkFiles) <= 1:
        return None

    loaded = []
    for hkFile in args.hkFiles:
        Messages.loading(hkFile)
        havok = Havok.from_file(hkFile)
        Messages.deserializing(hkFile)
        havok.deserialize()
        loaded.append(havok)

    # Compare each file with the next one.
    for current, following in zip(loaded, loaded[1:]):
        print(
            f"{Fore.BLUE}Comparing '{current.path.name}' and '{following.path.name}'"
        )
        for left, right in zip(current.files, following.files):
            if left.data.contents[0] == right.data.contents[0]:
                print(f"{Fore.GREEN}File contents match!")
            else:
                print(f"{Fore.RED}File contents do not match!")

    Messages.done()
if __name__ == "__main__":
    # allow running as a plain script in addition to a console entry point
    main()
| krenyy/botw_havok | botw_havok/cli/hk_compare.py | hk_compare.py | py | 1,257 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "common.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "colorama.init",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "common.Messages.loa... |
29783814673 | #!/usr/bin/env python
from StringIO import StringIO
import pycurl
# The BeautifulSoup module
from bs4 import BeautifulSoup
import lxml
import argparse
# Fetch the Kotaku front page with pycurl and parse it with BeautifulSoup.
# (Python 2 era script: StringIO import, Py2-style commented prints.)
url = 'http://kotaku.com/'
# Fix: a single buffer is enough — the duplicate `storage = StringIO()`
# assignment was removed (the first instance was immediately discarded).
storage = StringIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEFUNCTION, storage.write)
# again setting headers to prevent bots from scraping
c.setopt(c.HTTPHEADER, ['User-Agent: Mozilla/5.0 (compatible; Googlebot/2.1; ',
                        'header_name2: header_value2'])
c.perform()
c.close()
content = storage.getvalue()
# content lxml
# NOTE(review): no parser is passed to BeautifulSoup, so it picks whichever
# is available; pass 'lxml' explicitly for reproducible parsing.
soup = BeautifulSoup(content)
# Beautiful Soup examples
# print soup
spans = soup.find_all('span')  # find all spans
# print len(spans)
# find divs with class row
div_rows = soup.find_all('div', {'class' : 'row'})
| jmgamboa/Scrapers | ScrapeStringio.py | ScrapeStringio.py | py | 791 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "StringIO.StringIO",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "StringIO.StringIO",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pycurl.Curl",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
42776218853 | import numpy as np
import plotly.graph_objs as go
# Implicit finite-difference solver for a 4th-order (beam-type) PDE,
# solved one time layer at a time via a dense linear system.
alpha = 0.01
length = 1  # length of the kernel
a = 1
t = 1  # time for modeling
h = 1e-2  # x-axis step of the mesh
tau = 1e-2  # time step of the mesh
M = int(length / h) + 1  # amount of x-axis nodes of the mesh (101 with current h)
y = np.array([])  # array with y(n, i) values
N = int(t / tau) + 1  # amount of time nodes of the mesh (101 with current tau)
# matrix coefficients of the 5-point 4th-derivative stencil
A = a ** 2 * tau ** 2 / h ** 4
B = - 4 * A
C = 1 + 6 * A
D = - 4 * A
E = A
x_arr = np.linspace(0, length, M)
# filling array with initial conditions y(0, x) = alpha * x ** 2
for i in range(M):  # M iterations for M nodes of the mesh
    y = np.append(y, alpha * x_arr[i] ** 2)
y = np.vstack((y, y))
y[1, 1] = 0  # use initial condition for y(1,1)
y = np.vstack((y, np.zeros((N - 2, M))))  # array with initial conditions, boundary conditions and zeroes, NxM
for n in range(2, N):
    # matrix creation
    slae_matrix = np.zeros((1, M))  # matrix of the system of the linear algebraic equations
    free_members_column = np.array([])  # column of the free members of the system of the linear algebraic equations
    # fill column of the free members
    for i in range(M):  # M unknowns and M equations
        if i < 2 or i > M - 3:  # using the initial and boundary conditions
            free_members_column = np.append(free_members_column, 0)
        else:
            free_members_column = np.append(free_members_column, 2 * y[n - 1, i] - y[n - 2, i])
    # fill SLAE matrix row by row
    for i in range(M):
        matrix_row = np.zeros((1, M))
        if i == 0:  # using boundary condition #1 (written into the pre-allocated first row)
            slae_matrix[0, i] = 1
            continue
        elif i == 1:  # using boundary condition #2
            matrix_row[0, i] = 1
        elif 1 < i < M - 2:  # interior node: 5-point stencil
            matrix_row[0, i - 2] = A
            matrix_row[0, i - 1] = B
            matrix_row[0, i] = C
            matrix_row[0, i + 1] = D
            matrix_row[0, i + 2] = E
        elif i == M - 2:  # using boundary condition #3
            matrix_row[0, i - 1] = 1
            matrix_row[0, i] = -2
            matrix_row[0, i + 1] = 1
        else:  # using boundary condition #4
            matrix_row[0, i - 3] = -1
            matrix_row[0, i - 2] = 3
            matrix_row[0, i - 1] = -3
            matrix_row[0, i] = 1
        slae_matrix = np.vstack((slae_matrix, matrix_row))
    y[n] = np.linalg.solve(slae_matrix, free_members_column)
    print(f'{n} / {N}')
# One plotly trace per time layer.
fig = go.Figure()
for n in range(N):
    fig.add_trace(go.Scatter(x=x_arr, y=y[n], name=f't = {n * tau} секунд'))
fig.update_layout(title="Численное решение",
                  xaxis_title="x, м",
                  yaxis_title="y, м",
                  margin=dict(l=0, r=30, t=30, b=0))
fig.show()
| lehakofficial/HARD_KERNEL | Gauss_method.py | Gauss_method.py | py | 2,789 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_numb... |
18544973064 | #!/usr/bin/env python3
import os
import secrets
import sqlite3
from flask import Flask
from flask import Response
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
app = Flask(__name__)
# Base URL of the quote API (overridable via the API_URL environment variable).
app.config["API_URL"] = os.environ.get("API_URL", "http://localhost:5000/api/getQuote/")
# SQLite file queried by get_quote() and written by suggest().
app.config["DATABASE"] = "quotes.sqlite"
def get_quote(lang):
    """Return one random quote in *lang* ('de' or 'en').

    The language selects a table name, which cannot be a SQL parameter —
    so it is resolved through a fixed whitelist instead of interpolating
    the raw argument into the statement (the previous f-string approach
    would execute arbitrary table expressions for a crafted *lang*).

    Raises KeyError for any unsupported language code.
    """
    table = {"de": "quotes_de", "en": "quotes_en"}[lang]
    with sqlite3.connect(app.config["DATABASE"]) as db:
        return db.execute(f"SELECT quote FROM {table} ORDER BY random() LIMIT 1").fetchone()[0]
@app.route("/")
@app.route("/<lang>/")
def index(lang=None):
    """Render the landing page with one random quote and a per-request CSP nonce."""
    if lang is None:
        lang = request.accept_languages.best_match(['en', 'de'])
    if lang not in ("de", "en"):
        return redirect(url_for("index", lang="en"))
    nonce = secrets.token_hex(32)
    page = render_template("index.html", lang=lang, nonce=nonce, quote=get_quote(lang))
    response = Response(page)
    # Only scripts carrying this request's nonce may execute.
    response.headers["Content-Security-Policy"] = (
        f"default-src 'self'; script-src 'nonce-{nonce}'"
    )
    return response
@app.route("/<lang>/suggest/", methods=["GET", "POST"])
def suggest(lang):
    """Suggestion form: GET renders it, POST stores the submitted reason."""
    if lang not in ("de", "en"):
        return redirect(url_for("suggest", lang="en"))
    if request.method == "POST":
        reason = request.form.get("reason")
        thanks = None
        if reason:
            thanks = "yes"
            # only non-empty suggestions are persisted
            with sqlite3.connect(app.config["DATABASE"]) as conn:
                conn.execute("INSERT INTO suggestions VALUES (?, CURRENT_TIMESTAMP);", (reason,))
        # post/redirect/get; thanks stays None for empty input — presumably
        # omitted from the generated URL, verify against url_for's behavior
        return redirect(url_for("suggest", lang=lang, thanks=thanks))
    return render_template("suggest.html", lang=lang, thanks=bool(request.args.get("thanks")))
@app.route("/<lang>/api/getQuote/")
def api(lang):
    """JSON API endpoint: one random quote, or a 400 error for unknown languages."""
    if lang not in ("de", "en"):
        # Fix: jsonify() already builds a JSON Response.  The previous code
        # wrapped that Response inside another Response() as its body and
        # labelled it with the non-standard "text/json" content type.
        return jsonify({"status": "error", "message": "Unknown language!"}), 400
    # (the unused local table lookup was removed; get_quote validates lang)
    return jsonify(
        {"status": "ok", "quote": get_quote(lang)}
    )
| Varbin/historisch-gewachsen.de | app.py | app.py | py | 2,185 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"lin... |
33622872466 | # coding:utf-8
import requests
from lxml import html
import os
import datetime
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
import re
from multiprocessing import Pool
def header(referer):
    """Build browser-like request headers with *referer* filled in.

    The image host rejects requests without a matching Referer, so every
    image download passes its own URL here.
    """
    return {
        'Host': 'i.meizitu.net',
        'Pragma': 'no-cache',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/59.0.3071.115 Safari/537.36',
        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Referer': '{}'.format(referer),
    }
# Fetch one listing page and collect the detail-page links on it.
def getPage(pageNum):
    """Return the detail-page URLs found on listing page *pageNum*.

    Returns an empty list when the request fails.
    """
    baseUrl = 'http://www.mzitu.com/page/{}'.format(pageNum)
    try:
        tree = html.fromstring(requests.get(baseUrl, timeout=(3.09, 20)).content)
    except:
        return []
    return [link for link in tree.xpath('//ul[@id="pins"]/li/a/@href')]
# Download every image of one gallery (url is a detail-page link).
def getPiclink(url):
    """Download all images of the gallery at *url* into a new directory.

    The directory name encodes the listing page (module-level global pag_2),
    the image count and the gallery title.  Returns None on any early exit.
    """
    try:
        sel = html.fromstring(requests.get(url,timeout=(3.09,20)).content)
    except:
        return None
    # total number of images in the gallery
    total = sel.xpath('//div[@class="pagenavi"]/a[last()-1]/span/text()')[0]
    # gallery title
    title = sel.xpath('//h2[@class="main-title"]/text()')[0]
    # directory name format; strip characters invalid in file names
    dirName = u"[第{}页].[共{}张].{}".format(pag_2,total,title)
    dirName = dirName.replace(":","")
    dirName = dirName.replace("?","")
    dirName = dirName.replace("\\","")
    dirName = dirName.replace("*","")
    dirName = dirName.replace("/","")
    # Collect the titles of already-downloaded galleries.
    # Fix: the old code called ll.remove(k) while iterating ll, which skips
    # elements and could miss existing directories; we now only collect the
    # matching titles without mutating the list being iterated.
    existing_titles = []
    for parent,dirnames,filenames in os.walk(os.path.abspath('.')):
        s = re.findall(r'.+张]\.(.*)',parent)
        if s:
            existing_titles.append(s[0])
    s = re.findall(r'.+张]\.(.*)',dirName)[0]
    if s not in existing_titles:
        os.mkdir(dirName)
    else:
        print('目录名重复')
        return None
    try:
        total = int(total)
    except:
        total = 1
    n = 1
    for i in range(total):
        try:
            link = '{}/{}'.format(url, i+1)
            try:
                s = html.fromstring(requests.get(link,timeout=(3.09,20)).content)
            except:
                continue
            jpgLink = s.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
            try:
                img = requests.get(jpgLink, headers=header(jpgLink),timeout=(3.09,20)).content
            except:
                continue
            filename = '%s/%s/%s.jpg' % (os.path.abspath('.'), dirName, n)
            print(u'正在下载:第%s页-第%s张-共%s张-%s' % (pag_2, n,total,title))
            with open(filename, "wb+") as jpg:
                jpg.write(img)
            n += 1
        except:
            pass
def start_heiheihei(pageNum):
    """Download all galleries listed on page *pageNum* using a thread pool.

    Sets the module-level global pag_2, which getPiclink reads to build
    directory names and progress messages.
    """
    global pag_2
    pag_2 = pageNum
    p = getPage(pageNum)
    if p == []:
        return None
    with ThreadPool(5) as pool:
        pool.map(getPiclink, p)
        pool.close()
        pool.join()
if __name__ == '__main__':
    multiprocessing.freeze_support()
    start = datetime.datetime.now()
    # listing pages 1..187, fanned out over 4 worker processes
    result = [x for x in range(1,188)]
    p = Pool(4)
    r = p.map(start_heiheihei,result)
    p.close()
    p.join()
    end = datetime.datetime.now()
    runtime = str((end-start).seconds)
    print(runtime)
| liuyongliu/liuyong_code | 妹子图爬取.py | 妹子图爬取.py | py | 3,570 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lxml.html.fromstring",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
... |
40759539134 | import json
import pickle
import random
from os.path import join, dirname
import nltk
from random import shuffle
from string import punctuation
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from JarbasModelZoo.features import extract_word_features
# Static metadata saved next to the trained model; "accuracy" is filled in
# after evaluation further below.
MODEL_META = {
    "corpus": "macmorpho",
    "corpus_homepage": "http://www.nilc.icmc.usp.br/lacioweb/",
    "tagset": "Universal Dependencies",
    "lang": "pt",
    "model_id": "sklearn_macmorpho_udep_svm_tagger",
    "algo": "sklearn.svm.LinearSVC",
    "required_packages": ["scikit-learn"]
}
nltk.download('mac_morpho')  # fetch the Brazilian-Portuguese tagged corpus
def convert_to_universal_tag(t, reverse=True):
    """Map a mac_morpho tag *t* onto the Universal Dependencies tagset.

    With reverse=True only the part before the first '|' is kept; otherwise
    '+'/'|'/'#' compound markers are resolved first.  Unknown tags made of
    punctuation map to '.'; other unknown tags are returned lowercased.
    """
    tagdict = {
        'n': "NOUN",
        'num': "NUM",
        'v-fin': "VERB",
        'v-inf': "VERB",
        'v-ger': "VERB",
        'v-pcp': "VERB",
        'pron-det': "PRON",
        'pron-indp': "PRON",
        'pron-pers': "PRON",
        'art': "DET",
        'adv': "ADV",
        'conj-s': "CONJ",
        'conj-c': "CONJ",
        'conj-p': "CONJ",
        'adj': "ADJ",
        'ec': "PRT",
        'pp': "ADP",
        'prp': "ADP",
        'prop': "NOUN",
        'pro-ks-rel': "PRON",
        'proadj': "PRON",
        'prep': "ADP",
        'nprop': "NOUN",
        'vaux': "VERB",
        'propess': "PRON",
        'v': "VERB",
        'vp': "VERB",
        'in': "X",
        'prp-': "ADP",
        'adv-ks': "ADV",
        'dad': "NUM",
        'prosub': "PRON",
        'tel': "NUM",
        'ap': "NUM",
        'est': "NOUN",
        'cur': "X",
        'pcp': "VERB",
        'pro-ks': "PRON",
        'hor': "NUM",
        'pden': "ADV",
        'dat': "NUM",
        'kc': "ADP",
        'ks': "ADP",
        'adv-ks-rel': "ADV",
        'npro': "NOUN",
    }
    # Numeric compound tags collapse straight to NUM.
    if t in ("N|AP", "N|DAD", "N|DAT", "N|HOR", "N|TEL"):
        t = "NUM"
    if reverse:
        t = t.split("|")[0]
    else:
        if "+" in t:
            t = t.split("+")[1]
        if "|" in t:
            t = t.split("|")[1]
        if "#" in t:
            t = t.split("#")[0]
    t = t.lower()
    if t in tagdict:
        return tagdict[t]
    return "." if all(ch in punctuation for ch in t) else t
# Re-tag every mac_morpho sentence with Universal Dependencies tags.
dataset = [[(w, convert_to_universal_tag(t)) for (w, t) in sent]
           for sent in nltk.corpus.mac_morpho.tagged_sents()]
# Shuffle, then hold out 20% for evaluation.
shuffle(dataset)
cutoff = int(len(dataset) * 0.8)
train_data = dataset[:cutoff]
test_data = dataset[cutoff:]
def transform_to_dataset(tagged_sentences):
    """Flatten tagged sentences into parallel feature/label lists.

    Returns (X, y) where X[i] is the feature dict of one token and y[i]
    is its tag.
    """
    features, labels = [], []
    for sentence in tagged_sentences:
        words = [w for w, t in sentence]
        for position, (word, tag) in enumerate(sentence):
            features.append(extract_word_features(words, position))
            labels.append(tag)
    return features, labels
X, y = transform_to_dataset(train_data)
# Use only the first 10K samples
# numpy.core._exceptions.MemoryError: Unable to allocate 3.21 TiB for an
# array...  (likely caused by the dense DictVectorizer output below)
X = X[:10000]
y = y[:10000]
# Dense one-hot features feeding a linear SVM.
clf = Pipeline([
    ('vectorizer', DictVectorizer(sparse=False)),
    ('classifier', LinearSVC())
])
clf.fit(X, y)
print('Training completed')
# Evaluate on (a 10K cap of) the held-out split.
X_test, y_test = transform_to_dataset(test_data)
X_test = X_test[:10000]
y_test = y_test[:10000]
acc = clf.score(X_test, y_test)
print("Accuracy:", acc)
# Accuracy: 0.9204
MODEL_META["accuracy"] = acc
# save pickle
path = join(dirname(dirname(dirname(__file__))),
            "models", "postag", MODEL_META["model_id"] + ".pkl")
with open(path, "wb") as f:
    pickle.dump(clf, f)
# save model metadata as JSON next to the package resources
META = join(dirname(dirname(dirname(__file__))), "JarbasModelZoo", "res")
meta_path = join(META, MODEL_META["model_id"] + ".json")
with open(meta_path, "w") as f:
    json.dump(MODEL_META, f)
def pos_tag(sentence):
    """Tag *sentence* (a sequence of tokens) with the trained pipeline.

    Uses the module-level classifier `clf`; returns an iterator of
    (token, tag) pairs.
    """
    tags = clf.predict(
        [extract_word_features(sentence, index)
         for index in range(len(sentence))])
    return zip(sentence, tags)
| OpenJarbas/ModelZoo | train/postag/sklearn_macmorpho_udep_svm.py | sklearn_macmorpho_udep_svm.py | py | 3,791 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nltk.download",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.mac_morpho.tagged_sents",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "... |
20255221258 | import os
from collections import namedtuple
from pathlib import Path
import string
import random
from PIL import Image
from PIL.ImageFile import ImageFile
from werkzeug.utils import secure_filename
from configuration import SoccerConfig
Chunk = namedtuple("Chunk", "path url")
STORAGE_PATH = Path(SoccerConfig.STORAGE_PATH)
def url(filename, subdir: str) -> str:
    """Return the public static URL for *filename*, optionally under *subdir*.

    Args:
        filename: filename; an empty/None filename yields "".
        subdir: optional subdirectory.

    Returns:
        str url
    """
    if not filename:
        return ""
    parts = [SoccerConfig.STATIC_URL]
    if subdir:
        parts.append(subdir)
    parts.append(filename)
    return "/".join(parts)
def path(filename: str, subdir: str) -> Path:
    """Return the full on-disk storage path for *filename*.

    Args:
        filename: filename
        subdir: optional subdirectory under the storage root

    Returns:
        Path: full path
    """
    base = Path(SoccerConfig.STORAGE_PATH)
    return (base / subdir / filename) if subdir else (base / filename)
def save(file, subdir: str, filename=None, close_after=True) -> str:
    """Save *file* under the storage root (optionally inside *subdir*).

    Args:
        file: save-able object (e.g. werkzeug FileStorage or PIL ImageFile)
        subdir: sub directory under the storage root
        filename: requested filename; defaults to file.filename
        close_after: close the file object when done?

    Returns:
        str: the final sanitized, collision-free filename
    """
    upload_dir = Path(SoccerConfig.STORAGE_PATH)
    if subdir:
        upload_dir = upload_dir / subdir
    # make sure upload directory exists
    if not upload_dir.is_dir():
        upload_dir.mkdir(parents=True)
    # Fix: derive collision retries from the *requested* name.  The old code
    # regenerated from file.filename on collision, silently discarding a
    # caller-supplied filename.
    base_name = filename or file.filename
    filename = safe_filename(base_name)
    while (upload_dir / filename).is_file():
        filename = safe_filename(base_name)
    target = str(upload_dir / filename)
    if isinstance(file, ImageFile):
        # JPEG cannot store these modes; flatten to RGB first.
        if file.mode in ('RGBA', 'LA', '1', 'P'):
            file = file.convert("RGB")
        size = (1312, 984)  # cap on uploaded image dimensions
        if file.size[0] > size[0] or file.size[1] > size[1]:
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 —
            # switch to Image.LANCZOS if the dependency is upgraded.
            file.thumbnail(size, Image.ANTIALIAS)
        file.save(target, quality=90)
    else:
        file.save(target)
    if close_after:
        file.close()
    return filename
def safe_filename(filename, maxchar=40):
    """Sanitize *filename* and append a 10-character random suffix.

    The stem is truncated so that stem + suffix + extension fits within
    *maxchar* characters.

    Example result: [file_stem]_randomstring.ext, e.g. photo_AwKwArDsXy.jpg
    """
    stem, ext = os.path.splitext(filename)
    alphabet = string.ascii_uppercase + string.ascii_lowercase
    suffix = '_' + ''.join(random.choice(alphabet) for _ in range(10))
    keep = maxchar - len(ext) - len(suffix)
    if len(stem) > keep:
        stem = stem[:keep]
    return secure_filename("%s%s%s" % (stem, suffix, ext))
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "configuration.SoccerConfig.STORAGE_PATH",
"line_number": 15,
"usage_type": "attribute"
},
{
"... |
29489219323 | import math
from typing import Callable
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def GetInterval(start:int,end:int,step:float, fn:Callable[[float],float]) -> dict[str,list[int]]:
    """Sample fn over [start, end] and return the points as {"X": [...], "Y": [...]}.

    The number of samples is ceil((end - start) / step), spread evenly by
    np.linspace (so the spacing is only approximately *step*).
    """
    xs = np.linspace(start, end, math.ceil((end - start) / step))
    return {"X": [v for v in xs], "Y": [fn(v) for v in xs]}
def x2(i)->float:
    """Return the square of *i*."""
    return i ** 2
def x3(i) -> float:
    """Return the cube of i."""
    return i ** 3
# Sample x^2 and x^3 on [-20, 20] with unit step.
a = GetInterval(-20, 20, 1, x2)
b = GetInterval(-20, 20, 1, x3)
# Plot the two curves side by side: left axis for x^2, right axis for x^3.
fig, axis = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))
axis[0].plot(a["X"], a["Y"], color='b', linewidth=4)
axis[1].plot(b["X"], b["Y"], color='r', linewidth=4)
axis[0].set_title("f(x)= x²")
axis[1].set_title("f(x)= x³")
axis[0].grid()
axis[1].grid()
plt.show()
| Grinduim/Bosch-2022.2 | Bosch/Revisão Python - Queila/Arquivos Prova/untitled1.py | untitled1.py | py | 806 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Callable",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
... |
15651178657 | # -*- coding: utf-8 -*-
import multiprocessing
import os
def do_this(what):
    """Forward *what* to whoami(); used as the target of child processes."""
    whoami(what)
def whoami(what):
    """Print the current process id followed by the given message."""
    message = "进程 %s 说: %s" % (os.getpid(), what)
    print(message)
if __name__ == '__main__':
    # Announce the parent process, then spawn four children that each
    # report their own pid via do_this -> whoami.
    whoami('我是主程序')
    for n in range(4):
        p = multiprocessing.Process(target=do_this, args=("我是 #%s 进程" % n,))
        p.start()
| ivix-me/note-introducing-python | ch10/1003/100302/mp.py | mp.py | py | 357 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getpid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 19,
"usage_type": "call"
}
] |
33764436406 | '''
using :keyword:`with` statement
'''
from easyprocess import EasyProcess
from pyvirtualdisplay.smartdisplay import SmartDisplay
if __name__ == "__main__":
    # Start a headless virtual display, run an xmessage window inside it,
    # and wait until a screenshot containing the window can be grabbed.
    # NOTE(review): original indentation was lost in this dump; img.show()
    # is placed after both context managers here — confirm against upstream.
    with SmartDisplay(visible=0, bgcolor='black') as disp:
        with EasyProcess('xmessage hello'):
            img = disp.waitgrab()
    img.show()
| tawfiqul-islam/RM_DeepRL | venv/lib/python3.6/site-packages/pyvirtualdisplay/examples/screenshot3.py | screenshot3.py | py | 321 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "pyvirtualdisplay.smartdisplay.SmartDisplay",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "easyprocess.EasyProcess",
"line_number": 9,
"usage_type": "call"
}
] |
42233449488 | from http import HTTPStatus
from django.test import TestCase
from konekt.forms import CandidateForm, EducationFormset, ExperienceFormset, SkillsFormset
class TestForms(TestCase):
    """Validation tests for the candidate form and its related formsets."""

    def test_candidate_form(self):
        # A fully populated CandidateForm (salary may be left blank)
        # must validate.
        form = CandidateForm({
            'name': 'Pierre Dupont',
            'job': 'DevOps',
            'address': 'Paris',
            'phone': '0123465789',
            'email': 'pierre@bti.com',
            'status': 'Freelance',
            'availability': 'Sous 3 mois',
            'mobility': 'Paris',
            'price': '600',
            'salary': ''
        })
        self.assertTrue(form.is_valid())

    def instantiate_formset(self, formset_class, data, instance=None, initial=None):
        """Build a bound formset from a list of per-form data dicts.

        Translates ``data`` into the ``<prefix>-<index>-<field>`` keys that
        Django formsets expect, including the management-form counters
        (TOTAL_FORMS / INITIAL_FORMS).
        """
        prefix = formset_class().prefix
        formset_data = {}
        for i, form_data in enumerate(data):
            for name, value in form_data.items():
                if isinstance(value, list):
                    # List values become numbered sub-fields: <name>_0, <name>_1, ...
                    for j, inner in enumerate(value):
                        formset_data['{}-{}-{}_{}'.format(prefix, i, name, j)] = inner
                else:
                    formset_data['{}-{}-{}'.format(prefix, i, name)] = value
        formset_data['{}-TOTAL_FORMS'.format(prefix)] = len(data)
        formset_data['{}-INITIAL_FORMS'.format(prefix)] = 0
        if instance:
            return formset_class(formset_data, instance=instance, initial=initial)
        else:
            return formset_class(formset_data, initial=initial)

    def test_education_formset(self):
        # One education entry is enough for the formset to validate.
        formset = self.instantiate_formset(EducationFormset, [
            {
                'date': '01/01/2021',
                'name': 'Xxx',
            },
        ])
        self.assertTrue(formset.is_valid())

    def test_experiences_formset(self):
        # A single complete experience entry must validate.
        formset = self.instantiate_formset(ExperienceFormset, [
            {
                'job_title': 'DevOps',
                'company_name': 'BTI',
                'job_location': 'Levallois',
                'duration': '12 mois',
                'description': 'cicd, cloud',
                'tools': 'azure, aws, jenkins',
            },
        ])
        self.assertTrue(formset.is_valid())

    def test_skills_formset(self):
        # A single skill name must validate.
        formset = self.instantiate_formset(SkillsFormset, [
            {
                'name': 'aws',
            },
        ])
        self.assertTrue(formset.is_valid())
class AddCandidateViewTests(TestCase):
    """Smoke test for the add-candidate page."""

    def test_get(self):
        # GET must succeed and render the page heading.
        response = self.client.get("/add_candidate/")
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertContains(response, '<h1 class="title-huge">BTI Konekt - Ajouter un·e candidat·e</h1>', html=True)
| sebabert/Konekt-pub | konekt/konekt/tests.py | tests.py | py | 2,701 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "konekt.forms.CandidateForm",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "konekt.forms.EducationFormset",
"line_number": 43,
"usage_type": "argument"
},
{
"... |
23030254845 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
# Create your models here.
def user_avatar_path(user, filename: str):
    """Build the media upload path for a user's avatar image."""
    return f"avatar/user_{user.username}/{filename}"
def user_thumbnail_path(user, filename: str):
    """Build the media upload path for a user's avatar thumbnail."""
    return f"avatar/thumbnail/{user.pk}_{filename}"
class User(AbstractUser):
    """Custom user model with unique email, avatar/thumbnail images and birthday."""

    # AbstractUser's email field is not unique by default; enforce it here.
    email = models.EmailField(_('email address'), unique=True)
    # Original full-size avatar upload (optional).
    avatar = models.ImageField(
        verbose_name='Avatar',
        # width_field=100,
        # height_field=100,
        upload_to=user_avatar_path,
        null=True,
        blank=True
    )
    # Optional birthday.
    birth = models.DateField(
        verbose_name='Birthday',
        null=True,
        blank=True,
    )
    # Smaller thumbnail used as the display avatar (see avatar_url below).
    thumbnail = models.ImageField(
        verbose_name='thumbnail',
        # width_field=100,
        # height_field=100,
        upload_to=user_thumbnail_path,
        null=True,
        blank=True
    )

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = verbose_name
        # Newest users first.
        ordering = ('-id', )

    def __str__(self):
        return self.username

    @property
    def avatar_url(self):
        # Fall back to the bundled default image when no thumbnail exists.
        if not self.thumbnail:
            return '/static/image/default_avatar.png'
        else:
            return self.thumbnail.url
| blueglasses1995/VideoRecomApp | users/models.py | models.py | py | 1,341 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.EmailField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
... |
9919923837 | import logging
import json
from flask import request, jsonify;
from codeitsuisse import app;
logger = logging.getLogger(__name__)
@app.route('/encryption', methods=['POST'])
def encrypt():
    """Handle POST /encryption: apply encry() to every {"n", "text"} case.

    Expects a JSON array of test cases and returns a JSON array of the
    encrypted strings, in the same order.
    """
    data = request.get_json();
    logging.info("data sent for evaluation {}".format(data))
    result = []
    for test_case in data:
        result.append(encry(test_case["n"],test_case["text"]))
    logging.info("My result :{}".format(result))
    # json.dumps returns a plain string; Flask sends it as the response body.
    return json.dumps(result);
def encry(n, text):
    """Columnar-transposition encrypt ``text`` using ``n`` rows.

    Keeps only alphanumeric characters (uppercased), splits them into
    ``n`` nearly-equal consecutive rows (earlier rows get the extra
    characters), then reads the rows column by column.
    """
    cleaned = "".join(ch.upper() for ch in text if ch.isalnum())
    total = len(cleaned)

    # Split the cleaned text into n consecutive rows; the first
    # ``total % n`` rows are one character longer than the rest.
    base, extra = divmod(total, n)
    rows = []
    pos = 0
    for idx in range(n):
        row_len = base + (1 if idx < extra else 0)
        rows.append(cleaned[pos:pos + row_len])
        pos += row_len

    # Read column-wise: character i of the output comes from
    # row (i mod n), column (i div n).
    out = []
    for i in range(total):
        out.append(rows[i % n][i // n])
    return "".join(out)
| hlx1024/pythondemo25th | codeitsuisse/routes/secret_message.py | secret_message.py | py | 1,023 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.info",
... |
72919674913 | from django.urls import path, include
from . import views
# URL routes for the alumni API: index, student list/detail and
# student-schedule list/detail views.
urlpatterns = [
    path('', views.IndexView.as_view()),
    path('alumno/', views.AlumnoView.as_view(), name='alumnos'),
    path('alumno/<int:alumno_id>', views.AlumnoDetailView.as_view()),
    path('alumno_horario/', views.Alumno_HorarioView.as_view()),  # route correction
    path('alumno_horario/<int:alumno_horario_id>', views.Alumno_HorarioDetailView.as_view()),
]
| aaronbarra040998/avanve-proy03 | Avance API ALumnos/lab13/api/urls.py | urls.py | py | 434 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
73799501155 | #Data analaysis using Instagram
from IPython.display import Image
from IPython.display import display
from InstagramAPI import InstagramAPI
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file; never commit passwords.
username = "krisha_mehta"
# The original script rebinds the imported class name to the logged-in
# client instance; kept for compatibility with the rest of the script.
InstagramAPI = InstagramAPI(username, "prideandprejudice")
InstagramAPI.login()

InstagramAPI.getProfileData()
result = InstagramAPI.LastJson
# print(result)
# All the results are stored in Json format
# print (result['user']['biography'])

InstagramAPI.timelineFeed()
# result1 = InstagramAPI.LastJson
# print(result1[text])
# Get_posts_from_list()
# Get_url()
# image_urls = InstagramAPI.LastJson
# image_urls

import time

# Page through the authenticated user's feed until no more posts remain.
myposts = []
has_more_posts = True
max_id = ""

while has_more_posts:
    InstagramAPI.getSelfUserFeed(maxid=max_id)
    if InstagramAPI.LastJson['more_available'] is not True:
        has_more_posts = False  # stop condition
        print("stopped")
    max_id = InstagramAPI.LastJson.get('next_max_id', '')
    myposts.extend(InstagramAPI.LastJson['items'])  # merge lists
    time.sleep(2)  # Slows the script down to avoid flooding the servers

profile_pic = myposts[0]['caption']['user']['profile_pic_url']
img = Image(profile_pic)
# BUG FIX: the original called display(profile_pic), which displays the raw
# URL string while the Image object created above was never used.
display(img)
| krishamehta/InstagramAnalysis | instagram.py | instagram.py | py | 1,145 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "InstagramAPI.InstagramAPI",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "InstagramAPI.InstagramAPI.login",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "InstagramAPI.InstagramAPI",
"line_number": 8,
"usage_type": "name"
},
{
"... |
29710262008 | from datetime import datetime
from db import db
class BookCopieModel(db.Model):
    """SQLAlchemy model for a single physical copy of a book.

    Each copy records which book it belongs to, who contributed it and
    when, plus the borrow records attached to it.
    """
    __tablename__ = 'book_copie'

    id = db.Column(db.Integer, primary_key=True)
    contribution_date = db.Column(
        db.DateTime(timezone=True), default=datetime.utcnow)
    # BUG FIX: was ``book_id = contributor_user_id = db.Column(...)``, which
    # bound the same Column object to both attribute names before
    # contributor_user_id was reassigned two lines below; book_id only
    # needs its own foreign key to book.id.
    book_id = db.Column(db.Integer, db.ForeignKey('book.id'))
    book = db.relationship('BookModel')
    contributor_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    contributor_user = db.relationship('UserModel')
    borrower_list = db.relationship('BorrowModel')

    def __init__(self, book_id, contributor_user_id):
        self.book_id = book_id
        self.contributor_user_id = contributor_user_id

    def json(self):
        """Return a JSON-serializable dict view of this copy and its borrowers."""
        return {
            'copy_id': self.id,
            'book_isbn': self.book.isbn,
            'contribution_date': str(self.contribution_date),
            'contributor_user_email': self.contributor_user.email,
            'borowers': [x.json() for x in self.borrower_list]
        }

    def save_to_db(self):
        """Insert or update this row in the database."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this row from the database."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def find_by_id(cls, _id):
        """Return the copy with primary key ``_id`` or None."""
        return cls.query.filter_by(id=_id).first()
{
"api_name": "db.db.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "db.db.Column",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 8,
"u... |
8077731236 | import json
def get_historique_user(user):
    """Return the transaction history of *user* as a printable string.

    Reads ``users.json`` from the current directory.  Each matching
    history entry is rendered as ``"<date> - (<amount>)"`` on its own
    line.  If the user has no entry (or the file is missing/corrupt),
    a French "no activity" message is returned instead.
    """
    # Load existing user data from the file (if any).  A corrupt file is
    # treated the same as a missing one.
    try:
        with open('users.json', 'r') as f:
            users = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        users = []

    # Search for the user in the list.
    for u in users:
        if u['user'] == user:
            # Build the "date - (amount)" lines; strip the trailing newline.
            # (The original had an unreachable ``break`` after this return.)
            history_str = ''
            for entry in u.get('history', []):
                history_str += f"{entry['date']} - ({entry['amount']})\n"
            return history_str.strip()

    # User not found: report that the account has no activity.
    return "pas d'action sur votre compte."
| RadouaneElarfaoui/BankSystem | get_history.py | get_history.py | py | 724 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
}
] |
18558221401 | """
Implementation of a language predictor based on a transformer architecture
by the wonderful OpenNMT team at Harvard.
SOURCE: OpenNMT: Open-Source Toolkit for Neural Machine Translation
AUTHORS: Guillaume Klein
Yoon Kim
Yuntian Deng
Jean Senellart
Alexander M. Rush
EDITORS: Luke Nonas-Hunter
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.autograd import Variable
import math
import copy
import time
class Transformer(nn.Module):
    """
    A standard Encoder-Decoder transformer architecture.
    Attributes:
        encoder: Instance of the encoder module.
        decoder: Instance of the decoder module.
        src_embed: Instance of embeddings module for the source data.
        tgt_embed: Instance of embeddings module for the target data.
        generator: Instance of generator module.
    """
    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        """
        Instantiate the model.
        """
        super(Transformer, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        Args:
            src: Source data tensor.
            tgt: Target data tensor.
            src_mask: Boolean tensor illustrating what source data should be
                examined at any given timestep.
            tgt_mask: Boolean tensor illustrating what target data should be
                examined at any given timestep.
        Returns:
            Output of the decode element given the output of the encode element
            and the target data (or memory).
        """
        return self.decode(self.encode(src, src_mask),
                           src_mask,
                           tgt,
                           tgt_mask)

    def encode(self, src, src_mask):
        """
        Run values through the encoder module.
        Args:
            src: Source tensor to run through encoder module.
            src_mask: Boolean tensor indicating what data should be examined
                at any given timestep.
        """
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        """
        Run values through the decoder.
        Args:
            memory: Data tensor that contains previous words predicted by the
                model.
            src_mask: Boolean tensor indicating what source data should be
                examined at any given timestep.
            tgt: Target data tensor.
            tgt_mask: Boolean tensor indicating what target data should be
                examined at any given timestep.
        """
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)

    def generate_text(self, tokenized_prompt, vocab):
        """
        Generate text given a text prompt and vocabulary.
        Args:
            tokenized_prompt: List of ordered words from the input sentence.
            vocab: Instance of vocabulary object containing all vocabulary from
                the dataset the model was trained on.
        """
        # 510 tokens + <SOS> + <EOS> fills the 512-slot padded input below.
        assert len(tokenized_prompt) <= 510
        numerical_prompt = [torch.tensor([vocab.stoi["<SOS>"]]
                                         + vocab.numericalize(tokenized_prompt,
                                                              tokenize=False)
                                         + [vocab.stoi["<EOS>"]]),
                            torch.tensor([0 for _
                                          in range(512)])]
        # The dummy 512-long zero tensor forces pad_sequence to pad the
        # prompt out to length 512.
        numerical_prompt = pad_sequence(numerical_prompt,
                                        batch_first=False,
                                        padding_value=0)
        numerical_prompt = numerical_prompt.transpose(0, 1)
        # Mask out padding positions for the encoder.
        mask = (numerical_prompt[0] != vocab.stoi["<PAD>"]).unsqueeze(-2)
        model_out = self.greedy_decode(self,
                                       numerical_prompt[0], mask,
                                       max_len=256,
                                       start_symbol=vocab.stoi["<SOS>"],
                                       end_symbol=vocab.stoi["<EOS>"])
        # Convert the generated indices back to words, stopping at <EOS>
        # and skipping the leading <SOS> token at position 0.
        text = ""
        for i in range(1, model_out.size(1)):
            sym = vocab.itos[model_out[0, i].item()]
            if sym == "<EOS>":
                break
            text += f"{sym} "
        return text

    @staticmethod
    def clones(module, N):
        """
        Helper: Produce N identical layers.
        Args:
            module: Module to be duplicated.
            N: Integer number of models to duplicate.
        Returns:
            N identical layers (deep copies, so weights are independent).
        """
        return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

    @staticmethod
    def subsequent_mask(size):
        """
        Helper: Mask out subsequent positions
        Args:
            size: Size of model.
        Returns:
            Tensor representing new mask (lower-triangular: each position
            may only attend to itself and earlier positions).
        """
        attn_shape = (1, size, size)
        subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
        return torch.from_numpy(subsequent_mask) == 0

    @staticmethod
    def make_model(src_vocab, tgt_vocab, N=6,
                   d_model=512, d_ff=2048, h=8, dropout=0.1):
        """
        Helper: Construct a model from hyperparameters.
        d_model and d_ff must be divisable by h
        Args:
            src_vocab: Integer number of vocabulary words used in the source
                sentences.
            tgt_vocab: Integer number of vocabulary words used in the target
                sentences.
            N: Integer of decode and encode layers.
            d_model: Integer size of model (determines length of input).
            d_ff: Integer size of the feed forward network.
            h: The number of attention heads.
            dropout: Float representing the rate of node deactivation.
        Return:
            An instance of the Transformer class based on the structure
            outlined by the parameters of the function.
        """
        c = copy.deepcopy
        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        model = Transformer(
            Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
            Decoder(DecoderLayer(d_model, c(attn), c(attn),
                                 c(ff), dropout), N),
            nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
            nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
            Generator(d_model, tgt_vocab))
        # Initialize parameters with Glorot / fan_avg.
        for p in model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        return model

    @staticmethod
    def attention(query, key, value, mask=None, dropout=None):
        """
        Helper: Compute 'Scaled Dot Product Attention'
        Args:
            query: Tensor containing query data.
            key: Tensor containing key data.
            value: Tensor containing value data.
            mask: Boolean tensor illustrating what data should be examined.
            dropout: Float representing the rate of node deactivation.
        Return:
            An output tensor of a single attention head.
        """
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) \
            / math.sqrt(d_k)
        if mask is not None:
            # Masked positions get a large negative score so softmax ~ 0.
            scores = scores.masked_fill(mask == 0, -1e9)
        p_attn = F.softmax(scores, dim=-1)
        if dropout is not None:
            p_attn = dropout(p_attn)
        return torch.matmul(p_attn, value), p_attn

    @staticmethod
    def greedy_decode(model, src, src_mask, max_len,
                      start_symbol, end_symbol=None):
        """
        Helper: Calculate the best word given the model output.
        Args:
            model: Instance of the Transformer class.
            src: Source data (also refered to as the prompt).
            src_mask: Boolean list illustrating what data should be examined.
            max_len: Maximum tokens in output
            start_symbol: Token indicating start of sentence.
            end_symbol: Token indicating end of sentence.
        Return:
            A tensor containing the index representation of the output
            sentence generated by the model given the original prompt.
        """
        memory = model.encode(src, src_mask)
        ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
        for i in range(max_len - 1):
            # Decode with a causal mask over everything generated so far.
            out = model.decode(memory, src_mask,
                               Variable(ys),
                               Variable(Transformer.subsequent_mask(ys.size(1))
                                        .type_as(src.data)))
            prob = model.generator(out[:, -1])
            # Greedy step: pick the single highest-probability token.
            _, next_word = torch.max(prob, dim=1)
            next_word = next_word.data[0]
            ys = torch.cat([ys,
                            torch.ones(1, 1)
                            .type_as(src.data)
                            .fill_(next_word)], dim=1)
            if end_symbol is not None and next_word == end_symbol:
                break
        return ys
class Generator(nn.Module):
    """
    Final generation step: linear projection followed by log-softmax.
    Attributes:
        proj: Linear layer mapping the decoder output onto one logit per
            vocabulary word.
    """

    def __init__(self, d_model, vocab):
        """
        Build the projection from model dimension to vocabulary size.
        Args:
            d_model: Integer dimension of model input.
            vocab: Integer size of vocabulary
        """
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        """
        Project ``x`` onto vocabulary logits and return log-probabilities.
        Args:
            x: The data to be passed through the network.
        Returns:
            Log-softmax of the projected logits over the last dimension.
        """
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)
class Encoder(nn.Module):
    """
    Core encoder is a stack of N layers.
    Attributes:
        layers: List of encoder layers which make up the module.
        norm: Instance of LayerNorm.
    """
    def __init__(self, layer, N):
        """
        Instantiate encoder module.
        Args:
            layer: Instance of EncoderLayer to be duplicated.
            N: Integer of times to duplicate the given layer.
        """
        super(Encoder, self).__init__()
        self.layers = Transformer.clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """
        Pass the input (and mask) through each layer in turn.
        Args:
            x: Source data tensor.
            mask: Boolean tensor illustrating what data should be examined.
        Returns:
            Output of all the encoder layers.
        """
        # Feed each layer's output into the next, then normalize once.
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)
class LayerNorm(nn.Module):
    """
    Layer normalization with learnable scale and shift.
    Attributes:
        a_2: Learnable per-feature scale applied after normalization.
        b_2: Learnable per-feature shift applied after normalization.
        eps: Small constant guarding against division by zero.
    """

    def __init__(self, features, eps=1e-6):
        """
        Create a LayerNorm over the given number of features.
        Args:
            features: Integer number of features in layer.
            eps: Floating point stabilizer added to the denominator.
        """
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        """
        Normalize ``x`` over its last dimension, then rescale and shift.
        Args:
            x: Source data tensor.
        Returns:
            Normalized data.
        """
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
class SublayerConnection(nn.Module):
    """
    Residual (skip) connection applied around a sublayer, pre-norm variant.
    Note for code simplicity the norm is applied first, not last.
    Attributes:
        norm: LayerNorm applied to the input before the sublayer.
        dropout: Dropout applied to the sublayer output.
    """

    def __init__(self, size, dropout):
        """
        Initialize a sublayer connection.
        Args:
            size: Size of the layer.
            dropout: Floating point representation of dropout rate.
        """
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """
        Return ``x`` plus the (dropped-out) sublayer applied to norm(x).
        Args:
            x: Source tensor.
            sublayer: Callable applied to the normalized input.
        """
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual
class EncoderLayer(nn.Module):
    """
    Assemble an encoder layer for use inside an encoder.
    Attributes:
        self_attn: Instance of the MultiHeadedAttention class.
        feed_forward: Linear feed forward neural network.
        sublayer: List of SublayerConnection modules which normalize the data.
        size: Size of the model.
    """
    def __init__(self, size, self_attn, feed_forward, dropout):
        """
        Instantiate an encoder layer.
        Args:
            size: Integer size of the layer.
            self_attn: Instance of the MultiHeadedAttention class.
            feed_forward: Linear feed forward neural network.
            dropout: Floating point representation of dropout rate.
        """
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = Transformer.clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        """
        Feed data through layer.
        Args:
            x: Source data tensor.
            mask: Boolean tensor illustrating what data should be examined.
        """
        # Sublayer 0 wraps self-attention; sublayer 1 wraps the feed-forward.
        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
        return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
    """
    Generic N layer decoder with masking.
    Attributes:
        layers: List of decoder layers which make up the module.
        norm: Instance of LayerNorm.
    """
    def __init__(self, layer, N):
        """
        Instantiate a decoder module.
        Args:
            layer: Instance of DecoderLayer to be duplicated.
            N: Integer of times to duplicate the given layer.
        """
        super(Decoder, self).__init__()
        self.layers = Transformer.clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        """
        Feed data through the decoder.
        This includes running the data through all the decoder layers and the
        normalization layer.
        Args:
            x: Source data tensor.
            memory: Tensor containing words generated by the model so far.
            src_mask: Boolean tensor illustrating what source data should be
                examined.
            tgt_mask: Boolean tensor illustrating what memory data should be
                examined.
        Returns:
            Output from the decoder.
        """
        # Feed each layer's output into the next, then normalize once.
        for layer in self.layers:
            x = layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)
class DecoderLayer(nn.Module):
    """
    Decoder layer is made of self-attn, src-attn, and feed forward.
    Attributes:
        size: Integer dimension of model.
        self_attn: Instance of the MultiHeadedAttention class.
        src_attn: Instance of the MultiHeadedAttention class.
        feed_forward: Linear feed forward neural network.
        sublayer: List of connecting sublayers.
    """
    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        """
        Instantiate a decoder layer.
        Args:
            size: Integer dimension of model.
            self_attn: Instance of the MultiHeadedAttention class.
            src_attn: Instance of the MultiHeadedAttention class.
            feed_forward: Linear feed forward neural network.
            dropout: Floating point representation of dropout rate.
        """
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = Transformer.clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """
        Feeds data through decoder layer.
        Args:
            x: Source data tensor.
            memory: Tensor containing words generated by the model so far.
            src_mask: Boolean tensor illustrating what source data should be
                examined.
            tgt_mask: Boolean tensor illustrating what memory data should be
                examined.
        Returns:
            Output of a single decoder layer.
        """
        m = memory
        # Sublayer 0: masked self-attention over the target sequence.
        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
        # Sublayer 1: cross-attention over the encoder memory.
        x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
        # Sublayer 2: position-wise feed-forward network.
        return self.sublayer[2](x, self.feed_forward)
class MultiHeadedAttention(nn.Module):
    """
    Combines multiple attention functions together into one layer.
    Attributes:
        d_k: Integer dimension of query, key, and value vectors.
        h: Integer number of attention heads.
        linears: List of linear feed forward layers.
        attn: Function representing a single attention head.
    """
    def __init__(self, h, d_model, dropout=0.1):
        """
        Take in model size and number of heads and instantiate
        MultiHeadedAttention object.
        Args:
            h: Integer number of attention heads.
            d_model: Integer dimension of model (must be divisible by h).
            dropout: Floating point representation of dropout rate.
        """
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # Four projections: query, key, value and the final output linear.
        self.linears = Transformer.clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """
        Feed data through a multiheaded attention layer.
        Args:
            query: Data tensor of query words.
            key: Data tensor of key words.
            value: Data tensor of value words.
            mask: Boolean tensor which indicates which data should be excluded.
        Returns:
            Output of multiheaded attention layer.
        """
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)

        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = \
            [linear(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for linear, x in zip(self.linears, (query, key, value))]

        # 2) Apply attention on all the projected vectors in batch
        x, self.attn = Transformer.attention(query, key, value, mask=mask,
                                             dropout=self.dropout)

        # 3) "Concat" using a view and apply a final linear
        x = x.transpose(1, 2).contiguous() \
            .view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
    """
    Position-wise feed-forward network: linear -> ReLU -> dropout -> linear.
    Attributes:
        w_1: First linear layer (d_model -> d_ff).
        w_2: Second linear layer (d_ff -> d_model).
        dropout: Dropout applied between the two linear layers.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """
        Build the two linear layers and the dropout between them.
        Args:
            d_model: Integer dimension of the input/output layers.
            d_ff: Integer dimension of the hidden layer.
            dropout: Floating point representation of dropout rate.
        """
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Apply ``w_2(dropout(relu(w_1(x))))`` and return the result.
        Args:
            x: Source data tensor.
        Returns:
            Output of the feed forward network.
        """
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class Embeddings(nn.Module):
    """
    Token-embedding lookup scaled by sqrt(d_model).
    Attributes:
        lut: nn.Embedding lookup table of shape (vocab, d_model).
        d_model: Integer dimension of the model.
    """

    def __init__(self, d_model, vocab):
        """
        Instantiate an embedding module.
        Args:
            d_model: Integer dimension of the model.
            vocab: Integer number of vocabulary words.
        """
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        """
        Look up embeddings for ``x`` and scale them by sqrt(d_model).
        Args:
            x: Source data tensor of token indices.
        Returns:
            Scaled embedding tensor.
        """
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    """
    Implement the PE function.
    Attributes:
        dropout: Dropout function defined by pytorch.
    """
    def __init__(self, d_model, dropout, max_len=5000):
        """
        Instantiate positional encoding module.
        Args:
            d_model: Integer dimension of model.
            dropout: Floating point representation of dropout rate.
            max_len: Maximum position value.
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        # Even feature indices get sine, odd indices get cosine.
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        # Registered as a buffer: saved with the model but not trainable.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """
        Feed data through the positional encoding layer.
        Args:
            x: Source data tensor.
        Returns:
            The source data now encoded with each words relative position in
            the sentence.
        """
        # Add the (fixed) encoding for the first x.size(1) positions.
        x = x + Variable(self.pe[:, :x.size(1)],
                         requires_grad=False)
        return self.dropout(x)
if __name__ == "__main__":
    # Smoke test: build a small 2-layer model with 10-word vocabularies.
    tmp_model = Transformer.make_model(10, 10, 2)
| nonas-hunter/adventure-generator | transformer.py | transformer.py | py | 23,533 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_... |
26465804587 | ##### Sales Prediction with Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.float_format", lambda x: "%.2f" % x)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, cross_val_score
##### Simple Linear Regression with OLS Using Scikit-Learn
df = pd.read_csv("advertising.csv")
print(df.shape)
print(df.describe().T)

##### Model
X = df[["TV"]]
y = df[["sales"]]
reg_model = LinearRegression().fit(X, y)
## b + w*TV
# TV coefficient (w)
print(reg_model.coef_[0][0])
# intercept (b)
print(reg_model.intercept_[0])

##### Prediction
# what would sales be for 150 units of TV spending?
print(reg_model.intercept_[0] + reg_model.coef_[0][0] * 150)

## visualising the model
vis = sns.regplot(x=X, y=y, scatter_kws={"color": "g", "s": 9},
                  ci=False, color="b")  # model predictions in blue, actual values in green
vis.set_title(f"Model Denklemi: Sales = {round(reg_model.intercept_[0], 2)} + TV*{round(reg_model.coef_[0][0], 2)}")
vis.set_ylabel("Satıs Sayısı")
vis.set_xlabel("TV Harcamaları")
plt.xlim(-10, 310)
plt.ylim(bottom=0)
plt.show()

##### Prediction accuracy
# MSE
y_pred = reg_model.predict(X)  # predicted values
mean_squared_error(y, y_pred)  # parameters: actual values and predicted values
y.mean()  # mean of the actual dependent variable
y.std()  # std of the actual dependent variable; values range roughly 9-19

# RMSE
np.sqrt(mean_squared_error(y, y_pred))
# MAE
mean_absolute_error(y, y_pred)
# R-SQUARED: the share of the dependent variable explained by the independent variables
print(reg_model.score(X, y))

##### *R-squared can inflate as the number of variables grows, so adjusted R-squared should also be considered.
###### *We are not concerned with coefficient/model significance here; we care about high predictive accuracy via optimization, i.e. predicting in linear form.
##### *For advanced regression problems we will mostly use tree-based methods rather than linear ones.
##### Multiple Linear Regression
X = df.drop("sales", axis=1)
y = df[["sales"]]

##### Model
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
print(X_test.shape)
print(y_train.shape)
reg_model = LinearRegression().fit(X_train, y_train)
# b
print(reg_model.intercept_[0])
# w
print(reg_model.coef_[0])

##### Prediction
# regression output
print(reg_model.intercept_[0] + reg_model.coef_[0][0] * 30 + reg_model.coef_[0][1] * 10 + reg_model.coef_[0][2] * 40)
veri_deneme = [[30], [10], [40]]
veri_deneme = pd.DataFrame(veri_deneme).T
reg_model.predict(veri_deneme)  # prediction

###### Prediction accuracy
y_pred = reg_model.predict(X_train)
np.sqrt(mean_squared_error(y_train, y_pred))  # train RMSE
reg_model.score(X_train, y_train)  # train R-squared
y_pred = reg_model.predict(X_test)
np.sqrt(mean_squared_error(y_test, y_pred))  # test RMSE
reg_model.score(X_test, y_test)  # test R-squared
# 10-fold CV RMSE
np.mean(np.sqrt(-cross_val_score(reg_model, X, y, cv=10, scoring="neg_mean_squared_error")))
# the minus before cross_val_score is because it returns negative errors; multiplying by -1 yields positive values
# since the dataset is small, cross-validation may be the more reliable estimate
### BONUS
##### Simple Linear Regression with Gradient Descent from Scratch
# Cost function MSE
def cost_function(Y, b, w, X):
    """Mean squared error of the linear model y = b + w*x over all observations."""
    n = len(Y)
    squared_error_total = 0
    for idx in range(n):
        prediction = b + w * X[idx]  # model output for observation idx
        squared_error_total += (Y[idx] - prediction) ** 2
    return squared_error_total / n
# gradient descent
def update_weights(Y, b, w, X, learning_rate):
    """One batch gradient-descent step; returns the updated (b, w) pair."""
    n = len(Y)
    grad_b = 0
    grad_w = 0
    for idx in range(n):
        residual = (b + w * X[idx]) - Y[idx]  # prediction error for observation idx
        grad_b += residual           # unscaled partial derivative w.r.t. b
        grad_w += residual * X[idx]  # unscaled partial derivative w.r.t. w
    new_b = b - (learning_rate * grad_b * 1 / n)
    new_w = w - (learning_rate * grad_w * 1 / n)
    return new_b, new_w
# train fonksiyonu
def train(Y, initial_b, initial_w, X, learning_rate, num_iters):
    """Run gradient descent for num_iters steps.

    Returns (cost history per iteration, final b, final w)."""
    print('b:{} w:{} MSE:{}'.format(initial_b, initial_w, cost_function(Y, initial_b, initial_w, X)))
    b, w = initial_b, initial_w
    cost_h = []
    for step in range(num_iters):
        b, w = update_weights(Y, b, w, X, learning_rate)
        current_mse = cost_function(Y, b, w, X)
        cost_h.append(current_mse)
        # progress report every 500 iterations
        if step % 500 == 0:
            print('iter:{} b:{:.2f}, w:{:.2f}, MSE:{:.2f}'.format(step, b, w, current_mse))
    print('after {} iterations b: {:.2f}, w: {:.2f}, MSE: {:.2f}'.format(num_iters, b, w, cost_function(Y, b, w, X)))
    return cost_h, b, w
# Fit simple linear regression (sales ~ radio) by hand-written gradient descent.
df = pd.read_csv('advertising.csv')
X = df['radio']
Y = df['sales']
# hyperparameters
initial_b = 0.001  # starting intercept
initial_w = 0.001  # starting slope
learning_rate = 0.001  # gradient-descent step size
num_iters = 10000  # number of iterations
cost_h, b, w = train(Y, initial_b, initial_w, X, learning_rate, num_iters)
| seymagkts/machine_learning | Module_4/linear_reg_exercises.py | linear_reg_exercises.py | py | 5,376 | python | tr | code | 0 | github-code | 1 | [
{
"api_name": "pandas.set_option",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 27,
"usage_type": "call"
},
{
"api_name":... |
9348809940 | from flask import Flask, request, jsonify, make_response
from functools import lru_cache
import json
app = Flask(__name__)
"""
comma_separtaed_params_to_list splits string when comma occurs and append splitted tokens to a list
input_str: input string
Returns: a list of tokens separated by commas
"""
def comma_separated_params_to_list(input_str: str) -> list:
result = []
for val in input_str.split(','):
if val:
result.append(val)
return result
@app.route('/api/ping')
def get_ping():
    """Health-check endpoint: always responds 200 with {"success": true}."""
    payload = jsonify({'success': True})
    return make_response(payload, 200)
class Posts:
    """File-backed store of blog posts plus pure helpers for querying them."""

    # Path of the JSON file holding {"posts": [...]}. Attribute name kept
    # (typo included) for backward compatibility with existing callers.
    JSON_FIlE = "data.json"

    @classmethod
    @lru_cache()
    def load(cls):
        """Return the list of posts from JSON_FIlE (cached until write())."""
        with open(cls.JSON_FIlE, "r") as postsFile:
            return json.load(postsFile).get("posts", [])

    @classmethod
    def write(cls, posts: list):
        """Persist *posts* to JSON_FIlE and invalidate the load() cache."""
        with open(cls.JSON_FIlE, "w") as postsFile:
            json.dump({"posts": posts}, postsFile, indent=2)
        # Bug fix: without clearing the cache, load() kept returning the
        # stale cached list after the file had been rewritten.
        cls.load.cache_clear()

    @classmethod
    def get_by_id(cls, posts: list, post_id: str):
        """Return the first post whose "id" equals *post_id*, or None."""
        return next(filter(lambda pos: pos.get("id") == post_id, posts), None)

    @classmethod
    def get_by_tag(cls, posts: list, post_tags: list) -> list:
        """Return posts having at least one tag in *post_tags* (input order kept)."""
        ret = []
        for post in posts:
            for tag in post_tags:
                if tag in post.get("tags"):
                    ret.append(post)
                    break  # a post is included at most once
        return ret

    @classmethod
    def sort_by_param(cls, posts: list, param: str, reverse: bool) -> list:
        """Return *posts* sorted by the key *param* (descending if *reverse*)."""
        return sorted(posts, key=lambda k: k[param], reverse=reverse)
@app.route('/api/posts', methods=['GET', 'POST'])
def get_post_posts():
    """POST: add a new post (409-style 400 on duplicate id).
    GET: return posts filtered by tags and sorted by sortBy/direction."""
    if request.method == "POST":
        print("POST")
        data = request.get_json()
        posts = Posts.load()
        existing_post = Posts.get_by_id(posts, data.get("id"))
        if existing_post:
            return {"error": "Post with same id already exists"}, 400
        posts.append(data)
        Posts.write(posts)
        return "", 201
    # Only GET remains (the route accepts GET and POST exclusively).
    print("GET")
    # tags may arrive as repeated query params or one comma-separated value
    post_tag = request.args.getlist('tags') or request.form.getlist('tags')
    if len(post_tag) == 1 and ',' in post_tag[0]:
        post_tag = comma_separated_params_to_list(post_tag[0])
    sort_by = request.args.get('sortBy', default="id")
    direction = request.args.get('direction', default="asc")
    if not post_tag:
        return {"error": "Tags parameter is required"}, 400
    if sort_by not in ["id", "reads", "likes", "popularity"]:
        return {"error": "sortBy parameter is invalid"}, 400
    if direction not in ["asc", "desc"]:
        return {"error": "direction parameter is invalid"}, 400
    reverse = direction != "asc"
    posts = Posts.load()
    post = Posts.get_by_tag(posts, post_tag)
    post = Posts.sort_by_param(post, sort_by, reverse)
    # Bug fix: the original fell through to "return None" when no post
    # matched, which made Flask raise a 500; return an empty list instead.
    return {"posts": post}
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=80)
| johnlgtmchung/flask_api_practice | app.py | app.py | py | 3,142 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_nu... |
2957910804 | #!/usr/bin/python3
import sys
from q import run, run_text
# No CLI arguments: interactive REPL; one argument: run that source file.
if len(sys.argv) == 1:
    while True:
        text = input('> ')
        if text == 'exit': break  # sentinel word that leaves the REPL
        print(run_text(text))
elif len(sys.argv) == 2:
    run(sys.argv[1])
else:
print('Usage: q [file]') | nirvanasupermind/qlang | build/lib/cli.py | cli.py | py | 277 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "q.run_text",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "q.run",
"line_number": 13,
... |
6055001780 | import glob
import os
import json
import re
import tqdm
import pickle
import pandas as pd
import arguments as args
# Scope files to process; create the co-occurrence output directory up front.
files = glob.glob(os.path.join(args.scopes_dir, '*.scopes.json'))
os.makedirs(args.cooc_dir, exist_ok=True)
def str_normalize(value):
    """Collapse whitespace runs to single spaces and lowercase the result."""
    collapsed = re.sub(r'\s+', ' ', value)
    return collapsed.lower()
def get_item_string_by_coveredtext(scope, item):
    """Extract the text covered by *item*'s begin/end offsets and normalize it."""
    props = item['properties']
    covered = scope['text'][props['begin']:props['end']]
    return str_normalize(covered)
def get_item_string(scope, item, prop):
    """Return the item's *prop* property when given, else its normalized covered text."""
    if prop:
        return item['properties'][prop]
    return get_item_string_by_coveredtext(scope, item)
# Map each ordered item-string pair -> list of (file_id, scope_id) occurrences.
cooccurrences = {}
for file in tqdm.tqdm(files):
    file_id = os.path.basename(file).split('.')[0]
    with open(file, encoding='utf-8') as f:
        data = json.load(f)
    scopes = [d for d in data if d['type'] == args.scope_type]
    for scope_id, scope in enumerate(scopes):
        # Optionally keep only scopes that contain at least one event annotation.
        if args.event_type and not any([a for a in scope.get('sub', []) if a['type'] == args.event_type]):
            continue
        # Items sorted by text position so pair order reflects reading order.
        items = sorted([a for a in scope.get('sub', []) if a['type'] == args.item_type], key=lambda a: a['properties']['begin'])
        cooccurrences_ = set()
        # Each ordered pair is counted at most once per scope (set dedup).
        for i in range(len(items) - 1):
            for j in range(i + 1, len(items)):
                cooc = tuple(get_item_string(scope, items[i_], args.item_property) for i_ in [i, j])
                cooccurrences_.add(cooc)
        for cooc in cooccurrences_:
            cooccurrences.setdefault(cooc, []).append((file_id, scope_id))
# save cooccurrences with scopes ids
with open(os.path.join(args.cooc_dir, 'cooccurrences.scope_ids.pkl'), mode='wb') as f:
    pickle.dump(cooccurrences, f)
# build dataframe
# For each unordered pair, count forward (a before b) vs backward occurrences.
processed = set()
data = []
for a, b in cooccurrences:
    if (b, a) not in processed:
        forward_count = len(cooccurrences[(a, b)])
        backward_count = len(cooccurrences.get((b, a), []))
        total_count = forward_count + backward_count
        ratio = forward_count / total_count  # share of scopes where a precedes b
        data.append((a, b, forward_count, backward_count, total_count, ratio))
        processed.add((a, b))
df = pd.DataFrame(data, columns=['item_left', 'item_right', 'forward_count', 'backward_count', 'total_count', 'ratio'])
df.to_csv(os.path.join(args.cooc_dir, 'cooccurrences.csv'))
| ewoij/cooccurrences-graph | 02_build_cooccurrences.py | 02_build_cooccurrences.py | py | 2,271 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "arguments.scopes_dir",
"line_... |
12826388315 | import numpy as np
import h5py as hp
import sys
# Input file suffixes come from the command line; 'process' tags the output file.
first = sys.argv[1]
second = sys.argv[2]
third= sys.argv[3]
fourth = sys.argv[4]
process = sys.argv[5]
# Shape of the full particle grid, used for zero-filled fallbacks.
grid = (2048,2048,2048)
def get_mass(path):
    """Load the "mass" dataset from the HDF5 file at *path* as float32.

    Falls back to an all-zero grid when the file is missing or has no
    "mass" dataset, so downstream accumulation can proceed.
    """
    try:
        f = hp.File(path, 'r')
    except IOError:
        print('files not found')
        return np.zeros(grid, dtype=np.float32)
    try:
        mass = f["mass"][:].astype(np.float32)
    except KeyError:
        print('mass field not found - creating substitute')
        return np.zeros(grid, dtype=np.float32)
    else:
        print('correctly found the mass')
        return mass
    finally:
        # Bug fix: the original never closed the HDF5 handle, leaking it.
        f.close()
# Accumulate the mass grids of four sub-files into one array, freeing each
# partial grid as soon as it has been added to keep peak memory down.
total = get_mass('subtotal4_ptl_'+first+'.hdf5')
print(sys.getsizeof(total))
sf = get_mass('subtotal4_ptl_'+second+'.hdf5')
total = np.add(total,sf,dtype=np.float32)
del sf
sf = get_mass('subtotal4_ptl_'+third+'.hdf5')
total = np.add(total,sf, dtype=np.float32)
del sf
# NOTE(review): 'fourth' from argv is never used -- this hard-coded filename
# looks like it should be 'subtotal4_ptl_'+fourth+'.hdf5'; confirm intent.
sf = get_mass('subtotal3_ptl_13.hdf5')
total = np.add(total,sf,dtype=np.float32)
del sf
print('finished adding, new file:')
print(sys.getsizeof(total))
w = hp.File('final_ptl_'+process+'.hdf5','w')
w.create_dataset("mass",data=total)
# NOTE(review): the output file handle 'w' is never closed; add w.close().
#is this working | calvinosinga/HIColor | previous_versions/combine_fields.py | combine_fields.py | py | 1,200 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": ... |
10856186589 | from collections import Counter
def MostPopularNumbers(array, size):
    """Among the *size* most frequent values of *array*, return the
    lexicographically smallest (value, count) tuple."""
    top = Counter(array).most_common(size)
    return min(top)
a = [1, 1, 2, 2, 3, 4, 5, 6]
# Bug fix: the original passed the bound method `a.count` as the size, which
# makes Counter.most_common() raise a TypeError; pass the list length instead.
MostPopularNumbers(a, len(a))
| luizvictorPa/intro-to-computer-science-with-python | testes_extras/ex3.py | ex3.py | py | 213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 4,
"usage_type": "call"
}
] |
40057309346 | from __future__ import print_function
import os,sys,inspect
from termcolor import colored
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import models as Models
import global_vars as Global
from utils.args import args
import categories.classifier_setup as CLSetup
import categories.kway_logistic_setup as KLogisticSetup
import categories.deep_ensemble_setup as DeepEnsembleSetup
import categories.ae_setup as AESetup
import categories.pixelcnn_setup as PCNNSetup
# Warn when this setup script runs under an experiment name other than 'model_ref'.
if args.exp != 'model_ref':
    print(colored('The exp is NOT model_ref!', 'yellow'))
def needs_processing(args, dataset_class, models, suffix):
    """
    This function checks whether this model is already trained and can be skipped.
    """
    for model in models:
        for suf in suffix:
            ref_path = Models.get_ref_model_path(args, model.__name__, dataset_class.__name__, model_setup=True, suffix_str=suf)
            done_marker = os.path.join(ref_path, 'model.best.pth.done')
            # A missing completion marker means at least one (model, suffix)
            # combination still needs training.
            if not os.path.isfile(done_marker):
                return True
    return False
task_list = [
    # The list of models, The function that does the training, Can I skip-test?, suffix of the operation.
    # The procedures that can be skip-test are the ones that we can determine
    # whether we have done them before without instantiating the network architecture or dataset.
    # saves quite a lot of time when possible.
    (Global.dataset_reference_classifiers, CLSetup.train_classifier, True, ['base']),
    (Global.dataset_reference_classifiers, KLogisticSetup.train_classifier, True, ['KLogistic']),
    (Global.dataset_reference_classifiers, DeepEnsembleSetup.train_classifier, True, ['DE.%d'%i for i in range(5)]),
    (Global.dataset_reference_autoencoders, AESetup.train_BCE_AE, False, []),
    (Global.dataset_reference_autoencoders, AESetup.train_MSE_AE, False, []),
    (Global.dataset_reference_vaes, AESetup.train_variational_autoencoder, False, []),
    (Global.dataset_reference_pcnns, PCNNSetup.train_pixelcnn, False, []),
]
# Do a for loop to run the training tasks.
for task_id, (ref_list, train_func, skippable, suffix) in enumerate(task_list):
    target_datasets = ref_list.keys()
    print('Processing %d datasets.'%len(target_datasets))
    for dataset in target_datasets:
        print('Processing dataset %s with %d networks for %d-%s.'%(colored(dataset, 'green'), len(ref_list[dataset]), task_id, colored(train_func.__name__, 'blue')))
        # Cheap skip: don't instantiate the dataset when all checkpoints exist.
        if skippable and not needs_processing(args, Global.all_datasets[dataset], ref_list[dataset], suffix=suffix):
            print(colored('Skipped', 'yellow'))
            continue
        ds = Global.all_datasets[dataset]()  # instantiate the dataset only when needed
        for model in ref_list[dataset]:
            model = model()
            print('Training %s'%(colored(model.__class__.__name__, 'blue')))
            train_func(args, model, ds.get_D1_train())
| ashafaei/OD-test | setup/model_setup.py | model_setup.py | py | 3,061 | python | en | code | 61 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "inspect.getfile",
"line... |
36270372978 | # -*- coding: utf-8 -*-
'''从自己选出的优质股中,读取excel值,查看每个股票的K图,和证券公司的研报,筛选出好的股票存入excel'''
import time
import pandas as pd
from selenium import webdriver
from openpyxl import load_workbook
class ShowK:
    """Interactive stock screener.

    Reads candidate stock codes from a spreadsheet, opens each code's
    daily/weekly/monthly K-line charts and research reports in a browser,
    asks the user whether to keep the stock, and writes the accepted codes
    back to the spreadsheet.
    """
    def __init__(self):
        # Candidate codes read from the 'before_check_k' sheet.
        self.share_list0 = []
        # Codes the user accepted; written to the 'after_check_k' sheet.
        self.share_list1 = []
    def read_from_excel(self):
        """Fill share_list0 with every value of column A of 'before_check_k'."""
        wb = load_workbook("C:\\Users\\Zhouxiong\\OneDrive\\桌面\\share_of_fund.xlsx")
        ws = wb.get_sheet_by_name('before_check_k')
        first_column = ws['A']
        for x in range(len(first_column)):
            print(first_column[x].value)
            self.share_list0.append(first_column[x].value)
    @staticmethod
    def save_to_excel(share_list1):
        """Append *share_list1* as a new column of the 'after_check_k' sheet."""
        wb = load_workbook('C:\\Users\\Zhouxiong\\OneDrive\\桌面\\share_of_fund.xlsx')
        ws = wb.get_sheet_by_name('after_check_k')
        j = ws.max_column  # write into the first free column
        for z in range(len(share_list1)):
            ws.cell(row=z+1, column=j+1).value = share_list1[z]
        wb.save("C:\\Users\\Zhouxiong\\OneDrive\\桌面\\share_of_fund.xlsx")
    def begin(self):
        """Drive the review loop: open pages, collect y/n answers, save results."""
        driver = webdriver.Chrome(r'C:\Users\Zhouxiong\AppData\Local\Google\Chrome\Application\chromedriver.exe')  # launch Chrome
        num = 0
        self.read_from_excel()
        print(self.share_list0)
        for i in self.share_list0:
            try:
                driver.get('http://quote.cfi.cn/quote_'+str(i)+'.html')
                time.sleep(1)
                # Click through daily / weekly / monthly charts and the
                # research-report tab, pausing so the user can inspect each.
                driver.find_element_by_link_text(u"日K").click()
                time.sleep(5)
                driver.find_element_by_link_text(u"周K").click()
                time.sleep(5)
                driver.find_element_by_link_text(u"月K").click()
                time.sleep(5)
                driver.find_element_by_link_text(u"研报一览").click()
                time.sleep(5)
                c = input('该股票是否优质,y or n:')
                if c == 'y':
                    num += 1
                    self.share_list1.append(i)
            except Exception:
                # Bug fix: narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt, making the loop unstoppable) and made the
                # failure message robust to non-string codes.
                print('{}网页获取失败'.format(i))
                continue
        print('选出的股票数为:'+str(num)+'')
        self.save_to_excel(self.share_list1)
        driver.quit()
# Script entry point: run the interactive review session.
if __name__ == '__main__':
    show_k = ShowK()
    show_k.begin()
| zhouxiongaaa/myproject | my_stock/check_k.py | check_k.py | py | 2,757 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 45,
"usage_type": "call"
},
{
"api_name"... |
35730064961 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 09:47:56 2021
@author: ebbek
"""
import pandas as pd
import numpy as np
from inflhist import inflhist
import scipy.stats as sp
from infl_concat import infl_concat
from inflhist import inflhist
from figure_formatting import figure_formatting
from AUcolor import AUcolor
import os
import matplotlib.dates as mdates
from matplotlib.ticker import ScalarFormatter
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import font_manager as fm
from matplotlib.gridspec import GridSpec
import warnings
warnings.filterwarnings("ignore")
colors,color_names = AUcolor()
title_font = fm.FontProperties(fname='AUfonts/AUPassata_Bold.ttf',size=22)
subtitle_font = fm.FontProperties(fname='AUfonts/AUPassata_Rg.ttf',size=14)
figure_font = fm.FontProperties(fname='AUfonts/AUPassata_Bold.ttf',size=18)
ticks_font = fm.FontProperties(fname='AUfonts/AUPassata_Rg.ttf',size=18)
country_code_font = fm.FontProperties(fname='AUfonts/AUPassata_Bold.ttf',size=18)
plt.close('all')
path_parent = os.path.dirname(os.getcwd())
gendatadir = path_parent + '/gendata/' # Directory in which general data is located
moddatadir = path_parent + '/moddata/' # Directory in which modelled inflow time series are located
resdatadir = path_parent + '/resdata/' # Directory in which calibrated modelled inflow time series are located
histdatadir = path_parent + '/histdata/' # Directory in which historical inflow time series are located
figuredir = path_parent + '/figure/' # Directory in which saved figures are located
#%% ============================ INPUT ========================================
gcm_list = ['MPI-M-MPI-ESM-LR','ICHEC-EC-EARTH','CNRM-CERFACS-CNRM-CM5','MOHC-HadGEM2-ES', 'NCC-NorESM1-M'] # General Circulation Model
rcm_list = ['RCA4','HIRHAM5'] # Regional Climate Model
rcp_list = ['85'] # Representative Concentration Pathways
hydrotype = 'HDAM' # Type of hydropower plant
WI = 1 # Wattsight data included (1) or not (0)
#%% ============================= OUTPUT ======================================
if WI == 1:
country_name = ['Norway','France','Spain','Switzerland','Sweden','Germany','Austria','Italy',
'Bulgaria','Croatia','Portugal', 'Romania','Czech_Republic', 'Hungary',
'Bosnia_and_Herzegovina','Serbia','Slovenia','Finland','Poland','Slovakia',
'North_Macedonia','Montenegro']
country_iso_alpha_2 = ['NO','FR','ES','CH','SE','DE','AT','IT','BG','HR','PT','RO',
'CZ','HU','BA','RS','SI','FI','PL','SK','MK','ME']
nrows = 8
ncols = 3
lp = -2
else:
country_name = ['Norway','France','Spain','Switzerland','Sweden','Austria','Italy', 'Romania',
'Bulgaria','Portugal', 'Montenegro', 'Serbia']
country_iso_alpha_2 = ['NO','FR','ES','CH','SE','AT','IT','RO','BG','PT', 'ME', 'RS']
nrows = 4
ncols = 3
lp = 0
# nrows = 1
# ncols = 1
# country_name = ['Norway']
# country_iso_alpha_2 = ['NO']
if nrows > 1:
if len(country_iso_alpha_2) % 3 != 0:
fig,ax = figure_formatting('','',1,1,figsiz = (17,14))
gs = GridSpec(nrows, ncols)
ax = [0]*len(country_iso_alpha_2)
ax_twin = [0]*len(country_iso_alpha_2)
counter = 0
for row in range(nrows-1):
for col in range(ncols):
ax[counter] = plt.subplot(gs[row,col])
ax[counter].tick_params(axis='y', colors=colors[0])
ax[counter].set_yticklabels(ax[counter].get_yticks(), fontProperties = ticks_font)
ax[counter].set_xticklabels(ax[counter].get_xticks(), fontProperties = ticks_font)
ax[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax_twin[counter] = ax[counter].twinx()
ax_twin[counter].tick_params(axis='y', colors=colors[6])
ax_twin[counter].set_yticklabels(ax_twin[counter].get_yticks(), fontProperties = ticks_font)
ax_twin[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
counter += 1
if len(country_iso_alpha_2) % 3 == 1:
ax[counter] = plt.subplot(gs[row+1,0])
ax[counter].tick_params(axis='y', colors=colors[0])
ax[counter].set_yticklabels(ax[counter].get_yticks(), fontProperties = ticks_font)
ax[counter].set_xticklabels(ax[counter].get_xticks(), fontProperties = ticks_font)
ax[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax_twin[counter] = ax[counter].twinx()
ax_twin[counter].tick_params(axis='y', colors=colors[6])
ax_twin[counter].set_yticklabels(ax_twin[counter].get_yticks(), fontProperties = ticks_font)
ax_twin[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
elif len(country_iso_alpha_2) % 3 == 2:
ax[counter] = plt.subplot(gs[row+1,0])
ax[counter].tick_params(axis='y', colors=colors[0])
ax[counter].set_yticklabels(ax[counter].get_yticks(), fontProperties = ticks_font)
ax[counter].set_xticklabels(ax[counter].get_xticks(), fontProperties = ticks_font)
ax[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax_twin[counter] = ax[counter].twinx()
ax_twin[counter].tick_params(axis='y', colors=colors[6])
ax_twin[counter].set_yticklabels(ax_twin[counter].get_yticks(), fontProperties = ticks_font)
ax_twin[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax[counter+1] = plt.subplot(gs[row+1,1])
ax[counter+1].tick_params(axis='y', colors=colors[0])
ax[counter+1].set_yticklabels(ax[counter+1].get_yticks(), fontProperties = ticks_font)
ax[counter+1].set_xticklabels(ax[counter+1].get_xticks(), fontProperties = ticks_font)
ax[counter+1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax[counter+1].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax_twin[counter+1] = ax[counter+1].twinx()
ax_twin[counter+1].tick_params(axis='y', colors=colors[6])
ax_twin[counter+1].set_yticklabels(ax_twin[counter+1].get_yticks(), fontProperties = ticks_font)
ax_twin[counter+1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
fig.subplots_adjust(wspace=0.3)
fig.subplots_adjust(hspace=0.4)
fig1,ax1 = figure_formatting('','',1,1,color='k',figsiz = (17,14))
gs1 = GridSpec(nrows, ncols)
ax1 = [0]*len(country_iso_alpha_2)
counter = 0
for row in range(nrows-1):
for col in range(ncols):
ax1[counter] = plt.subplot(gs1[row,col])
ax1[counter].tick_params(axis='y', colors=colors[0])
ax1[counter].set_yticklabels(ax1[counter].get_yticks(), fontProperties = ticks_font)
ax1[counter].set_xticklabels(ax1[counter].get_xticks(), fontProperties = ticks_font)
ax1[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax1[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
counter += 1
if len(country_iso_alpha_2) % 3 == 1:
ax1[counter] = plt.subplot(gs[row+1,0])
ax1[counter].tick_params(axis='y', colors='k')
ax1[counter].set_yticklabels(ax1[counter].get_yticks(), fontProperties = ticks_font)
ax1[counter].set_xticklabels(ax1[counter].get_xticks(), fontProperties = ticks_font)
ax1[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax1[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
elif len(country_iso_alpha_2) % 3 == 2:
ax1[counter] = plt.subplot(gs[row+1,0])
ax1[counter].tick_params(axis='y', colors='k')
ax1[counter].set_yticklabels(ax1[counter].get_yticks(), fontProperties = ticks_font)
ax1[counter].set_xticklabels(ax1[counter].get_xticks(), fontProperties = ticks_font)
ax1[counter].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax1[counter].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax1[counter+1] = plt.subplot(gs[row+1,1])
ax1[counter+1].tick_params(axis='y', colors='k')
ax1[counter+1].set_yticklabels(ax1[counter+1].get_yticks(), fontProperties = ticks_font)
ax1[counter+1].set_xticklabels(ax1[counter+1].get_xticks(), fontProperties = ticks_font)
ax1[counter+1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
ax1[counter+1].xaxis.set_major_formatter(ScalarFormatter(useMathText=True, useOffset=False))
fig1.subplots_adjust(wspace=0.3)
fig1.subplots_adjust(hspace=0.4)
else:
fig,ax,ax_twin = figure_formatting('','',nrows,ncols,figsiz = (18,12.5),ylab_twin='',color_twin=colors[6],twin=True)
fig1,ax1 = figure_formatting('','',nrows,ncols,color='k',figsiz = (18,12.5))
rcp = rcp_list[0]  # only one RCP scenario in rcp_list
for c in range(len(country_name)):
    gcm_it = 0
    count = 0  # running index over GCM-RCM combinations; also picks the line colour
    country = country_iso_alpha_2[c]
    country_l = country_name[c]
    # Historical inflow 1991-2020; the 1e-3 factor presumably converts
    # GWh to TWh -- confirm the units returned by inflhist.
    hist = inflhist(histdatadir,1991,2020,country,country_l)*1e-3
    hist = hist[(hist.T != 0).any()] # Removing rows with zeros
    hist_m_train = hist #.drop(columns='month')
    hist_m_train['month'] = hist_m_train.index.month
    # Mean seasonal cycle: average inflow per calendar month.
    hist_m_train_seasonal = hist_m_train.groupby('month').mean()
    # Dummy 2016 monthly index so the seasonal cycle plots on a date axis.
    ind = pd.date_range('2016/01/01','2016/12/31',freq='MS')
    hist_m_train_seasonal.set_index(ind,inplace=True)
    ax[c].text(0.01, 0.7,country, transform=ax[c].transAxes,fontproperties=ticks_font,zorder=10)
    ax1[c].text(0.01, 0.7,country, transform=ax1[c].transAxes,fontproperties=ticks_font,zorder=10)
    pearson = [0]*len(gcm_list)*len(rcm_list)
    if c == 0:
        # Only label the historical line once so the shared legend has one entry.
        ax[c].plot(hist_m_train_seasonal,label='Historical',color=colors[0],zorder=20,lw=2,linestyle='--')
        # ax[c].set_ylabel('Historical inflow [TWh]')
    else:
        ax[c].plot(hist_m_train_seasonal,color=colors[0],zorder=5,lw=2,linestyle='--')
    # NOTE(review): the loop variables gcm/rcm are immediately overwritten via
    # the gcm_it/rcm_it counters, so the for-targets are redundant.
    for gcm in gcm_list:
        rcm_it = 0
        gcm = gcm_list[gcm_it]
        for rcm in rcm_list:
            rcm = rcm_list[rcm_it]
            infl_cal,infl_EOC = infl_concat(country,gendatadir,moddatadir,gcm,rcm,rcp)
            infl_cal = (infl_cal*(3.6e12)**-1)*1e-3 # Unit conversion from Joule to TWh
            time_dt = pd.to_datetime(infl_cal.index)
            infl_cal['date'] = time_dt
            infl_cal.set_index('date',inplace=True)
            # Aggregate modelled inflow to monthly sums, then to the mean seasonal cycle.
            infl_cal_monthly = infl_cal.groupby(pd.Grouper(freq='MS')).sum()
            infl_cal_monthly['month'] = infl_cal_monthly.index.month
            infl_cal_m_train_seasonal = infl_cal_monthly.groupby('month').mean()
            infl_cal_m_train_seasonal.set_index(ind,inplace=True)
            # ax_twin[c].set_ylabel('Modelled inflow [TWh]')
            # Pearson correlation between historical and modelled seasonal cycles.
            pearson[count] = hist_m_train_seasonal.corrwith(infl_cal_m_train_seasonal).inflow.round(2)
            # Monthly retain factors: historical / modelled seasonal inflow.
            RF = hist_m_train_seasonal.values/infl_cal_m_train_seasonal.values
            if c == 0:
                ax_twin[c].plot(infl_cal_m_train_seasonal,color=colors[1+count],label=gcm + '-' + rcm,lw=1)
                ax1[c].plot(infl_cal_m_train_seasonal.index,RF,marker='o', linestyle='dashed',markersize=4,color=colors[1+count],label=gcm + '-' + rcm,lw=1)
            else:
                ax_twin[c].plot(infl_cal_m_train_seasonal,color=colors[1+count],lw=1)
                ax1[c].plot(infl_cal_m_train_seasonal.index,RF,marker='o', linestyle='dashed',markersize=4,color=colors[1+count],lw=1)
            count += 1
            rcm_it += 1
        gcm_it += 1
    # Annotate each panel with the mean correlation across all GCM-RCM pairs;
    # negative values need one more character, so the label shifts slightly left.
    if np.mean(pearson) >= 0:
        ax[c].text(0.8, 0.7,'r=' + str(np.mean(pearson).round(2)), transform=ax[c].transAxes,fontproperties=ticks_font,zorder=10)
    else:
        ax[c].text(0.77, 0.7,'r=' + str(np.mean(pearson).round(2)), transform=ax[c].transAxes,fontproperties=ticks_font,zorder=10)
    # Month-abbreviation ticks on both figures.
    ax[c].xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax[c].set_xlim([min(ind),max(ind)])
    ax[c].xaxis.set_minor_locator(mdates.MonthLocator(interval=1))
    ax[c].xaxis.set_major_locator(mdates.MonthLocator(interval=2))
    ax1[c].xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax1[c].set_xlim([min(ind),max(ind)])
    ax1[c].xaxis.set_minor_locator(mdates.MonthLocator(interval=1))
    ax1[c].xaxis.set_major_locator(mdates.MonthLocator(interval=2))
# Shared legend below all panels of each figure.
fig.legend(frameon=False,loc='lower center',ncol=3,prop=ticks_font,handletextpad=0.1,labelspacing=0,borderaxespad=-0.5)
fig1.legend(frameon=False,loc='lower center',ncol=3,prop=ticks_font,handletextpad=0.1,labelspacing=0,borderaxespad=-0.5)
# Save figures; the file name depends on how many countries were evaluated.
if len(country_name) == 2:
    fig.savefig(figuredir + 'Model_evaluation_' + country_name[0] + '_' + country_name[1] + '_' + rcp + '.png',bbox_inches='tight')
    fig1.savefig(figuredir + 'Retain_factors_' + country_name[0] + '_' + country_name[1] + '_' + rcp + '.png',bbox_inches='tight')
elif len(country_name) == 22:
    fig.savefig(figuredir + 'Model_evaluation_' + rcp + '.png',bbox_inches='tight')
fig1.savefig(figuredir + 'Retain_factors_' + rcp + '.png',bbox_inches='tight') | ebbekyhl/Future-operation-of-hydropower-in-Europe | scripts/model_evaluation.py | model_evaluation.py | py | 13,813 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "AUcolor.AUcolor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 25,
"usage_type": "call"
},
{
"ap... |
38206554377 | from flask_app import app
from flask import render_template, redirect, request, session
from flask_app.models.dojo import Dojo
from flask_app.models.ninja import Ninja
@app.route('/create_dojo', methods=['POST'])
def create_user():
    """Create a dojo from the submitted form and go back to the dojo list."""
    form_values = {
        'name': request.form['name'],
    }
    Dojo.create_dojo(form_values)
    return redirect('/dojos')
@app.route('/dojos')
def alldojo():
    """Render the page listing every dojo."""
    return render_template('dojos.html', dojos=Dojo.getAllDojos())
@app.route('/delete/<int:id>')
def deleteuser(id):
    """Delete the dojo with the given id, then return to the dojo list.

    NOTE(review): deletion via GET is vulnerable to CSRF / link prefetching;
    consider switching this to a POST route.
    """
    Dojo.delete_dojo({'id': id})
    return redirect('/dojos')
| megikapo18/dojo_ninjas | flask_app/controllers/dojos.py | dojos.py | py | 620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask_app.models.dojo.Dojo.create_dojo",
"line_number": 14,
"usage_type": "call"
},
{
"api_... |
413332120 | from pathlib import Path
from copy import deepcopy
from typing import Callable, Optional, Any, \
Union, Generator, TextIO
import numpy as np
from dae.utils.variant_utils import get_interval_locus_ploidy
from dae.variants_loaders.raw.flexible_variant_loader import \
flexible_variant_loader
from dae.variants.core import Allele
from dae.genomic_resources.reference_genome import ReferenceGenome
from dae.pedigrees.families_data import FamiliesData
def _cnv_location_to_vcf_trasformer() \
-> Callable[[dict[str, Any]], dict[str, Any]]:
"""Expand shorthand loc notation to separate crom, pos, pos_end attrs.
In case the input uses CNV location this transformer will produce
internal (chrom, pos, pos_end) description of the CNV position.
"""
def transformer(result: dict[str, Any]) -> dict[str, Any]:
location = result["location"]
chrom, pos_range = location.split(":")
beg, end = pos_range.split("-")
result["chrom"] = chrom
result["pos"] = int(beg)
result["pos_end"] = int(end)
return result
return transformer
def _cnv_vcf_to_vcf_trasformer() \
-> Callable[[dict[str, Any]], dict[str, Any]]:
"""Convert pos and pos_end to int.
In case the input uses VCF-like description of the CNVs this
transformer will check it and handle the proper type conversion for
`pos` and `pos_end` values.
"""
def trasformer(result: dict[str, Any]) -> dict[str, Any]:
chrom = result["chrom"]
pos = int(result["pos"])
pos_end = int(result["pos_end"])
result["chrom"] = chrom
result["pos"] = pos
result["pos_end"] = pos_end
return result
return trasformer
def _configure_cnv_location(
        header: list[str],
        transformers: list[Callable[[dict[str, Any]], dict[str, Any]]],
        cnv_chrom: Optional[str] = None,
        cnv_start: Optional[str] = None,
        cnv_end: Optional[str] = None,
        cnv_location: Optional[str] = None) -> None:
    """Configure the header and position-handling transformers.

    This helper function will **configure** the header and transformers needed
    to handle position of CNVs in the input record.
    """
    uses_vcf_columns = (
        cnv_chrom is not None or cnv_start is not None or cnv_end is not None)
    if uses_vcf_columns:
        if cnv_location is not None:
            # Mixing the two position notations is ambiguous -- refuse it.
            raise ValueError(
                f"mixed variant location definitions: "
                f"vcf({cnv_chrom}:{cnv_start}-{cnv_end}) and "
                f"location({cnv_location})")
        # Fall back to the default column name for any unspecified column.
        if cnv_chrom is None:
            cnv_chrom = "chrom"
        if cnv_start is None:
            cnv_start = "pos"
        if cnv_end is None:
            cnv_end = "pos_end"
        # Look up all indices before renaming, as the original did.
        chrom_idx = header.index(cnv_chrom)
        start_idx = header.index(cnv_start)
        end_idx = header.index(cnv_end)
        header[chrom_idx] = "chrom"
        header[start_idx] = "pos"
        header[end_idx] = "pos_end"
        transformers.append(_cnv_vcf_to_vcf_trasformer())
        return
    # Shorthand "chrom:beg-end" location notation.
    if cnv_location is None:
        cnv_location = "location"
    header[header.index(cnv_location)] = "location"
    transformers.append(_cnv_location_to_vcf_trasformer())
def _cnv_dae_best_state_to_best_state(
        families: FamiliesData, genome: ReferenceGenome) \
        -> Callable[[dict[str, Any]], dict[str, Any]]:
    """Transform old dae family id/best state to canonical form.

    In case the genotype of the CNVs is specified in old
    dae family id/best state notation, this transformer will handle it
    and transform it to canonical family id/best state form.
    """
    def transformer(result: dict[str, Any]) -> dict[str, Any]:
        variant_type = result["variant_type"]
        # np.fromstring(..., sep=" ") is deprecated; parse the
        # space-separated ploidy row explicitly instead.
        actual_ploidy = np.array(
            result["best_state"].split(), dtype=np.int8)
        family_id = result["family_id"]
        family = families[family_id]
        chrom = result["chrom"]
        pos = result["pos"]
        pos_end = result["pos_end"]
        # Expected (reference) ploidy over the CNV interval for every
        # family member, accounting for sex chromosomes.
        expected_ploidy = np.asarray([
            get_interval_locus_ploidy(
                chrom, pos, pos_end, p.sex, genome
            ) for p in family.members_in_order
        ])
        if variant_type == Allele.Type.large_duplication:
            alt_row = actual_ploidy - expected_ploidy
        elif variant_type == Allele.Type.large_deletion:
            alt_row = expected_ploidy - actual_ploidy
        else:
            raise ValueError(
                f"unexpected variant type: {variant_type}")
        ref_row = expected_ploidy - alt_row
        # Canonical best state: row 0 reference counts, row 1 alt counts.
        best_state = np.stack((ref_row, alt_row)).astype(np.int8)
        result["best_state"] = best_state
        return result
    return transformer
def _cnv_person_id_to_best_state(
        families: FamiliesData, genome: ReferenceGenome) \
        -> Callable[[dict[str, Any]], dict[str, Any]]:
    """Transform variant into canonical family id/best state form.

    In case the genotype is specified by the person id carrying the
    variant, this transformer produces the canonical family id/best
    state representation.
    """
    def transformer(result: dict[str, Any]) -> dict[str, Any]:
        person_id = result["person_id"]
        if "family_id" in result:
            family = families[result["family_id"]]
            person = family.persons[person_id]
        else:
            # Without an explicit family the person id must be unique
            # across all families.
            matches = families.persons_by_person_id[person_id]
            assert len(matches) == 1
            person = matches[0]
            family = families[person.family_id]
        chrom = result["chrom"]
        pos = result["pos"]
        pos_end = result["pos_end"]
        expected_ploidy = np.asarray([
            get_interval_locus_ploidy(
                chrom, pos, pos_end, member.sex, genome)
            for member in family.members_in_order])
        # A single alt allele is attributed to the carrier person.
        alt_row = np.zeros(len(family.members_in_order), dtype=np.int8)
        alt_row[person.member_index] = 1
        ref_row = expected_ploidy - alt_row
        result["best_state"] = \
            np.stack((ref_row, alt_row)).astype(np.int8)
        result["family_id"] = family.family_id
        return result
    return transformer
def _configure_cnv_best_state(
header: list[str],
transformers: list[Callable[[dict[str, Any]], dict[str, Any]]],
families: FamiliesData,
genome: ReferenceGenome,
cnv_person_id: Optional[str] = None,
cnv_family_id: Optional[str] = None,
cnv_best_state: Optional[str] = None) -> None:
"""Configure header and transformers that handle CNV family genotypes."""
if cnv_person_id is not None:
# if cnv_family_id is not None and cnv_best_state is not None:
# raise ValueError(
# f"mixed configuration of cnv best state: "
# f"person_id({cnv_person_id}) <-> "
# f"family_id({cnv_family_id}) and "
# f"best_state({cnv_best_state})"
# )
person_index = header.index(cnv_person_id)
header[person_index] = "person_id"
transformers.append(
_cnv_person_id_to_best_state(families, genome)
)
else:
if cnv_family_id is None:
cnv_family_id = "family_id"
if cnv_best_state is None:
cnv_best_state = "best_state"
family_index = header.index(cnv_family_id)
best_state_index = header.index(cnv_best_state)
header[family_index] = "family_id"
header[best_state_index] = "best_state"
transformers.append(
_cnv_dae_best_state_to_best_state(families, genome))
def _cnv_variant_to_variant_type(
cnv_plus_values: Optional[list[str]] = None,
cnv_minus_values: Optional[list[str]] = None
) -> Callable[[dict[str, Any]], dict[str, Any]]:
"""Transform variant type to canonical internal representation.
This transformer is used to transform variant type to canonical
inernal representation using :class:`Allele.Type.large_duplication` and
:class:`Allele.Type.large_deletion`.
"""
if cnv_minus_values is None:
cnv_minus_values = ["CNV-"]
if cnv_plus_values is None:
cnv_plus_values = ["CNV+"]
def transformer(result: dict[str, Any]) -> dict[str, Any]:
variant = result["variant"]
if variant in cnv_plus_values:
variant_type = Allele.Type.large_duplication
elif variant in cnv_minus_values:
variant_type = Allele.Type.large_deletion
else:
raise ValueError(f"unexpected CNV variant type: {variant}")
result["variant_type"] = variant_type
return result
return transformer
def _configure_cnv_variant_type(
header: list[str],
transformers: list[Callable[[dict[str, Any]], dict[str, Any]]],
cnv_variant_type: Optional[str] = None,
cnv_plus_values: Optional[Union[str, list[str]]] = None,
cnv_minus_values: Optional[Union[str, list[str]]] = None
) -> None:
"""Configure header and transformer needed to handle CNV variant type."""
if cnv_plus_values is None:
cnv_plus_values = ["CNV+"]
elif isinstance(cnv_plus_values, str):
cnv_plus_values = [cnv_plus_values]
if cnv_minus_values is None:
cnv_minus_values = ["CNV-"]
if isinstance(cnv_minus_values, str):
cnv_minus_values = [cnv_minus_values]
if cnv_variant_type is None:
cnv_variant_type = "variant"
variant_type_index = header.index(cnv_variant_type)
header[variant_type_index] = "variant"
transformers.append(
_cnv_variant_to_variant_type(cnv_plus_values, cnv_minus_values)
)
def _configure_loader(
        header: list[str],
        families: FamiliesData,
        genome: ReferenceGenome,
        cnv_chrom: Optional[str] = None,
        cnv_start: Optional[str] = None,
        cnv_end: Optional[str] = None,
        cnv_location: Optional[str] = None,
        cnv_person_id: Optional[str] = None,
        cnv_family_id: Optional[str] = None,
        cnv_best_state: Optional[str] = None,
        cnv_variant_type: Optional[str] = None,
        cnv_plus_values: Optional[list[str]] = None,
        cnv_minus_values: Optional[list[str]] = None) \
        -> tuple[
            list[str],
            list[Callable[[dict[str, Any]], dict[str, Any]]]]:
    """Configure all headers and transformers needed to handle CNVs input."""
    # pylint: disable=too-many-arguments
    # Work on a copy so the caller's header is never mutated.
    result_header = deepcopy(header)
    transformers: list[
        Callable[[dict[str, Any]], dict[str, Any]]] = []
    # Each step canonicalizes one group of columns and registers the
    # matching record transformer.
    _configure_cnv_location(
        result_header, transformers,
        cnv_chrom, cnv_start, cnv_end, cnv_location)
    _configure_cnv_variant_type(
        result_header, transformers,
        cnv_variant_type, cnv_plus_values, cnv_minus_values)
    _configure_cnv_best_state(
        result_header, transformers,
        families, genome,
        cnv_person_id, cnv_family_id, cnv_best_state)
    return result_header, transformers
def flexible_cnv_loader(
        filepath_or_buffer: Union[str, Path, TextIO],
        families: FamiliesData,
        genome: ReferenceGenome,
        cnv_chrom: Optional[str] = None,
        cnv_start: Optional[str] = None,
        cnv_end: Optional[str] = None,
        cnv_location: Optional[str] = None,
        cnv_person_id: Optional[str] = None,
        cnv_family_id: Optional[str] = None,
        cnv_best_state: Optional[str] = None,
        cnv_variant_type: Optional[str] = None,
        cnv_plus_values: Optional[list[str]] = None,
        cnv_minus_values: Optional[list[str]] = None,
        cnv_sep: str = "\t",
        **_kwargs: Any
) -> Generator[dict[str, Any], None, None]:
    """Load variants from CNVs input and transform them into DataFrames.

    This function uses flexible variant loader infrastructure to
    load variants from a CNVs data input and transform them into a pandas
    `DataFrame`.  Records are yielded one by one as dictionaries.
    """
    # pylint: disable=too-many-locals,too-many-arguments
    def line_splitter(line: str) -> list[str]:
        return line.strip("\n\r").split(cnv_sep)

    if isinstance(filepath_or_buffer, (str, Path)):
        infile = open(filepath_or_buffer, "rt")
    else:
        infile = filepath_or_buffer  # type: ignore

    with infile:
        # BUGFIX (was a FIXME): a StopIteration escaping a generator
        # body becomes a RuntimeError under PEP 479, so an empty input
        # used to crash; now it simply yields no records.
        line = next(infile, None)
        if line is None:
            return
        header = line_splitter(line)
        header, transformers = _configure_loader(
            header,
            families,
            genome,
            cnv_chrom,
            cnv_start,
            cnv_end,
            cnv_location,
            cnv_person_id,
            cnv_family_id,
            cnv_best_state,
            cnv_variant_type,
            cnv_plus_values,
            cnv_minus_values)
        yield from flexible_variant_loader(
            infile, header, line_splitter, transformers,
            filters=[]
        )
| iossifovlab/gpf | dae/dae/variants_loaders/cnv/flexible_cnv_loader.py | flexible_cnv_loader.py | py | 13,075 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number":... |
31383412316 | from lingpy import *
from collections import defaultdict
from sinopy import sinopy
import re
# Build a markdown report of problematic (composed) characters in the
# Behr 2008 rhyme data and try to resolve them to single CJK characters.
csv1 = csv2list('2017-02-18-Behr-1-197-draft-2-western.csv', strip_lines=False)
csv2 = csv2list('2017-02-18-Behr-1-197-draft-2-eastern.csv', strip_lines=False)

# Map each problematic character description to its occurrences:
# (row number, page, line reference).
chars = defaultdict(list)
for i, line in enumerate(csv2[1:]+csv1[1:]):
    if len(line) > 7:
        # Entries with composition markup or more than one code point
        # are considered problematic.
        if '+' in line[5] or '>' in line[5] or '{' in line[5] or len(line[5].strip()) > 1:
            chars[line[5]] += [(i+1, line[2], line[3])]

guessed = 0  # count of descriptions successfully resolved (not reported)
with open('characters.md', 'w') as f:
    f.write('## Problematic Chars in Behr 2008\n\n')
    f.write('number | first page | character | conversion | occurrences | pages \n')
    # BUGFIX: the separator row must have the same number of columns (6)
    # as the header row, otherwise the markdown table does not render.
    f.write('--- | --- | --- | --- | --- | --- \n')
    # Sort by the row number of the first occurrence.
    for i, (char, vals) in enumerate(sorted(chars.items(), key=lambda x:
        x[1][0][0])):
        guess = '?'
        if len(char) == 3:
            # Three-character descriptions are structure triples; the
            # middle character is moved first for character_from_structure.
            guess = sinopy.character_from_structure(char[1]+char[0]+char[2])
            if guess != '?':
                guessed += 1
        else:
            # Resolve embedded {...} groups piecewise, then retry the
            # whole description if anything was substituted.
            _guess = char
            composed = re.findall('{(.*?)}', char)
            for c in composed:
                if len(c) == 3:
                    r = sinopy.character_from_structure(c[1]+c[0]+c[2])
                    if r != '?':
                        _guess = _guess.replace('{'+c+'}', r)
            if _guess != char:
                guessed += 1
                if len(_guess) == 3:
                    guess2 = sinopy.character_from_structure(_guess[1]+_guess[0]+_guess[2])
                    if guess2 != '?':
                        guess = guess2
                else:
                    guess = _guess
        f.write('{0} | {1} | {2} | {3} | {4} | {5} \n'.format(
            i+1,
            vals[0][1],
            char,
            guess,
            len(vals),
            ', '.join(['{0}: {1}'.format(y, z) for x, y, z in vals])
            ))
| digling/rhymes | datasets/Behr2008/raw/helper-2017-01-03.py | helper-2017-01-03.py | py | 1,929 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sinopy.sinopy.character_from_structure",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sinopy.sinopy",
"line_number": 25,
"usage_type": "name"
},
{
"api_n... |
30337427434 | #import libraries
import os
import string
import glob
import subprocess
from subprocess import Popen
import shutil
import time
import datetime
from datetime import datetime
import pandas as pd
import requests
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk, Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import packages
from packages import tool_path, action
def get_taxonomy_file():
    """Load the LOTUS taxonomy tables shipped with the tool.

    Returns a pair of DataFrames: the DB-specific taxonomy table and
    the combined taxonomy table across all databases.
    """
    base = tool_path.get_current_path()[0]
    df_taxonomy = pd.read_csv(
        base + '/packages/ressources/taxonomy_by_DB.tsv', sep='\t')
    df_taxonomy_all = pd.read_csv(
        base + '/packages/ressources/all_taxonomy_DB.tsv', sep='\t')
    return df_taxonomy, df_taxonomy_all
def get_chemontology_file():
    """Load the NPClassifier chemical-ontology table shipped with the tool."""
    base = tool_path.get_current_path()[0]
    return pd.read_csv(
        base + '/packages/ressources/NPClassifier_taxonomy.tsv', sep='\t')
# Main and most important function to scrap LOTUS
def get_lotus(IDD):
    """Query the online LOTUS database for one search criterion.

    ``IDD`` is a " : "-separated criterion string; its first token selects
    the search kind:

    * ``T``   -- taxonomy search: ``T : <TaxoDB> : <level> : <name>``
    * ``C``   -- chemical ontology search:
      ``C : <TaxoDB> : <NPClassifier> : <level> : <name>``
    * ``LTS`` -- search by name only: ``LTS : <TaxoDB> : <name>``
    * ``F``   -- molecular formula search: ``F : <TaxoDB> : <name>``

    The matching molecules and their metadata are accumulated in the
    module-level globals ``Dict`` and ``df_lotus``; ``df_lotus`` is also
    returned.  Requires network access to lotus.naturalproducts.net.
    """
    ############################ Get the code for IDD
    TorC = IDD.split(' : ')[0]
    if TorC == 'T':
        TaxoDB = IDD.split(' : ')[1]
        NPClassifier = ''
        level = IDD.split(' : ')[2]
        name = IDD.split(' : ')[3]
    elif TorC == 'C' :
        TaxoDB = IDD.split(' : ')[1]
        NPClassifier = IDD.split(' : ')[2]
        level = IDD.split(' : ')[3]
        name = IDD.split(' : ')[4]
    elif TorC == 'LTS':
        TaxoDB = IDD.split(' : ')[1]
        NPClassifier = ''
        level = ''
        name = IDD.split(' : ')[2]
    elif TorC == 'F':
        TaxoDB = IDD.split(' : ')[1]
        NPClassifier = ''
        level = ''
        name = IDD.split(' : ')[2]
    print(TorC, TaxoDB, NPClassifier, level, name)
    url = str('https://lotus.naturalproducts.net/api/search/simple?query=' + str(name))
    response = requests.get(url)
    storage = [response.json()] #get the search result in a json format
    # NOTE(review): the hit count is derived by counting "{'id'" substrings
    # in the stringified JSON -- fragile, but presumably matches the API
    # payload shape; verify against the current API response format.
    nb = len(str(storage)[1:-1].split("{'id'")) - 1 #get the number of molecules for the search
    print(str(nb) + ' results' )
    ###################################Get all the keys for the research
    # Collect [molecule index, reference key, taxonomy DB, occurrence index]
    # for every taxonomy entry matching the requested criterion.
    list_key_value=[]
    for mol in range(nb):
        for key in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'] :
            if TaxoDB != 'All_Taxonomy_DB' and TorC == 'T':
                if TaxoDB in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB])):
                        if storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB][go][level] != None and name in str(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB][go][level]):
                            key_value = [mol,key, TaxoDB, go]
                            # print(key_value)
                            list_key_value.append(key_value)
                            # print(storage[0]['naturalProducts'][key_value[0]]['taxonomyReferenceObjects'][key_value[1]][TaxoDB][0])
            elif TaxoDB == 'All_Taxonomy_DB' and TorC == 'T':
                for taxo in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo])):
                        if storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo][go][level] != None and name in str(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo][go][level]):
                            key_value = [mol,key, taxo, go]
                            # print(key_value)
                            list_key_value.append(key_value)
                            # print(storage[0]['naturalProducts'][key_value[0]]['taxonomyReferenceObjects'][key_value[1]][key_value[2]][0])
            elif TaxoDB != 'All_Taxonomy_DB' and TorC == 'C':
                if TaxoDB in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB])):
                        # For chemical searches the level is a molecule-level
                        # attribute, not a taxonomy attribute.
                        if storage[0]['naturalProducts'][mol][level] != None and name in str(storage[0]['naturalProducts'][mol][level]):
                            key_value = [mol,key,TaxoDB, go]
                            # print(key_value)
                            list_key_value.append(key_value)
            elif TaxoDB == 'All_Taxonomy_DB' and TorC == 'C':
                for taxo in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo])):
                        if storage[0]['naturalProducts'][mol][level] != None and name in str(storage[0]['naturalProducts'][mol][level]):
                            key_value = [mol,key, taxo, go]
                            # print(key_value)
                            list_key_value.append(key_value)
            elif TaxoDB != 'All_Taxonomy_DB' and TorC == 'LTS':
                if TaxoDB in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB])):
                        key_value = [mol,key,TaxoDB, go]
                        list_key_value.append(key_value)
            elif TaxoDB == 'All_Taxonomy_DB' and TorC == 'LTS':
                for taxo in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo])):
                        key_value = [mol,key, taxo, go]
                        list_key_value.append(key_value)
            elif TaxoDB != 'All_Taxonomy_DB' and TorC == 'F':
                if TaxoDB in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][TaxoDB])):
                        key_value = [mol,key, TaxoDB, go]
                        list_key_value.append(key_value)
            elif TaxoDB == 'All_Taxonomy_DB' and TorC == 'F':
                for taxo in storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key]:
                    for go in range(len(storage[0]['naturalProducts'][mol]['taxonomyReferenceObjects'][key][taxo])):
                        key_value = [mol,key, taxo, go]
                        list_key_value.append(key_value)
    ######################## transform the keys to search
    # Regroup the flat [mol, key, taxo, go] rows into a nested structure:
    # one entry per molecule of the form [mol, {key: {taxo: [go, ...]}}].
    df = pd.DataFrame(list_key_value, columns =['mol', 'key', 'taxo', 'go'])
    list_new_key_value = []
    new_list = list(set(df.mol))
    for mol in new_list:
        AA = {}
        list_new_key = set(df.loc[df.mol == mol].key.to_list())
        df2 = df.loc[df.mol == mol]
        aa = {}
        for x in list_new_key:
            list_taxo = list(set(df2.loc[df2.key == x].taxo.to_list()))
            df3 = df2.loc[df2.key == x]
            rst={}
            for y in list_taxo:
                list_go = df3.loc[df3.taxo == y].go.to_list()
                rst[y] = list_go
            aa[x] = rst
        AA = [mol, aa]
        list_new_key_value.append(AA)
    ########################### get Lotus
    global Dict
    Dict = {
    'Lotus_ID' :{} , 'smiles' : {}, 'inchi' : {}, 'inchikey' : {}, 'cas' : {}, 'iupac_name' : {}, 'molecular_formula' : {}, 'molecular_weight' : {}, 'xlogp' : {} ,
    'superkingdom':{}, 'kingdom':{}, 'phylum':{}, 'classx':{}, 'order':{}, 'family': {}, 'genus' : {}, 'species' :{} ,
    'chemicalTaxonomyClassyfireKingdom' : {} , 'chemicalTaxonomyClassyfireSuperclass' :{}, 'chemicalTaxonomyClassyfireClass' : {} , 'chemicalTaxonomyClassyfireDirectParent' : {} ,
    'chemicalTaxonomyNPclassifierPathway' : {} ,'chemicalTaxonomyNPclassifierSuperclass' : {} , 'chemicalTaxonomyNPclassifierClass' : {},
    'traditional_name' : {}
    } #Create a global dict to store metadata from LOTUS
    xx = 1 #start dict at 1
    # if TaxoDB != "All_Taxonomy_DB" and TorC == 'T':
    zz = 0
    for molindex in list_new_key_value:
        # Copy the molecule-level metadata; non-ASCII names are encoded
        # to UTF-8 bytes with replacement of invalid characters.
        Dict['Lotus_ID'][xx] = storage[0]['naturalProducts'][molindex[0]]['lotus_id']
        Dict['smiles'][xx] = storage[0]['naturalProducts'][molindex[0]]['smiles']
        Dict['inchi'][xx] = storage[0]['naturalProducts'][molindex[0]]['inchi']
        Dict['inchikey'][xx] = storage[0]['naturalProducts'][molindex[0]]['inchikey']
        Dict['cas'][xx] = storage[0]['naturalProducts'][molindex[0]]['cas']
        Dict['traditional_name'][xx] = storage[0]['naturalProducts'][molindex[0]]['traditional_name'].encode(encoding = 'UTF-8', errors = 'replace')
        Dict['iupac_name'][xx] = (storage[0]['naturalProducts'][molindex[0]]['iupac_name']).encode(encoding = 'UTF-8', errors = 'replace')
        Dict['molecular_formula'][xx] = storage[0]['naturalProducts'][molindex[0]]['molecular_formula']
        Dict['molecular_weight'][xx] = storage[0]['naturalProducts'][molindex[0]]['molecular_weight']
        Dict['xlogp'][xx] = storage[0]['naturalProducts'][molindex[0]]['xlogp']
        Dict['chemicalTaxonomyClassyfireKingdom'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyClassyfireKingdom']
        Dict['chemicalTaxonomyClassyfireSuperclass'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyClassyfireSuperclass']
        Dict['chemicalTaxonomyClassyfireClass'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyClassyfireClass']
        Dict['chemicalTaxonomyClassyfireDirectParent'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyClassyfireDirectParent']
        Dict['chemicalTaxonomyNPclassifierPathway'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyNPclassifierPathway']
        Dict['chemicalTaxonomyNPclassifierSuperclass'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyNPclassifierSuperclass']
        Dict['chemicalTaxonomyNPclassifierClass'][xx] = storage[0]['naturalProducts'][molindex[0]]['chemicalTaxonomyNPclassifierClass']
        # Per-taxonomy-DB accumulators (suffix 1 = across all keys of
        # this molecule; the unsuffixed dicts are per reference key).
        spk1={}
        ki1={}
        ph1={}
        cl1={}
        or11={}
        fm1={}
        ge1={}
        spe1={}
        print(molindex[0])
        for key in list_new_key_value[zz][1]: ####################offset by 1 !!!!!!!! to be fixed
            print(key)
            spk={}
            ki={}
            ph={}
            cl={}
            or1={}
            fm={}
            ge={}
            spe={}
            for taxo in list_new_key_value[zz][1][key]:
                print(taxo)
                superkingdom=[]
                kingdom=[]
                phylum=[]
                classx=[]
                order=[]
                family=[]
                genus=[]
                species=[]
                # Collect each distinct value once per taxonomy level.
                for go in range(len(list_new_key_value[zz][1][key][taxo])):
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['superkingdom'] not in superkingdom:
                        superkingdom.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['superkingdom'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['kingdom'] not in kingdom:
                        kingdom.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['kingdom'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['phylum'] not in phylum:
                        phylum.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['phylum'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['classx'] not in classx:
                        classx.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['classx'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['order'] not in order:
                        order.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['order'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['family'] not in family:
                        family.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['family'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['genus'] not in genus:
                        genus.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['genus'])
                    if storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['species'] not in species:
                        species.append(storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][key][taxo][list_new_key_value[zz][1][key][taxo][go]]['species'])
                # print(species)
                # Join the distinct values into " - "-separated strings.
                spk[taxo] = ' - '.join([str(e) for e in superkingdom])
                ki[taxo] =' - '.join([str(e) for e in kingdom])
                ph[taxo] =' - '.join([str(e) for e in phylum])
                cl[taxo]=' - '.join([str(e) for e in classx])
                or1[taxo]=' - '.join([str(e) for e in order])
                fm[taxo]=' - '.join([str(e) for e in family])
                ge[taxo]=' - '.join([str(e) for e in genus])
                spe[taxo]=' - '.join([str(e) for e in species])
                # Merge this key's values into the molecule-wide
                # accumulators (concatenate, deduplicated below).
                if taxo in spk1:
                    spk1[taxo]=spk1[taxo] + ' - ' + spk[taxo]
                    ki1[taxo]=ki1[taxo] +' - ' + ki[taxo]
                    ph1[taxo]=ph1[taxo] +' - ' + ph[taxo]
                    cl1[taxo]=cl1[taxo] +' - ' + cl[taxo]
                    or11[taxo]=or11[taxo] +' - ' + or1[taxo]
                    fm1[taxo]= fm1[taxo] +' - ' + fm[taxo]
                    ge1[taxo]=ge1[taxo] +' - ' + ge[taxo]
                    spe1[taxo]=spe1[taxo] +' - ' + spe[taxo]
                else:
                    spk1[taxo]= spk[taxo]
                    ki1[taxo]=ki[taxo]
                    ph1[taxo]=ph[taxo]
                    cl1[taxo]=cl[taxo]
                    or11[taxo]=or1[taxo]
                    fm1[taxo]=fm[taxo]
                    ge1[taxo]=ge[taxo]
                    spe1[taxo]=spe[taxo]
        # Deduplicate the accumulated " - "-joined values per taxonomy DB.
        for taxo in spk1:
            spk1[taxo] = set(spk1[taxo].split(' - '))
            ki1[taxo] = set(ki1[taxo].split(' - '))
            # print(ki[taxo])
            ph1[taxo] = set(ph1[taxo].split(' - '))
            cl1[taxo] = set(cl1[taxo].split(' - '))
            or11[taxo] = set(or11[taxo].split(' - '))
            fm1[taxo] = set(fm1[taxo].split(' - '))
            ge1[taxo] = set(ge1[taxo].split(' - '))
            spe1[taxo] = set(spe1[taxo].split(' - '))
        print(spe1)
        Dict['superkingdom'][xx] = spk1
        Dict['kingdom'][xx] =ki1
        Dict['phylum'][xx] =ph1
        Dict['classx'][xx] = cl1
        Dict['order'][xx] = or11
        Dict['family'][xx] = fm1
        Dict['genus'][xx] = ge1
        Dict['species'][xx] = spe1
        #################################################################################
        zz=zz+1
        # Dict['reference_wikidata_id'][xx] = storage[0]['naturalProducts'][molindex[0]]['taxonomyReferenceObjects'][molindex[1]][molindex[2]][0]['reference_wikidata_id']
        xx = xx + 1 # Next molecule
    global df_lotus
    df_lotus = pd.DataFrame.from_dict(Dict)
    #df_lotus.to_csv('cfmid_input.tsv', sep = '\t', index = False)
    return df_lotus
# Function to search all the selected listed criteria in LOTUS_DB
def get_lotus_add():
    """Search LOTUS once per selected criterion and merge all results.

    Every criterion returned by
    ``action.get_selected_search_criteria_list()`` is searched
    independently with :func:`get_lotus`; the per-criterion results are
    concatenated and de-duplicated on ``Lotus_ID``.  The merged table is
    exported as a metadata TSV, a CFM-ID input file ("<id> <smiles>")
    and a SIRIUS structural DB file ("<smiles> <id>"), and the criteria
    used are recorded in a timestamped log file.
    """
    list_IDD = action.get_selected_search_criteria_list()
    if list_IDD[0] == '':
        # Nothing selected: tell the user and bail out.
        print(str(datetime.now()) + " Come on, make a choice ! ")
        # BUGFIX: the dialog text used to read "chocie".
        messagebox.showinfo("Info", "Come on, make a choice !")
        return
    Dict_neutral = {
        'Lotus_ID': {}, 'smiles': {}, 'inchi': {}, 'inchikey': {},
        'cas': {}, 'iupac_name': {}, 'molecular_formula': {},
        'molecular_weight': {}, 'xlogp': {},
        'superkingdom': {}, 'kingdom': {}, 'phylum': {}, 'classx': {},
        'order': {}, 'family': {}, 'genus': {}, 'species': {},
        'chemicalTaxonomyClassyfireKingdom': {},
        'chemicalTaxonomyClassyfireSuperclass': {},
        'chemicalTaxonomyClassyfireClass': {},
        'chemicalTaxonomyClassyfireDirectParent': {},
        'chemicalTaxonomyNPclassifierPathway': {},
        'chemicalTaxonomyNPclassifierSuperclass': {},
        'chemicalTaxonomyNPclassifierClass': {},
        'traditional_name': {},
    }
    df_general = pd.DataFrame.from_dict(Dict_neutral)
    for IDD in list_IDD:
        print(IDD)
        get_lotus(IDD)  # populates the global df_lotus
        df_general = pd.concat([df_general, df_lotus], ignore_index=True)
    df_general = df_general.drop_duplicates(subset=['Lotus_ID'])
    base = tool_path.get_current_path()[0]
    df_general.to_csv(
        base + '/LOTUS_DB_input/cfmid_input.tsv', sep='\t', index=False)
    n_mols = len(df_general['Lotus_ID'])
    print(str(datetime.now()) + " There are " + str(n_mols)
          + " molecules for " + str(list_IDD))
    messagebox.showinfo(
        "Info",
        "There are " + str(n_mols) + " molecules for " + str(list_IDD))
    # BUGFIX: use context managers so the export files are closed even
    # if a write fails part-way through.
    with open(base + '/LOTUS_DB_input/cfmid_input.txt', "w") as fileout, \
            open(base + '/LOTUS_DB_input/structural_db.txt', "w") \
            as fileout_sirius_db:
        for i in df_general['Lotus_ID']:
            smiles = df_general['smiles'][int(str(
                df_general.Lotus_ID[
                    df_general.Lotus_ID == i].index.to_list()[0]))]
            fileout.write(i + ' ' + smiles + '\n')
            fileout_sirius_db.write(smiles + ' ' + i + '\n')
    log_name = base + '/log/' + str(datetime.now()).replace(
        ' ', '_').replace(':', '_').replace('.', '_') + '_input_criteria.txt'
    with open(log_name, 'w') as errorlog:
        errorlog.write(
            "This log contains criteria for research in the online LOTUS "
            "DataBase on the " + str(datetime.now()) + " ." + "\n")
        errorlog.write(
            'The research has been made with the "for all selected '
            'categories" method.' + "\n")
        errorlog.write(
            "There are " + str(n_mols)
            + " molecules corresponding to your criteria." + "\n")
        for IDD in list_IDD:
            errorlog.write(str(IDD) + "\n")
# Function to search combined selected listed criteria in LOTUS_DB
def get_lotus_or():
list_IDD = action.get_selected_search_criteria_list()
if list_IDD[0] == '':
print(str(datetime.now()) + " Come on, make a choice ! ")
messagebox.showinfo("Info", "Come on, make a chocie !")
else :
Dict_neutral = {
'Lotus_ID' :{} , 'smiles' : {}, 'inchi' : {}, 'inchikey' : {}, 'cas' : {}, 'iupac_name' : {}, 'molecular_formula' : {}, 'molecular_weight' : {}, 'xlogp' : {} ,
'superkingdom':{}, 'kingdom':{}, 'phylum':{}, 'classx':{}, 'order':{}, 'family': {}, 'genus' : {}, 'species' :{} ,
'chemicalTaxonomyClassyfireKingdom' : {} , 'chemicalTaxonomyClassyfireSuperclass' :{}, 'chemicalTaxonomyClassyfireClass' : {} , 'chemicalTaxonomyClassyfireDirectParent' : {} ,
'chemicalTaxonomyNPclassifierPathway' : {} ,'chemicalTaxonomyNPclassifierSuperclass' : {} , 'chemicalTaxonomyNPclassifierClass' : {}
,'traditional_name' : {}
}
df_general = pd.DataFrame.from_dict(Dict_neutral)
df_general2 = pd.DataFrame.from_dict(Dict_neutral)
selected_tax = []
selected_chemical = []
selected_formula = []
for IDD in list_IDD:
if 'T : ' in IDD:
selected_tax.append(IDD) # Is the criteria a Taxonomic criteria ?
elif 'C : ' in IDD:
#selected_chemical.append(IDD.split(' : ')[4]) # Is the criteria a Chemontologic criteria
selected_chemical.append(IDD)
elif 'F : ' in IDD:
#selected_formula.append(IDD.split(' : ')[2]) # Is the criteria a Formula criteria ?
selected_formula.append(IDD)
if selected_tax != [] : # if taxonomy criteria exist
for sel_tax in selected_tax:
get_lotus(sel_tax)
if selected_chemical != [] : #if taxonomy and chemontoly criteria exist
for sel_chemical in selected_chemical:
if sel_chemical.split(' : ')[4] in df_lotus['chemicalTaxonomyNPclassifierPathway'].to_list():
new_df = df_lotus.loc[(df_lotus['chemicalTaxonomyNPclassifierPathway'] == str(sel_chemical.split(' : ')[4])) | (df_lotus['chemicalTaxonomyNPclassifierPathway'] == str(str(sel_chemical.split(' : ')[4]) + '|' + str(sel_chemical.split(' : ')[4])))] ####230228
df_general = pd.concat([df_general, new_df], ignore_index = True)
elif sel_chemical.split(' : ')[4] in df_lotus['chemicalTaxonomyNPclassifierSuperclass'].to_list():
new_df = df_lotus.loc[(df_lotus['chemicalTaxonomyNPclassifierSuperclass'] == str(sel_chemical.split(' : ')[4]))]
df_general = pd.concat([df_general, new_df], ignore_index = True)
elif sel_chemical.split(' : ')[4] in df_lotus['chemicalTaxonomyNPclassifierClass'].to_list():
new_df = df_lotus.loc[(df_lotus['chemicalTaxonomyNPclassifierClass'] == str(sel_chemical.split(' : ')[4]))]
df_general = pd.concat([df_general, new_df], ignore_index = True)
if selected_formula != [] : #if taxonomy and chemontoly and formula criteria exist
for sel_formula in selected_formula:
if sel_formula.split(' : ')[2] in df_general['molecular_formula'].to_list():
new_df = df_general.loc[(df_general['molecular_formula'] == str(sel_formula.split(' : ')[2]))]
df_general2 = pd.concat([df_general2, new_df], ignore_index = True)
else :
df_general2 = pd.concat([df_general2, df_general], ignore_index = True) #if taxonomy and chemontoly exist but not formula
else : #if taxonomy exist but not chemontology
if selected_formula != [] : #if taxonomy and formula criteria exist but not chemontology
for sel_formula in selected_formula:
if sel_formula.split(' : ')[2] in df_lotus['molecular_formula'].to_list():
new_df = df_lotus.loc[(df_lotus['molecular_formula'] == str(sel_formula.split(' : ')[2]))]
df_general2 = pd.concat([df_general2, new_df], ignore_index = True)
else : # if only taxonomy criteria exist
df_general2 = pd.concat([df_general2, df_lotus], ignore_index = True)
else : #if taxonomy criteria does not exist
if selected_chemical != [] : #if taxonomy criteria does not exist but chemontology exist
for sel_chemical in selected_chemical:
get_lotus(sel_chemical)
if selected_formula != [] : #if taxonomy criteria does not exist but chemontology and formula exist
for sel_formula in selected_formula:
if sel_formula.split(' : ')[2] in df_lotus['molecular_formula'].to_list():
new_df = df_lotus.loc[(df_lotus['molecular_formula'] == str(sel_formula.split(' : ')[2]))]
df_general2 = pd.concat([df_general2, new_df], ignore_index = True)
else : #if taxonomy criteria and formula do not exist but chemontology exist
df_general2 = pd.concat([df_general2, df_lotus], ignore_index = True)
else : #if taxonomy criteria and chemontoly do not exist
if selected_formula != [] : #if taxonomy criteria and chemontoly do not exist but formula exist
for sel_formula in selected_formula:
get_lotus(sel_formula)
df_general2 = pd.concat([df_general2, df_lotus], ignore_index = True)
#messagebox.showinfo("Info", "There are " + str(len(df_general2['Lotus_ID'])) +" molecules for this research")
df_general2= df_general2.drop_duplicates(subset=['Lotus_ID']) #delete duplicates in the results of the search
if df_general2['Lotus_ID'].to_list():
df_general2.to_csv(tool_path.get_current_path()[0]+ '/LOTUS_DB_input/cfmid_input.tsv', sep = '\t', index = False) #export metadata in .TSV file
fileout = open(tool_path.get_current_path()[0]+ '/LOTUS_DB_input/cfmid_input.txt', "w") #export LOTUS_ID and SMILES in text file
fileout_sirius_db = open(tool_path.get_current_path()[0] + '/LOTUS_DB_input/structural_db.txt', "w") #####230228
for i in df_general2['Lotus_ID']:
fileout.write(i + ' ' + df_general2['smiles'][int(str(df_general2.Lotus_ID[df_general2.Lotus_ID == i ].index.to_list()[0]))] + '\n' )
fileout_sirius_db.write(df_general2['smiles'][int(str(df_general2.Lotus_ID[df_general2.Lotus_ID == i ].index.to_list()[0]))] + ' ' + i + '\n' ) ######230228
fileout.close()
fileout_sirius_db.close()
print(str(datetime.now()) + " There is a total of " + str(len(df_general2['Lotus_ID'].to_list())) + " molecules for requested categories.")
messagebox.showinfo("Info", "There is a total of " + str(len(df_general2['Lotus_ID'].to_list())) + " molecules for requested categories.")
else:
messagebox.showinfo("Info", "There is no molecules for requested categories.")
print(str(datetime.now()) + " Info", "There is no molecules for requested categories.")
errorlog = open(tool_path.get_current_path()[0] + '/log/' + str(datetime.now()).replace(' ', '_').replace(':', '_').replace('.','_') + '_input_criteria.txt', 'w')
errorlog.write("This log contains criteria for research in the online LOTUS DataBase on the "+ str(datetime.now()) + " ." + "\n")
errorlog.write('The research has been made with the "for selected chemical class.es in selected family.ies" method.' + "\n")
errorlog.write("There are " + str(len(df_general2['Lotus_ID'])) +" molecules corresponding to your criteria." + "\n")
for IDD in list_IDD:
errorlog.write(str(IDD) + "\n")
errorlog.close()
#get the cfmid 4.0 docker image identifier to log
def get_cfmid_docker_image():
ID_container_id = open(tool_path.get_current_path()[0] + '/CFM_ID_4/ID_container_cfmid.txt', 'r').read().split('\n')[0]
return ID_container_id | simremy/versadb_tk | packages/ginfo.py | ginfo.py | py | 29,035 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "packages.tool_path.get_current_path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "packages.tool_path",
"line_number": 28,
"usage_type": "name"
},
{
"api_name":... |
9333482415 | #!/usr/bin/env python3
"""
Tests for the `pre_commit.git` submodule.
"""
from os import path
from tempfile import TemporaryDirectory
from unittest import (
main,
TestCase
)
from pre_commit.git import (
ForbiddenCharacterError,
GitHandle,
RepositoryError
)
from tests.util import BasicRepo
class TestGitHandle(TestCase):
def test_get_git_root(self):
try:
# initialize repo
repo = BasicRepo()
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# test
self.assertEqual(
path.abspath(git_handle.root),
path.abspath(repo.repo_path)
)
except Exception:
raise
finally:
repo.delete()
def test_check_path_is_allowed(self):
try:
# initialize repo
repo = BasicRepo()
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# test some bad paths and file names...
with self.assertRaises(ForbiddenCharacterError):
git_handle._check_path_is_allowed("a bad/path")
with self.assertRaises(ForbiddenCharacterError):
git_handle._check_path_is_allowed("/a\nice/path/")
with self.assertRaises(ForbiddenCharacterError):
git_handle._check_path_is_allowed("this_is_/'the'/_path")
with self.assertRaises(ForbiddenCharacterError):
git_handle._check_path_is_allowed(
path.join("\\", "a_path")
)
with self.assertRaises(ForbiddenCharacterError):
git_handle._check_path_is_allowed(
path.join("/some/path", "this is_bad.ext")
)
except Exception:
raise
finally:
repo.delete()
def test_get_head_hash_bare(self):
try:
# initialize repo
repo = BasicRepo()
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# test
self.assertEqual(
git_handle.get_head_hash(),
"4b825dc642cb6eb9a060e54bf8d69288fbee4904"
)
except Exception:
raise
finally:
repo.delete()
def test_get_head_hash_not_bare(self):
try:
# initialize repo
repo = BasicRepo(bare=False)
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# test commit
test_commit_file = path.join(repo.repo_path, "test_commit")
with open(test_commit_file, "w") as foo:
foo.write("test commit")
repo.repo.git.add(test_commit_file)
repo.repo.git.commit("-m", "test commit")
self.assertEqual(
git_handle.get_head_hash(),
repo.repo.head.commit.hexsha
)
except Exception:
raise
finally:
repo.delete()
def test_get_staged_file_paths(self):
try:
# initialize repo
repo = BasicRepo(bare=False)
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# test staging
test_staged_file = path.join(repo.repo_path, "test_staged_file")
with open(test_staged_file, "w") as foo:
foo.write("test stage")
test_staged_file1 = path.join(repo.repo_path, "test_staged_file1")
with open(test_staged_file1, "w") as foo:
foo.write("test stage1")
# stage
repo.repo.git.add([test_staged_file, test_staged_file1])
self.assertEqual(
set(git_handle.get_staged_files_paths()),
set([
path.relpath(test_staged_file, repo.repo_path),
path.relpath(test_staged_file1, repo.repo_path)
])
)
except Exception:
raise
finally:
repo.delete()
def test_staged_file_content(self):
try:
# initialize repo
repo = BasicRepo(bare=False)
# initialize git handle
git_handle = GitHandle(repo.repo_path)
# write file
test_staged_file = path.join(repo.repo_path, "test_staged_file")
with open(test_staged_file, "w") as foo:
foo.write("test stage")
# stage
repo.repo.git.add([test_staged_file])
# edit the file again
with open(test_staged_file, "a") as foo:
foo.write("\nmore")
# get staged file path
pth = git_handle.get_staged_files_paths()[0]
self.assertEqual(
git_handle.get_staged_file_content(pth),
b"test stage"
)
except Exception:
raise
finally:
repo.delete()
class TestGitHandleErrors(TestCase):
def test_repository_error(self):
try:
tmp = TemporaryDirectory()
with self.assertRaises(RepositoryError):
GitHandle(tmp.name)
except Exception:
raise
finally:
tmp.cleanup()
if __name__ == "__main__":
main()
| spreemohealth/style | tests/test_git.py | test_git.py | py | 5,403 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "tests.util.BasicRepo",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pre_commit.git.GitHandle",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.pa... |
22388472051 | from flask import Flask, jsonify, request
app = Flask(__name__)
accounts = [
{"name":"Billy", 'balance':457.74},
{"name":"Renesmee", 'balance':-150.0},
{"name":"Edward", 'balance':4156.9},
{"name":"Marla", 'balance':321.31},
{"name":"Andrew", 'balance':-120.1},
{"name":"Roxane", 'balance':-10.2},
{'name':'Kelly','balance':250.0}
]
groups =[
{"name":"Positive", "members":[]},
{"name":"Negative","members":[]}
]
def groupsFormat():
i = 0
for gr in groups:
gr['members']=[]
for person in accounts:
if person['balance']>=0:
groups[0]['members'].append(i)
else:
groups[1]['members'].append(i)
i += 1
@app.route("/",methods=["GET"])
def wellcome():
return "Wellcome to banking system"
@app.route("/accounts",methods=["GET"])
def getAccounts():
return jsonify(accounts)
@app.route("/accounts/<id>",methods=["GET"])
def getAccount(id):
id = int(id)-1
return jsonify(accounts[id])
@app.route("/accounts/<id>",methods=["DELETE"])
def deleteAccount(id):
id = int(id)-1
ac = accounts[id]
accounts.remove(ac)
groupsFormat()
return jsonify(ac)
@app.route("/accounts",methods=["POST"])
def addAccounts():
name = request.json['name']
balance = float(request.json['balance'])
data = {'name':name,'balance':balance}
accounts.append(data)
groupsFormat()
return jsonify(data)
@app.route("/accounts",methods=["PUT"])
def changeAccount():
name = request.json['name']
balance = float(request.json['balance'])
data = {'name':name,'balance':balance}
for acc in accounts:
if acc['name'] == name:
acc['balance'] = balance
return jsonify(data)
groupsFormat()
return "No such name"
@app.route("/groups",methods=["GET"])
def getGroups():
change_groups = []
for g in groups:
change_groups.append({"name":g["name"]})
for acc in g["members"]:
#str = "member"+str(acc)
change_groups[-1].update({"member"+str(acc):accounts[acc]})
return jsonify(change_groups)
if __name__ == "__main__":
groupsFormat()
app.run(port = 8080)
| Kroha1999/3KURS | Univer/REST +/lab 2/get.py | get.py | py | 2,190 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_numb... |
3203865574 | import fire,os,sys
import numpy as np
from tqdm import tqdm
from skimage import io,morphology
from keras.utils import to_categorical
import cv2
from ulitities.base_functions import get_file,send_message_callback,load_label
def post_process_segment(inf,outf,Flag_cv=True, minsize=10, area_threshold=1000):
# pass
# import cv2
img = load_label(inf)
print(np.unique(img))
NB = len(np.unique(img))-1
if Flag_cv:
kernel = np.ones((minsize,minsize),np.uint8)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
cv2.imwrite(outf,closing)
else:
# NB=1
if NB >1:
NB+=1
try:
# label= to_categorical(img,num_classes=NB,dtype='uint8')
label = to_categorical(img)
except:
print("Failed when to transform one hot")
return -1
result_list = []
for i in range(NB):
t = remove_small_objects_and_holes(label[:, :, i], minsize*10, area_threshold, True)
result_list.append(t[:,:,None])
label = np.concatenate(result_list, axis=2)
label = np.argmax(label, axis=2).astype(np.uint8)
else:
label =img
label= remove_small_objects_and_holes(label, minsize*10, area_threshold, True)
label = np.asarray(label,np.uint8)
cv2.imwrite(outf, label)
def remove_small_objects_and_holes(label,min_size, area_threshold, in_place=True):
label=morphology.remove_small_objects(label==1,min_size=min_size, connectivity=1,in_place=in_place)
label=morphology.remove_small_holes(label==1,area_threshold=area_threshold,connectivity=1,in_place=in_place)
return label
def batch_rmovesmallobj(send_message_callback,inputdir,outputdir,flag_cv=True,msize=5, thd=1000):
if not os.path.isdir(inputdir):
send_message_callback("Please check input directory:{}".format(inputdir))
sys.exit(-1)
if not os.path.isdir(outputdir):
send_message_callback('Warning: output directory is not existed')
os.mkdir(outputdir)
files,_=get_file(inputdir)
for file in files:#tqdm(files):
send_message_callback("Dealing with : "+file)
absname = os.path.split(file)[1]
outputfile = os.path.join(outputdir, absname)
post_process_segment(file, outputfile,Flag_cv=flag_cv, minsize=msize, area_threshold=thd)
if __name__ == "__main__":
# batch_rmovesmallobj(r"C:\Users\SCRS\Pictures\111",r"C:\Users\SCRS\Pictures\222")
# inputfile='/home/omnisky/PycharmProjects/data/samples/isprs/train_ori/label_all/top_potsdam_7_13.tif'
# outputfile='/media/omnisky/e0331d4a-a3ea-4c31-90ab-41f5b0ee2663/traindata/scrs_building/test/crfss/whole48-tt.tif'
# post_process_segment(inputfile,outputfile, Flag_cv=False, minsize=10)
fire.Fire() | scrssys/SCRS_RS_AI | mask_process/remove_small_object.py | remove_small_object.py | py | 2,935 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "ulitities.base_functions.load_label",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.o... |
38736890564 | import numpy as np
from matplotlib import pyplot as plt
import matplotlib.colors
import random as rand
class Board:
def __init__(self,dim,targetTerrain): #set default values for a board
self.board = np.zeros((dim,dim), dtype= float)
self.target = (rand.randint(0,dim-1),rand.randint(0,dim-1))
self.dim = dim
self.populateTerrain()
self.newTarget(targetTerrain)
def populateTerrain(self):
terrain = [.1, .3, .7, .9]
for i in range(self.dim):
for j in range(self.dim):
self.board[i,j] = rand.choices(terrain,weights = (.2, .3, .3, .2))[0]
def newTarget(self,terrain):
while True:
x,y = (rand.randint(0,self.dim-1),rand.randint(0,self.dim-1))
if self.board[x,y] == terrain:
self.target = (x,y)
break
def printBoard(self):
#print(self.board)
plt.figure(figsize = (10,10))
plt.pcolor(self.board,edgecolors = "black", cmap = 'Set3', linewidths = 1)
for (j,i),label in np.ndenumerate(self.board): #consider using a mapping for label to mine/flag
plt.text(i,j,label,ha='left',va='bottom')
plt.tight_layout()
plt.show() #block=False
#plt.pause(.2)
#plt.close() | akaashp/ProbabilisticHunting | Board.py | Board.py | py | 1,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.choices",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_nu... |
38777068404 | """
Aprenda a manipular datas
Realizar conversao de texto para data e vice-versa
realizar soma e subtracao em datas
- Como recuperar a data atual(DATE)
- Como trabalhar com a data, alterando sua formatação
- Como gerar um horário(TIME)
- Retornar data e hora atual(DATETIME)
- Alterar formação do DATETIME
- Realizar soma e subtracao de datas com TIMEDELTA
"""
from datetime import date, time, datetime, timedelta
def trabalhando_date():
data_atual = date.today()
print(data_atual.strftime('%d/%m/%Y')) # Dia - número, m - Mês, - Y - Ano com 4 digitos # strftime - vira string
print(data_atual.strftime('%A %B %Y')) # Dia - nome, m - Mês, Y - Ano com 4 digitos # strftime - vira string
def trabalhando_time():
time_atual = time(hour=15, minute=10, second=30)
print(time_atual)
print(time_atual.strftime('%H:%M:%S')) # H - Hora, M - Minuto, S - segundo # strftime - vira string
def trabalhando_datetime():
data_atual = datetime.now()
print(data_atual)
print(data_atual.strftime('%d/%m/%Y - %H:%M:%S')) # d - Dia número, m - Mês numero, Y - Ano numero # strftime - vira string
print(data_atual.strftime('%c')) # c - Dia da semana nome, Mês nome, Dia numero, Hora:minuto:segundos, ano 4 digitos # strftime - vira string
tupla = ('Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sabado', 'Domingo')
print(tupla[data_atual.weekday()])
data_string = '01/01/2019 12:20:25'
data_convertida = datetime.strptime(data_string, '%d/%m/%Y %H:%M:%S')
print(data_convertida)
nova_data = data_convertida - timedelta(days=365)
print(nova_data)
if __name__ == '__main__':
trabalhando_date()
trabalhando_time()
trabalhando_datetime()
| Ademilson12/Aulas_Digital | Basico/aula10.py | aula10.py | py | 1,756 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now... |
32886110882 | from nltk.corpus import stopwords
from settings.common import word_tf_df
from preprocessing_pipeline.NextGen import NextGen
from preprocessing_pipeline import (Preprocess, RemovePunctuation, Capitalization, RemoveStopWords,
RemoveShortWords, TwitterCleaner, RemoveUrls)
def load_flat_dataset(path):
dataset = []
with open(path, 'r') as f:
for line in f:
dataset.append(line.strip().split(' '))
return dataset
def load_dataset_with_dates(path):
dataset = []
with open(path, 'r') as f:
for line in f:
dataset.append(line.strip().split('\t')[1].split(' '))
return dataset
if __name__ == '__main__':
dataset_names = ['sample_tweets']
forbidden_words = [] # list of words to be blacklisted [amp, rt, ...]
syn_file = None # synonym file containing line for each set of synonyms: [word], [synonym1], [synonym2], ...
extra_ngrams = [] # list of $-separated ngrams: [new$york$city, joe$biden, donald$trump, no$new$taxes]
for j in range(0, len(dataset_names)):
ds = dataset_names[j]
stopwords_list = stopwords.words('english')
stopwords_list.append(['rt', 'amp'])
pipeline = Preprocess()
rp = RemovePunctuation(keep_hashtags=False)
ru = RemoveUrls()
cap = Capitalization()
short_words = RemoveShortWords()
tc = TwitterCleaner()
rsw = RemoveStopWords(extra_sw=stopwords_list)
pipeline.document_methods = [(tc.remove_deleted_tweets, str(tc),),
(tc.remove_users, str(tc),),
(ru.remove_urls, str(ru),),
(rp.remove_punctuation, str(rp),),
(cap.lowercase, str(cap),),
(tc.remove_rt, str(tc),),
(rsw.remove_stopwords, str(rsw),),
(short_words.remove_short_words, str(short_words),)
]
ng = NextGen()
path = 'data/{}.csv'.format(ds)
dataset = load_dataset_with_dates(path)
processed_dataset = ng.full_preprocess(dataset, pipeline, ngram_min_freq=10, extra_bigrams=None, extra_ngrams=extra_ngrams)
with open('data/{}_lightweight.csv'.format(ds), 'w') as f:
for i in range(0, len(processed_dataset)):
doc = processed_dataset[i]
f.write('{}\n'.format(' '.join(doc)))
freq = {}
freq = word_tf_df(freq, processed_dataset)
processed_dataset = ng.filter_by_tfidf(dataset=processed_dataset, freq=freq, threshold=0.25)
with open('data/{}_lightweight_tdidf.csv'.format(ds), 'w') as f:
for i in range(0, len(processed_dataset)):
doc = processed_dataset[i]
f.write('{}\n'.format(' '.join(doc)))
| GU-DataLab/topic-modeling-textPrep | process_dataset.py | process_dataset.py | py | 2,946 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "preprocessing_pipeline.Preprocess",
"line_number": 37,
"usage_type": "call"
},
{
... |
21024956783 | import os
from music21 import humdrum
from music21 import converter
import chant21
class MultipleSpinesException(Exception):
"""An exception raised when encountering multiple spines
while expecting only 1"""
pass
def extract_phrases_from_spine(spine):
"""Enxtract the phrases as music21 streams from kern
Phrases in a humdrum file are delimited by curly brackets.
All events that do not occur between an opening and closing bracket
(i.e., are not in a phrase) are ignored.
Parameters
----------
spine : music21.humdrum.spineParser.KernSpine
A humdrum spine with the melody
Returns
-------
list
A list of phrases, each represented as a music21.stream.Stream
"""
phrases = []
in_phrase = False
cur_phrase = humdrum.spineParser.KernSpine()
for event in spine:
is_phrase_start = event.contents.startswith('{')
is_phrase_end = event.contents.endswith('}')
if is_phrase_start:
in_phrase = True
if in_phrase:
cur_phrase.append(event)
if is_phrase_end and in_phrase:
phrases.append(cur_phrase)
cur_phrase = humdrum.spineParser.KernSpine()
in_phrase = False
# Parse spines and return streams
for phrase in phrases: phrase.parse()
streams = [phrase.stream.flat for phrase in phrases]
return streams
def extract_phrases_from_kern_file(filename: str) -> list:
"""Extract all phrases from a kern file.
Phrases are returned as flattened music21 streams
Parameters
----------
filename : string
filename of the file
Returns
-------
list
List of phrases as music21.stream.Stream objects
Raises
------
MultipleSpinesException
Whenever the file contains multiple spines
"""
song = humdrum.parseFile(filename)
if len(song.spineCollection.spines) > 1:
raise MultipleSpinesException
spine = song.spineCollection.spines[0]
return extract_phrases_from_spine(spine)
def extract_phrases_from_gabc_file(filename: str) -> list:
chant = converter.parse(filename)
return chant.phrases
def extract_phrases_from_file(filepath: str) -> list:
"""Extract phrases from a file to a list of music21 streams.
Delegates the extration based on the file extension.
Parameters
----------
filename : string
filename of the file
Returns
-------
list
List of phrases as music21.stream.Stream objects
"""
if not os.path.exists(filepath):
raise FileNotFoundError
extension = os.path.splitext(filepath)[1]
if extension == '.krn':
return extract_phrases_from_kern_file(filepath)
elif extension == '.gabc':
return extract_phrases_from_gabc_file(filepath)
else:
raise Exception('Unknown file format')
| bacor/cosine-contours | src/phrases.py | phrases.py | py | 2,919 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "music21.humdrum.spineParser.KernSpine",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "music21.humdrum.spineParser",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "music21.humdrum",
"line_number": 29,
"usage_type": "name"
},
{... |
36279239216 | import os
import time
import threading
from dotenv import load_dotenv
import pyttsx3
import data_handler
from datetime import datetime
from datetime import timedelta
from task import Task
class AssistantApp:
def __init__(self):
# Initialize text to speech
self.engine = pyttsx3.init()
# Initialize tasks
self.tasks = []
self.task_id = 1
self.handler = data_handler.DataHandler()
def get_response(self, message):
response = self.handler.ChatGPT(message)
# Say the response with TTS
print(response)
self.engine.say(response)
self.engine.runAndWait()
def add_task(self):
text = input("Enter task: ")
repeats = input("Generate repeating task? yes/no: ")
repeat_interval = None
if repeats == 'yes':
repeat_interval = int(input("Enter repeat interval (days): "))
repeats = True
else:
repeats = False
for attempt in range(3):
try:
date = input("Enter date dd:mm:yy: ")
time = input("Enter time hh:mm: ")
date_list = date.split(":")
time_list = time.split(":")
task_datetime = datetime(int(date_list[2]) + 2000, int(date_list[1]), int(date_list[0]), int(time_list[0]), int(time_list[1]))
break
except Exception as e:
print("Incorrect time format, try again.")
continue
if repeat_interval == None:
self.tasks.append(Task(self.task_id, text, task_datetime, repeats))
else:
self.tasks.append(Task(self.task_id, text, task_datetime, repeats, repeat_interval))
self.task_id += 1
# Store task to tasks.csv
self.handler.update_tasks(self.tasks)
return self.task_id - 1
def delete_task(self, delete_id):
for task in self.tasks:
if task.task_id == delete_id:
self.tasks.remove(task)
self.handler.update_tasks(self.tasks)
def print_tasks(self):
if self.tasks == None:
print("No tasks assigned")
return
print('*******************************************')
for task in self.tasks:
if task.task_reminder_added == True:
print(f'{task.task_id}: REMINDER: {task.task_text} {task.task_datetime.strftime("%Y-%m-%d %H:%M")}')
else:
print(f'{task.task_id}: {task.task_text} {task.task_datetime.strftime("%Y-%m-%d %H:%M")}')
print('*******************************************')
def load_tasks(self):
tasks = self.handler.load_tasks()
if tasks == None or len(tasks) == 0:
return
self.tasks = tasks
last_id = tasks[len(tasks) - 1].task_id
self.task_id = last_id + 1
def check_tasks(self):
# format: (1, 'test', datetime.datetime(2023, 5, 15, 15, 0))
while True:
current_time = datetime.now()
if self.tasks == None:
return
for task in self.tasks:
if current_time.year == task.task_datetime.year and current_time.month == task.task_datetime.month and current_time.day == task.task_datetime.day and current_time.hour == task.task_datetime.hour and current_time.minute == task.task_datetime.minute:
print(task.task_text)
self.engine.say(f"You have a task that requires your attention. The task is {task.task_text}")
self.engine.runAndWait()
if task.task_repeats == True and task.task_repeat_added == False:
self.add_repeat_task(task)
task.task_repeat_added = True
if task.task_reminder_added == False:
task.task_datetime = task.task_datetime + timedelta(hours=1)
task.task_reminder_added = True
# Possibly reduntant update tasks
self.handler.update_tasks(self.tasks)
time.sleep(45)
def add_repeat_task(self, task):
task_datetime = task.task_datetime + timedelta(days=task.task_repeat_interval)
self.tasks.append(Task(self.task_id, task.task_text, task_datetime, task.task_repeats, task.task_repeat_interval))
self.task_id += 1
self.handler.update_tasks(self.tasks)
def add_reminder(self, task):
#self.handler.add_reminder()
pass
def clear_reminders(self):
#self.handler.clear_reminders()
pass
def main(self):
# Load tasks from tasks.csv
self.load_tasks()
# Start a thread to continuously check tasks
check_tasks_thread = threading.Thread(target=self.check_tasks)
check_tasks_thread.daemon = True # Allow the thread to exit when the main program exits
check_tasks_thread.start()
while True:
self.print_tasks()
print("Choose an option or use text to prompt ChatGPT")
print("0: exit")
print("1: add task")
print("2: delete task")
print("3: list tasks")
print("-------------------------------------------")
option = input("prompt: ")
if option == "0":
self.handler.update_tasks(self.tasks)
break
elif option == "1":
self.add_task()
elif option == "2":
delete_id = int(input("Enter task id: "))
self.delete_task(delete_id)
elif option == "3":
continue
else:
self.get_response(option)
if __name__ == "__main__":
assistant = AssistantApp()
assistant.main()
| johku/Assistant | assistant.py | assistant.py | py | 5,966 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyttsx3.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "data_handler.DataHandler",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.split",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
39926577938 | from datetime import datetime
import requests
from flask import Flask, render_template
app = Flask(__name__)
year = datetime.now().year
# print(year)
response = requests.get('https://api.npoint.io/362a61befce3d173e925')
blog_posts = response.json()
# print(blog_posts)
@app.route('/')
def home():
return render_template("index.html", post_data=blog_posts, date=year)
@app.route('/post/<int:index>')
def post_selected(index):
post_current = None
for post in blog_posts:
if post['id'] == index:
post_current = post
return render_template("post.html", post_data=post_current, date=year)
if __name__ == "__main__":
app.run(debug=True)
| vytautasmatukynas/Python-Random-Learning-Testing | WEB_DEV/flask/flask_1st_templates_forms/flask_project_3_blog_sample/server.py | server.py | py | 714 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"... |
38317304274 | # Importing Required Libraries
import cv2
# define a class called ImageReader
class ImageReader:
def __init__(self, filename):
self.filename = filename
def read_image(self):
try:
img = cv2.imread(self.filename)
if img is None:
raise Exception("Error: Unable to read image file")
except Exception as e:
print(str(e))
else:
return img
# create an instance of the ImageReader
reader = ImageReader(r"C:\PYTHON\AI_ML\OpenCV\Computer_vision.jpeg")
# call the read_image() method
img = reader.read_image()
if img is not None:
# to show an image
cv2.imshow("Image", img)
# to dispaly upto defined time
cv2.waitKey(0)
# to close the open window
cv2.destroyAllWindows()
# After executing this code seperate window will be open with an image which has given in the path.
# ### Reading an Image by allow the user to enter a link
# Importing Required Libraries
import cv2
import urllib.request
import numpy as np
# define a class called ImageReader
class ImageReader:
def __init__(self, url):
self.url = url
def read_image(self):
try:
resp = urllib.request.urlopen(self.url)
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
if img is None:
raise Exception("Error: Unable to read image file")
except Exception as e:
print(str(e))
else:
return img
# create an instance of the ImageReader
url = input("Enter the URL of the image file: ")
reader = ImageReader(url)
img = reader.read_image()
if img is not None:
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# After Executing above code It will ask you to enter an url link to diaplay an image.
| dsvijayvenkat/Computer_Vision_-_OpenCV | 1.Reading_an_Image.py | 1.Reading_an_Image.py | py | 1,890 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_n... |
3260640635 | from collections import deque
import sys
input = lambda: sys.stdin.readline().rstrip()
def bfs(x, y):
q = deque()
q.append((x, y))
field[x][y] = 0
while (q):
a, b = q.popleft()
for i in range(8):
nx = a + dx[i]
ny = b + dy[i]
if 0 <= nx < m and 0 <= ny < n and field[nx][ny] == 1:
field[nx][ny] = 0
q.append((nx, ny))
if __name__ == '__main__':
dx = [-1, -1, -1, 0, 0, 1, 1, 1]
dy = [-1, 0, 1, -1, 1, -1, 0, 1]
while True:
n, m = map(int, input().split())
cnt = 0
if n == 0 and m == 0:
break
field = [list(map(int, input().split())) for _ in range(m)]
for i in range(m):
for j in range(n):
if field[i][j] == 1:
bfs(i, j)
cnt += 1
print(cnt) | zinnnn37/BaekJoon | 백준/Silver/4963. 섬의 개수/섬의 개수.py | 섬의 개수.py | py | 913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
74246591074 | import tweepy
import json
import time
import datetime
import ConfigParser
import tweepy
from tweepy.streaming import StreamListener
'''
Get API keys from Configurations document
'''
config = ConfigParser.ConfigParser()
config.readfp(open(r'./configurations.txt'))
consumerKey=config.get('API Keys', 'consumerKey')
consumerSecret=config.get('API Keys', 'consumerSecret')
accessToken=config.get('API Keys', 'accessToken')
accessSecret=config.get('API Keys', 'accessSecret')
'''
Returns given number of tweets for given Twitter handle
'''
def get_tweets(twitter_handle, num_tweets):
auth = tweepy.AppAuthHandler(consumerKey, consumerSecret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
timeline = api.user_timeline(screen_name=twitter_handle, count=num_tweets)
user_tweets = []
for current_tweet in timeline:
if not current_tweet.retweeted:
tweet = {}
tweet['tweetId'] = current_tweet.id
tweet['message'] = current_tweet.text
tweet['author'] = current_tweet.user.name
s = str(current_tweet.created_at)
new_s = time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timetuple()) * 1000
tweet['timestamp'] = new_s
user_tweets.append(tweet)
return user_tweets
| kevinjye/EverTweet | tweet_listener.py | tweet_listener.py | py | 1,336 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "ConfigParser.ConfigParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tweepy.AppAuthHandler",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.mktim... |
18022219984 | import cv2
import numpy as np
import os
os.environ["OPENCV_IO_ENABLE_OPENEXR"]="1"
from PIL import Image
from utils.metrics_nocs import align, prepare_data_posefitting, draw_detections
from datasets.datasets import exr_loader,load_meta
syn_depth_path = '/data/sensor/data/real_data/test_0/0000_gt_depth.exr'
nocs_path = '/data/sensor/data/real_data/test_0/0000_coord.png'
mask_path = '/data/sensor/data/real_data/test_0/0000_mask.png'
meta_path = '/data/sensor/data/real_data/test_0/0000_meta.txt'
obj_dir = '/data/sensor/data/cad_model/real_cat_known'
synset_names = ['other', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'car', # 5
'mug', # 6
'aeroplane', # 7
'BG', # 8
]
intrinsics = np.zeros((3,3))
img_h = 720
img_w = 1280
fx = 918.295227050781
fy = 917.5439453125
cx = img_w * 0.5 - 0.5
cy = img_h * 0.5 - 0.5
camera_params = {
'fx': fx,
'fy': fy,
'cx': cx,
'cy': cy,
'yres': img_h,
'xres': img_w,
}
hw = (640 / img_w, 360 / img_h)
camera_params['fx'] *= hw[0]
camera_params['fy'] *= hw[1]
camera_params['cx'] *= hw[0]
camera_params['cy'] *= hw[1]
camera_params['xres'] *= hw[0]
camera_params['yres'] *= hw[1]
intrinsics[0,0] = camera_params['fx']
intrinsics[0,2] = camera_params['cx']
intrinsics[1,1] = camera_params['fy']
intrinsics[1,2] = camera_params['cy']
intrinsics[2,2] = 1.0
_syn_depth = cv2.imread(syn_depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
if len(_syn_depth.shape) == 3:
_syn_depth = _syn_depth[:, :, 0]
coords = Image.open(nocs_path).convert('RGB')
coords = np.array(coords) / 255.
if mask_path.split('.')[-1] == 'exr':
_mask = exr_loader(mask_path, ndim=1)
else:
_mask = Image.open(mask_path)
_mask = np.array(_mask)
if mask_path.split('.')[-1] == 'exr':
_mask = np.array(_mask * 255, dtype=np.int32)
_meta = load_meta(meta_path)
_mask_sem = np.full(_mask.shape, 8) #, 0)
_scale = np.ones((10,3)) #(class_num+1,3)
for i in range(len(_meta)):
_mask_sem[_mask == _meta[i]["index"]] = _meta[i]["label"] #1
if _meta[i]["instance_folder"] !=" " :
bbox_file = os.path.join(obj_dir,_meta[i]["instance_folder"] ,_meta[i]["name"],'bbox.txt')
bbox = np.loadtxt(bbox_file)
_scale[_meta[i]["label"]] = bbox[0, :] - bbox[1, :]
else :
_scale[_meta[i]["label"]] = np.ones((3))
_scale[_meta[i]["label"]] /= np.linalg.norm(_scale[_meta[i]["label"]])
gt_class_ids , gt_scores , gt_masks ,gt_coords ,\
gt_boxes = prepare_data_posefitting(_mask_sem,coords)
result = {}
result['gt_RTs'], scales, error_message, _ = align(gt_class_ids,
gt_masks,
gt_coords,
_syn_depth,
intrinsics,
synset_names)
if 1:
output_path = 'tmp'
draw_rgb = False
save_dir =os.path.join(output_path ,'save_{}'.format(i))
if not os.path.exists(save_dir) :
os.mkdir(save_dir)
data = 'camera'
result['gt_handle_visibility'] = np.ones_like(gt_class_ids)
rgb_path = '/data/sensor/data/real_data/test_0/0000_color.png'
_rgb = Image.open(rgb_path).convert('RGB')
_rgb = _rgb.resize((640,360))
_rgb = np.array(_rgb)
draw_detections(_rgb, save_dir, data, 1, intrinsics, synset_names, draw_rgb,
gt_boxes, gt_class_ids, gt_masks, gt_coords, result['gt_RTs'], scales, np.ones(gt_boxes.shape[0]),
gt_boxes, gt_class_ids, gt_masks, gt_coords, result['gt_RTs'], np.ones(gt_boxes.shape[0]), scales) | PKU-EPIC/DREDS | CatePoseEstimation/align.py | align.py | py | 3,904 | python | en | code | 89 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_ANYCOLOR",
"line... |
72107723554 | import json
import time
import datetime
import os.path
import GlobalConstants
import GlobalEnums
import Tools
from os import path
class ChatManager:
"""
Chat Manager
"""
def __init__(self):
#self.chatChannelGlobal = {}
self.createChannel(GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL)
def createChannel(self, channelType):
if channelType == GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL:
self.chatChannelGlobal = {}
def appendMessage(self, channelType, sender, message):
createdMessageObj = {
'sender': sender,
'message': message,
'date': Tools.getDate(),
'time': Tools.getTime()
}
if channelType == GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL:
index = len(self.chatChannelGlobal) + 1 # so it is not a zwro index
self.chatChannelGlobal[index] = createdMessageObj
# Each message added, make sure we aren't over our chat limit.
self.channelGarbageCollector()
def updateMessage(self, channelType, sender, message):
createdMessageObj = {
'sender': sender,
'message': message,
'date': Tools.getDate(),
'time': Tools.getTime()
}
if channelType == GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL:
self.chatChannelGlobal.update(createdMessageObj) # use this only for existing values
def channelGarbageCollector(self):
if len(self.chatChannelGlobal) >= GlobalConstants.CHAT_CHANNEL_GLOBAL_CAPACITY:
self.writeAndClear(GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL)
def writeAndClear(self, channelType):
self.writeChannelTypeToFile(channelType)
if channelType == GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL:
self.chatChannelGlobal.clear()
def writeChannelTypeToFile(self, channelType):
dictToWrite = None
if channelType == GlobalEnums.ChatChannel.CHAT_CHANNEL_GLOBAL:
dictToWrite = self.chatChannelGlobal
self.writeChannelToFile(dictToWrite, channelType)
def writeChannelToFile(self, dict, channelType):
print( os.path.abspath(os.getcwd()))
fileName = "%s.%s.%s" % (str(channelType.name), Tools.getDateDots(), Tools.getTimeDots())
# This is a testing check and should not happen in the real world, basically this will
# track many messages coming in at once and overwriting the last wrote file
dupeIndex = 0
while path.exists('ChatLogs/%s.txt' % fileName):
dupeIndex += 1
fileName = "%s.%s.%s.%s" % (str(channelType.name), Tools.getDateDots(), Tools.getTimeDots(), dupeIndex)
with open('ChatLogs/%s.txt' % fileName, 'w') as file:
file.write(json.dumps(dict, indent=2)) # use `json.loads` to do the reverse, indent adds to new line | RottenVisions/ouroboros-prototyping | prototyping/Chat.py | Chat.py | py | 2,536 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "GlobalEnums.ChatChannel",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "GlobalEnums.ChatChannel",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "Tools.getDate",
"line_number": 28,
"usage_type": "call"
},
{
"api_name"... |
31281558255 | import torch
import pandas as pd
import numpy as np
import torch.nn as nn
import statistics
from DLDUlib import device, train, optimize_ols, center, normalize, r_squared, cross_validate_train
import copy
names = ['SalePrice','1st_Flr_SF','2nd_Flr_SF','Lot_Area','Overall_Qual',
'Overall_Cond','Year_Built','Year_Remod/Add','BsmtFin_SF_1','Total_Bsmt_SF',
'Gr_Liv_Area','TotRms_AbvGrd','Bsmt_Unf_SF','Full_Bath']
df = pd.read_csv('AmesHousing.csv', names = names)
data = df.values # read data into a numpy array (as a list of lists)
data = data[1:] # remove the first list which consists of the labels
data = data.astype(np.float) # coerce the entries in the numpy array to floats
data = torch.FloatTensor(data) # convert data to a Torch tensor
data_train, means = center(data)
data_train, stdevs = normalize(data)
class NonLinearModel(nn.Module):
def __init__(self):
super(NonLinearModel, self).__init__()
self.layer1 = nn.Linear(13, 10)
self.layer2 = nn.Linear(10, 1)
def forward(self, xss):
xss = self.layer1(xss)
xss = torch.relu(xss)
return self.layer2(xss)
model = NonLinearModel()
criterion = nn.MSELoss()
epochs = 60
learning_rate = 0.005
momentum = 0.9
batchsize = 20
folds = 10
bail_after = 10
no_improvement = 0
best_valids = 1e15*torch.ones(folds)
while no_improvement < bail_after:
model, valids = cross_validate_train(
k = folds,
model = model,
criterion = criterion,
features = data_train[:,1:],
targets = data_train[:,:1],
epochs = epochs,
learning_rate = learning_rate,
momentum = momentum,
batchsize = batchsize,
verbosity = 1
)
print(best_valids.mean().item())
if valids.mean().item() < best_valids.mean().item():
best_model = copy.deepcopy(model)
best_valids = valids
no_improvement = 0
else:
no_improvement += 1
print("no improvement", no_improvement)
test_input = torch.FloatTensor([2855, 0, 26690, 8, 7, 1952, 1972, 1040, 2080, 1756, 8, 841, 2])
test_input = (test_input - means[1:]) / stdevs[1:]
test_output = best_model.forward(test_input).data[0]*stdevs[0]+means[0]
print("test_output:", test_output) | cpsiff/DLDU-Projects | 2_nonlinear/my_imp_nonlinear_crossv_train.py | my_imp_nonlinear_crossv_train.py | py | 2,166 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "DLDUlib.center",
... |
13953011427 | ## Code to make a PCR reaction using a pre-mixed master-mix, distribute it between wells
## of a 96-well plate, add primers and add a given number of template samples.
###INPUT### PCR variables
num_replicates = 8
num_templates = 4
total_PCR_volume = 20
master_mix_volume = 8
template_volume = 1
primer_volume = 2.5
#Define primer pairs
from opentrons import robot, containers, instruments
#Define containers - source_tube rack = cold block
pcr_plate = containers.load('96-PCR-flat', 'B1', 'pcr-plate')
trash = containers.load('trash-box', 'A3')
p200rack = containers.load('tiprack-200ul', 'A1', 'p200-rack')
#Create 96-well trough
containers.create(
'96-well-square',
grid=(8,12),
spacing=(9,9),
diameter=7,
depth=40
)
mix_trough = containers.load('96-well-square', 'B2', 'mix-trough')
#Create 6x12 p20 tip rack container
containers.create(
'tiprack-200ul-6x12',
grid=(6,12),
spacing=(9, 9),
diameter=5,
depth=60
)
p20rack = containers.load('tiprack-200ul-6x12', 'E1', 'p20-rack')
#Create 3x6 2ml tube rack for DNA samples
containers.create(
'3x6-tube-rack-2ml',
grid=(3,6),
spacing=(19.5,19.5),
diameter=9.5,
depth=40
)
template_primer_rack = containers.load('3x6-tube-rack-2ml', 'B3', 'template-primer-rack')
cold_rack = containers.load('3x6-tube-rack-2ml', 'D3', 'cold-rack')
#Define pipettes
p20 = instruments.Pipette(
trash_container=trash,
tip_racks=[p20rack],
min_volume=2,
max_volume=20,
axis="a"
)
p200 = instruments.Pipette(
trash_container=trash,
tip_racks=[p200rack],
min_volume=20,
max_volume=200,
axis="b"
)
#Define locations of PCR components
water_source = cold_rack.wells('A1')
master_mix_source = cold_rack.wells('A2')
template_sources = template_primer_rack.columns('A')
F_primer_sources = template_primer_rack.columns('B')
R_primer_sources = template_primer_rack.columns('C')
#Define DNA volumes to be added
template_volumes = [template_volume] * num_templates
num_pcr_samples = len(template_volumes)
water_volume = total_PCR_volume - master_mix_volume - (2*primer_volume) - template_volume
mix_trough_bottom = [well.bottom() for well in mix_trough.wells('A1', length=num_templates)]
p200.pick_up_tip()
#Add water
p200.distribute(
water_volume*(num_replicates+1), water_source, mix_trough_bottom, new_tip='never',
blow_out=True, disposal_volume=10)
p200.drop_tip()
#distribute MasterMix
p200.distribute(
master_mix_volume*(num_replicates+1), master_mix_source, mix_trough_bottom, blow_out=True,
disposal_volume=10)
#distribute F primers
for i in range(len(template_volumes)):
p200.transfer(
primer_volume*(num_replicates+1), F_primer_sources(i), mix_trough.wells(i).bottom(),
mix_after=(2, 20), blow_out=True, new_tip='always')
#distribute R primers
for i in range(len(template_volumes)):
p200.transfer(
primer_volume*(num_replicates+1), R_primer_sources(i), mix_trough.wells(i).bottom(),
mix_after=(2, 20), blow_out=True, touch_tip=True, new_tip='always')
#distribute template DNA
for i in range(len(template_volumes)):
p20.transfer(
template_volume*(num_replicates+1), template_sources(i), mix_trough.wells(i).bottom(),
mix_after=(2, 10), blow_out=True, touch_tip=True, new_tip='always')
#distribute mixes to PCR plate
for i in range(len(template_volumes)):
pcr_plate_bottom = [well.bottom() for well in pcr_plate.wells(i*num_replicates,
length=num_replicates, skip=8)]
p200.distribute(
total_PCR_volume, mix_trough(i), pcr_plate_bottom, mix_before=(5,200), disposal_volume=5,
dispense_speed=300)
| Arne444/PCR_premixedMM | PCR_premixedMM.py | PCR_premixedMM.py | py | 3,516 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "opentrons.containers.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "opentrons.containers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "opentrons.containers.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name... |
7087899911 | from utils.terminal import clear
from utils.Database import Database
import requests
from utils.OpenApi import OpenApi
class Dialogue:
def __init__(self):
self.database = Database()
self.main_menu()
self.open_api = OpenApi()
def main_menu(self) -> None:
"""
This function prints the main menu for the application and gets the input for the options
:return: None
"""
options = [
{"text": "Chercher un produit", "method": self.show_categories},
{"text": "Voir mes aliments substitués", "method": self.show_substitutes},
] # Put the options in a list as dicts with a pointer to the method they have to call
clear()
while True:
for index, option in enumerate(options): # foreach option print it with its index
print(str(index) + ". " + option["text"])
print("\x1b[6;32;40mChoisissez une option en écrivant le chiffre correspondant et "
"appuyez sur entrée:\x1b[0m")
try:
choice = int(input())
clear()
if 0 <= choice < len(options): # if the input is within the range of options available
return options[choice]["method"]() # call the method that's in the option at 'index'
print("\x1b[6;31;40mChoix inconnu\x1b[0m")
except ValueError: # int(input()) throws a ValueError
clear()
print("\x1b[6;31;40mVeuillez entrer un nombre entier svp\x1b[0m")
def show_categories(self) -> None:
"""
This method prints the categories available in the database and gets an input from the user to chose
which category to search in
:return: None
"""
categories = self.database.select("category") # Get all the categories in the db
while True:
for index, category in enumerate(categories): # for each category
print(str(index) + ". " + category[1]) # print the category name and its index
print("-1. Revenir au menu principal")
print("\x1b[6;32;40mChoisissez une option en écrivant le chiffre correspondant et "
"appuyez sur entrée:\x1b[0m")
try:
choice = int(input())
clear()
if choice == -1:
self.main_menu()
if 0 <= choice < len(categories):
# Show the products corresponding to the chosen category
return self.show_products(categories[choice])
print("\x1b[6;31;40mChoix inconnu\x1b[0m")
except ValueError: # int(input()) throws a ValueError
clear()
print("\x1b[6;31;40mVeuillez entrer un nombre entier svp\x1b[0m")
def show_products(self, category: list) -> None:
"""
This method prints the products that are in the category given in the arguments and gets an input from the
user to chose a product
:param category: list
:return: None
"""
# Retrieve the products that have the chosen category's id in their category field
products = self.database.select("aliment", "category=" + str(category[0]))
while True:
for index, product in enumerate(products):
print(str(index) + ". " + product[1])
print("-1. Revenir au menu principal")
print("\x1b[6;32;40mChoisissez une option en écrivant le chiffre correspondant et "
"appuyez sur entrée:\x1b[0m")
try:
choice = int(input())
clear()
if choice == -1:
self.main_menu()
if 0 <= choice < len(products):
return self.show_product_info(products[choice], category)
else:
print("\x1b[6;31;40mChoix inconnu\x1b[0m")
except ValueError: # int(input()) throws a ValueError
clear()
print("\x1b[6;31;40mVeuillez entrer un nombre entier svp\x1b[0m")
@staticmethod
def generate_search_params(category: list, nutrition_grade: str) -> str:
"""
This method generates the search parameters needed to get the products from a category and a certain
nutrition grade
:param category: the category from which the product is
:param nutrition_grade: the nutrition grade of the product
:return: the parameters to add to the search URL
"""
params = "?action=process&" \
"tagtype_0=categories&" \
"tag_contains_0=contains&" \
"tag_0={}&" \
"tagtype_1=nutrition_grades&" \
"tag_contains_1=contains&" \
"tag_1={}&" \
"json=1".format(category[0], nutrition_grade)
return params
def show_product_info(self, product: list, category: list) -> None:
"""
This function shows the infos on a product, searchs for a substitute and asks the user if he wants to save it
in the database
:param product: the product the user chose
:param category: the category of the product ( used to search for the substitute )
:return: None
"""
print("Infos produit:")
print("Nom:", product[1])
print("Grade nutritionnel:", product[3])
if "stores" in product:
print(product[4] + "\n")
substitut = self.search_substitute(product, category)
if substitut:
print("Voulez-vous enregistrer le substitut en base de données ?")
print("o. Oui")
print("n. Non")
print("\x1b[6;32;40mChoisissez une option en écrivant la lettre correspondante et "
"appuyez sur entrée:\x1b[0m")
while True:
choice = input()
if str(choice).lower() == "o":
self.save_substitute(substitut, product)
print("\x1b[6;32;40mSubstitut sauvegardé avec succès!\x1b[0m")
break
elif str(choice).lower() == "n":
print("Substitut non sauvegardé.")
break
else:
print("\x1b[6;31;40mVeuillez entrer un choix valide svp\x1b[0m")
print("\nAppuyez sur entrée pour revenir au menu principal")
input()
self.main_menu()
def save_substitute(self, substitut: dict, product: list) -> None:
"""
This method saves a substitute to the database and links it to an aliment
:param substitut: The substitute we got from the API
:param product: the product the user chose ( to link the substitute to it )
:return: None
"""
already_exists = self.database.select("substitute", "product_name = '{}'"
.format(substitut["product_name"].replace("'", "\\'")))
if already_exists:
substitute_id = already_exists[0][0]
else: # if the substitute doesn't exist create it
headers = ["product_name", "category", "nutrition_grades", "stores"]
substitut["category"] = product[2]
data = []
for header in headers:
try:
data.append("'" + substitut[header].replace("'", "\\'") + "'")
except KeyError:
data.append("NULL")
except AttributeError: # if it's an int ( = the id of the category )
data.append(str(substitut["category"]))
substitute_id = self.database.insert("substitute", headers, data)
# update the aliment with the substitute's id
self.database.update("aliment", ["substitute_id=" + str(substitute_id)], ["id=" + str(product[0])])
def search_substitute(self, product: list, category: list) -> dict:
"""
Uses the search API to retrieve a more healthy substitute to a product
:param product: the product we want to search a substitute for
:param category: the category of the product
:return: the found substitute or an empty dict
"""
substitut = None
search_url = "https://fr.openfoodfacts.org/cgi/search.pl"
for i in range(0, 5):
# get ascii value of A and add the current index to it so we can get the next letters
nutrition_grade = chr(ord("A") + i)
if ord(nutrition_grade) >= ord(product[3]): # if the nutrigrade is the same as the product's, break
print("Nous n'avons pas pu trouver de substitut plus sain à ce produit")
break
params = Dialogue.generate_search_params(category[1], nutrition_grade)
r = requests.get(search_url + params).json() # get the search url with the generated search params
if r["count"]:
substitut = r["products"][0]
print("\nSubstitut:")
print("Nom:", substitut["product_name"])
try:
print("Grade nutritionnel:", substitut["nutrition_grades"])
except KeyError:
pass
try:
print("Ou l'acheter:", substitut["stores"] + "\n")
except KeyError:
pass
break
if substitut:
substitut["category"] = category[1]
return substitut
return {}
def show_substitutes(self) -> None:
"""
Show all the aliments that have a substitute saved in the database
:return: None
"""
aliments = self.database.select("aliment", "substitute_id is not null")
while True:
for index, aliment in enumerate(aliments):
print(str(index) + ". " + aliment[1])
print("-1. Revenir au menu principal")
print("\x1b[6;32;40mChoisissez une option en écrivant le chiffre correspondant et "
"appuyez sur entrée:\x1b[0m")
try:
choice = int(input())
clear()
if choice == -1:
self.main_menu()
if 0 <= choice < len(aliments):
self.show_substitute_infos(aliments[choice])
else:
print("\x1b[6;31;40mChoix inconnu\x1b[0m")
except ValueError: # int(input()) throws a ValueError
clear()
print("\x1b[6;31;40mVeuillez entrer un nombre entier svp\x1b[0m")
def show_substitute_infos(self, aliment) -> None:
"""
Show the infos on a saved substitute
:param aliment: the aliment chosen by the user
:return: None
"""
substitute = self.database.select("substitute", "id=" + str(aliment[-1]))[0]
category = self.database.select("category", "id=" + str(substitute[2]))
print("Substitut à {}:".format(aliment[1]))
print("nom:", substitute[1])
print("categorie:", category[0][1])
print("grade nutritionnel:", substitute[3])
print("Magasins:", substitute[4])
print("\nAppuyez sur entrée pour revenir au menu principal")
input()
self.main_menu()
| adrien914/P5_Utilisez_les_donnees_publiques_de_OpenFoodFacts | utils/Dialogue.py | Dialogue.py | py | 11,416 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.Database.Database",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.OpenApi.OpenApi",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.terminal.clear",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ut... |
1323528673 | import pathlib
import tarfile
import datetime
import argparse
import json
class ConfigParser():
"""Configuration file parser."""
def __init__(self, config_file_name: str):
"""Intialization."""
self._config_file = config_file_name
self._config = dict()
def parse(self) -> bool:
"""Parse the given config file."""
config_file_path = pathlib.Path(self._config_file)
if not config_file_path.exists():
return False
with open(str(config_file_path), "r") as fhandle:
self._config = json.load(fhandle)
return True
def get_config(self, config_name: str):
"""Given a config name return it's value."""
return self._config[config_name]
def create_backup(config_file: str) -> bool:
"""Create a backup with inputs from config file."""
# Create config parser instance
config_parser = ConfigParser(config_file)
# Parse the config file
if not config_parser.parse():
print("Error parsing configuration")
return False
# 1. Generate backup archive file name
# Determine archive extension
extension = config_parser.get_config("archive_type")
# zip not supported
if extension == "zip":
print("zip archiving is not supported. Update the configuration to use tar.*")
return False
# Archive open mode is determined by extension
mode = "w:{}".format(extension)
extension = "tar.{}".format(extension)
# Current time in HourMinSeconds_YearMonthDay format
time_now = datetime.datetime.now().strftime("%H%M%S_%Y%m%d")
# Get file name prefix
prefix = config_parser.get_config("archive_prefix")
# Finally assemble the archive file name
filename = "{}_{}.{}".format(prefix, time_now, extension)
# Create destination dir if does not exist
destdir = pathlib.Path(config_parser.get_config("archive_destination"))
if not destdir.exists():
destdir.mkdir()
output_file = pathlib.Path(destdir, filename)
# 2. Create the archive
# Create the backup archive file on disk.
with tarfile.open(str(output_file), mode=mode) as backup:
# 3. Go through the list of items which need to be backed up
for item in config_parser.get_config("items"):
item_path = pathlib.Path(item)
# Add it to the archive if the file exists
if item_path.exists():
backup.add(str(item_path))
print("{} added to {}".format(str(item_path), output_file))
else:
print("{} does not exist".format(str(item_path)))
return True
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="Home made backup solution")
parser.add_argument(
"-i", "--config", dest="config_file", required=True, help="Path to configuration file")
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
create_backup(options.config_file)
| sandeepbhat/home-made-backup | hmb.py | hmb.py | py | 3,031 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
31584008482 | """
Class for weather data.
"""
from json import loads
from requests import get
from .location import Location
class Weather:
"""Class for weather data."""
def __init__(self, measure, location: Location, apikey=None):
self.apikey = apikey
self.measure = measure
self.location = location
self.refresh()
def refresh(self):
"""Refetch data from API."""
if self.apikey:
self.url = "https://api.openweathermap.org/data/2.5/weather?" \
"q={}&units={}&appid={}"
self.response = get(self.url.format(
self.location.city,
self.measure,
self.apikey
)).text
else:
self.url = "http://www.7timer.info/bin/astro.php?lon={}&lat={}" \
"&unit={}&output=json&tzshift=0"
self.response = get(self.url.format(
self.location.longitude,
self.location.latitude,
self.measure
)).text
self.parsed = loads(self.response)
@property
def temperature(self):
"""Get temperature."""
if self.apikey:
return self.parsed["main"]["temp"]
return self.parsed["dataseries"][0]["temp2m"]
@property
def icon_code(self):
"""Get icon code (can be None)"""
if self.apikey:
return self.parsed["weather"][0]["icon"]
return None
| PiSmartTV/PiTV | PiTV/weather.py | weather.py | py | 1,454 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "location.Location",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_nu... |
15162365309 | import numpy as np
import pygplates
from scipy import spatial
def marsaglias_method(N):
## Marsaglia's method
dim = 3
norm = np.random.normal
normal_deviates = norm(size=(dim, N))
radius = np.sqrt((normal_deviates**2).sum(axis=0))
points = normal_deviates/radius
return points
def random_points_on_sphere(N):
# function to call Marsaglia's method and return Long/
# Lat arrays
points = marsaglias_method(N)
Long=[]
Lat=[]
for xyz in points.T:
LL = pygplates.PointOnSphere((xyz))
Lat.append(LL.to_lat_lon()[0])
Long.append(LL.to_lat_lon()[1])
return np.array(Long), np.array(Lat)
def random_points_feature(N,filename=None):
# function to call Marsaglia's method and return
# feature collection or save to file
points = marsaglias_method(N)
#multipoint = pygplates.MultiPointOnSphere((points.T))
multipoint_feature = pygplates.Feature()
multipoint_feature.set_geometry(pygplates.MultiPointOnSphere((points.T)))
multipoint_feature.set_name('Random Points from Marsaglia''s method')
multipoint_feature_collection = pygplates.FeatureCollection(multipoint_feature)
if filename is not None:
multipoint_feature_collection.write(filename)
else:
return multipoint_feature_collection
def rtp2xyz(r, theta, phi):
# if only one value, shape will be empty, hence the next if statement
if r.size==1:
rdim=1
else:
rdim = r.shape[0]
rst = r * np.sin(theta)
xout = np.zeros((rdim,3))
xout[:,0] = rst * np.cos(phi) # x
xout[:,1] = rst * np.sin(phi) # y
xout[:,2] = r * np.cos(theta) # z
return xout
def create_tree_for_spherical_data(inputLats, inputLons, inputVals, n=16):
ithetas = np.radians(90-inputLats)
iphis = np.radians(inputLons)
irs = np.ones(np.shape(ithetas))
nodes = []
ixyzs=rtp2xyz(irs.ravel(), ithetas.ravel(), iphis.ravel())
tree = spatial.cKDTree(ixyzs, n)
return tree
def sampleOnSphere(inputLats, inputLons, inputVals, othetas, ophis, tree=None, n=16):
if (tree is None):
tree = create_tree_for_spherical_data(inputLats, inputLons, inputVals)
othetas = np.radians(90-othetas)
ophis = np.radians(ophis)
oxyzs=rtp2xyz(np.ones(np.shape(othetas)), othetas, ophis)
d,l = tree.query(oxyzs)
return d,l
| atom-model/ATOM | reconstruction/sphere_tools.py | sphere_tools.py | py | 2,408 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygplates.PointOnSphere",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
4200238882 | import itertools
import json
import math
import random
import statistics
from collections import defaultdict
from html.parser import HTMLParser
import boto3
import requests
from rating import RatingSystem, ContestType
old_sponsored_contests = {
"code-festival-2014-exhibition",
"code-festival-2014-final",
"code-festival-2014-morning-easy",
"code-festival-2014-morning-hard",
"code-festival-2014-morning-middle",
"code-festival-2014-quala",
"code-festival-2014-qualb",
"code-festival-2015-exhibition",
"code-festival-2015-morning-easy",
"code-festival-2015-morning-hard",
"code-festival-2015-morning-middle",
"code-festival-2015-quala",
"code-festival-2015-qualb",
"code-formula-2014-final",
"code-formula-2014-quala",
"code-formula-2014-qualb",
"digitalarts2012",
"discovery2016-final",
"discovery2016-qual",
"donuts-2015",
"dwango2015-finals",
"dwango2015-prelims",
"dwango2016-finals",
"dwango2016-prelims",
"indeednow-quala",
"indeednow-qualb",
"mujin-pc-2016",
"tenka1-2012-final",
"tenka1-2012-qualA",
"tenka1-2012-qualB",
"tenka1-2012-qualC",
"tenka1-2013-final",
"tenka1-2013-quala",
"tenka1-2013-qualb",
"tenka1-2014-final",
"tenka1-2014-quala",
"tenka1-2014-qualb",
"tenka1-2015-final",
"tenka1-2015-quala",
"tenka1-2015-qualb",
}
prohibited_problem_ids = {
"codefestival_2016_final_j",
"discovery_2016_final_e",
"arc047_d",
"arc022_4",
"tenka1_2013_qualB_d",
}
class AtCoderCSRFExtractor(HTMLParser):
def __init__(self):
super(AtCoderCSRFExtractor, self).__init__()
self.csrf = None
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == "input" and attrs.get("name") == "csrf_token":
self.csrf = attrs["value"]
def extract(self, html):
self.feed(html)
if self.csrf is not None:
return self.csrf
else:
raise ValueError("Failed to extract CSRF token")
def single_regression(x, y):
n = len(x)
x_sum = sum(x)
y_sum = sum(y)
xy_sum = sum(x * y for x, y in zip(x, y))
sqx_sum = sum(x ** 2 for x in x)
slope = (n * xy_sum - x_sum * y_sum) / (n * sqx_sum - x_sum ** 2)
intercept = (sqx_sum * y_sum - xy_sum * x_sum) / (n * sqx_sum - x_sum ** 2)
return slope, intercept
def safe_log(x):
return math.log(max(x, 10 ** -100))
def safe_sigmoid(x):
return 1.0 / (1.0 + math.exp(min(-x, 750)))
def _fit_1plm_binary_search(xs, positive_count):
discrimination = math.log(6.0) / 400.0
lb, ub = -10000, 10000
accepts = positive_count
while ub - lb > 1:
m = (ub + lb) // 2
expected_accepts = 0
for x in xs:
expected_accepts += 1.0 / (1.0 + (6.0 ** ((m - x) / 400.0)))
if expected_accepts < accepts:
ub = m
else:
lb = m
difficulty = lb
return difficulty, discrimination
def fit_2plm_irt(xs, ys):
return _fit_1plm_binary_search(xs, sum(ys))
def frange(start: float, end: float, step: float):
v = start
while (start - end) * (v - end) > 0:
yield v
v += step
def fit_3plm_irt(xs, ys):
# grid search over retreat_proba
accepts = sum(ys)
iterations = []
for retreat_proba in frange(0.0, 0.5, 0.025):
participate_proba = 1 - retreat_proba
difficulty, discrimination = _fit_1plm_binary_search(
xs, accepts / participate_proba
)
logl = 0.0
for x, y in zip(xs, ys):
p = participate_proba * safe_sigmoid(discrimination * (x - difficulty))
logl += safe_log(p if y == 1.0 else (1 - p))
iterations.append((logl, difficulty, discrimination, retreat_proba))
return max(iterations)[1:4]
def evaluate_2plm_irt(xs, ys, difficulty, discrimination):
n = len(xs)
if difficulty is None or discrimination is None:
logl = n * math.log(0.5)
else:
logl = 0
for x, y in zip(xs, ys):
p = safe_sigmoid(discrimination * (x - difficulty))
logl += safe_log(p if y == 1.0 else (1 - p))
return logl, n
def evaluate_3plm_irt(xs, ys, difficulty, discrimination, retreateds):
    """Evaluate the 2PL likelihood restricted to users who did not retreat."""
    kept = [
        (ability, accepted)
        for ability, accepted, retreated in zip(xs, ys, retreateds)
        if not retreated
    ]
    remaining_xs = [pair[0] for pair in kept]
    remaining_ys = [pair[1] for pair in kept]
    return evaluate_2plm_irt(remaining_xs, remaining_ys, difficulty, discrimination)
def inverse_adjust_rating(rating, prev_contests):
    """Recover the raw (internal) rating from a displayed AtCoder rating.

    Non-positive ratings have no inverse and yield NaN.
    """
    if rating <= 0:
        return float("nan")
    # Undo the low-rating compression applied below 400.
    if rating <= 400:
        rating = 400 * (1 - math.log(400 / rating))
    # Undo the few-contests penalty; the correction shrinks toward zero
    # as the number of previous contests grows.
    decay_factor = (
        math.sqrt(1 - (0.9 ** (2 * prev_contests))) / (1 - 0.9 ** prev_contests) - 1
    )
    correction = decay_factor / (math.sqrt(19) - 1) * 1200
    return rating + correction
def is_very_easy_problem(task_screen_name):
    """True for the A/B problems of ABC rounds from abc042 onward."""
    if not task_screen_name.startswith("abc"):
        return False
    if task_screen_name[-1] not in ("a", "b"):
        return False
    # Contest number sits in positions 3-5 of names like "abc123_a".
    return int(task_screen_name[3:6]) >= 42
def is_agc_easiest_problem(task_screen_name):
    """True for the first (A) problem of an AGC round, e.g. "agc001_a"."""
    name = task_screen_name
    return name[:3] == "agc" and name[-2:] == "_a"
def fit_problem_model(user_results, task_screen_name):
    """Fit the solve-time and difficulty models of a single problem.

    Args:
        user_results: per-user result dicts for one contest; keys are
            "<task>.score", "<task>.time", "<task>.elapsed", "<task>.ac",
            plus "rating", "prev_contests", "is_rated", "retreated".
            The "<task>.ac" entries are mutated in place (cleared for
            partial scores).
        task_screen_name: problem identifier, e.g. "abc042_a".

    Returns:
        Model dict that may contain a log-linear time model ("slope",
        "intercept", "variance") and/or an IRT difficulty model
        ("difficulty", "discrimination", "irt_loglikelihood", "irt_users").
        Empty dict when nobody solved the problem.
    """
    # Defensive local import: `statistics` is referenced below but is not
    # among this file's visible top-level imports, which would make the
    # time-model branch raise NameError.
    import statistics

    max_score = max(
        task_result[task_screen_name + ".score"] for task_result in user_results
    )
    if max_score == 0.0:
        print(
            f"The problem {task_screen_name} is not solved by any competitors. skipping."
        )
        return {}
    # Only a full score counts as an accept: clear the AC flag of users
    # who scored anything less than the maximum observed score.
    for task_result in user_results:
        task_result[task_screen_name + ".ac"] *= float(
            task_result[task_screen_name + ".score"] == max_score
        )
    elapsed = [
        task_result[task_screen_name + ".elapsed"] for task_result in user_results
    ]
    first_ac = min(elapsed)
    # Only users with an official rating and at least one prior contest
    # carry usable rating information.
    recurring_users = [
        task_result
        for task_result in user_results
        if task_result["prev_contests"] > 0 and task_result["rating"] > 0
    ]
    for task_result in recurring_users:
        task_result["raw_rating"] = inverse_adjust_rating(
            task_result["rating"], task_result["prev_contests"]
        )
    # Time model uses accepted users who were not suspiciously fast
    # (faster than half the first AC suggests pre-knowledge or luck).
    time_model_sample_users = [
        task_result
        for task_result in recurring_users
        if task_result[task_screen_name + ".time"] > first_ac / 2
        and task_result[task_screen_name + ".ac"] == 1.0
    ]
    model = {}
    if len(time_model_sample_users) < 5:
        print(
            f"{task_screen_name}: insufficient data ({len(time_model_sample_users)} users). skip estimating time model."
        )
    else:
        raw_ratings = [
            task_result["raw_rating"] for task_result in time_model_sample_users
        ]
        time_secs = [
            task_result[task_screen_name + ".time"] / (10 ** 9)  # ns -> s
            for task_result in time_model_sample_users
        ]
        time_logs = [math.log(t) for t in time_secs]
        # log(time) is modeled as a linear function of the raw rating.
        slope, intercept = single_regression(raw_ratings, time_logs)
        print(
            f"{task_screen_name}: time [sec] = exp({slope} * raw_rating + {intercept})"
        )
        if slope > 0:
            # Stronger competitors should not be slower; discard the fit.
            print("slope is positive. ignoring unreliable estimation.")
        else:
            model["slope"] = slope
            model["intercept"] = intercept
            model["variance"] = statistics.variance(
                [
                    slope * rating + intercept - time_log
                    for rating, time_log in zip(raw_ratings, time_logs)
                ]
            )
    if is_very_easy_problem(task_screen_name):
        # ad-hoc. excluding high-rating competitors from abc-a/abc-b dataset. They often skip these problems.
        difficulty_dataset = [
            task_result
            for task_result in recurring_users
            if task_result["is_rated"] and not task_result["retreated"]
        ]
    elif is_agc_easiest_problem(task_screen_name):
        # ad-hoc. AGC-A usually have missing data for negative samples.
        difficulty_dataset = recurring_users
    else:
        # normal. using all participants with at least one submissions
        difficulty_dataset = [
            task_result
            for task_result in recurring_users
            if not task_result["retreated"]
        ]
    if len(difficulty_dataset) < 40:
        print(
            f"{task_screen_name}: insufficient data ({len(difficulty_dataset)} users). skip estimating difficulty model."
        )
    elif all(
        task_result[task_screen_name + ".ac"] for task_result in difficulty_dataset
    ):
        print("all contestants got AC. skip estimating difficulty model.")
    elif not any(
        task_result[task_screen_name + ".ac"] for task_result in difficulty_dataset
    ):
        print("no contestants got AC. skip estimating difficulty model.")
    else:
        d_raw_ratings = [
            task_result["raw_rating"] for task_result in difficulty_dataset
        ]
        d_accepteds = [
            task_result[task_screen_name + ".ac"] for task_result in difficulty_dataset
        ]
        # AGC-A gets the 3PL model (explicit retreat probability); everything
        # else uses the plain 2PL fit.
        if is_agc_easiest_problem(task_screen_name):
            difficulty, discrimination, retreat_proba = fit_3plm_irt(
                d_raw_ratings, d_accepteds
            )
        else:
            difficulty, discrimination = fit_2plm_irt(d_raw_ratings, d_accepteds)
        print(f"difficulty: {difficulty}, discrimination: {discrimination}")
        if discrimination < 0:
            print("discrimination is negative. ignoring unreliable estimation.")
        elif difficulty > 6000:
            print("extreme difficulty. rejecting this estimation.")
        else:
            model["difficulty"] = difficulty
            model["discrimination"] = discrimination
            if is_agc_easiest_problem(task_screen_name):
                # evaluate difficulty and discrimination using 2plm data.
                d_retreateds = [
                    task_result["retreated"] for task_result in difficulty_dataset
                ]
                loglikelihood, users = evaluate_3plm_irt(
                    d_raw_ratings, d_accepteds, difficulty, discrimination, d_retreateds
                )
            else:
                loglikelihood, users = evaluate_2plm_irt(
                    d_raw_ratings, d_accepteds, difficulty, discrimination
                )
            model["irt_loglikelihood"] = loglikelihood
            model["irt_users"] = users
    return model
def fetch_dataset_for_contest(
    contest_name, contest_type, existing_problem, session, skip_if_no_user_has_rating
):
    """Download one contest's standings and reshape them into per-problem rows.

    Args:
        contest_name: AtCoder contest id, e.g. "abc042".
        contest_type: object with an `is_rated` attribute.
        existing_problem: problem ids whose models already exist (skipped).
        session: authenticated requests-like session.
        skip_if_no_user_has_rating: when True, contests where every
            participant has OldRating == 0 are ignored.

    Returns:
        (user_results_by_problem, standings) where standings is the list of
        non-retreated participants ordered by rank.
    """
    try:
        results = session.get(
            f"https://atcoder.jp/contests/{contest_name}/standings/json"
        ).json()
    except json.JSONDecodeError as e:
        # Fix: the original referenced the undefined name `contest_id` here,
        # which turned a decode failure into a NameError.
        print(f"Failed to decode standings of {contest_name}: {e}")
        return {}, []
    task_names = {
        task["TaskScreenName"]: task["TaskName"] for task in results["TaskInfo"]
    }
    user_results = []
    standings_data = results["StandingsData"]
    standings_data.sort(key=lambda result_row: result_row["Rank"])
    standings = []
    for result_row in standings_data:
        total_submissions = result_row["TotalResult"]["Count"]
        # A user who never submitted anything "retreated" from the contest.
        retreated = total_submissions == 0
        is_rated = result_row["IsRated"]
        rating = result_row["OldRating"]
        prev_contests = result_row["Competitions"]
        user_name = result_row["UserScreenName"]
        if not retreated and (is_rated or not contest_type.is_rated):
            standings.append(user_name)
        user_row = {
            "is_rated": is_rated,
            "rating": rating,
            "prev_contests": prev_contests,
            "user_name": user_name,
            "retreated": retreated,
        }
        # Default per-task values; overwritten below for attempted tasks.
        for task_name in task_names:
            user_row[task_name + ".score"] = 0.0
            user_row[task_name + ".time"] = -1.0
            user_row[task_name + ".elapsed"] = 10 ** 200  # "never solved"
            user_row[task_name + ".ac"] = 0.0
        prev_accepted_times = [0] + [
            task_result["Elapsed"]
            for task_result in result_row["TaskResults"].values()
            if task_result["Score"] > 0
        ]
        user_row["last_ac"] = max(prev_accepted_times)
        for task_screen_name, task_result in result_row["TaskResults"].items():
            user_row[task_screen_name + ".score"] = task_result["Score"]
            if task_result["Score"] > 0:
                elapsed = task_result["Elapsed"]
                # 5-minute penalty per wrong submission, in nanoseconds.
                penalty = task_result["Penalty"] * 5 * 60 * (10 ** 9)
                user_row[task_screen_name + ".elapsed"] = elapsed
                # Net time spent on this task: since the previous accept.
                user_row[task_screen_name + ".time"] = (
                    penalty
                    + elapsed
                    - max(t for t in prev_accepted_times if t < elapsed)
                )
                user_row[task_screen_name + ".ac"] = float(task_result["Status"] == 1)
        user_results.append(user_row)
    if len(user_results) == 0 or (
        all(user_row["rating"] == 0 for user_row in user_results)
        and skip_if_no_user_has_rating
    ):
        print(
            f"There are no participants/submissions for contest {contest_name}. Ignoring."
        )
        return {}, standings
    user_results_by_problem = defaultdict(list)
    for task_screen_name in task_names.keys():
        if task_screen_name in existing_problem:
            print(f"The problem model for {task_screen_name} already exists. skipping.")
            continue
        user_results_by_problem[task_screen_name] += user_results
    return user_results_by_problem, standings
def get_current_models():
    """Download the currently published problem models; {} on any failure.

    Best-effort by design: a missing model file simply means everything
    gets re-estimated.
    """
    url = "https://kenkoooo.com/atcoder/resources/problem-models.json"
    try:
        response = requests.get(url)
        return response.json()
    except Exception as e:
        print(f"Failed to fetch existing models.\n{e}")
        return {}
def infer_contest_type(contest) -> ContestType:
    """Classify a contest by its rating-change range and, failing that,
    by its contest id prefix."""
    rate_change = contest["rate_change"]
    if rate_change in ("All", "1200 ~ ", "2000 ~ "):
        return ContestType.AGC
    if rate_change in (" ~ 2799", "1200 ~ 2799"):
        return ContestType.NEW_ARC
    if rate_change == " ~ 1999":
        return ContestType.NEW_ABC
    if rate_change == " ~ 1199":
        return ContestType.OLD_ABC
    # rate_change == "-": fall back to id-based classification.
    contest_id = contest["id"]
    if contest_id.startswith("arc"):
        return ContestType.OLD_UNRATED_ARC
    if contest_id.startswith("abc"):
        return ContestType.OLD_UNRATED_ABC
    if contest_id in old_sponsored_contests:
        return ContestType.OLD_UNRATED_ARC
    return ContestType.UNRATED
def all_rated_contests():
    """Return (contest_id, contest_type) pairs for every rated contest,
    ordered by start time."""
    contests = requests.get(
        "https://kenkoooo.com/atcoder/resources/contests.json"
    ).json()
    ordered = sorted(contests, key=lambda contest: contest["start_epoch_second"])
    rated = []
    for contest in ordered:
        contest_type = infer_contest_type(contest)
        if contest_type != ContestType.UNRATED:
            rated.append((contest["id"], contest_type))
    return rated
def all_contest_problems():
    """Map each contest id to the set of its problem ids.

    Marathon-like problems (prohibited_problem_ids) are excluded.

    Fix: the original grouped with itertools.groupby without sorting by
    contest_id first.  groupby only merges *consecutive* items, so a
    non-sorted problems.json produced multiple groups per contest and the
    dict comprehension silently kept only the last one.  A defaultdict
    grouping is order-independent.
    """
    problems = requests.get(
        "https://kenkoooo.com/atcoder/resources/problems.json"
    ).json()
    # exclude marathon-like problems
    problems = [
        problem for problem in problems if problem["id"] not in prohibited_problem_ids
    ]
    contest_to_problems = defaultdict(set)
    for problem in problems:
        contest_to_problems[problem["contest_id"]].add(problem["id"])
    return dict(contest_to_problems)
def run(target, overwrite, session):
    """Estimate problem models for the requested contests.

    Args:
        target: list of contest ids to process, or None for all rated contests.
        overwrite: when True, existing models are re-estimated.
        session: authenticated session used to fetch standings.

    Returns:
        dict mapping problem id to its model dict (existing models are kept
        and updated in place).
    """
    # Ratings and competition counts are re-simulated from scratch only when
    # every contest is processed with overwrite enabled.
    recompute_history = target is None and overwrite
    if target is None:
        target = all_rated_contests()
    else:
        all_contests = all_rated_contests()
        # Keep chronological order while filtering to the requested ids.
        target = [contest for contest in all_contests if contest[0] in target]
    current_models = get_current_models()
    existing_problems = current_models.keys() if not overwrite else set()
    contest_problems = all_contest_problems()
    print(f"Fetching dataset from {len(target)} contests.")
    dataset_by_problem = defaultdict(list)
    rating_system = RatingSystem()
    competition_history_by_id = defaultdict(set)
    last_nonzero_rating = defaultdict(int)
    experimental_problems = set()
    for contest, contest_type in target:
        problems = set(contest_problems.get(contest, []))
        # Skip contests whose problems all have models already.
        if not overwrite and existing_problems & problems == problems:
            print(
                f"All problem models of contest {contest} are already estimated. specify overwrite = True if you want to update the model."
            )
            continue
        is_old_contest = not contest_type.is_rated
        user_results_by_problem, standings = fetch_dataset_for_contest(
            contest, contest_type, existing_problems, session, not recompute_history
        )
        for problem, data_points in user_results_by_problem.items():
            if recompute_history:
                # overwrite competition history, and rating if necessary
                if is_old_contest:
                    # contests before official rating system. using the emulated rating
                    experimental_problems.add(problem)
                    for data_point in data_points:
                        prev_contests = rating_system.competition_count(
                            data_point["user_name"]
                        )
                        data_point["prev_contests"] = prev_contests
                        data_point["rating"] = (
                            rating_system.calc_rating(data_point["user_name"])
                            if prev_contests > 0
                            else 0
                        )
                else:
                    # contests after official rating system. using the official rating
                    if contest_type.is_rated:
                        for participant in standings:
                            competition_history_by_id[participant].add(contest)
                    for data_point in data_points:
                        user_name = data_point["user_name"]
                        if data_point["rating"] == 0:
                            # AtCoder returns 0 for OldRating if the user has no submission in the contest.
                            # It is not ideal when these users is also a part of dataset (e.g. AGC-A workaround)
                            data_point["rating"] = last_nonzero_rating.get(user_name, 0)
                        else:
                            last_nonzero_rating[user_name] = data_point["rating"]
                        data_point["prev_contests"] = (
                            len(competition_history_by_id[user_name]) - 1
                        )
            dataset_by_problem[problem] += data_points
        if recompute_history and is_old_contest:
            # Feed this contest's standings into the emulated rating system
            # before moving on to the next (chronologically later) contest.
            print(f"Updating user rating with the result of {contest}")
            rating_system.update(standings, contest_type)
    print(f"Estimating time models of {len(dataset_by_problem)} problems.")
    results = current_models
    for problem, data_points in dataset_by_problem.items():
        model = fit_problem_model(data_points, problem)
        model["is_experimental"] = problem in experimental_problems
        results[problem] = model
    return results
def login(user_id, password):
    """Open an authenticated AtCoder session for the given credentials.

    Fetches the login page first to obtain a CSRF token, then posts the
    login form.  Raises on a non-200 response.
    """
    session = requests.Session()
    login_page = session.get("https://atcoder.jp/login")
    csrf_token = AtCoderCSRFExtractor().extract(login_page.text)
    payload = {"username": user_id, "password": password, "csrf_token": csrf_token}
    response = session.post("https://atcoder.jp/login", data=payload)
    if response.status_code != 200:
        raise Exception(str(response))
    return session
def handler(event, context):
    """AWS Lambda entry point: recompute problem models and upload to S3.

    Expected ``event`` keys: "target" (optional contest-id list),
    "overwrite" (bool), "bucket", "object_key", and the required
    "atcoder_user"/"atcoder_pass" credentials.
    """
    target = event.get("target")
    overwrite = event.get("overwrite", False)
    bucket = event.get("bucket", "kenkoooo.com")
    object_key = event.get("object_key", "resources/problem-models.json")
    atcoder_user = event.get("atcoder_user")
    atcoder_pass = event.get("atcoder_pass")
    if atcoder_user is None or atcoder_pass is None:
        raise ValueError("AtCoder credential is required.")
    print(f"Using AtCoder account {atcoder_user} to fetch standings data.")
    session = login(atcoder_user, atcoder_pass)
    results = run(target, overwrite, session)
    print("Estimation completed. Saving results in S3")
    s3 = boto3.resource("s3")
    s3.Object(bucket, object_key).put(
        Body=json.dumps(results), ContentType="application/json"
| kenkoooo/AtCoderProblems | lambda-functions/time-estimator/function.py | function.py | py | 20,781 | python | en | code | 1,291 | github-code | 1 | [
{
"api_name": "html.parser.HTMLParser",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "html.parser",
"line_number": 79,
"usage_type": "argument"
},
{
"api_name": "math.log",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_... |
15685183129 | import csv
from selenium import webdriver
import pandas
from pandas import DataFrame
import requests
# Modern Selenium 4 imports: find_element_by_* helpers were removed in
# Selenium 4.3 and executable_path in 4.10.
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service

# Page to scrape and the local chromedriver binary.  Forward slashes avoid
# the invalid "\s" escape the original string contained.
test_url = "https://downtowndallas.com/experience/stay/"
chrome_driver_path = "E:/softwares/chromedriver.exe"
driver = webdriver.Chrome(service=Service(chrome_driver_path))
driver.get(test_url)

# Click the page body (dismisses any overlay), then return to the listing.
driver.find_element(By.XPATH, "/html/body").click()
driver.back()

# Open the third hotel card's detail page.
driver.find_element(By.XPATH, "/html/body/main/div/section[2]/div[1]/div[3]/a").click()

# Scrape the hotel's basic details from the detail page.
place_name = driver.find_element(By.CSS_SELECTOR, ".place-header h1").text
print(place_name)
address = driver.find_element(By.XPATH, "/html/body/main/article/div/div[1]/div[1]/a").text
print(address)
phone = driver.find_element(By.XPATH, "/html/body/main/article/div/div[1]/div[2]/div/a").text
print(phone)
area = driver.find_element(By.XPATH, "/html/body/main/article/div/div[1]/div[3]/a").text
print(area)

image = driver.find_element(By.CSS_SELECTOR, ".place-info-image img")
image_src = image.get_attribute("src")
print(image_src)

# Navigate to the image URL and store a screenshot of it.
driver.get(image_src)
driver.save_screenshot("AC-Marriott.png")

hotel_dict = [
    {
        'Name': place_name,
        'Address': address,
        'Phone': phone,
        'Area': area,
        'Image-url': image_src,
    }
]
print(hotel_dict)

# Persist the single scraped record as CSV.
data = pandas.DataFrame(hotel_dict)
data.to_csv('record.csv')
| PRANJALI1901/assesment | main.py | main.py | py | 1,349 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
}
] |
27587876211 | from typing import Dict,List,Tuple,Union,NamedTuple,Optional
from typing_extensions import Literal
import json,re
import daa_luigi
from common_functions import ExecutionFolder,raise_exception,as_inputs
from copy import copy
import pandas as pd
from pathlib import Path
import sissopp
from sissopp.py_interface import get_fs_solver
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Maps a cluster representative (prototype/archetype material name) to the
# names of all materials assigned to that cluster.
Repr2Members = Dict[str,List[str]]
# Downstream SISSO execution mode: fit single-task on representatives only,
# or multi-task on the whole dataset with clusters as tasks.
ExecutionType = Literal["singletask_on_representatives", "multitask_on_all"]
class PrimarySpaceParams(NamedTuple):
    """Selection of the primary feature space and its train/test split.

    Fields:
        data_file: path to a CSV indexed by a 'material' column.
        property_key: substring identifying the target-property column.
        leave_out_inds: test-set selector.  NOTE(review): deterministic()
            subtracts these values from the material-name index, so the
            *input* appears to be material names (despite the List[int]
            annotation), while the *returned* instance carries positional
            indices — confirm with callers.
        leave_out_frac: fraction of rows randomly held out when
            leave_out_inds is empty; 0.0 keeps everything in the work set.

    NOTE(review): the mutable [] default on a NamedTuple field is shared
    between instances — safe only as long as it is never mutated in place.
    """
    data_file:str
    property_key:str = 'lat'
    leave_out_inds: List[int] = []
    leave_out_frac: Optional[float] = 0.25
    def deterministic(self) -> "PrimarySpaceParams":
        """Resolve any random split into fixed positional leave-out indices,
        so repeated runs see the same test set."""
        testset_chosenby_index = (len(self.leave_out_inds) > 0)
        testset_chosen_randomly = not testset_chosenby_index and (0 <= self.leave_out_frac < 1)
        whole_set = pd.read_csv(self.data_file, index_col='material')
        # Chained conditional (evaluated lazily): explicit indices win, then
        # "keep everything", then a random split; otherwise a parameter error.
        work_set = whole_set.loc[set(whole_set.index) - set(self.leave_out_inds), :] if testset_chosenby_index \
            else whole_set if self.leave_out_frac == 0.0 \
            else train_test_split(whole_set, test_size=self.leave_out_frac)[0] if testset_chosen_randomly \
            else raise_exception(
            "leave_out_inds must be list of length > 0 and/or leave_out_frac must be float between 0 and 1")
        # Convert held-out material names to positional indices in the CSV order.
        leave_out_inds = [list(whole_set.index).index(mat)
                          for mat in list(set(whole_set.index) - set(work_set.index))]
        return PrimarySpaceParams(str(self.data_file),self.property_key,leave_out_inds,None)
class DerivedSpaceParams(NamedTuple):
    """SISSO descriptor-construction parameters plus the train/test split.

    The first four fields mirror PrimarySpaceParams; the remaining fields
    are SISSO execution knobs (operator sets, calculation type, descriptor
    dimension, SIS subspace size, rung depth, residual counts, feature
    value bounds, and nonlinear-optimization settings) forwarded to sissopp.

    NOTE(review): mutable list defaults on NamedTuple fields are shared
    between instances — safe only while never mutated in place.
    """
    data_file: str
    property_key: str = 'lat'
    leave_out_inds: List[int] = []
    leave_out_frac: float = 0.25
    task_key:str=None
    opset:List[Literal['add', 'sub', 'mult', 'div', 'sq', 'cb', 'cbrt', 'sqrt']] = ['add', 'sub', 'mult', 'div', 'sq', 'cb', 'cbrt', 'sqrt']
    param_opset:List[Literal['add', 'sub', 'mult', 'div', 'sq', 'cb', 'cbrt', 'sqrt']] = ['add', 'sub', 'mult', 'div', 'sq', 'cb', 'cbrt', 'sqrt']
    calc_type:Literal["regression", "log regression", "classification"] = "regression"
    desc_dim:int = 3
    n_sis_select:int = 100
    max_rung:int = 2
    n_residual:int = 1
    n_models_store: int = 1
    n_rung_store:int =1
    n_rung_generate:int = 0
    min_abs_feat_val: float = 1e-05
    max_abs_feat_val:float = 100000000.0
    fix_intercept:bool = False
    max_feat_cross_correlation:float = 1.0
    nlopt_seed:int = 13
    global_param_opt:bool = False
    reparam_residual:bool = True
    def deterministic(self) -> "DerivedSpaceParams":
        """Resolve any random split into fixed positional leave-out indices
        (same logic as PrimarySpaceParams.deterministic)."""
        testset_chosenby_index = (len(self.leave_out_inds) > 0)
        testset_chosen_randomly = not testset_chosenby_index and (0 <= self.leave_out_frac < 1)
        whole_set = pd.read_csv(self.data_file, index_col='material')
        # Lazily-evaluated conditional chain; see PrimarySpaceParams.
        work_set = whole_set.loc[set(whole_set.index) - set(self.leave_out_inds), :] if testset_chosenby_index \
            else whole_set if self.leave_out_frac == 0.0 \
            else train_test_split(whole_set, test_size=self.leave_out_frac)[0] if testset_chosen_randomly \
            else raise_exception(
            "leave_out_inds must be list of length > 0 and/or leave_out_frac must be float between 0 and 1")
        leave_out_inds = [list(whole_set.index).index(mat)
                          for mat in list(set(whole_set.index) - set(work_set.index))]
        # Rebuild the tuple with positional indices and no random fraction.
        deterministic_repr = self._asdict()
        deterministic_repr.update({"data_file":str(self.data_file),
                                   "leave_out_inds":leave_out_inds, "leave_out_frac":None})
        return DerivedSpaceParams(**deterministic_repr)
    def get_primary_space_params(self)->PrimarySpaceParams:
        """Project these parameters down to the primary-space subset."""
        return PrimarySpaceParams(**{k:v for k,v in self._asdict().items()
                                     if k in ['data_file','property_key','leave_out_inds','leave_out_frac']})
# Default dataset location (absolute path on the author's machine).
data_csvpath = "/home/oehlers/Documents/masterthesis/02-data/csvs/cubic_perovskites.csv"
class MyKmeans():
    # K-means clustering of materials, either in the standardized primary
    # feature space or in a SISSO-derived (coefficient-streched) descriptor
    # space, producing a {cluster-center material: member materials} map.
    def __init__(self, space_params: Union[PrimarySpaceParams,DerivedSpaceParams],
                 n_clusters: int,
                 save_proto2members_at: Union[Path,str] = None,
                 interm_results_path: Union[Path,str] = None):
        """
        In the following, the Work Set will be defined as the dataset that remains after subtracting the Test Set.
        Cluster extraction is based on Work Set only; cluster assignment of Test Set materials is conducted afterwards.
        This function returns dictionary of Work Set cluster centers pointing to list of Work and Test Set cluster members
        space_params (short: space_params): dictionary returned by either
                                            PrimarySpaceParams or DerivedSpaceParams class,
                                            determines in which space kmeans will be applied
                                            (either the space spanned by standardized primary features without target property,
                                            or the space spanned by the derived features times respective fitting coefficients of sisso
                                            - in the latter case, all execution parameters for sisso are defined by the
                                            DerivedSpaceParams class)
        n_clusters: number of clusters
        save_proto2members_at: path to future file location
        interm_results_path: if set to path or str, folder it points to is created if non-existent and used
                             if set to None, temp folder is created in data_file folder and deleted after exe
        """
        self.space_params = space_params
        self.n_clusters = n_clusters
        self.save_proto2members_at = save_proto2members_at
        self.interm_results_path = interm_results_path
        # Clustering runs eagerly at construction time.
        self.proto2members = self._get_kmeans_center2members()
    def _train_and_test_streched_derived_feature_dfs(self, sisso, exe_folder: ExecutionFolder) -> Tuple[
            pd.DataFrame, pd.DataFrame]:
        """"sisso must be SISSOregressor as returned by 'feat_space,sisso = get_fs_solver
        (for some reason this cannot be indicated via typing)
        """
        # Extract derived space from SISSO results by reading train and testfile:
        # (no python binding was found for directly extracting derived space)
        streched_derived_feature_space = {}
        for train_or_test in ['train', 'test']:
            filepath = exe_folder.path.joinpath(train_or_test + "file")
            derivedspacepath = exe_folder.path.joinpath(train_or_test + "_derived_space.csv")
            # NOTE(review): `train_or_test == train_or_test` is always True —
            # presumably `'train' == train_or_test` was intended; confirm.
            sisso.models[-1][0].to_file(str(filepath), train_or_test == train_or_test)
            # Rewrite the SISSO output so the data table (starting at the
            # "# Sample ID" header) becomes a readable CSV.
            with open(filepath, "r") as file:
                with open(derivedspacepath, 'w') as derivedspace:
                    lines = file.readlines()
                    writeit = 0
                    for line in lines:
                        if line[:len("# Sample ID")] == "# Sample ID":
                            line = line.replace("# Sample ID", "material")
                            writeit = 1
                        if writeit == 1:
                            derivedspace.write(line)
            target_and_derived_features = pd.read_csv(derivedspacepath, sep=',', index_col=0)
            # First two columns hold target data; the rest are derived features.
            derived_features = target_and_derived_features.iloc[:, 2:]
            # Scale each derived feature by its fitted coefficient (bias excluded).
            strech_coefs = sisso.models[-1][0].coefs[0][:-1]
            streched_derived_features = derived_features.multiply(strech_coefs).values
            streched_derived_feature_space[train_or_test] = streched_derived_features
        return streched_derived_feature_space['train'], streched_derived_feature_space['test']
    def _get_kmeans_center2members(self) -> Repr2Members:
        # Cluster the work set, then assign test-set materials to the
        # nearest learned cluster; returns {center material: members}.
        space_params = self.space_params
        testset_chosenby_index = ( len(space_params.leave_out_inds)>0 and space_params.leave_out_frac is None )
        clustering_in_primary_space = isinstance(space_params,PrimarySpaceParams)
        clustering_in_derived_space = isinstance(space_params,DerivedSpaceParams)
        whole_set = pd.read_csv(space_params.data_file, index_col='material').astype(float)
        test_set_materials = [list(whole_set.index)[ind] for ind in space_params.leave_out_inds]
        test_set = whole_set.loc[test_set_materials, :]
        work_set = whole_set.loc[set(whole_set.index) - set(test_set_materials), :]
        if testset_chosenby_index and clustering_in_primary_space:
            # Get standardized primary space without target:
            target_property_col = [col for col in whole_set.columns
                                   if re.match(r"{} (...)".format(space_params.property_key), col)][0]
            work_primary_features = work_set.drop(target_property_col, axis=1).values
            test_primary_features = test_set.drop(target_property_col, axis=1).values
            standardized_work_primary_features = StandardScaler().fit_transform(work_primary_features)
            # NOTE(review): the test set is standardized with its own fit rather
            # than the work-set scaler — confirm this is intended.
            standardized_test_primary_features = StandardScaler().fit_transform(test_primary_features)
            # Use standardized primary space for Kmeans clustering:
            kmeans_results = KMeans(self.n_clusters).fit(standardized_work_primary_features)
            virtual_cluster_centers = kmeans_results.cluster_centers_
            # Snap each virtual centroid to the closest actual material.
            actual_cluster_center_inds = pairwise_distances_argmin_min(virtual_cluster_centers,
                                                                       standardized_work_primary_features)[0]
            test_labels = kmeans_results.predict(standardized_test_primary_features)
        elif testset_chosenby_index and clustering_in_derived_space:
            assert space_params.task_key is None, """This code applies to clustering in space derived by Single-Task SISSO only,
            for clustering in Multi-Task-SISSO space code has to be checked for necessary adaptations"""
            # Create execution folder:
            exe_folder = ExecutionFolder(permanent_location=self.interm_results_path,
                                         refers_to_data_file=space_params.data_file)
            # Execute SISSO:
            space_params_dict = space_params._asdict()
            inp = as_inputs(exe_folder.path.joinpath("derived_space_constr_params"), **space_params_dict)
            print(inp)
            inputs = sissopp.Inputs(inp)
            feature_space, sisso = get_fs_solver(inputs)
            sisso.fit()
            # Extract streched derived space:
            work_streched_derived_features, test_streched_derived_features \
                = self._train_and_test_streched_derived_feature_dfs(sisso, exe_folder)
            # Use streched derived space for Kmeans clustering:
            kmeans_results = KMeans(self.n_clusters).fit(work_streched_derived_features)
            virtual_cluster_centers = kmeans_results.cluster_centers_
            actual_cluster_center_inds = pairwise_distances_argmin_min(virtual_cluster_centers,
                                                                       work_streched_derived_features)[0]
            test_labels = kmeans_results.predict(test_streched_derived_features)
            # Remove execution folder if set to be temporal:
            exe_folder.delete_if_not_permanent()
        else:
            raise Exception("""space_construction_parameters must be either of PrimarySpaceParams or DerivedSpaceParams
            class""")
        # Determine cluster center and member material names:
        actual_cluster_center_materials = [list(work_set.index)[ind] for ind in actual_cluster_center_inds]
        work_set_with_tasks = copy(work_set).assign(task=kmeans_results.labels_)
        test_set_with_tasks = copy(test_set).assign(task=test_labels)
        whole_set_with_tasks = work_set_with_tasks.append(test_set_with_tasks)
        center2members = {}
        for task in set(whole_set_with_tasks['task']):
            members = list(whole_set_with_tasks[whole_set_with_tasks['task'] == task].index)
            # Each cluster contains exactly one snapped center by construction.
            center = list(set(actual_cluster_center_materials).intersection(set(members)))[0]
            center2members[center] = members
        if self.save_proto2members_at is not None:
            with open(self.save_proto2members_at, 'w') as jsonfile:
                json.dump(center2members, jsonfile)
        return center2members
class MyDeepAA():
    # Deep archetypal analysis: trains the daa_luigi network on the
    # standardized dataset and maps each material to its closest archetype.
    def __init__(self,space_params:Union[PrimarySpaceParams,DerivedSpaceParams],
                 at_loss_factor:float, target_loss_factor:float, recon_loss_factor:float, kl_loss_factor:float,
                 latent_dim:int,n_epochs:int,arche2members_path:Union[str,Path]=None):
        """Train the network and compute archetype weights eagerly.

        The four *_loss_factor arguments weight the network's loss terms;
        latent_dim/n_epochs configure the network; arche2members_path, if
        given, is where the archetype->members map is dumped as JSON.
        NOTE(review): _extract_weightdfs_and_arche2members opens this path
        unconditionally — passing None raises there; confirm intent.
        """
        self.space_params = space_params
        self.at_loss_factor = at_loss_factor
        self.target_loss_factor = target_loss_factor
        self.recon_loss_factor = recon_loss_factor
        self.kl_loss_factor = kl_loss_factor
        self.latent_dim = latent_dim
        self.n_epochs = n_epochs
        self.arche2members_path = arche2members_path
        self.nn = daa_luigi.build_network(latent_dim=latent_dim,epochs=n_epochs)
        As, arche2members = self._extract_weightdfs_and_arche2members()
        self.weight_dfs = As
        self.arche2members = arche2members
    def _get_stan_work_and_test_df(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
        # Split the CSV into work/test sets by leave_out_inds positions and
        # standardize both with the work-set statistics.
        whole_set = pd.read_csv(self.space_params.data_file, index_col='material').astype(float)
        test_set_materials = [list(whole_set.index)[ind] for ind in self.space_params.leave_out_inds]
        test_set = whole_set.loc[test_set_materials, :]
        work_set = whole_set.loc[set(whole_set.index) - set(test_set_materials), :]
        # Standardize iot weigh reconstruction of each feature equally important
        work_mean, work_std = work_set.mean(axis=0), work_set.std(axis=0)
        stan_work_set = (work_set - work_mean) / work_std
        stan_test_set = (test_set - work_mean) / work_std
        return stan_work_set, stan_test_set
    def _get_nn_data_input(self) -> Dict:
        # Shape the standardized sets into the feature/target arrays the
        # network expects; the target column is matched by property_key substring.
        standardized_work, standardized_test = self._get_stan_work_and_test_df()
        target = [col for col in standardized_work if self.space_params.property_key in col][0]
        nn_data_input = {'train_feat': standardized_work.drop([target], axis=1).values,
                         'train_targets': standardized_work[target].values,
                         'test_feat': standardized_test.drop([target], axis=1).values,
                         'test_targets': standardized_test[target].values}
        return nn_data_input
    def _extract_weightdfs_and_arche2members(self) -> Tuple[Dict[str,pd.DataFrame],Repr2Members]:
        """
        nn_results: result yielded by nn
        space_params: yielded by primary_space_construction_parameters function
        arche2members_path: path to future file location
        """
        nn_data_input = self._get_nn_data_input()
        nn_results = self.nn(nn_data_input, self.at_loss_factor, self.target_loss_factor,
                             self.recon_loss_factor, self.kl_loss_factor)
        stan_work_set, stan_test_set = self._get_stan_work_and_test_df()
        # A-matrices: per-material weights over the virtual archetypes.
        As = {'train': pd.DataFrame(nn_results[('train', 'latent space', 'As')], index=stan_work_set.index),
              'test': pd.DataFrame(nn_results[('test', 'latent space', 'As')], index=stan_test_set.index)}
        # collect actual arche and resp. clusters IFF virtual arche really the one closest to actual arche candidate
        def mat_closest_to(virtualarche: int, train_or_test: str = 'train') -> str:
            return As[train_or_test].loc[:, virtualarche].idxmax()
        def virtualarche_clostest_to(mat: str, train_or_test: str = 'train') -> int:
            return As[train_or_test].loc[mat, :].idxmax()
        # Keep a virtual archetype only if its closest material maps back to it.
        actualarches = [mat_closest_to(virtualarche, 'train') for virtualarche in As['train'].columns
                        if virtualarche_clostest_to(mat_closest_to(virtualarche, 'train'), 'train') == virtualarche]
        assert len(actualarches) == len(As['train'].columns), \
            "For at least one virtual archetype, no material could be found which would be assigned to it, when all " \
            "materials are assigned to closest virtual archetype in latent space; latent space cannot be used; " \
            "NN has to be retrained "
        # Relabel the weight columns by their actual (material) archetypes.
        for train_or_test in ['train', 'test']:
            As[train_or_test].columns = actualarches
        arche2members = {arche: [] for arche in As['train'].columns}
        for train_or_test in ['train', 'test']:
            for mat in As[train_or_test].index:
                arche = As[train_or_test].loc[mat, :].idxmax()
                arche2members[arche] += [mat]
        with open(self.arche2members_path, 'w') as jsonfile:
            json.dump(arche2members, jsonfile)
        return As, arche2members
class MySisso():
    """Prepare input files for and run a SISSO regression over cluster-derived
    training/test splits.

    The fitted solver is stored in ``self.results`` (populated eagerly at
    construction time by :meth:`_get_sisso`).
    """
    def __init__(self,execution_parameters:DerivedSpaceParams,
                 clusters:Repr2Members,
                 singletask_on_representatives_or_multitask_on_all:ExecutionType = "singletask_on_representatives",
                 store_intermediate_results_in:Path = None):
        # Arguments are stored verbatim; all the work happens in _get_sisso().
        self.execution_parameters = execution_parameters
        self.clusters = clusters
        self.singletask_on_representatives_or_multitask_on_all = singletask_on_representatives_or_multitask_on_all
        self.intermediate_results_path = store_intermediate_results_in
        # Runs SISSO immediately — constructing a MySisso is expensive.
        self.results = self._get_sisso()
    def _get_sisso(self):
        """returns SISSOregressor, see https://sissopp_developers.gitlab.io/sissopp/quick_start/code_ref.html#input-files
        sisso_execution_parameters (short: sisso_params): must be output of function sisso_execution_parameters
                                                          or derived_space_construction_parameters
        clusters: must be dict yielded by get_kmeans_center2members_dict function
        singletask_on_representatives_or_multitask_on_all:
                    if set to 'singletask_on_representatives', single-task sisso is trained on representatives, and
                        tested on materials determined by sisso_execution_parameters['leave_out_inds']
                    if set to 'multitask_on_all', multi-task sisso is trained on all Work Set materials, an tested
                        on all Test Set materials as determined by
                        sisso_execution_parameters['leave_out_inds'],
                        where tasks are determined by :clusters: arg
        store_intermediate_results_in: if set to None, temporal folder will be created and deleted after execution
        """
        sisso_params = self.execution_parameters._asdict()
        # Create execution folder:
        exe_folder = ExecutionFolder(permanent_location=self.intermediate_results_path,
                                     refers_to_data_file=sisso_params['data_file'])
        # Prepare csv and json file for sisso execution:
        whole_set = pd.read_csv(sisso_params['data_file'], sep=',', index_col='material')
        # Resolve leave-out indices to material names so they survive re-indexing below.
        test_materials = [list(whole_set.index)[ind] for ind in sisso_params['leave_out_inds']]
        whole_data_filepath = Path(sisso_params['data_file'])
        training_and_test_data_filepath = exe_folder.path.joinpath("training_and_test_" + whole_data_filepath.name)
        if self.singletask_on_representatives_or_multitask_on_all == 'singletask_on_representatives':
            # Single-task: train only on the cluster representatives (dict keys).
            training_materials = list(self.clusters.keys())
            training_and_test_set = whole_set.loc[training_materials + test_materials, :]
        elif self.singletask_on_representatives_or_multitask_on_all == 'multitask_on_all':
            # Multi-task: every material keeps its cluster center as its task label.
            mat2center = {mat: center for center in self.clusters.keys()
                          for mat in self.clusters[center]}
            tasks = [str(mat2center[mat_in_sequence]) for mat_in_sequence in whole_set.index]
            training_and_test_set = whole_set.assign(task=tasks)
            # SISSO expects the task column first.
            training_and_test_set = training_and_test_set[['task'] +
                                                          [col for col in list(whole_set.columns) if col != 'task']]
            sisso_params['task_key'] = 'task'
        else:
            # NOTE(review): the message says 'singletask_on_centers' but the value
            # actually accepted above is 'singletask_on_representatives' — confirm.
            raise_exception(""":singletask_on_centers_or_multitask_on_all: must be set to 'singletask_on_centers'
                            or 'multitask_on_all'""")
        training_and_test_set.to_csv(training_and_test_data_filepath, sep=',')
        sisso_params['data_file'] = training_and_test_data_filepath
        # Indices must be recomputed: the subset file reorders the materials.
        sisso_params['leave_out_inds'] = [list(training_and_test_set.index).index(mat) for mat in test_materials]
        # Execute SISSO and extract results:
        inputspath = as_inputs(exe_folder.path.joinpath("sisso_exe"), **sisso_params)
        inputs = sissopp.Inputs(inputspath)
        feature_space, sisso = get_fs_solver(inputs)
        sisso.fit()
        # Remove execution folder if set to be temporal:
        exe_folder.delete_if_not_permanent()
        return sisso
| MilenaOehlers/cluster-based-SISSO | cluster_based_sisso/__init__.py | __init__.py | py | 21,087 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Literal",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",... |
32509486669 | from ftplib import FTP
import ftplib
import os
import xml.etree.ElementTree as EleTree
import VAPublishUtil as VAUtil
import shutil
import EchoItemXML
__author__ = 'Jiao Zhongxiao'
# Feature branch selected for publishing (set interactively in feature_test)
FEATURE_BRANCH = None
# Confirmed release version number (string; set interactively in feature_test)
VERSION_NUM = None
# Comma-joined list of file extensions that count as publishable assets
ASSET_EXT = ".json,.jpg,.png,.swf,.xml,.mp3,.wdp,.xcom"
# Extensions versioned with the plain VERSION_NUM instead of a CRC32 checksum
IGNORE_EXT = ".xcom"
# File-name substrings versioned with the plain VERSION_NUM instead of CRC32
IGNORE_FILES = ["towerDefenseFight", "PvpPlayerPanel", "commonLang_config"]
def print_branch_select_menu():
    """Print the numbered menu of feature branches available for publishing."""
    publish_config = VAUtil.parse_publish_config()
    print("可以发布的功能分支:")
    # enumerate() replaces the manual counter the original maintained by hand.
    for i, feature_node in enumerate(publish_config.findall("feature")):
        print(str(i) + ": " + feature_node.get("name"))
def publish_feature_branch():
    """Publish the selected feature branch: refresh the config XML, stage the
    assets into a temp dir, stamp version suffixes, then upload over FTP."""
    print("开始发布功能分支: --> " + FEATURE_BRANCH)
    # NOTE(review): unused while the sync/commit steps below stay disabled.
    publish_dir = VAUtil.publish_dir()
    # Refresh config.xml
    EchoItemXML.scan_dir(os.path.join(VAUtil.PRE_PUBLISH_ROOT_DIR, FEATURE_BRANCH))
    # Copy the assets into the temp staging dir
    copy_file_to_temp_dir()
    # Stamp version suffixes
    version_files()
    # Sync to the publish directory (currently disabled)
    # sync_to_publish_dir()
    # Commit to SVN (currently disabled)
    # VAUtil.svn_commit(publish_dir, "版本更新" + VERSION_NUM)
    # Upload to FTP
    sync_to_ftp(collect_files())
    input("版本发布完成")
def sync_to_ftp(all_files):
    """Upload every file in *all_files* (absolute paths, consumed in place)
    to the branch's FTP directory, recreating the temp-dir-relative layout.

    NOTE(review): ftp.mkd() creates only a single directory level; deeply
    nested new directories would still fail — confirm the asset layout is flat.
    """
    ftp = VAUtil.prepare_ftp()
    ftp_dir = "/www/" + FEATURE_BRANCH + "/flash"
    ftp.cwd(ftp_dir)
    temp_dir = VAUtil.temp_dir()
    while len(all_files) > 0:
        upload_file = all_files.pop(0)
        print("Uploading:" + upload_file)
        # FTP paths always use forward slashes, even when built on Windows.
        rel_path = os.path.relpath(upload_file, temp_dir).replace("\\", "/")
        file_dir = os.path.dirname(rel_path)
        if len(file_dir) > 0:
            print("创建目录" + file_dir)
            try:
                ftp.mkd(file_dir)
            except ftplib.error_perm:
                print("目录已经存在")
        # BUGFIX: the file handle was never closed in the original (leaked a
        # descriptor per upload); a context manager guarantees the close.
        with open(upload_file, 'rb') as cur_uploading_file:
            ftp.storbinary("STOR " + rel_path, cur_uploading_file)
def collect_files():
    """Return the absolute paths of every file under the publish temp dir."""
    temp_dir = VAUtil.temp_dir()
    all_files = []
    for root_dir, _dirs, files in os.walk(temp_dir):
        for file in files:
            # os.walk already yields root_dir under temp_dir; the original's
            # extra os.path.join(temp_dir, ...) was a no-op because its second
            # argument was already absolute.
            all_files.append(os.path.join(root_dir, file))
    return all_files
def sync_to_publish_dir():
    """Copy every file from the temp dir into the publish dir, keeping the
    relative layout and creating target directories as needed."""
    temp_dir = VAUtil.temp_dir()
    publish_dir = VAUtil.publish_dir()
    for root_dir, _dirs, files in os.walk(temp_dir):
        for file in files:
            print("Copying:" + file)
            src_path = os.path.join(root_dir, file)
            if os.path.isfile(src_path):
                rel_path = os.path.relpath(src_path, temp_dir)
                print("原始:" + rel_path)
                dst_path = os.path.join(publish_dir, rel_path)
                print("新的:" + dst_path)
                file_dir = os.path.dirname(dst_path)
                print("FileDir:" + file_dir)
                # exist_ok avoids the check-then-create race of the original.
                os.makedirs(file_dir, exist_ok=True)
                shutil.copyfile(src_path, dst_path)
                print("Copy " + src_path + " 完成")
def copy_file_to_temp_dir():
    """Copy the publishable resources of FEATURE_BRANCH into a freshly
    recreated temp staging dir (wrapper.swf plus the release/config dirs)."""
    temp_dir = os.path.join(VAUtil.PRE_PUBLISH_ROOT_DIR, VAUtil.PUBLISH_TEMP_DIR)
    source_dir = os.path.join(VAUtil.PRE_PUBLISH_ROOT_DIR, FEATURE_BRANCH)
    # BUGFIX: the original printed temp_dir twice; the first placeholder is
    # meant to show the source directory.
    print("拷贝资源到缓存目录:\nsourceDir:->" + source_dir + "\n缓存目录" + temp_dir)
    # Always start from an empty staging dir.
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    os.mkdir(temp_dir)
    wrapper_file = "wrapper.swf"
    # wrapper.swf lives at the branch root and is copied on its own.
    src = os.path.join(source_dir, wrapper_file)
    dst = os.path.join(temp_dir, wrapper_file)
    shutil.copy(src, dst)
    # release & ver_config directories are filtered through copy_files().
    dirs = [VAUtil.RES_RELEASE_DIR_NAME, VAUtil.RES_CONFIG_STORE_DIR_NAME]
    for copy_dir in dirs:
        src = os.path.join(source_dir, copy_dir)
        dst = os.path.join(temp_dir, copy_dir)
        copy_files(src, dst)
    print("拷贝资源完成")
def copy_files(src_dir, dst_dir):
    """Recursively copy files whose extension is listed in ASSET_EXT from
    *src_dir* to *dst_dir*, preserving the relative layout."""
    # BUGFIX: the original used ASSET_EXT.find(ext), which also accepted
    # partial extensions (e.g. ".xc" matched inside ".xcom"); compare against
    # the exact extension set instead.
    allowed_exts = set(ASSET_EXT.split(","))
    for root_dir, _dirs, files in os.walk(src_dir):
        for file in files:
            ext = os.path.splitext(file)[1]
            if ext != "" and ext in allowed_exts:
                # Relative path under the source dir
                rel_path = os.path.relpath(os.path.join(root_dir, file), src_dir)
                print("原始:" + rel_path)
                dst_path = os.path.join(dst_dir, rel_path)
                print("新的:" + dst_path)
                file_dir = os.path.dirname(dst_path)
                print("FileDir:" + file_dir)
                os.makedirs(file_dir, exist_ok=True)
                shutil.copyfile(os.path.join(root_dir, file), dst_path)
                print("Copy " + os.path.join(root_dir, file) + " 完成")
def check_is_ignore_file(file_name):
    """Return True when *file_name* contains any IGNORE_FILES marker,
    i.e. the file should get the plain version-number suffix."""
    return any(marker in file_name for marker in IGNORE_FILES)
def version_file_with_ver_num(file_path, xml_node):
    """Rename *file_path* with a "_<VERSION_NUM>" suffix if it is ignore-listed.

    Returns True when the file was handled (size attribute and text of
    *xml_node* updated, file renamed on disk), False when the caller should
    fall back to CRC32 versioning.
    """
    ext = os.path.splitext(file_path)[1]
    print("尝试将文件直接加上版本号 Processing:" + file_path)
    # NOTE(review): for extension-less files ext == "" and IGNORE_EXT.find("")
    # is 0, so they always take this branch — confirm that is intended.
    if IGNORE_EXT.find(ext) != -1 or check_is_ignore_file(xml_node.get("id")):
        crc = VERSION_NUM
        xml_node.set("size", str(os.path.getsize(file_path)))
        # BUGFIX: str.replace(ext, ...) substituted the FIRST occurrence of the
        # extension anywhere in the name/path; insert the suffix at the real
        # extension boundary instead.
        node_base, node_ext = os.path.splitext(xml_node.text)
        xml_node.text = node_base + "_" + crc + node_ext
        path_base, path_ext = os.path.splitext(file_path)
        new_file = path_base + "_" + crc + path_ext
        os.replace(file_path, new_file)
        return True
    else:
        return False
def version_dir_with_ver_num(work_dir):
    """Append "_<VERSION_NUM>" before the extension of every file directly
    inside *work_dir* (subdirectories are not descended into)."""
    files = os.listdir(work_dir)
    for file in files:
        old_file = os.path.join(work_dir, file)
        is_file = os.path.isfile(old_file)
        print(old_file + "---" + str(is_file))
        if is_file:
            base, ext = os.path.splitext(old_file)
            # BUGFIX: the original used str.replace(ext, ...), which replaced
            # the first match anywhere in the path and — for extension-less
            # files, where ext == "" — inserted the suffix between every
            # single character of the path. Suffix at the real boundary,
            # and skip files that have no extension at all.
            if ext:
                new_file = base + "_" + str(VERSION_NUM) + ext
                print("重命名:" + old_file + "<>" + new_file)
                os.replace(old_file, new_file)
def res_config_file_path():
    """Absolute path of the resource config XML inside the temp dir."""
    return os.path.join(VAUtil.temp_dir(), VAUtil.RES_CONFIG_STORE_DIR_NAME,
                        EchoItemXML.XML_FILE_NAME)
def com_res_config_file_path():
    """Absolute path of the compressed resource config inside the temp dir."""
    return os.path.join(VAUtil.temp_dir(), VAUtil.RES_CONFIG_STORE_DIR_NAME,
                        EchoItemXML.COM_XML_FILE_NAME)
def version_file_with_crc32():
    """Version every file listed in the resource config XML.

    Ignore-listed files get the plain VERSION_NUM suffix; all others get a
    CRC32-based suffix (delegated to VAUtil). The updated XML is rewritten
    in place and a compressed copy is produced next to it.
    """
    temp_dir = VAUtil.temp_dir()
    config_file = res_config_file_path()
    config = EleTree.parse(config_file)
    root = config.getroot()
    for folderElement in root:
        folder = folderElement.get("folder")
        for item in folderElement:
            # item.text is the file name relative to its folder element.
            file = os.path.join(temp_dir, folder, item.text)
            if os.path.exists(file):
                # Plain version-number suffix first; fall back to CRC32.
                if not version_file_with_ver_num(file, item):
                    VAUtil.version_file_with_crc32(file, item)
    config.write(config_file)
    VAUtil.compress_file(config_file, com_res_config_file_path())
def version_files():
    """Stamp version suffixes onto every staged file."""
    print("开始给所有文件加上版本后缀")
    root_dir = VAUtil.temp_dir()
    # Staging root: wrapper.swf gets the plain version number
    version_dir_with_ver_num(root_dir)
    # release dir: CRC32 (or ignore-list) versioning driven by the config XML
    version_file_with_crc32()
    # ver_config dir: plain version number
    version_dir_with_ver_num(os.path.join(root_dir, VAUtil.RES_CONFIG_STORE_DIR_NAME))
def feature_test():
    """Interactive entry point: pick a feature branch, confirm a version
    number, then publish that branch."""
    p_root_dir = VAUtil.try_find_project_root_dir()
    if p_root_dir:
        # Bring the SVN working copy up to date first
        VAUtil.svn_update(p_root_dir)
        p_config = VAUtil.parse_publish_config()
        # Show the branch menu (re-parses the same config internally)
        print_branch_select_menu()
        # Read the user's selection
        select_i = VAUtil.read_select_index()
        # Resolve the branch directory for the selection
        global FEATURE_BRANCH
        FEATURE_BRANCH = p_config.findall("feature")[select_i].get("branchDir")
        global VERSION_NUM
        # Confirm the release version number
        # NOTE(review): a negative index silently selects from the END of the
        # feature list and reuses the index itself as the version string —
        # confirm this hidden shortcut is intentional.
        if select_i < 0:
            VERSION_NUM = str(select_i)
        else:
            VERSION_NUM = input("确认发布版本号:\n")
        publish_feature_branch()
if __name__ == "__main__":
    feature_test()
| jiaox99/publishTools | pythonScripts/VACommonPublishTool.py | VACommonPublishTool.py | py | 8,536 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "VAPublishUtil.parse_publish_config",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "VAPublishUtil.publish_dir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "EchoItemXML.scan_dir",
"line_number": 39,
"usage_type": "call"
},
{
... |
15662447246 | import json
import os
from flask import (
Flask,
jsonify,
render_template,
request,
send_from_directory,
redirect,
session,
url_for,
)
import oci
from oci.ai_anomaly_detection.models import DetectAnomaliesDetails
import postgrest
from supabase import create_client, Client
from io import BytesIO
import qrcode
from gotrue.types import User
import stripe
from twilio.rest import Client
app = Flask(__name__, template_folder="templates")
app.static_folder = "static"
# NOTE(review): debug mode must be disabled before any production deployment.
app.debug = True
# NOTE(review): a random secret key regenerated on every restart invalidates
# all existing sessions each time the process restarts — confirm intended.
app.secret_key = os.urandom(24)
# @app.route("/oci")
# def ociPlate():
# config = oci.config.from_file("/Users/salomon/Desktop/oxxoGas_app/oci/oci")
# ai_vision_client = oci.ai_vision.AIServiceVisionClient(config=config)
# Send the request to the service
# analyze_image_response = ai_vision_client.analyze_image(
# analyze_image_details=oci.ai_vision.models.AnalyzeImageDetails(
# features=[
# oci.ai_vision.models.ImageClassificationFeature(
# feature_type="TEXT_DETECTION", max_results=130
# )
# ],
# image=oci.ai_vision.models.ObjectStorageImageDetails(
# source="OBJECT_STORAGE",
# namespace_name="axvnl9xrn6xz",
# bucket_name="oxxogas",
# object_name="car-pics/Cars432.png",
# ),
# compartment_id="ocid1.tenancy.oc1..aaaaaaaas244yut7vrorqgsz4jf3vs5dd7nl7tlcreo5bhuc52ddowy6q5mq",
# ),
# opc_request_id="XTOOOGSRULY7TEKOXIY1",
# )
# # Get the "text" value using the provided methods
# text_value = analyze_image_response.data.image_text.lines[0].text
# print(type(text_value))
# return render_template("register.html", text_value=text_value)
# ADMIN OXXO GAS — shared service clients and credentials.
# NOTE(review): every credential below is hardcoded in source; move them to
# environment variables / a secrets manager before publishing this repository.
config = oci.config.from_file(
    "/Users/salomon/Desktop/oxxogas.github.io/oci/oci"
)  # change to your own path
ai_vision_client = oci.ai_vision.AIServiceVisionClient(config=config)
# Supabase project URL and anon key (module-level client reused by most routes)
url: str = "https://rafdgizljnzrnmfguogm.supabase.co"
key: str = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InJhZmRnaXpsam56cm5tZmd1b2dtIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTgzNjU0OTAsImV4cCI6MjAxMzk0MTQ5MH0.7_0lzFml9UgLJ6m4nDCs3IhYam1ofa0FoCSYkpTm2VM"
supabase: Client = create_client(url, key)
# Stripe test-mode secret key
stripe.api_key = "sk_test_51O9tjGJJgeLIT5WE5vjn0nzYZaCIqb7mYxjS7Mzu3yEpYcyWV47N5DrLDTLGjXi9OQwpEbK7UtIPo5npy0pXSLQj00xGEG2ZhI"
# Your Twilio Account SID and Auth Token
TWILIO_ACCOUNT_SID = "ACc2ef00cc9fd26b9c1ea712d8d490ab0c"
TWILIO_AUTH_TOKEN = "53b423ee8c6c75a344f78df2f9081fc8"
# Your Twilio phone number (this is the number provided by Twilio)
TWILIO_PHONE_NUMBER = "+13344014858"
@app.route("/oci", methods=["GET", "POST"])
def ociPlate():
if "vendor_info" in session:
if request.method == "POST":
photo_data = request.form.get("photo")
# Remove the base64 image prefix if present
prefix = "data:image/jpeg;base64,"
if photo_data.startswith(prefix):
photo_data = photo_data[len(prefix) :]
# Process the image using OCI AI Services
plate_text = analyze_image(photo_data)
# Redirect to the purchases route with plate_text as a parameter
return redirect(url_for("purchases", plateid=plate_text))
return render_template("oci.html")
else:
return redirect(url_for("login"))
def analyze_image(image_data_base64_str):
    """Run OCI text detection on a base64-encoded image and return the first
    detected line containing a dash, with its dashes removed (plate format),
    or a fallback message when nothing suitable is found."""
    analyze_image_response = ai_vision_client.analyze_image(
        analyze_image_details=oci.ai_vision.models.AnalyzeImageDetails(
            features=[
                oci.ai_vision.models.ImageClassificationFeature(
                    feature_type="TEXT_DETECTION", max_results=130
                )
            ],
            image=oci.ai_vision.models.InlineImageDetails(
                data=image_data_base64_str,  # base64 string is passed directly
            ),
            compartment_id="ocid1.tenancy.oc1..aaaaaaaas244yut7vrorqgsz4jf3vs5dd7nl7tlcreo5bhuc52ddowy6q5mq",
        ),
        opc_request_id="XTOOOGSRULY7TEKOXIY1",
    )
    detected_lines = analyze_image_response.data.image_text.lines
    print(detected_lines)
    # The first line containing a dash wins; strip the dash to normalize it.
    return next(
        (line.text.replace("-", "")
         for line in (detected_lines or [])
         if "-" in line.text),
        "No se detectó texto en la imagen",
    )
@app.route("/register")
def register():
if "vendor_info" in session:
return render_template("register.html")
else:
return redirect(url_for("login"))
@app.route("/submit", methods=["POST"])
def submit_form():
message = None
if request.method == "POST":
# Get form data
plateid = request.form.get("plateid")
first_name = request.form.get("first_name")
last_name = request.form.get("last_name")
sex = request.form.get("sex")
age = request.form.get("age")
raw_phone = request.form.get("phone")
email = request.form.get("email")
phone = "+52" + raw_phone
# Create a dictionary with the data
customer_data = {
"plateid": plateid,
"first_name": first_name,
"last_name": last_name,
"sex": sex,
"age": age,
"phone": phone,
"email": email,
}
try:
# Insert the data into the Supabase table
data, count = supabase.table("CLIENTS").insert([customer_data]).execute()
# Data insertion successful
message = "Registro exitoso!"
except postgrest.exceptions.APIError as e:
if "duplicate key value violates unique constraint" in e.message:
# Duplicate key violation error
message = "Este número de placa ya está registrado."
else:
# Other API error
message = f"Error al insertar datos: {e.message}"
return render_template("register.html", message=message)
return render_template("register.html")
@app.route("/compra", methods=["GET", "POST"])
def compra():
if "vendor_info" in session:
if request.method == "POST":
# Get form data from the request
plateid = request.form["plateid"]
amount = float(request.form.get("amount"))
liters = float(request.form.get("liters"))
branch = int(request.form.get("branch"))
payment_method = request.form["payment_method"]
gas_type = request.form["gas_type"]
# Prepare data to be inserted into the PURCHASE_HISTORY table
new_purchase_data = {
"plateid": plateid,
"amount": amount,
"liters": liters,
"branch": branch,
"payment_method": payment_method,
"gas_type": gas_type,
"status": True, # Set status to True
}
# Send a POST request to the PURCHASE_HISTORY table
response = (
supabase.table("PURCHASE_HISTORY").insert([new_purchase_data]).execute()
)
print(response)
return redirect(
url_for("compra")
) # Redirect to the compra route after submission
return render_template("compra.html")
else:
return redirect(url_for("login"))
@app.route("/branch")
def branches():
if "vendor_info" in session:
url: str = "https://rafdgizljnzrnmfguogm.supabase.co"
key: str = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InJhZmRnaXpsam56cm5tZmd1b2dtIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTgzNjU0OTAsImV4cCI6MjAxMzk0MTQ5MH0.7_0lzFml9UgLJ6m4nDCs3IhYam1ofa0FoCSYkpTm2VM"
supabase: Client = create_client(url, key)
response = supabase.table("BRANCH").select("*").execute()
print(response)
data = response.data
return render_template("branch.html", data=data)
else:
return redirect(url_for("login"))
ITEMS_PER_PAGE = 20  # page size for the client listing; tune as needed
@app.route("/datos")
@app.route("/datos/page/<int:page>")
def datos(page=1):
    """Paginated listing of registered clients (vendor session required)."""
    if "vendor_info" in session:
        start = (page - 1) * ITEMS_PER_PAGE
        # end bound is start + N - 1: .range() appears to be inclusive — verify.
        end = start + ITEMS_PER_PAGE - 1
        response = supabase.table("CLIENTS").select("*").range(start, end).execute()
        total_items = (
            supabase.table("CLIENTS").select("plateid", count="exact").execute().count
        )  # exact row count for the pagination controls
        total_pages = (
            total_items + ITEMS_PER_PAGE - 1
        ) // ITEMS_PER_PAGE  # Ceiling division
        data = response.data
        return render_template(
            "datos.html", data=data, current_page=page, total_pages=total_pages
        )
    else:
        return redirect(url_for("login"))
@app.route("/login", methods=["GET", "POST"])
def login():
message = None
if request.method == "POST":
# Get the username and password from the form
id = int(request.form.get("id"))
password = request.form.get("password")
try:
# Query the "vendors" table in Supabase
vendors = (
supabase.from_("VENDORS")
.select("*")
.eq("id", id)
.eq("password", password)
.execute()
)
vendors_data = vendors.data
# Check if a matching vendor was found
if vendors.data and len(vendors.data) == 1:
# Successful login, store vendor information in the session
vendor_info = {
"username": vendors_data[0]["username"],
"id": vendors_data[0]["id"],
"branch_id": vendors_data[0]["branch_id"],
}
session["vendor_info"] = vendor_info
print("Login successful!", "success")
message = "Inicio exitoso!"
# Redirect to the purchases page or another appropriate route
return redirect(url_for("purchases"))
else:
# Invalid credentials, show an error alert
print("Invalid username or password", "danger")
message = "Usuario o contraseña incorrectos"
except Exception as e:
# Handle the query error, show an error alert
print(f"Error querying the database: {str(e)}")
message = "Error al consultar la base de datos"
return render_template("login.html", message=message)
@app.route("/purchases")
def purchases():
if "vendor_info" in session:
# Obtener el plateid del parámetro de consulta
plateid = request.args.get("plateid", "DEFAULT_PLATE_ID")
branch_id = session['vendor_info']['branch_id']
# Obtener datos de compra
response = (
supabase.table("PURCHASE_HISTORY")
.select("*")
.eq("plateid", plateid)
.eq("status", True)
.eq("branch", branch_id)
.execute()
)
if len(response.data) > 0:
purchase = response.data[0]
# Obtener el teléfono del cliente de la tabla CLIENTS
client_response = (
supabase.table("CLIENTS").select("phone").eq("plateid", plateid).execute()
)
if len(client_response.data) > 0:
phone_number = client_response.data[0]["phone"]
# Aquí deberías enviar un mensaje de texto al número de teléfono
send_text_message(phone_number, "Revisa tus promociones en: http://127.0.0.1/main en la pestana de promociones OXXO GAS")
# Continuar con el resto del código
# Obtener el título de la sucursal de la tabla BRANCH
branch_response = (
supabase.table("BRANCH")
.select("branch_title")
.eq("id", purchase["branch"])
.execute()
)
if len(branch_response.data) > 0:
branch_title = branch_response.data[0]["branch_title"]
else:
branch_title = "Sucursal Desconocida"
return render_template(
"purchase.html", purchase=purchase, branch_title=branch_title
)
else:
return render_template("purchase.html", error="Cliente no encontrado")
else:
return render_template("purchase.html", error="Compra no encontrada")
else:
return redirect(url_for("login"))
def send_text_message(phone_number, message):
    """Send *message* to *phone_number* via Twilio SMS.

    Failures are logged to stdout and swallowed — sending is best-effort.
    """
    twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
    try:
        twilio_client.messages.create(
            body=message, from_=TWILIO_PHONE_NUMBER, to=phone_number
        )
    except Exception as e:
        print(f"Error sending message to {phone_number}: {str(e)}")
    else:
        print(f"Message sent successfully to {phone_number}")
@app.route("/dispatch", methods=["POST"])
def dispatch_purchase():
purchase_id = request.form.get("purchase_id", type=int)
new_status = False
print(type(new_status))
print(purchase_id)
# Asegúrate de que la respuesta de Supabase se maneje correctamente aquí.
response = (
supabase.table("PURCHASE_HISTORY")
.update({"status": new_status})
.eq("id", purchase_id)
.execute()
)
print(response)
# Redirige al usuario de vuelta a la página de compras
return render_template("success_update.html")
# CUSTOMER OXXO GAS
@app.route("/", methods=["GET", "POST"])
def index():
message = None
if request.method == "POST":
# Get form data
plateid = request.form.get("plateid")
password = request.form.get("password")
print(plateid)
print(password)
# Query the Supabase client table for user credentials
response = (
supabase.table("CLIENTS")
.select("*")
.eq("plateid", plateid)
.eq("password", password)
.execute()
)
print("respuesta", response)
user_data = response.data
# Check if user exists and password matches
if user_data and len(user_data) > 0 and user_data[0]["password"] == password:
# Successful login
message = "Inicio exitoso!"
session["user_info"] = {
"plateid": plateid,
"first_name": user_data[0]["first_name"],
"last_name": user_data[0]["last_name"],
"phone": user_data[0]["phone"],
"email": user_data[0]["email"],
"client_type": user_data[0]["client_type"],
}
print(session["user_info"])
return redirect(url_for("master"))
else:
# Invalid login
message = "Usuario o contraseña incorrectos"
return render_template("index.html", message=message)
@app.route("/signup")
def signup():
return render_template("signup.html")
@app.route("/signup-post", methods=["POST"])
def signup_form():
message = None
if request.method == "POST":
# Get form data
plateid = request.form.get("plateid")
first_name = request.form.get("first_name")
last_name = request.form.get("last_name")
sex = request.form.get("sex")
age = request.form.get("age")
raw_phone = request.form.get("phone")
email = request.form.get("email")
password = request.form.get("password")
phone = "+52" + raw_phone
# Create a dictionary with the data
client_data = {
"plateid": plateid,
"first_name": first_name,
"last_name": last_name,
"sex": sex,
"age": age,
"phone": phone,
"email": email,
"password": password,
}
print(client_data)
try:
# Insert the data into the Supabase table
data, count = supabase.table("CLIENTS").insert([client_data]).execute()
# Data insertion successful
message = "Registro exitoso!"
return render_template("index.html", message=message)
except postgrest.exceptions.APIError as e:
if "duplicate key value violates unique constraint" in e.message:
# Duplicate key violation error
message = "Este número de placa ya está registrado."
else:
# Other API error
message = f"Error al insertar datos: {e.message}"
return render_template("signup.html", message=message)
return render_template("signup.html")
@app.route("/main")
def master():
if "user_info" in session:
url: str = "https://rafdgizljnzrnmfguogm.supabase.co"
key: str = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InJhZmRnaXpsam56cm5tZmd1b2dtIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTgzNjU0OTAsImV4cCI6MjAxMzk0MTQ5MH0.7_0lzFml9UgLJ6m4nDCs3IhYam1ofa0FoCSYkpTm2VM"
supabase: Client = create_client(url, key)
user_info = session["user_info"]
client_type = user_info["client_type"]
plate_number = user_info["plateid"]
response = supabase.table("BRANCH").select("*").execute()
fuel = supabase.table("FUEL").select("*").execute()
promotions = supabase.table("PROMOTIONS").select("*").eq("client_type",client_type).execute()
reciepts = (
supabase.table("PURCHASE_HISTORY")
.select("*")
.eq("plateid", session["user_info"]["plateid"])
.order("created_at.desc")
.execute()
)
data = response.data
fuel_data = fuel.data
reciepts_data = reciepts.data
promotions_data = promotions.data
print(fuel_data)
return render_template(
"master.html", data=data, fuel_data=fuel_data, reciepts_data=reciepts_data, promotions_data=promotions_data
)
else:
return redirect(url_for("index"))
def generate_qr_code(plate_number):
    """Render *plate_number* as a QR image and save it to static/plate_qr.png."""
    code = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    code.add_data(plate_number)
    code.make(fit=True)
    # Black-on-white PNG written into the static folder for the profile page.
    code.make_image(fill_color="black", back_color="white").save("static/plate_qr.png")
@app.route("/profile")
def profile():
if "user_info" in session:
# Access user information from the session
user_info = session["user_info"]
plate_number = user_info["plateid"]
# Generate the QR code with the plate number
generate_qr_code(plate_number)
return render_template("profile.html", plate_number=plate_number)
else:
# Handle the case when user_info is not in the session (not logged in)
return redirect(url_for("index"))
@app.route("/qr_code/<filename>")
def send_qr_code(filename):
return send_from_directory("static", filename)
@app.route("/buy", methods=["GET", "POST"])
def buy():
if "user_info" in session:
url: str = "https://rafdgizljnzrnmfguogm.supabase.co"
key: str = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InJhZmRnaXpsam56cm5tZmd1b2dtIiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTgzNjU0OTAsImV4cCI6MjAxMzk0MTQ5MH0.7_0lzFml9UgLJ6m4nDCs3IhYam1ofa0FoCSYkpTm2VM"
supabase: Client = create_client(url, key)
response = supabase.table("BRANCH").select("*").execute()
data = response.data
fuel_type = request.args.get("fuel_type")
fuel_price = request.args.get("fuel_price")
return render_template(
"fuel-checkout.html", data=data, fuel_type=fuel_type, fuel_price=fuel_price
)
else:
return redirect(url_for("index"))
@app.route("/process_purchase", methods=["POST"])
def process_purchase():
if "user_info" in session:
message = None
user_info = session["user_info"]
customer_email = user_info.get("email")
if request.method == "POST":
# Get form data
amount = float(request.form.get("amount"))
liters = float(request.form.get("liters"))
plateid = user_info["plateid"]
branch = int(request.form.get("branch"))
payment_method = request.form.get("payment_method")
gas_type = request.form.get("fuel_type")
status = True
# Create a dictionary with the data
purchase_data = {
"amount": amount,
"liters": liters,
"plateid": plateid,
"branch": branch,
"payment_method": payment_method,
"gas_type": gas_type,
"status": status,
}
try:
# Insert the data into the Supabase table
data, count = (
supabase.table("PURCHASE_HISTORY").insert([purchase_data]).execute()
)
# Data insertion successful
message = "Registro exitoso!"
print(message)
if payment_method == "tarjeta":
# Convertir el precio a un entero seguro para Stripe
product_price = int(amount * 100)
try:
# Crear la sesión de pago con Stripe
checkout_session = stripe.checkout.Session.create(
payment_method_types=["card", "oxxo"],
line_items=[
{
"price_data": {
"currency": "mxn",
"unit_amount": product_price,
"product_data": {
"name": "Gasolina " + gas_type,
"description": "Cobro de gasolina solicitada",
"images": ["static/gas.jpg"],
},
},
"quantity": 1,
},
],
mode="payment",
success_url=url_for("success_stripe", _external=True)
+ "?session_id={CHECKOUT_SESSION_ID}",
# cancel_url=url_for('buy', _external=True),
customer_email=customer_email,
)
return redirect(checkout_session.url, code=303)
except Exception as e:
return str(e)
elif payment_method == "efectivo":
# Aquí tu lógica para el pago en efectivo...
return redirect(url_for("success"))
except postgrest.exceptions.APIError as e:
if "duplicate key value violates unique constraint" in e.message:
# Duplicate key violation error
message = "Este número de placa ya está registrado."
print(message)
else:
# Other API error
message = f"Error al insertar datos: {e.message}"
print(message)
return render_template("fuel-checkout.html", message=message)
return redirect(url_for("process_purchase"))
else:
return redirect(url_for("index"))
@app.route("/success")
def success():
return render_template("success.html")
@app.route("/success_stripe", methods=["GET"])
def success_stripe():
# Retrieve the Stripe session ID from the URL query parameters
session_id = request.args.get("session_id")
if session_id:
try:
payment = stripe.checkout.Session.retrieve(session_id)
purchase_id = payment.payment_intent
except stripe.error.StripeError as e:
purchase_id = "Error retrieving payment"
else:
purchase_id = "Session ID not found"
return render_template("success_stripe.html", purchase_id=purchase_id)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80)
| Salomon-mtz/oxxogas.github.io | app.py | app.py | py | 24,643 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "oci.config.from_file",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "oci.config",
"line_nu... |
30989959642 | # from locale import setlocale, LC_ALL
from calendar import month_name, mdays
from functools import reduce
# setlocale(LC_ALL, 'pt_BR')
# Months (1-12) whose day count in calendar.mdays is 31.
Lista_meses = [mes for mes in range(1, 13) if mdays[mes] == 31]
# Their (locale-dependent; English under the default C locale) names.
nome_meses = [month_name[mes] for mes in Lista_meses]
# Header followed by one "\n "-prefixed month name each — same string the
# original assembled with functools.reduce.
juntar = "\n ".join(["Meses com 31 dias"] + nome_meses)
print(juntar)
{
"api_name": "calendar.mdays",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "calendar.month_name",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 9,
"usage_type": "call"
}
] |
16692902771 | #!/usr/bin/env python
import cv2
from lib import tracker
# Drawing colors in OpenCV's BGR channel order: (blue, green, red).
BLUE = (255, 50, 50)
GREEN = (50, 255, 50)
RED = (50, 50, 255)
WHITE = (255, 255, 255)
def main():
    """Detect markers in the module-global ``img`` and draw each marker's
    contour, major/minor axes and text label onto it in place."""
    markers = tracker.find_markers(img)
    # BUGFIX: dict.iteritems() exists only in Python 2 and raised
    # AttributeError under Python 3; .items() behaves the same on both.
    for m_id, marker in markers.items():
        cv2.drawContours(img, [marker.contour], -1, GREEN, 2)
        cv2.line(img, marker.position, marker.major_axis, WHITE, 2)
        cv2.line(img, marker.position, marker.minor_axis, WHITE, 2)
        cv2.putText(img, str(marker), marker.position,
                    fontFace=cv2.FONT_HERSHEY_DUPLEX,
                    fontScale=0.6, color=RED)
if __name__ == '__main__':
    # Toggle between a single test image and a live webcam feed.
    STATIC = True
    if STATIC:
        img = cv2.imread('test.jpg')
        # Downscale to 30% so the window fits on screen.
        img = cv2.resize(img, None, fx=0.3, fy=0.3,
                         interpolation=cv2.INTER_LINEAR)
        main()
        cv2.imshow('Main window', img)
        cv2.waitKey(0)  # wait for any key before exiting
    else:
        cap = cv2.VideoCapture(0)  # default webcam
        while True:
            # main() reads the module-global img rebound on each frame.
            __, img = cap.read()
            main()
            cv2.imshow('Main window', img)
            # 'q' quits the live loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
{
"api_name": "lib.tracker.find_markers",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lib.tracker",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cv2.drawContours",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.line",
... |
20521636348 | from turtle import Turtle
from paddle import Paddle
from scoreboard import Scoreboard
from random import randint
from time import sleep
BALL_RADIUS = 12  # supposedly 10
# NOTE(review): BALL_SPEED is unused — speed comes from the random sx/sy.
BALL_SPEED = 9
WINDOW_WIDTH = 960
WINDOW_HEIGHT = 640
class Ball(Turtle):
    """The pong ball: a turtle "circle" carrying a velocity (sx, sy)."""

    def __init__(self) -> None:
        super().__init__()
        # DRY: the random-velocity block was duplicated verbatim in
        # regenerate_ball(); both now share _randomize_velocity().
        self._randomize_velocity()
        self.penup()
        self.shape("circle")
        self.color("turquoise")
        self.speed(0)  # turtle animation speed: 0 = fastest drawing

    def _randomize_velocity(self) -> None:
        """Pick a fresh random velocity with a random sign on each axis."""
        self.sx = randint(2, 7)
        self.sy = randint(2, 4)
        if randint(0, 1) % 2 == 0:
            self.sx *= -1
        if randint(0, 1) % 2 == 0:
            self.sy *= -1

    # Ball movement
    def ball_movement(self) -> None:
        """Advance the ball one step along its velocity vector."""
        self.setx(self.xcor() + self.sx)
        self.sety(self.ycor() + self.sy)

    # Regenerate the ball
    def regenerate_ball(self) -> None:
        """Re-center the ball and give it a new random velocity (after a goal)."""
        self.setpos(0, 0)
        self._randomize_velocity()

    def bounce_top_bottom(self) -> None:
        """Reflect the vertical velocity when the ball touches top/bottom."""
        cy = self.ycor()
        # Check for upper boundary...
        if cy + BALL_RADIUS > WINDOW_HEIGHT * 0.5 or cy - BALL_RADIUS < -WINDOW_HEIGHT * 0.5:
            print(f"Top-bottom: {cy}")
            self.sy *= -1
        self.ball_movement()

    def bounce_paddle(self, paddle: Paddle) -> None:
        """Reflect the horizontal velocity when the ball reaches *paddle*."""
        PADDLE_OFFSET = 40      # x distance from the wall at which a hit counts
        PADDLE_RANGE = 180 * 0.5  # half the paddle height
        cx = self.xcor()
        cy = self.ycor()
        # Testing for left paddle
        if paddle.xcor() < 0:
            if cx <= -WINDOW_WIDTH * 0.5 + PADDLE_OFFSET and (cy >= paddle.ycor() - PADDLE_RANGE and cy <= paddle.ycor() + PADDLE_RANGE):
                print("Left paddle")
                self.sx *= -1
        # Testing for right paddle
        if paddle.xcor() > 0:
            if cx >= WINDOW_WIDTH * 0.5 - PADDLE_OFFSET and (cy >= paddle.ycor() - PADDLE_RANGE and cy <= paddle.ycor() + PADDLE_RANGE):
                print("Right paddle")
                self.sx *= -1
        self.ball_movement()

    def is_out_of_bounds(self, scoreboard_left: Scoreboard, scoreboard_right: Scoreboard):
        """Score for the opposite side and reset when the ball exits left/right."""
        cx = self.xcor()
        if cx <= -WINDOW_WIDTH * 0.5:
            scoreboard_right.add_score()
            self.regenerate_ball()
            sleep(1)  # brief pause before the next serve
        if cx >= WINDOW_WIDTH * 0.5:
            scoreboard_left.add_score()
            self.regenerate_ball()
            sleep(1)
{
"api_name": "turtle.Turtle",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.randint",
"lin... |
2090654629 | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import urllib.request
import os
def to_image_name(user_info):
    """Build the on-disk image file name for *user_info*: ``<name>_(<updatedAt>).jpg``."""
    name = user_info["name"]
    stamp = user_info["updatedAt"]
    return f'{name}_({stamp}).jpg'
def get_local_user_from_image_name(image_name):
    """Parse ``<name>_(<timestamp>).jpg`` back into a ``{'name', 'updatedAt'}`` dict.

    The name itself may contain underscores; only the last ``_``-separated
    token is the parenthesised timestamp.
    """
    stem, _ext = os.path.splitext(image_name)
    *name_pieces, stamp_token = stem.split('_')
    # stamp_token looks like '(1234.5)'; drop the surrounding parentheses.
    return {
        'name': '_'.join(name_pieces),
        'updatedAt': float(stamp_token[1:-1]),
    }
def read_data():
    """Sync user avatar images from Firestore into ``images_dicts/``.

    For each remote user: if no local copy exists, download it; if local
    copies exist but the remote ``updatedAt`` is newer than the newest local
    timestamp, delete the stale files and download the fresh image.
    """
    cred = credentials.Certificate("serviceAccount.json")
    app = firebase_admin.initialize_app(cred)
    db = firestore.client(app)
    user_collection = db.collection('users').stream()
    path = "images_dicts"
    dir_list = os.listdir(path)
    local_users = list(map(get_local_user_from_image_name, dir_list))
    for document in user_collection:
        user = document.to_dict()
        user['updatedAt'] = user['updatedAt'].timestamp()
        matched_local_users = [local_user for local_user in local_users
                               if local_user['name'] == user['name']]
        if matched_local_users:
            # BUG FIX: os.listdir order is arbitrary, so the *last* match is
            # not necessarily the newest local copy — compare the remote
            # timestamp against the maximum local timestamp instead.
            newest = max(matched_local_users, key=lambda u: u['updatedAt'])
            if newest['updatedAt'] < user['updatedAt']:
                # Remote is newer: remove every stale local file, then fetch.
                for matched_local_user in matched_local_users:
                    os.remove(f'images_dicts/{to_image_name(matched_local_user)}')
                urllib.request.urlretrieve(user['image'], f'images_dicts/{to_image_name(user)}')
        else:
            # No local copy at all: fetch it.
            urllib.request.urlretrieve(user['image'], f'images_dicts/{to_image_name(user)}')
# read_data(db)
| huuquyen2606/FID | fast_api.py | fast_api.py | py | 1,610 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.splitext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "... |
2144866290 | import requests
import re
from bs4 import BeautifulSoup
from headers import HEADERS
from csvImporter import CsvImporter
class InfoFinder:
    """Crawl company home pages for a contact page, then scrape a Dutch
    postcode ('1234 AB' or '1234AB') from whichever page was found.

    ``url_tuple_set`` holds ``(company_name, base_url)`` tuples; results are
    appended to a CSV through ``CsvImporter``.
    """

    def __init__(self, url_tuple_set, file_name):
        self.url_tuple = url_tuple_set
        self.has_contact_set = set()   # (company, contact-page url)
        self.no_contact_set = set()    # (company, base url) with no contact link found
        self.importer = CsvImporter(file_name)

    def _record_contact(self, company, url, link_text):
        # Log the discovered contact link and remember it for this company.
        print(url)
        print(link_text)
        print('\n')
        self.has_contact_set.add((company, url))

    def filter_contact_sets(self):
        """Partition companies into ``has_contact_set`` / ``no_contact_set``
        by scanning each home page's anchors for a contact link."""
        print('filtering sets...')
        for company_tuple in self.url_tuple:
            print(company_tuple)
            try:
                # BUG FIX: requests.get's second positional argument is
                # ``params``, not ``headers`` — the headers were never sent.
                req = requests.get(company_tuple[1], headers=HEADERS)
                # Explicit parser avoids the "no parser specified" warning
                # and environment-dependent parser selection.
                soup = BeautifulSoup(req.text, 'html.parser')
                has_contact = False
                base_url = company_tuple[1]
                for ankor in soup.findAll('a'):
                    href = ankor.get('href')
                    if not href:
                        continue
                    if '/contact' in href or '/Contact' in href:
                        # Absolute links are kept as-is; relative links are
                        # resolved against the company's base URL.
                        url = href if 'http' in href else base_url + href
                    elif href == 'contact.html':
                        url = base_url + '/' + href
                    elif ankor.text.strip() in ('Contact', 'contact', 'CONTACT', '"CONTACT"'):
                        url = href if 'http' in href else base_url + href
                    else:
                        continue
                    self._record_contact(company_tuple[0], url, ankor.text)
                    has_contact = True
                if not has_contact:
                    self.no_contact_set.add(company_tuple)
            except Exception as e:
                print(e)
        print('filtering finished')

    def find_post_code(self, company, url, company_list):
        """Fetch *url* and write the first Dutch postcode it contains to the CSV."""
        try:
            # BUG FIX: headers passed by keyword (see filter_contact_sets).
            req = requests.get(url, headers=HEADERS)
            match = re.search(r'(\d{4}\s[A-Z]{2})|(\d{4}[A-Z]{2})', req.text)
            if match:
                postcode_string = match.group()
                if len(postcode_string) == 6:
                    # Normalise the compact form '1234AB' to '1234 AB'.
                    postcode = postcode_string[:4] + ' ' + postcode_string[4:]
                    self.importer.import_to_csv(company, postcode)
                # NOTE(review): the raw match is written unconditionally, so a
                # compact postcode produces two CSV rows — possibly an ``else``
                # was intended; confirm before changing.
                self.importer.import_to_csv(company, match.group())
                print('{} found at company_list: {}'.format(match, company_list))
        except Exception as e:
            # print(str(company_list) + ' ' + str(e))
            print(e)

    def find_info(self):
        """Run the full pipeline: classify contact pages, then scrape a
        postcode from every page we know about (contact or home page)."""
        self.filter_contact_sets()
        for company_list in self.has_contact_set:
            self.find_post_code(company_list[0], company_list[1], company_list)
        for company_list in self.no_contact_set:
            self.find_post_code(company_list[0], company_list[1], company_list)
{
"api_name": "csvImporter.CsvImporter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "headers.HEADERS",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "bs4.Beautifu... |
38730450704 | # raw_to_evoked
# simple utility to plot_joint from file name
# Usage: raw2plot_joint.py RAW_FIF_FILE [EVENT_ID]
import sys
# BUG FIX: fail with a usage message instead of a bare IndexError when the
# file name argument is missing.
if len(sys.argv) < 2:
    sys.exit('usage: raw2plot_joint.py RAW_FIF_FILE [EVENT_ID]')
raw_file=sys.argv[1]
# Generalised: the event id may be given as an optional second argument
# (default 1, matching the previous hard-coded value).
event_id = int(sys.argv[2]) if len(sys.argv) > 2 else 1
import mne
Raw=mne.io.read_raw_fif(raw_file)
Events=mne.find_events(Raw)
# Epoch rejection thresholds for gradiometers, magnetometers and EOG.
reject = dict(grad=4e-10, mag=4e-12, eog=150e-6)
Epochs=mne.Epochs(Raw, Events, event_id=event_id, tmin=-0.1, tmax=0.8,
                  baseline=(None, 0), reject=reject, proj=True)
Evoked=Epochs.average()
Evoked.plot_joint()
| smonto/cibr-meg | raw2plot_joint.py | raw2plot_joint.py | py | 400 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mne.io.read_raw_fif",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mne.io",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mne.find_events",
"l... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.