content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#----------------------------------------------------------------------------------------------
'''
What to expect from this script:
1- This will generate list of patients with sepsis and no-sepsis in each sets.
2- More details be mentioned in the csv files "info_training_train/test"
'''
#----------------------------------------------------------------------------------------------
import os
from multiprocessing import Pool, cpu_count
import pandas as pd
import numpy as np
import sys
from sys import platform
from IPython.display import display, HTML
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
import time
from sklearn.preprocessing import normalize
from tqdm import tqdm
from sys import platform
if __name__ == '__main__':
    #----------------------------------------------------------------------------------------------
    # how to run the code :
    #---------------- Define MAIN_DIR where you have two files : training_setA and training_setB
    # Usage: python <this_script>.py <MAIN_DIR>
    # MAIN_DIR is taken from the first CLI argument and is assumed to end with
    # a path separator and to contain train_data/ and test_data/ sub-folders.
    MAIN_DIR = sys.argv[1]
    #----------------------------------------------------------------------------------------------
    # NOTE(review): plain string concatenation — breaks if MAIN_DIR lacks a
    # trailing '/'; consider os.path.join. Left as-is here.
    TRAIN_DIR= MAIN_DIR +'train_data/'
    TEST_DIR = MAIN_DIR +'test_data/'
    train_files = os.listdir(TRAIN_DIR)
    test_files = os.listdir(TEST_DIR)
    # generate files
    # NOTE(review): generate_info() is not defined in this chunk — presumably
    # defined elsewhere in the full file; confirm before running.
    generate_info()
| [
2,
10097,
1783,
26171,
198,
7061,
6,
198,
2061,
284,
1607,
422,
428,
4226,
25,
220,
628,
220,
352,
12,
770,
481,
7716,
1351,
286,
3871,
351,
384,
862,
271,
290,
645,
12,
325,
862,
271,
287,
1123,
5621,
13,
220,
198,
220,
362,
12... | 3.72067 | 358 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 21:52:30 2019
@author: c0s02bi
"""
#!/usr/bin/env python
# coding: utf-8
# Author: Chao Song 2019/01/08
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from fbprophet import Prophet
import math
from pydlm import dlm, trend, seasonality
from pydlm import dynamic,autoReg,modelTuner
from statsmodels.graphics.tsaplots import plot_pacf
from scipy.stats.stats import pearsonr
from fbprophet.plot import plot_yearly
from datetime import timedelta
# Week-number reference table: maps each calendar_date to its Walmart fiscal
# week number (wm_yr_wk_nbr). Used module-wide, incl. as the default lookup
# table for wm_nbr_add below.
calendar= pd.read_csv('wm_yr_wk_ref.csv',parse_dates=[0], names=['calendar_date','wm_yr_wk_nbr'],header=0)
def removehurricane(change_col,dfc,start_date,end_date,sales = False):
    '''
    Replace values recorded during a hurricane window with an estimate
    derived from the same weeks of the previous year.

    for wage data, change_col = 'cost', sales = False
    for sales data, change_col = 'total_sales', sales = True
    the window size = end_date & start_date is given by Didun's insight, manually
    Reference: https://confluence.walmart.com/display/SASTDSE/Hurricane+adjustment

    Parameters
    ----------
    change_col : str
        Column to adjust.
    dfc : pd.DataFrame
        Input frame; a deep copy is modified and returned (dfc is untouched).
    start_date, end_date : int
        wm_yr_wk_nbr bounds of the hurricane window (inclusive).
    sales : bool
        True  -> sales layout (columns 'WM_YEAR_WK_NBR' / 'club')
        False -> wage layout  (columns 'wm_yr_wk_nbr'  / 'club_nbr')

    Returns
    -------
    pd.DataFrame
        Copy of dfc where each in-window row is replaced by
        coeff[club] * last-year value of the same week.
    '''
    df = dfc.copy(deep=True)
    cols = df.columns.tolist()
    target_col = cols.index(change_col)
    if sales:
        date_col = cols.index('WM_YEAR_WK_NBR')
        club_col = cols.index('club')
    else:
        date_col = cols.index('wm_yr_wk_nbr')
        # BUG FIX: the original computed this index but discarded the result,
        # leaving club_col pointing at column 0 (rf_doc_num) for wage data.
        club_col = cols.index('club_nbr')
    # The comparison windows are identical for every club — compute once
    # instead of once per loop iteration as the original did.
    start_date_low = wm_nbr_add(start_date,-6)
    end_date_high = wm_nbr_add(end_date,6)
    start_date_last = wm_nbr_add(start_date,-52)
    end_date_last = wm_nbr_add(end_date,-52)
    start_date_low_last = wm_nbr_add(start_date_low,-52)
    end_date_high_last = wm_nbr_add(end_date_high,-52)
    club_ls = df[df.columns[club_col]].unique().tolist()
    coeff = []  # per-club ratio: last-year window mean / this-year window mean
    for club in club_ls:
        # mean of change_col in the 6 weeks just before/after the window, this year
        this_year = df.loc[(df[df.columns[club_col]]==club)&(((df[df.columns[date_col]]>=start_date_low)&(df[df.columns[date_col]]<start_date)) | ((df[df.columns[date_col]]>end_date)&(df[df.columns[date_col]]<=end_date_high)))][change_col].mean()
        # same windows, shifted back 52 weeks
        last_year = df.loc[(df[df.columns[club_col]]==club)&(((df[df.columns[date_col]]>=start_date_low_last)&(df[df.columns[date_col]]<start_date_last)) | ((df[df.columns[date_col]]>end_date_last)&(df[df.columns[date_col]]<=end_date_high_last)))][change_col].mean()
        coeff.append(last_year/this_year)
    # Hurricane effect date is recovered by inflation_rate (=coeff[club]) * LY_values
    for j in range(df.shape[0]):
        if df.iloc[j,date_col] >= start_date and df.iloc[j,date_col] <= end_date:
            # row of the same club, 52 weeks earlier
            tmp = df.loc[(df[df.columns[club_col]]==df.iloc[j,club_col]) & (df[df.columns[date_col]]==wm_nbr_add(df.iloc[j,date_col],-52))]
            club = df.iloc[j,club_col]
            df.iloc[j,target_col] = coeff[club_ls.index(club)]*tmp.iloc[0,target_col]
    return df
def getDatesFromWMWks(wm_wk_list):
    """Map Walmart week numbers (e.g. 201505, 201631) to the Friday of each week.

    Anchored at week 201801 -> 2018-02-02; offsets by whole weeks via
    weekDiff (assumed defined elsewhere in this module). From Michael's code.
    """
    anchor_wk = 201801
    anchor_date = pd.datetime(2018, 2, 2)  # Friday corresponding to 201801
    one_week = pd.Timedelta('7 days')
    return [anchor_date + one_week * weekDiff(wk, anchor_wk) for wk in wm_wk_list]
def wm_nbr_add (cur,weeks,cal = calendar):
    """Add `weeks` weeks to the Walmart week number `cur`.

    Converts `cur` to its Friday date, shifts by 7*weeks days, then looks the
    resulting date up in the reference table `cal`
    (columns: calendar_date, wm_yr_wk_nbr).
    """
    base_date = getDatesFromWMWks([cur])[0]
    shifted = base_date + timedelta(days=7 * weeks)
    match = cal.loc[cal.calendar_date == shifted]
    return match.iloc[0, 1]
def prep_data(raw,storemap,scat_op_club):
    '''
    Separate the raw SAP wage data by category (reference document number)
    and restrict it to Puerto Rico clubs.
    From Siddarth's code

    Parameters
    ----------
    raw : pd.DataFrame          raw SAP extract (e.g. ./data/sap_data_20181213.csv)
    storemap : pd.DataFrame     club -> location attributes (incl. state_prov_code)
    scat_op_club : pd.DataFrame operating-club list (column CLUB_NBR)

    Returns
    -------
    list : [punched_df, residual_worked_df, retro_df, holiday_df, lump_df,
            severance_df, calendar]
    '''
    #raw= pd.read_csv('./data/sap_data_20181213.csv')
    # NOTE(review): valid_ref_doc_nums / ref_doc_dict look like reference
    # material only — the filter below uses its own hard-coded list (which
    # also includes 291517, absent here). Confirm which list is intended.
    valid_ref_doc_nums = [ 50, 2115, 8, 51, 2307, 3438, 2705, 263432, 3227, 82, 2901, 3413, 54, 3238]
    ref_doc_dict = { 50:'Bereavement', 2115:'Disaster', 8:'Holiday Bonus', 51:'Jury Duty',
                2307:'Lump Sum Hourly', 3438:'Non-worked Hours', 2705:'OT pay Hist Award',
                263432:'Overtime', 3227:'Personal Payout Unused', 82:'Personal Time',
                2901:'Regional Pay Zone', 3413:'Retro Reg Hours', 54:'Severance Pay',
                3238:'Term Net Overpayment'}
    # keep only the wage categories of interest (column 2 = reference doc number)
    df= raw[raw.iloc[:,2].isin([3413, 2307, 8, 54, 291517,263432,50,51,82])].copy()
    cols= df.columns
    df=df.drop(cols[:2],axis=1)
    df= df.drop(cols[3:5],axis=1)
    df.columns= ['rf_doc_num','posting_date','cost_center','cost','retail']
    # extract the club number out of the cost-center string
    # NOTE(review): '[US|PR]' is a character class (any of U,S,|,P,R), not the
    # alternation (US|PR); it happens to match both prefixes — confirm intent.
    df['club_nbr']= df.iloc[:,-3].str.extract('[US|PR]0(\d+).*G.*$')
    df= df.loc[~df.club_nbr.isnull()]
    df.club_nbr= df.club_nbr.astype(int)
    df['country']= df.cost_center.str.extract('CONA/(.{2}).*$')
    df.posting_date=pd.to_datetime(df.posting_date)
    df=df.drop('cost_center',axis=1)
    # restrict to Puerto Rico
    df_PR = df[df.country.isin(['PR'])]
    df_PR = df_PR.sort_values(by=['club_nbr','posting_date'])
    df = df_PR
    storemap = storemap.loc[storemap['state_prov_code']=='PR']
    df=pd.merge(left=df, right= storemap, how='left', left_on= 'club_nbr', right_on='club_nbr', validate= 'many_to_one')
    #raw= pd.read_csv('./data/sap_data_20181213.csv')
    #storemap= pd.read_csv('./storemap.csv', usecols=[1,2,3,4,5])
    #scat_op_club= pd.read_csv('./scat_op_club.csv', usecols=[2] )
    # keep only clubs present in the operating-club list
    df=df.merge(right= scat_op_club, how='inner', left_on= 'club_nbr', right_on= 'CLUB_NBR')
    df=df.drop('CLUB_NBR', axis=1)
    #Subsetting for valid data
    df= df[df.open_status_code!=7]
    df= df[~df.open_status_code.isna()].drop('open_status_code',axis=1)
    df= df[df.country=='PR']
    # re-read the week reference table (shadows the module-level `calendar`)
    # and attach wm_yr_wk_nbr to each posting date
    calendar= pd.read_csv('wm_yr_wk_ref.csv',parse_dates=[0], names=['calendar_date','wm_yr_wk_nbr'],header=0)
    df=pd.merge(left=df, right= calendar, how='left', left_on='posting_date', right_on= 'calendar_date').drop('calendar_date',axis=1)
    df['date_pd']= getDatesFromWMWks(df.wm_yr_wk_nbr)
    # split the cleaned frame by wage category
    punched_df= df[(df.rf_doc_num==291517) | (df.rf_doc_num== 263432)].copy()
    residual_worked_df = df[(df.rf_doc_num==50) | (df.rf_doc_num== 51) | (df.rf_doc_num==82)].copy()
    retro_df= df[df.rf_doc_num==3413].copy().drop(['rf_doc_num'],axis=1)
    holiday_df= df[df.rf_doc_num==8].copy().drop(['rf_doc_num'],axis=1)
    lump_df= df[df.rf_doc_num==2307].copy().drop(['rf_doc_num'],axis=1)
    severance_df= df[df.rf_doc_num==54].copy().drop(['rf_doc_num'],axis=1)
    punched_df = punched_df.sort_values(by='posting_date')
    return [punched_df,residual_worked_df,retro_df,holiday_df,lump_df,severance_df,calendar]
# # Modeling the Punched Data
# this part is left for Prophet model without adding any regressor
# two options using bi-weekly view or using daily view
# Share of a 14-day pay period's wages attributed to each day, derived from
# US clubs; entries repeat weekly (each 7-day half sums to ~1, whole list ~2,
# hence the /2 in gen_daily_data).
day_sep = [0.1464,0.1480,0.1460,0.1067,0.1463,0.1489,0.1577,0.1464,0.1480,0.1460,0.1067,0.1463,0.1489,0.1577]# Sat,..., Thur, Fri
def gen_daily_data(punched_pro_club,day_sep):
    """Expand bi-weekly pay-period rows into one row per day.

    Each input row (col 0 = posting_date, col 1 = cost, col 2 = wm_yr_wk_nbr)
    is spread over the 14 days ending at posting_date; the period cost is
    allocated by the weekday weights in day_sep, halved because day_sep
    covers two weeks and sums to ~2. Daily-view data can then be fed into the
    same estimators as the bi-weekly view.

    Returns a DataFrame with columns posting_date, cost, wm_yr_wk_nbr.
    """
    dates, costs, weeks = [], [], []
    for row_idx in range(punched_pro_club.shape[0]):
        period_cost = punched_pro_club.iloc[row_idx, 1]
        period_week = punched_pro_club.iloc[row_idx, 2]
        # first day of the 14-day period ending at posting_date
        day = punched_pro_club.iloc[row_idx, 0] - timedelta(days=13)
        for offset in range(14):
            if dates.count(day) > 0:
                # overlapping pay periods in the input — flag loudly
                print("shoot")
            dates.append(day)
            day = day + timedelta(days=1)
            costs.append(period_cost * day_sep[offset] / 2)
            weeks.append(period_week)
    return pd.DataFrame({'posting_date': dates, 'cost': costs, 'wm_yr_wk_nbr': weeks})
def estimate_and_predict_prophet_PR(calendar,punched_df, end_train_date, start_test_date, daily_view=False, target_column = 'cost',pred_days=120,horizon = 8,missing_val = 201735):
    '''
    Using Facebook Prophet model without any regressor.
    'daily_view' variable is an indicator specified by user whether to separate bi-weekly SAP data to daily
    'daily_view' is not recommended.
    'pred_days' variable is how many days ahead you want to predict
    'horizon' is the number of bi-weekly periods kept from the forecast
    'missing_val' is a week missing from SAP data, interpolated from its
    +/- 2-week neighbours before fitting.
    return type: prediction result as a DataFrame,
    columns=['ds','yhat','club'] ds is the posting_date and yhat is the prediction value
    this serves as the first layer of mixed model.
    '''
    # normalize column names so both sales-style ('club') and wage-style
    # ('club_nbr') frames are accepted
    if 'club_nbr' not in punched_df.columns:
        punched_df['club_nbr'] = punched_df['club']
        punched_df = punched_df.drop('club',axis = 1)
    if 'posting_date' not in punched_df.columns:
        punched_df['posting_date'] = getDatesFromWMWks(punched_df['wm_yr_wk_nbr'])
    punched = punched_df.groupby(['club_nbr','posting_date'])[target_column].sum()
    # NOTE(review): Series has no `.column` attribute — this is a silent no-op
    # (probably `.name` was intended); left unchanged.
    punched.column = ['total_punched_wg']
    punched = punched.reset_index()
    punched = pd.merge(left=punched, right=calendar, how='left', left_on='posting_date', right_on= 'calendar_date').drop('calendar_date',axis=1)
    punched = punched.drop('posting_date',axis = 1)
    punched_pro = punched_df.groupby(['club_nbr','posting_date'])[target_column].sum()
    punched_pro.column = ['total_punched_wg']
    punched_pro = punched_pro.reset_index()
    punched_pro = pd.merge(left=punched_pro, right=calendar, how='left', left_on='posting_date', right_on='calendar_date').drop('calendar_date',axis=1)
    punched_pro = removehurricane(target_column,punched_pro,201733,201739,sales = False) #201735 is missing in the SAP data, recover below
    club_ls = punched_pro.club_nbr.unique()
    res = pd.DataFrame()
    # fit one Prophet model per club
    for club in club_ls:
        cur = club
        punched_pro_club = punched_pro[punched_pro.club_nbr.isin([club])]
        ##########################################
        #adding missing value: interpolate missing_val as the mean of its
        # +/- 2-week neighbours
        if missing_val not in punched_pro_club.wm_yr_wk_nbr.values.tolist():
            punched_pro_club.loc[-1] = [club,punched_pro_club.loc[punched_pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,1]+timedelta(days=14),0.5*punched_pro_club.loc[punched_pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,2]+0.5*punched_pro_club.loc[punched_pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,2)].iloc[0,2],missing_val] # adding a row
            punched_pro_club.index = punched_pro_club.index + 1 # shifting index
        ##############################################
        punched_pro_club = punched_pro_club.sort_values(by='wm_yr_wk_nbr')
        punched_pro_club = punched_pro_club.drop('club_nbr',axis=1).reset_index().drop('index',axis=1)
        if (daily_view):
            # optionally spread bi-weekly totals over days (not recommended)
            punched_pro_club = gen_daily_data(punched_pro_club,day_sep)
        # reshape the training slice to Prophet's expected (ds, y) format
        trainset = punched_pro_club.loc[punched_pro_club.wm_yr_wk_nbr<=end_train_date].drop(['wm_yr_wk_nbr'],axis=1)
        columnsTitles=["posting_date",target_column]
        trainset=trainset.reindex(columns=columnsTitles)
        trainset.columns=["ds","y"]
        m = Prophet(yearly_seasonality=True)
        m.fit(trainset)
        future = m.make_future_dataframe(periods=pred_days)
        forecast = m.predict(future)
        result = forecast[['ds','yhat']].tail(pred_days)
        # keep only the bi-weekly posting dates within the horizon
        weeklist = []
        for i in range(horizon):
            weeklist.append(trainset.iloc[-1,trainset.columns.tolist().index('ds')] + timedelta(days=14*(i+1)))
        result = result[result.ds.isin(weeklist)]
        yhat = result.yhat.values
        # accumulate per-club results (the two branches differ only in the
        # initial concat target)
        if res.shape[0] == 0:
            tmp = result
            tmp['club'] = pd.Series([cur for i in range(result.shape[0])],index=tmp.index)
            res = tmp
        else:
            tmp = result
            tmp['club'] = pd.Series([cur for i in range(result.shape[0])],index=tmp.index)
            res = pd.concat([res,tmp],axis = 0)
    return res
def proportion(df_sales):
    """Compute each club's share of total regional (PR) sales per Walmart week.

    Hurricane adjustment period: 09-15-2017 to 10-20-2017 (weeks
    201733-201739) — replaced via removehurricane before shares are computed.

    Returns a DataFrame whose columns include total_sales_across (regional
    weekly total), per_nbr (this club's fraction of it) and wm_yr_wk_nbr.
    """
    club_ls = df_sales.club.unique()
    adjusted = removehurricane('total_sales', df_sales, 201733, 201739, sales=True)
    # regional weekly totals
    weekly_total = adjusted.groupby('WM_YEAR_WK_NBR')['total_sales'].sum().reset_index()
    weekly_total.columns = ['WM_YEAR_WK_NBR', 'total_sales_across']
    # attach the regional total to every club-week row
    shares = pd.merge(left=weekly_total, right=adjusted,
                      left_on='WM_YEAR_WK_NBR', right_on='WM_YEAR_WK_NBR', validate='1:m')
    shares['per_nbr'] = shares['total_sales'] / shares['total_sales_across']
    shares = shares.drop('total_sales', axis=1)
    shares = shares.sort_values(by=['club', 'WM_YEAR_WK_NBR']).reset_index().drop('index', axis=1)
    # standardize the week column name used downstream
    shares['wm_yr_wk_nbr'] = shares['WM_YEAR_WK_NBR']
    return shares.drop(['WM_YEAR_WK_NBR'], axis=1)
def predict_proportion(calendar,df_sales,end_train_date, start_test_date,horizon = 8):
    '''
    treat proportion as a time series and use as observable regressor in dlm data
    return: dataframe contains the future dates combined with actual data.
    return dataframe columns_list = [club,wm_yr_wk_nbr,per_nbr_fc,total_sales_across]
    date before start_test_date is true value, although under column 'per_nbr_fc'.
    date after start_test_date is prediction value. It is obtained at once before we go to dlm model for the wage prediction.
    '''
    df_propor = proportion(df_sales)
    df_propor_PR_ts = pd.DataFrame()
    club_ls = df_propor.club.unique()
    for club in club_ls:
        df_propor_club = df_propor[df_propor.club.isin([club])]
        trainset_propor = df_propor_club.loc[df_propor_club.wm_yr_wk_nbr<=end_train_date]
        # Prophet forecast of this club's share of regional sales
        predictMean = estimate_and_predict_prophet_PR(calendar,df_propor_club, end_train_date, start_test_date, target_column = 'per_nbr')
        predictMean = pd.merge(left=predictMean,right=calendar,left_on='ds',right_on='calendar_date',validate='1:1').drop(['ds','calendar_date'],axis = 1)
        # Prophet forecast of the regional weekly total
        totalMean = estimate_and_predict_prophet_PR(calendar,df_propor_club, end_train_date, start_test_date, target_column = 'total_sales_across')
        totalMean = pd.merge(left=totalMean,right=calendar,left_on='ds',right_on='calendar_date',validate='1:1').drop(['ds','calendar_date'],axis = 1)
        # build the combined actual + forecast week list (bi-weekly steps)
        l = trainset_propor.shape[0]+horizon
        wk_ls = trainset_propor.wm_yr_wk_nbr.values.tolist()
        wk_ls = wk_ls + [wm_nbr_add(start_test_date,x) for x in range(0,horizon*2,2)]
        # NOTE(review): this recomputes the same slice as above — redundant
        # but harmless; left unchanged.
        trainset_propor = df_propor_club.loc[df_propor_club.wm_yr_wk_nbr<=end_train_date]
        # actuals up to end_train_date followed by forecasts, both under
        # the 'per_nbr_fc' / 'total_sales_across' columns
        tmp = pd.DataFrame({'club':[club for i in range(l)],'wm_yr_wk_nbr':wk_ls,'per_nbr_fc':trainset_propor['per_nbr'].values.tolist()+predictMean['yhat'].values.tolist()})
        tmp['total_sales_across'] = pd.Series(trainset_propor['total_sales_across'].values.tolist()+totalMean['yhat'].values.tolist())
        if (df_propor_PR_ts.shape[0] == 0):
            df_propor_PR_ts = tmp.copy(deep = True)
        else:
            df_propor_PR_ts = pd.concat([df_propor_PR_ts,tmp],axis = 0)
    df_propor_PR_ts = df_propor_PR_ts.reset_index().drop('index',axis = 1)
    return df_propor_PR_ts
# using proportion of sales as the forecast for the real sales (more predictable)
# the total trend is included in macro variable (assumption)
def estimate_and_predict_dlm_PR(calendar,df_propor_PR_ts, punched_df, end_train_date, start_test_date,start_of_this_year,enable_sales,pred_weeks = 8,locality = 10,r = 0.05,missing_val = 201735):
    '''
    pydlm forecast of punched wages per club, blending a global model (full
    history) with a local model (last `locality` weeks).

    accept the forecasting sales_proportion data as one regressor
    df_propor_PR_ts: output of predict_proportion (per-club sales shares)
    enable_sales: include the sales-ratio regressors in the local model
    pred_weeks: number of bi-weekly periods to forecast
    locality: lookback window / AR degree of the local model
    r: decay rate of the global-vs-local blending weight
    missing_val: week missing from SAP data, interpolated from neighbours
    return type: DataFrame with prediction result
    return: columns = [wm_yr_wk_nbr,club,yhat]
    '''
    res = pd.DataFrame()
    punched = punched_df.groupby(['club_nbr','posting_date'])['cost'].sum()
    # NOTE(review): Series has no `.column`; silent no-op, left unchanged.
    punched.column = ['total_punched_wg']
    punched = punched.reset_index()
    punched = pd.merge(left=punched, right=calendar, how='left', left_on='posting_date', right_on= 'calendar_date').drop('calendar_date',axis=1)
    # mean wage among all clubs
    punched = removehurricane('cost',punched,201733,201739,sales = False)
    punched_mean = punched.groupby(['wm_yr_wk_nbr','posting_date'])['cost'].mean()
    punched_mean = punched_mean.reset_index()
    punched_mean.columns = ['wm_yr_wk_nbr','posting_date','cost']
    punched_mean['club_nbr'] = pd.Series(np.ones([punched_mean.shape[0]]))
    ##########################
    # interpolate the missing SAP week as the mean of its +/- 2-week neighbours
    if missing_val not in punched_mean.wm_yr_wk_nbr.tolist():
        punched_mean.loc[-1] = [missing_val,punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,1]+timedelta(days=14),0.5*punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,2]+0.5*punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,2)].iloc[0,2],1] # adding a row
        punched_mean.index = punched_mean.index + 1
    #########################
    # smooth the mean-wage series (0.5/0.25/0.25 weights) and forecast it
    # with Prophet; this becomes the 'macro' regressor below
    punched_mean1 = punched_mean.copy(deep=True)
    punched_mean1['cost'] = 0.5*punched_mean1['cost']+ 0.25*punched_mean1['cost'].shift(1)+0.25*punched_mean1['cost'].shift(2)
    ty = punched_mean1['cost'].mean()
    punched_mean1[['cost']] = punched_mean1[['cost']].fillna(value = ty)
    punched_mean1 = estimate_and_predict_prophet_PR(calendar,punched_mean1, end_train_date, start_test_date, daily_view=False, pred_days=120) #predict the mean wages.
    punched_mean1 = punched_mean1.drop('club',axis = 1)
    punched_mean1.columns = ['posting_date','PR_cost']
    punched_mean1 = pd.merge(left=punched_mean1,right=calendar,how = 'left',left_on='posting_date',right_on='calendar_date').drop('calendar_date',axis=1)
    # actual (smoothed) mean wages up to end_train_date, then the forecast
    tmp = punched.groupby(['wm_yr_wk_nbr','posting_date'])['cost'].mean()
    tmp = tmp.reset_index()
    tmp.columns = ['wm_yr_wk_nbr','posting_date','PR_cost']
    tmp = tmp.loc[tmp.wm_yr_wk_nbr<=end_train_date]
    tmp['PR_cost'] = 0.5*tmp['PR_cost']+0.25*tmp['PR_cost'].shift(1)+0.25*tmp['PR_cost'].shift(2)
    ty = tmp['PR_cost'].mean()
    tmp[['PR_cost']] = tmp[['PR_cost']].fillna(value = ty)
    punched_mean = pd.concat([tmp,punched_mean1],axis = 0)
    # interpolate the missing week again on the concatenated series
    if missing_val not in punched_mean.wm_yr_wk_nbr.tolist():
        tu = [0.5*punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,0]+0.5*punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,2)].iloc[0,0]]
        tu.append(punched_mean.loc[punched_mean.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,1]+timedelta(days=14))
        tu.append(missing_val)
        punched_mean.loc[-1] = tu # adding a row
        punched_mean.index = punched_mean.index + 1 # shifting index
    punched_mean = punched_mean.sort_values(by='wm_yr_wk_nbr').reset_index().drop('index',axis=1)
    punched = punched.drop('posting_date',axis = 1)
    # per-club punched wages with hurricane adjustment
    punched_pro = punched_df.groupby(['club_nbr','posting_date'])['cost'].sum()
    punched_pro.column = ['total_punched_wg']
    punched_pro = punched_pro.reset_index()
    punched_pro=pd.merge(left=punched_pro, right= calendar, how='left', left_on='posting_date', right_on= 'calendar_date').drop('calendar_date',axis=1)
    punched_pro = removehurricane('cost',punched_pro,201733,201739,sales = False)
    #201735 is Maria Hurrican Missing
    #201737 is the Irma Hurricane
    club_ls = punched.club_nbr.unique()
    # fit a global DLM and a local DLM per club
    for club in club_ls:
        pro_club = punched_pro[punched_pro.club_nbr.isin([club])]
        #########################################
        # adding missing value (same +/- 2-week interpolation as above)
        if missing_val not in pro_club.wm_yr_wk_nbr.tolist():
            pro_club.loc[-1] = [club,pro_club.loc[pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,1]+timedelta(days=14),0.5*pro_club.loc[pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,2]+0.5*pro_club.loc[pro_club.wm_yr_wk_nbr==wm_nbr_add(missing_val,2)].iloc[0,2],missing_val] # adding a row
            pro_club.index = pro_club.index + 1 # shifting index
        ####################################################
        pro_club = pro_club.sort_values(by='posting_date').reset_index().drop('index',axis=1)
        # sales-share time series for this club, with 1- and 2-period lags
        pro_sales = df_propor_PR_ts.loc[df_propor_PR_ts.club == club].drop(['club'],axis=1)
        pro_club = pro_club.drop(['club_nbr','posting_date'],axis=1)
        pro_club.columns = ['cost','wm_yr_wk_nbr']
        pro_sales['total_sales'] = pro_sales['total_sales_across']*pro_sales['per_nbr_fc']
        pro_sales = pd.concat([pro_sales]+[pro_sales.total_sales.shift(x) for x in range(1,3)],axis=1)
        pro_sales.columns = ['wm_yr_wk_nbr','per_nbr_fc','total_sales_across','total_sales_0','sr_1','sr_2']
        #########################################
        # adding missing value to the sales regressors as well
        if missing_val not in pro_sales.wm_yr_wk_nbr.unique().tolist():
            tu = []
            for k in range(len(pro_sales.columns)):
                tu.append(0.5*pro_sales.loc[pro_sales.wm_yr_wk_nbr==wm_nbr_add(missing_val,-2)].iloc[0,k]+0.5*pro_sales.loc[pro_sales.wm_yr_wk_nbr==wm_nbr_add(missing_val,2)].iloc[0,k])
            tu[0] = int(tu[0])
            pro_sales.loc[-1] = tu # adding a row
            pro_sales.index = pro_sales.index + 1 # shifting index
        pro_sales = pro_sales.sort_values(by='wm_yr_wk_nbr').reset_index().drop('index',axis=1)
        # attach the macro (mean-wage) series; fill any remaining gaps with means
        pro_sales = pd.merge(left=pro_sales, right=punched_mean, how ='right',left_on='wm_yr_wk_nbr', right_on='wm_yr_wk_nbr', validate='1:1')
        pro_sales = pro_sales.drop(['posting_date'],axis=1)
        pro_sales = pro_sales.apply(lambda x: x.fillna(x.mean()),axis=0)
        pro_sales_train = pro_sales.loc[pro_sales.wm_yr_wk_nbr<=end_train_date]
        pro_sales_test = pro_sales.loc[pro_sales.wm_yr_wk_nbr>=start_test_date]
        # ---- global model components ----
        # trend
        linear_trend = trend(degree=2, discount=0.98, name='linear_trend', w=8)
        # seasonality
        seasonal26 = seasonality(period=26, discount=1, name='seasonal26', w=12)
        # control variable
        sales0 = pro_sales_train['total_sales_0'].values.tolist()
        s0 = [[x] for x in sales0]
        sales1 = pro_sales_train['sr_1'].values.tolist()
        s1 = [[x] for x in sales1]
        sales2 = pro_sales_train['sr_2'].values.tolist()
        s2 = [[x] for x in sales2]
        macro = pro_sales_train['PR_cost'].values.tolist()
        m1 = [[x] for x in macro]
        #####################################
        s0 = dynamic(features=s0, discount=0.99, name='sales0', w=8)
        s1 = dynamic(features=s1, discount=0.99, name='sales1', w=6) # use the actual sales and forecasting sales amount
        s2 = dynamic(features=s2, discount=0.95, name='sales2', w=6)
        m1 = dynamic(features=m1, discount=0.99, name='macro', w=12)
        #e1 = dynamic(features=e1,discount=0.95,name='eff',w=6)
        # NOTE(review): sales regressors s0/s1/s2 are built but deliberately
        # commented out of the global model below — only trend, seasonality,
        # AR and the macro regressor are used.
        drm = dlm(pro_club['cost']) + linear_trend + seasonal26+autoReg(degree=locality, name='ar2', w=6)+m1#+s0+s1+s2+m1
        drm.fit()
        #testset
        pro_sales_test = pro_sales_test.head(pred_weeks)
        sales0test = pro_sales_test['total_sales_0'].head(pred_weeks).values.tolist()
        s0test = [[x] for x in sales0test]
        sales1test = pro_sales_test['sr_1'].head(pred_weeks).values.tolist()
        s1test = [[x] for x in sales1test]
        sales2test = pro_sales_test['sr_2'].head(pred_weeks).values.tolist()
        s2test = [[x] for x in sales2test]
        macrotest = pro_sales_test['PR_cost'].head(pred_weeks).values.tolist()
        m1test = [[x] for x in macrotest]
        #efftest = testset['eff'].head(pred_weeks).values.tolist()
        #e1test = [[x] for x in efftest]
        features = {'sales0':s0test,'sales1':s1test, 'sales2':s2test,'macro':m1test}#,'eff':e1test}
        (predictMean, predictVar) = drm.predictN(N=pred_weeks, date=drm.n-1,featureDict=features)
        # ---- local model: last `locality` weeks only ----
        pro_sales = pro_sales.drop(['sr_1','sr_2'],axis = 1)
        pro_sales['ratio'] = pro_sales['total_sales_0']/pro_sales['total_sales_across']
        pro_sales['ratio_1'] = pro_sales['ratio'].shift(1)
        pro_sales['ratio_2'] = pro_sales['ratio'].shift(2)
        trainset1_year = pro_club.loc[pro_club.wm_yr_wk_nbr<=end_train_date].loc[pro_club.wm_yr_wk_nbr>=end_train_date-locality]
        trainset_year = pro_sales.loc[pro_sales.wm_yr_wk_nbr<=end_train_date].loc[pro_sales.wm_yr_wk_nbr>=end_train_date-locality]
        # NOTE(review): result of this fillna is discarded (not assigned) —
        # probable oversight; left unchanged.
        trainset_year.apply(lambda x: x.fillna(x.mean()),axis=0)
        linear_trend_year = trend(degree=1, discount=0.99, name='linear_trend_year', w=10)
        sales0_year = trainset_year['ratio'].values.tolist()
        s0_year = [[x] for x in sales0_year]
        # use the forecast of the ratio of each club among total in PR area
        # since this is a local model, the total amount in area can be assumed to be constant.
        sales1_year = trainset_year['ratio_1'].values.tolist()
        s1_year = [[x] for x in sales1_year]
        sales2_year = trainset_year['ratio_2'].values.tolist()
        s2_year = [[x] for x in sales2_year]
        macro_year = trainset_year['PR_cost'].values.tolist()
        m1_year = [[x] for x in macro_year]
        #####################################
        s0_year = dynamic(features=s0_year, discount=0.99, name='sales0_year', w=10)
        s1_year = dynamic(features=s1_year, discount=0.99, name='sales1_year', w=8)
        s2_year = dynamic(features=s2_year, discount=0.95, name='sales2_year', w=6)
        m1_year = dynamic(features=m1_year, discount=0.99, name='macro_year', w=10)
        #e1_year = dynamic(features=e1_year,discount=0.95,name='eff_year',w=6)
        if enable_sales:
            drm_year = dlm(trainset1_year['cost'])+autoReg(degree=locality, name='ar2', w=5)+linear_trend_year+m1_year+s0_year+s1_year+s2_year
        else:
            drm_year = dlm(trainset1_year['cost'])+autoReg(degree=locality, name='ar2', w=5)+linear_trend_year+m1_year#+s0_year+s1_year+s2_year
        drm_year.fit()
        testset_year = pro_sales.loc[pro_sales.wm_yr_wk_nbr>=start_test_date].head(pred_weeks)
        sales0test = testset_year['ratio'].head(pred_weeks).values.tolist()
        s0test = [[x] for x in sales0test]
        sales1test = testset_year['ratio_1'].head(pred_weeks).values.tolist()
        s1test = [[x] for x in sales1test]
        sales2test = testset_year['ratio_2'].head(pred_weeks).values.tolist()
        s2test = [[x] for x in sales2test]
        # NOTE(review): 'macro_year' reuses m1test from the GLOBAL model's
        # feature prep above — confirm this is intended.
        features_year = {'sales0_year':s0test,'sales1_year':s1test, 'sales2_year':s2test,'macro_year':m1test}
        (predictMean_year, predictVar_year) = drm_year.predictN(N=pred_weeks, date=drm_year.n-1,featureDict=features_year)
        # blend global and local predictions: weight decays with distance
        # from the fiscal-year start at rate r
        weeklist = []
        p1 = np.exp(-r*(abs(end_train_date-start_of_this_year-52)))
        p2 = 1-p1
        for k in range(pred_weeks):
            weeklist.append(wm_nbr_add(start_test_date,2*k))
        if res.shape[0] == 0:
            res['wm_yr_wk_nbr'] = weeklist
            res['club'] = pd.Series(club*np.ones(pred_weeks),index=res.index)
            res['yhat'] = pd.Series(p1*np.asarray(predictMean)+p2*np.asarray(predictMean_year),index=res.index)
        else:
            tmp = pd.DataFrame()
            tmp['wm_yr_wk_nbr'] = weeklist
            tmp['club'] = pd.Series(club*np.ones(pred_weeks),index=tmp.index)
            tmp['yhat'] = pd.Series(p1*np.asarray(predictMean)+p2*np.asarray(predictMean_year),index=tmp.index)
            res = pd.concat([res,tmp],axis = 0)
    return res
def mixedmodel(df_sales,end_train_date,start_test_date,calendar,punched_df,start_of_this_year,enable_sales,r=0.06,locality=10):
    '''
    this is the final output model
    DLM + Prophet. Idea is Prophet for long trend and DLM responsible for local trend
    weight between two models needs more concern, so far grid searching
    scat_plan data is not used at this moment, should be useful in the future.
    return type: dataframe with columns incl. wm_yr_wk_nbr, club, yhat
    (Prophet), yhat_local (DLM) and the blended yhat_mixed.
    '''
    # forecast per-club sales shares, then run the DLM layer on top of them
    df_propor_PR_ts = predict_proportion(calendar,df_sales,end_train_date, start_test_date)
    df_sales_fc = df_propor_PR_ts.groupby('wm_yr_wk_nbr')['total_sales_across'].mean().reset_index()
    res = estimate_and_predict_dlm_PR(calendar,df_propor_PR_ts, punched_df, end_train_date,start_test_date, start_of_this_year,enable_sales)
    # Prophet layer (long-trend model)
    ttmp = estimate_and_predict_prophet_PR(calendar,punched_df, end_train_date, start_test_date,daily_view = False)
    ttmp=pd.merge(left=ttmp, right=calendar, how='left', left_on='ds', right_on= 'calendar_date').drop(['calendar_date','ds'],axis=1)
    ##res = ttmp
    res.columns = ['wm_yr_wk_nbr_local','club_local','yhat_local'] #dlm served mainly as local model
    res = pd.merge(left=res,right=ttmp,left_on=['club_local','wm_yr_wk_nbr_local'],right_on=['club','wm_yr_wk_nbr'],validate='1:1').drop(['club_local','wm_yr_wk_nbr_local'],axis=1)
    # blend: Prophet weight decays with distance into the fiscal year at rate r
    p1 = np.exp(-r*(end_train_date-start_of_this_year))
    p2 = 1-p1
    res['yhat_mixed'] = p1*res['yhat'] +p2*res['yhat_local']
    return res
def testpunched():
    '''
    backtesting function for the punched_model
    Runs the mixed model on five rolling train/test splits and scores each
    club's 8-period forecast.
    return type: mape list (one list of per-club MAPEs per split)
    '''
    # rolling backtest splits (wm_yr_wk_nbr); test starts one period after train ends
    end_train_date= [201811,201815,201819,201823,201827]
    start_test_date= [201813,201817,201821,201825,201829]
    exclude = [0,4041,4925,6279]
    #df_sales_total['total_sales'] = df_sales_total['total_sales'].rolling(2).mean()
    mape_comp = []
    enable_sales = False
    #calendar= pd.read_csv('wm_yr_wk_ref.csv',parse_dates=[0], names=['calendar_date','wm_yr_wk_nbr'],header=0)
    for i in range(len(start_test_date)):
        start_of_this_year = math.floor(end_train_date[i]/100)
        # reload and re-prepare the data for every split
        raw= pd.read_csv('./data/sap_data_20181213.csv')
        storemap= pd.read_csv('./storemap.csv', usecols=[1,2,3,4,5])
        scat_op_club= pd.read_csv('./scat_op_club.csv', usecols=[2] )
        [punched_df,residual_worked_df,retro_df,holiday_df,lump_df,severance_df,calendar] = prep_data(raw,storemap,scat_op_club)
        punched_df_train = punched_df.loc[punched_df.wm_yr_wk_nbr<= end_train_date[i]]
        punched_df_test = punched_df.loc[punched_df.wm_yr_wk_nbr>= start_test_date[i]]
        df_sales = pd.read_csv('./data/PR_sales.csv')
        df_sales = df_sales[df_sales.WM_YEAR_WK_NBR.isin(punched_df.wm_yr_wk_nbr.unique().tolist())]
        df_sales = df_sales.sort_values(by ='WM_YEAR_WK_NBR' )
        df_sales = df_sales.loc[df_sales.WM_YEAR_WK_NBR <= 201813]
        df_sales_train = df_sales.loc[df_sales.WM_YEAR_WK_NBR <= end_train_date[i]]
        df_sales_test = df_sales.loc[df_sales.WM_YEAR_WK_NBR >= start_test_date[i]]
        df_sales_total = df_sales.groupby('WM_YEAR_WK_NBR')['total_sales'].sum()
        df_sales_total = df_sales_total.reset_index()
        df_sales_total = df_sales_total.sort_values(by='WM_YEAR_WK_NBR')
        res = mixedmodel(df_sales_train,end_train_date[i],start_test_date[i],calendar,punched_df_train,start_of_this_year,enable_sales)
        club_ls = res.club.unique()
        # actual per-club test wages for scoring
        punched_pro_test = punched_df_test.groupby(['club_nbr','posting_date'])['cost'].sum()
        # NOTE(review): Series has no `.column`; silent no-op, left unchanged.
        punched_pro_test.column = ['total_punched_wg']
        punched_pro_test = punched_pro_test.reset_index()
        punched_pro_test = pd.merge(left=punched_pro_test, right= calendar, how='left', left_on='posting_date', right_on= 'calendar_date').drop('calendar_date',axis=1)
        mape_mixed = []
        for club in club_ls:
            pro_club = punched_pro_test[punched_pro_test.club_nbr.isin([club])]
            testset = pro_club.loc[pro_club.wm_yr_wk_nbr>=start_test_date[i]]
            testset = testset.head(8)
            res_club = res[res.club.isin([club])]
            # NOTE(review): mape() is not defined in this chunk — presumably
            # defined elsewhere in the module; confirm before running.
            mape_mixed.append(mape(np.asarray(testset['cost']),np.asarray(res_club['yhat_mixed'])))
        mape_comp.append(mape_mixed)
    return mape_comp
def compeltetable(calendar,residual_worked_df,punched_df):
    '''
    (sic: "complete table") Fill gaps so every club has a row for every
    bi-weekly posting date, with zero cost where SAP has no record.
    it was mainly used when modeling other categories of worked wages.
    for each individual club, some categories do not show up in SAP data
    return the dataframe with same format as punched_df with filled zero.
    compatible with the prophet estimation above.
    '''
    club_ls = punched_df.club_nbr.unique()
    # date range of the punched data defines the grid to fill
    start_date = punched_df.sort_values(by=['wm_yr_wk_nbr']).iloc[0,1]
    start_wk = punched_df.sort_values(by=['wm_yr_wk_nbr']).iloc[0,-2]
    end_date = punched_df.sort_values(by=['wm_yr_wk_nbr']).iloc[-1,1]
    ref_ls = residual_worked_df.rf_doc_num.unique()
    res = pd.DataFrame()
    for ref in ref_ls:
        for club in club_ls:
            tmp = residual_worked_df.loc[(residual_worked_df.club_nbr==club)&(residual_worked_df.rf_doc_num==ref)]
            curdate = start_date
            # walk the bi-weekly grid; insert a zero-cost row for any date
            # missing from this club/category combination
            while curdate <= end_date:
                if tmp.loc[tmp.posting_date==curdate].shape[0] == 0:
                    wk = calendar.loc[calendar.calendar_date==curdate].iloc[0,1]
                    # NOTE(review): DataFrame.append is deprecated/removed in
                    # newer pandas; works on the pandas version this targets.
                    # NOTE(review): rf_doc_num is hard-coded to 50 for filler
                    # rows regardless of `ref` — confirm intent.
                    tmp = tmp.append({'rf_doc_num':50,'posting_date':curdate,'cost':0,'retail':0,'club_nbr':club,'country':'PR','city_name':'S','state_prov_code':'PR','postal_code':0,'wm_yr_wk_nbr':wk,'date_pd':curdate},ignore_index=True)
                curdate = curdate+timedelta(days=14)
            if res.shape[0] == 0:
                res = tmp
            else:
                res = pd.concat([res,tmp],axis = 0)
    return res
def testresidualworked(residual_worked_df):
'''
main function for other categories of worked wages
return: per club.
'''
end_train_date= [201845]#,201815,201819,201823,201827]
start_test_date= [201847]#,201817,201821,201825,201829]
start_of_this_year = 201800
exclude = [0,4041,4925,6279]
[punched_df,residual_worked_df,retro_df,holiday_df,lump_df,severance_df] = prep_data()
df_sales = pd.read_csv('./data/PR_sales.csv')
df_sales_total = df_sales.groupby('WM_YEAR_WK_NBR')['total_sales'].sum()
df_sales_total = df_sales_total.reset_index()
df_sales_total['total_sales'] = df_sales_total['total_sales'].rolling(2).mean()
mape_comp = []
calendar= pd.read_csv('wm_yr_wk_ref.csv',parse_dates=[0], names=['calendar_date','wm_yr_wk_nbr'],header=0)
residual_worked_df = compeltetable(calendar,residual_worked_df,punched_df)
print(residual_worked_df.groupby(['posting_date','club_nbr'])['cost'].sum())
for i in range(len(start_test_date)-1,len(start_test_date)):
ttmp = estimate_and_predict_prophet_PR(calendar,residual_worked_df, end_train_date[i], start_test_date[i],daily_view = False) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2365,
1248,
2310,
25,
4309,
25,
1270,
13130,
198,
198,
31,
9800,
25,
269,
15,
82,
... | 2.167081 | 16,106 |
from main.gui import Draw
from main.model import Service
from main.model import Cluster
# Script entry point.
# NOTE(review): main() is neither defined nor imported in the lines visible
# here — presumably it is defined elsewhere in this module; confirm.
if __name__ == '__main__':
    main()
| [
6738,
1388,
13,
48317,
1330,
15315,
198,
6738,
1388,
13,
19849,
1330,
4809,
198,
6738,
1388,
13,
19849,
1330,
38279,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 3.225 | 40 |
#!/usr/bin/env python
import os
import argparse
import oneflow as flow
import numpy as np
from conv_tas_net import ConvTasNet
from libs.audio import WaveReader, write_wav
# ---------------------------------------------------------------------------
# Command-line interface for Conv-TasNet speech separation, then run it.
# Parses I/O locations plus the network hyper-parameters and hands the
# resulting namespace to separating() (defined elsewhere in this file).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description="Command to do speech separation in time domain using ConvTasNet",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# I/O options.
parser.add_argument(
    "--input", type=str, required=True, help="Script for input waveform"
)
parser.add_argument(
    "--fs", type=int, default=8000, help="Sample rate for mixture input"
)
parser.add_argument(
    "--dump-dir",
    type=str,
    default="sps_tas",
    help="Directory to dump separated results out",
)
# Network architecture
# (these flags mirror the ConvTasNet constructor arguments, per their help
# strings: encoder filters N/L, bottleneck B, conv channels H, kernel P,
# blocks-per-repeat X, repeats R)
parser.add_argument("--num_spks", default=2, type=int, help="Number of spearkers")
parser.add_argument(
    "--N", default=256, type=int, help="Number of filters in autoencoder"
)
parser.add_argument(
    "--L",
    default=20,
    type=int,
    help="Length of the filters in samples (40=5ms at 8kHZ)",
)
parser.add_argument(
    "--B",
    default=256,
    type=int,
    help="Number of channels in bottleneck 1 × 1-conv block",
)
parser.add_argument(
    "--H", default=512, type=int, help="Number of channels in convolutional blocks"
)
parser.add_argument(
    "--P", default=3, type=int, help="Kernel size in convolutional blocks"
)
parser.add_argument(
    "--X", default=8, type=int, help="Number of convolutional blocks in each repeat"
)
parser.add_argument("--R", default=4, type=int, help="Number of repeats")
parser.add_argument(
    "--norm",
    default="BN",
    type=str,
    choices=["gLN", "cLN", "BN"],
    help="Layer norm type",
)
parser.add_argument(
    "--non_linear",
    default="relu",
    type=str,
    choices=["relu", "softmax"],
    help="non-linear to generate mask",
)
# NOTE(review): --causal has no `type`/`action`, so any value supplied on the
# command line arrives as a *string*; even "--causal False" is truthy.  Only
# the default (bool False) behaves as intended — confirm intended usage.
parser.add_argument("--causal", default=False)
parser.add_argument(
    "--model_path",
    default="exp/temp/final.pth.tar",
    help="Location to save best validation model",
)
args = parser.parse_args()
# Hand the parsed options to the separation routine.
separating(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
530,
11125,
355,
5202,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3063,
62,
83,
292,
62,
3262,
1330,
34872,
51,
292,
7934,
1... | 2.731444 | 741 |
from pytorch_pretrained_biggan import BigGAN, truncated_noise_sample
import torch
import torch.optim as optim
import torch.nn.functional as F
from torchvision.transforms import ToPILImage, ToTensor
from hessian_eigenthings.power_iter import Operator, deflated_power_iteration
from hessian_eigenthings.lanczos import lanczos
from lanczos_generalized import lanczos_generalized
from GAN_hvp_operator import GANHVPOperator, GANForwardHVPOperator, compute_hessian_eigenthings, get_full_hessian
import sys
import numpy as np
import matplotlib.pylab as plt
from time import time
from os.path import join
from imageio import imwrite
from build_montages import build_montages, color_framed_montages
import torchvision.models as tv
#%%
from geometry_utils import LERP, SLERP, ExpMap
from PIL import Image
from skimage.io import imsave
from torchvision.utils import make_grid
from IPython.display import clear_output
from hessian_eigenthings.utils import progress_bar
#%%
# Machine-specific checkouts of PerceptualSimilarity (LPIPS); whichever path
# exists on this machine supplies the `models` module imported next.
sys.path.append(r"D:\Github\PerceptualSimilarity")
sys.path.append(r"E:\Github_Projects\PerceptualSimilarity")
import models
# LPIPS perceptual distance used as the image-space metric throughout.
ImDist = models.PerceptualLoss(model='net-lin', net='squeeze', use_gpu=1, gpu_ids=[0])
#%%
# stylegan2-pytorch checkout; the second assignment overrides the first
# (machine-specific paths — only the last one is actually used).
StyleGAN_root = r"E:\DL_Projects\Vision\stylegan2-pytorch"
StyleGAN_root = r"D:\Github\stylegan2-pytorch"
sys.path.append(StyleGAN_root)
from model import Generator
#%%
# Checkpoint selection; alternative checkpoints kept as trailing comments.
ckpt_name = r"stylegan2-cat-config-f.pt"# r"stylegan2-ffhq-config-f.pt"#
# r"AbstractArtFreaGAN.pt"#r"2020-01-11-skylion-stylegan2-animeportraits.pt"
ckpt_path = join(StyleGAN_root, "checkpoint", ckpt_name)
# Generator hyper-parameters for this checkpoint.
size = 256
device = "cpu"
latent = 512
n_mlp = 8
channel_multiplier = 2
g_ema = Generator(
    size, latent, n_mlp, channel_multiplier=channel_multiplier
).to(device)
checkpoint = torch.load(ckpt_path)
g_ema.load_state_dict(checkpoint['g_ema'])
g_ema.eval()
# Freeze generator weights: we only differentiate w.r.t. the latent input.
for param in g_ema.parameters():
    param.requires_grad_(False)
g_ema.cuda()
#%%
# NOTE(review): loadStyleGAN / StyleGAN_wrapper are not defined or imported in
# the visible part of this file — confirm where they come from.  This rebuild
# also makes the manual checkpoint loading above redundant, presumably kept
# for interactive (#%%-cell) use.
g_ema = loadStyleGAN("stylegan2-cat-config-f.pt")
G = StyleGAN_wrapper(g_ema)
#%% Forward Operator Factorization
from GAN_hvp_operator import GANForwardMetricHVPOperator
#%%
# ---------------------------------------------------------------------------
# Forward-mode Hessian spectrum experiment.  For several finite-difference
# step sizes (HVP_eps) and random latents, estimate eigen-pairs of the LPIPS
# distance Hessian via Lanczos on a forward Hessian-vector-product operator,
# save each spectrum, and render image montages along the eigenvectors.
# ---------------------------------------------------------------------------
truncation = 0.8
truncation_mean = 4096
mean_latent = g_ema.mean_latent(truncation_mean)
G.select_trunc(truncation, mean_latent)
savedir = r"E:\Cluster_Backup\StyleGAN2\Cats_forw"  # machine-specific output dir
for triali in range(10):
    for HVP_eps in [1E-1, 5E-2, 2E-2, 1E-2, 5E-3, 2E-3]:
        # Fresh random latent; RND tags the output filenames.
        RND = np.random.randint(10000)
        T0 = time()
        ref_z = torch.randn(1, latent, device="cuda").cuda()
        SGhvp = GANForwardMetricHVPOperator(G, ref_z, ImDist, preprocess=lambda img: img, EPS=HVP_eps,)
        eigenvals, eigenvecs = lanczos(SGhvp, num_eigenthings=250, max_steps=200, tol=1e-5,)
        print(time() - T0, " sec")# 10 eigvect takes 12 sec
        # 50 eigvect takes 40 sec 40.1 sec
        # 200 eigvect, 100 steps takes 163 sec
        #%
        # Transpose eigenvectors to columns and sort by |eigenvalue| ascending.
        eigenvecs = eigenvecs.T
        sort_idx = np.argsort(np.abs(eigenvals))
        eigabs_sort = eigenvals[sort_idx]
        eigvect_sort = eigenvecs[:, sort_idx]
        #%
        np.savez(join(savedir, "Hess_trunc%.1f_eps%.E_%03d.npz" % (truncation, HVP_eps, RND)), eigvals=eigenvals,
                 eigvects=eigenvecs, vect=ref_z.cpu().numpy(), )
        # plt.figure();plt.plot(np.log10(eigenvals));plt.xlim([0, len(eigenvals)])
        # plt.savefig(join(savedir, "Hessian_trunc%.1f_EPS%.E_%03d.jpg" % (truncation, HVP_eps, RND)))
        # Spectrum plot: linear scale (left) and log10 scale (right).
        plt.figure(figsize=[7, 5])
        plt.subplot(1, 2, 1)
        plt.plot(eigenvals[::-1])
        plt.plot(np.abs(eigabs_sort[::-1]))
        plt.xlim([0, len(eigenvals)])
        plt.ylabel("eigenvalue")
        plt.subplot(1, 2, 2)
        plt.plot(np.log10(eigenvals[::-1]))
        plt.plot(np.log10(np.abs(eigabs_sort[::-1])))
        plt.xlim([0, len(eigenvals)])
        plt.ylabel("eigenvalue (log)")
        plt.suptitle("Hessian Spectrum Forward Decomposition")
        plt.savefig(join(savedir, "Hessian_trunc%.1f_EPS%.E_%03d.jpg" % (truncation, HVP_eps, RND)))
        #%%
        # Montages along eigenvectors, 50 per sheet, spherical interpolation.
        # NOTE(review): SExpMap / LExpMap are not in the visible imports (only
        # LERP, SLERP, ExpMap are imported above) — confirm they are in scope.
        T00 = time()
        eigstr = 0
        while eigstr < eigenvecs.shape[1]:
            eigend = min(eigstr + 50, eigenvecs.shape[1])
            codes_all = []
            for eigi in range(eigstr, eigend): # eigvects.shape[1]
                interp_codes = SExpMap(ref_z.cpu().numpy(), eigvect_sort[:, -eigi-1], 15, (-0.5, 0.5))
                codes_all.append(interp_codes.copy())
            codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
            img_all = G.visualize_batch_np(codes_all_arr, truncation=truncation, mean_latent=mean_latent, B=8)
            imggrid = make_grid(img_all, nrow=15)
            PILimg2 = ToPILImage()(imggrid)#.show()
            PILimg2.save(join(savedir, "eigvect_sph_fin_eig%d-%d_trunc%.1f_EPS%.E_%04d.jpg" % (eigstr, eigend,
                                                                truncation, HVP_eps, RND)))
            print("Spent time %.1f sec" % (time() - T00))
            eigstr = eigend
        #%%
        # Montage: linear interpolation along the 50 largest eigenvectors.
        T00 = time()
        codes_all = []
        for eigi in range(50): # eigvects.shape[1]
            interp_codes = LExpMap(ref_z.cpu().numpy(), eigvect_sort[:, -eigi-1], 11, (-10, 10))
            codes_all.append(interp_codes.copy())
        codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
        img_all = G.visualize_batch_np(codes_all_arr, truncation=truncation, mean_latent=mean_latent, B=8)
        imggrid = make_grid(img_all, nrow=11)
        PILimg = ToPILImage()(imggrid) # .show()
        PILimg.save(join(savedir, "eigvect_lin_trunc%.1f_EPS%.E_%04d.jpg" % (truncation, HVP_eps, RND)))
        print("Spent time %.1f sec" % (time() - T00))
        #%%
        # Montage: spherical interpolation without truncation (truncation=1).
        T00 = time()
        codes_all = []
        for eigi in range(50): # eigvects.shape[1]
            interp_codes = SExpMap(ref_z.cpu().numpy(), eigenvecs[:, -eigi-1], 15, (-0.5, 0.5))
            codes_all.append(interp_codes.copy())
        codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
        img_all = G.visualize_batch_np(codes_all_arr, truncation=1, mean_latent=None, B=5)
        imggrid = make_grid(img_all, nrow=15)
        PILimg2 = ToPILImage()(imggrid)#.show()
        PILimg2.save(join(savedir, "eigvect_sph_fin_trunc%.1f_%04d.jpg" % (1, RND)))
        print("Spent time %.1f sec" % (time() - T00))
#%%
# Single exact-Hessian computation via double backprop: LPIPS distance
# between a fixed reference image and a gradient-enabled clone of the same
# latent, assembled into a full Hessian by get_full_hessian.
T00 = time()
truncation = 0.8
truncation_mean = 4096
RND = np.random.randint(10000)
mean_latent = g_ema.mean_latent(truncation_mean)
# NOTE: device is "cpu" at this point, but .cuda() moves the tensor to GPU.
ref_z = torch.randn(1, latent, device=device).cuda()
mov_z = ref_z.detach().clone().requires_grad_(True) # requires grad doesn't work for 1024 images.
ref_samp = G.visualize(ref_z, truncation=truncation, mean_latent=mean_latent)
mov_samp = G.visualize(mov_z, truncation=truncation, mean_latent=mean_latent)
dsim = ImDist(ref_samp, mov_samp)
H = get_full_hessian(dsim, mov_z)
print(time() - T00, " sec")
#%%
# ---------------------------------------------------------------------------
# Full-Hessian sweep over truncation levels.  For each (trial, truncation)
# pair: compute the exact LPIPS Hessian at a random latent, eigendecompose,
# save spectrum + arrays, and render eigenvector montages (linear and two
# spherical interpolation ranges).
# ---------------------------------------------------------------------------
savedir = r"E:\OneDrive - Washington University in St. Louis\HessGANCmp\StyleGAN2"
truncation = 0.5
T00 = time()
for triali in range(3):
    for truncation in [1, 0.8, 0.6]:
        # (1, 0) was presumably already computed in an earlier run — skipped.
        if truncation == 1 and triali == 0:
            continue
        T00 = time()
        truncation_mean = 4096
        RND = np.random.randint(1000)
        mean_latent = g_ema.mean_latent(truncation_mean)
        ref_z = torch.randn(1, latent, device=device).cuda()
        mov_z = ref_z.detach().clone().requires_grad_(True)
        ref_samp = G.visualize(ref_z, truncation=truncation, mean_latent=mean_latent)
        mov_samp = G.visualize(mov_z, truncation=truncation, mean_latent=mean_latent)
        dsim = ImDist(ref_samp, mov_samp)
        H = get_full_hessian(dsim, mov_z)
        print("Computing Hessian Completed, %.1f sec" % (time()-T00))
        #%%
        # Eigendecomposition (eigh: H is symmetric) and spectrum plots.
        eigvals, eigvects = np.linalg.eigh(H)
        plt.figure(figsize=[7,5])
        plt.subplot(1, 2, 1)
        plt.plot(eigvals)
        plt.ylabel("eigenvalue")
        plt.subplot(1, 2, 2)
        plt.plot(np.log10(eigvals))
        plt.ylabel("eigenvalue (log)")
        plt.suptitle("Hessian Spectrum Full Space")
        plt.savefig(join(savedir, "Hessian_trunc%.1f_%03d.jpg" % (truncation, RND)))
        np.savez(join(savedir, "Hess_trunc%.1f_%03d.npz" % (truncation, RND)), H=H, eigvals=eigvals, eigvects=eigvects, vect=ref_z.cpu().numpy(),)
        # Free the graph/GPU memory before the rendering passes.
        del dsim
        torch.cuda.empty_cache()
        #%%
        # Montage: linear interpolation along the 50 largest eigenvectors.
        T00 = time()
        codes_all = []
        for eigi in range(50): # eigvects.shape[1]
            interp_codes = LExpMap(ref_z.cpu().numpy(), eigvects[:, -eigi-1], 11, (-10, 10))
            codes_all.append(interp_codes.copy())
        codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
        img_all = G.visualize_batch_np(codes_all_arr, truncation=truncation, mean_latent=mean_latent, B=5)
        imggrid = make_grid(img_all, nrow=11)
        PILimg = ToPILImage()(imggrid) # .show()
        PILimg.save(join(savedir, "eigvect_lin_trunc%.1f_%03d.jpg" % (truncation, RND)))
        print("Spent time %.1f sec" % (time() - T00))
        #%%
        # Montage: spherical interpolation, wide range (-1, 1).
        T00 = time()
        codes_all = []
        for eigi in range(50): # eigvects.shape[1]
            interp_codes = SExpMap(ref_z.cpu().numpy(), eigvects[:, -eigi-1], 11, (-1, 1))
            codes_all.append(interp_codes.copy())
        codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
        img_all = G.visualize_batch_np(codes_all_arr, truncation=truncation, mean_latent=mean_latent, B=5)
        imggrid = make_grid(img_all, nrow=11)
        PILimg2 = ToPILImage()(imggrid)#.show()
        PILimg2.save(join(savedir, "eigvect_sph_trunc%.1f_%03d.jpg" % (truncation, RND)))
        print("Spent time %.1f sec" % (time() - T00))
        #%%
        # Montage: spherical interpolation, fine range (-0.5, 0.5).
        T00 = time()
        codes_all = []
        for eigi in range(50): # eigvects.shape[1]
            interp_codes = SExpMap(ref_z.cpu().numpy(), eigvects[:, -eigi-1], 15, (-0.5, 0.5))
            codes_all.append(interp_codes.copy())
        codes_all_arr = np.concatenate(tuple(codes_all), axis=0)
        img_all = G.visualize_batch_np(codes_all_arr, truncation=truncation, mean_latent=mean_latent, B=5)
        imggrid = make_grid(img_all, nrow=15)
        PILimg2 = ToPILImage()(imggrid)#.show()
        PILimg2.save(join(savedir, "eigvect_sph_fin_trunc%.1f_%03d.jpg" % (truncation, RND)))
        print("Spent time %.1f sec" % (time() - T00))
#%%
| [
6738,
12972,
13165,
354,
62,
5310,
13363,
62,
14261,
1030,
1330,
4403,
45028,
11,
40122,
515,
62,
3919,
786,
62,
39873,
198,
11748,
28034,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
... | 2.037432 | 4,969 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Dump information so we can get a quick look at what's available."""
import platform
import sys
# Dump the attributes of modules of interest for a quick environment report.
# NOTE(review): dump_module is not defined in the visible lines — presumably
# defined earlier in this debug script; confirm.
for mod in [platform, sys]:
    dump_module(mod)
| [
2,
49962,
739,
262,
24843,
13789,
25,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
198,
2,
1114,
3307,
25,
3740,
1378,
12567,
13,
785,
14,
2817,
8664,
14,
1073,
1857,
9078,
14,
2436,
... | 3.08 | 100 |
import json
import pytest
import shutil
import os
import copy
from pathlib import Path
from fastapi.testclient import TestClient
from constants import ALL_AREAS
from libs.config_engine import ConfigEngine
from api.settings import Settings
from api.tests.utils.common_functions import create_app_config
from libs.utils import config as config_utils
from .example_models import camera_template, camera_example, camera_example_2, camera_example_3, camera_example_4,\
area_example, area_example_2
from ...utils import get_config
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
def rollback_homography_matrix_folder():
    """Yield-fixture that cleans up the homography_matrix folder after a test.

    The base path '/repo/api/tests/data/mocked_data/data/processor/config'
    mirrors config.get_section_dict('App')['EntityConfigDirectory']; the test
    configuration redirects that setting to this mocked-data location.
    """
    yield None
    # Teardown: remove the per-camera homography_matrix directory (and all of
    # its contents) if the test created one.
    config_root = "/repo/api/tests/data/mocked_data/data/processor/config"
    matrix_dir = os.path.join(
        config_root, "sources", str(camera_template["id"]), "homography_matrix"
    )
    if os.path.exists(matrix_dir):
        shutil.rmtree(matrix_dir)
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
11748,
4423,
346,
198,
11748,
28686,
198,
11748,
4866,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
3049,
15042,
13,
9288,
16366,
1330,
6208,
11792,
198,
198,
6738,
38491,
1330,
1109... | 2.918919 | 481 |
import copy
import numpy as np
import torch
import skecon
from skecon.utils import astensor
from skecon.data.scales import Ratio, Interval, Ordinal, Nominal
| [
11748,
4866,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
201,
198,
201,
198,
11748,
6146,
1102,
201,
198,
6738,
6146,
1102,
13,
26791,
1330,
6468,
22854,
201,
198,
6738,
6146,
1102,
13,
7890,
13,
1416,
2... | 3.036364 | 55 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100,
796,
2124,
1... | 2.16 | 75 |
"""
DESCRIPTION
Application Project Interface for Instrument Detection Software
Preprocess: Preprocesses audio data before sending to Neural Network
NeuralNetwork: General purpose neural network that can be used for many
applications
See demo in in main()
Please contact Benjamin Gutierrez for any questions or errors.
MIT License
Copyright (c) 2018 The-Instrumental-Specialists
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Standard Library Dependencies
# os
# glob
# json
# time
# Third Party Dependencies (Please install prior to use)
# scipy
# numpy
# matplotlib
import neuralnet as NN
import preprocess as PP
#--- START PREPROCESS API ----------------------------------------------------------
# Thin delegating wrappers around the `preprocess` module (imported as PP),
# re-exported here as the public Preprocess API of this module.
def processFFT(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):
    """returns one sided FFT amplitudes of filename
    filename (string): ex) 'sax.wav'
    length (int): Number of datapoints of one-sided fft (must be even,preferably a power of 2)
    q (int): (optional argument) Downsampling Rate
    fs_in (int): (optional argument) throw ValueError if fs of filename != fs_in
    divide (int): (optional argument) 1/divide*Nsamples is taken from FFT (preferably even)
    plot (bool): (optional argument) plots the one sided FFT if True, otherwise does not plot
    Note: length < total_time*fs//(2*q*divide)
        Ex) length = 256 < (0.25sec)*(44100Hz)//(2*4*2) = 689
    """
    return PP.processFFT(filename,length,q,fs_in,divide,plot)
def processMFCC(filename,subsample=2048):
    """Preprocesses file. Returns Mel-frequency Cepstral Coefficients.
    filename (string): wavfile
    subsample: number of datapoints in wavfile to use in FFT
    """
    #assumes 8000Hz, but works with 44,100Hz and other sample rates.
    return PP.processMFCC(filename,subsample)
def mean(array_list):
    """Returns the mean of an array or list"""
    return PP.mean(array_list)
def downsample(sig,fs,q):
    """Downsamples signal sig by factor q.
    sig (list,array): sound/data signal
    fs (int): sample rate of sig
    q (int): downsample factor
    """
    return PP.downsample(sig,fs,q)
#--- END PREPROCESS API ----------------------------------------------------------
#--- START NEURALNET API ------------------------------------------------------
# Activation functions and their "derivatives", delegated to neuralnet (NN).
# NOTE(review): the *Derivative helpers are documented as "Not Real
# Derivative" — presumably they compute the derivative expressed in terms of
# the activation's *output* (the usual backprop convention); confirm against
# the neuralnet module.
def sigmoid(x):
    """ This function computes the sigmoid of x for NeuralNetwork"""
    return NN.sigmoid(x)
def sigmoidDerivative(x):
    """ This function computes the sigmoid derivative of x for NeuralNetwork
    (Note: Not Real Derivative)
    """
    return NN.sigmoidDerivative(x)
def tanh(x):
    """ This function computes the tanh of x for NeuralNetwork"""
    return NN.tanh(x)
def tanhDerivative(x):
    """ This function computes the tanh derivative of x for NeuralNetwork
    (Note: Not Real Derivative)
    """
    return NN.tanhDerivative(x)
def arctan(x):
    """This function returns the arctan of x for NeuralNetwork"""
    return NN.arctan(x)
def arctanDerivative(x):
    """This function returns the arctan derivative of x for NeuralNetwork
    (Note: Not Real Derivative)
    """
    return NN.arctanDerivative(x)
def sin(x):
    """This function returns the sine of x"""
    return NN.sin(x)
def sinDerivative(x):
    """This function returns the sine derivative of x
    (Note: Not Real Derivative)
    """
    return NN.sinDerivative(x)
def gaussian(x):
    """This function returns the gaussian of x"""
    return NN.gaussian(x)
def gaussianDerivative(x):
    """This function returns the gaussian derivative of x
    (Note: Not Real Derivative)
    """
    return NN.gaussianDerivative(x)
def softplus(x):
    """This function returns the softplus of x"""
    return NN.softplus(x)
def softplusDerivative(x):
    """This function returns the softplusDerivative of x
    (Note: Not Real Derivative)
    """
    return NN.softplusDerivative(x)
def getMax(array_list):
    """Returns a tuple (index,value) of the maximum in an 1D array or list"""
    return NN.getMax(array_list)
class NeuralNetwork(NN.NeuralNetwork):
    """ General Purpose Neural Network

    Thin wrapper re-exporting the interface of neuralnet.NeuralNetwork as
    part of this module's public API.
    activation_dict = {'sigmoid': [sigmoid,sigmoidDerivative],
                       'tanh': [tanh,tanhDerivative],
                       'arctan': [arctan,arctanDerivative],
                       'sin': [sin,sinDerivative],
                       'gaussian': [gaussian,gaussianDerivative],
                       'softplus': [softplus,softplusDerivative],
                       }
    """
    def __init__(self,layers,activeFn = 'sigmoid'):
        """layers is list of layer lengths"""
        NN.NeuralNetwork.__init__(self,layers,activeFn)
    def setActivationFn(self,activeFn):
        """Sets the activation function"""
        return super().setActivationFn(activeFn)
    def getLayers(self):
        """Return a list of layer lengths"""
        return super().getLayers()
    def storeWeights(self,filename,comment = 'No Comment'):
        """Stores Weights in filename.
        filename (string): ex. 'data.txt'
        comment (string): message to be stored in file
        """
        return super().storeWeights(filename,comment)
    def loadWeights(self,filename):
        """Loads Weights in filename. Note WILL overwrite layer shape and number.
        filename (string): ex. 'data.txt'
        """
        return super().loadWeights(filename)
    def trainRandom(self,X,Y,learning_rate=1.0,intervals = 100):
        """Trains the neural networks with a list of input vectors x
        and a list of output vectors Y. Iterates in Random Order.
        """
        return super().trainRandom(X,Y,learning_rate,intervals)
    def trainSequential(self,X,Y,learning_rate=1.0,intervals = 100):
        """Trains the neural networks with a list of input vectors X
        and a list of output vectors Y. Iterates in Sequential Order.
        """
        # BUG FIX: `self` was missing from this signature, so the zero-arg
        # super() call treated X as the instance and the method always failed.
        return super().trainSequential(X,Y,learning_rate,intervals)
    def trainWithPlots(self,X,Y,learning_rate = 1.0,intervals = 100,way='max'):
        """Trains the neural networks with a list of input vectors X
        and a list of output vectors Y.
        Plots Cost function over each iteration.
        Iterates in Sequential Order.
        way (string): (optional arg) comparison method for testing
                      valid options are 'max','thres'
        """
        return super().trainWithPlots(X,Y,learning_rate,intervals,way)
    def trainSample(self,x,y,learning_rate=1.0):
        """Trains the neural networks with a single input vector x
        and a single output vector y"""
        return super().trainSample(x,y,learning_rate)
    def trainTestSample(self,x,y,learning_rate=1.0,way='max'):
        """Trains the neural networks with a single input vector x
        and a single output vector y.
        Takes prediction of tested sample using forward propagation
        before doing backpropagation.
        way (string): (optional arg) comparison method for testing
                      valid options are 'max','thres'
        tl:dr trains and tests one sample
        """
        return super().trainTestSample(x,y,learning_rate,way)
    def compareProb(self,prob,y,way='max'):
        """Compares y with prob, probabitity vector from last activation layer
        in backpropagation
        """
        return super().compareProb(prob,y,way)
    def forwardProp(self,x):
        """Forward Propagates x through the Neural Network"""
        return super().forwardProp(x)
    def backProp(self,a,y,learning_rate):
        """Backward propagates y through the Neural Network using activations a"""
        return super().backProp(a,y,learning_rate)
    def testBatch(self,X,Y,verbose = False,way='max'):
        """prints and returns the testing accuracy of a batch of testing vectors.
        X (list of np.arrays): list of input vectors
        Y (list of np.arrays): list of target vectors
        if verbose == True, prints out whether each test vector passed/failed.
        """
        return super().testBatch(X,Y,verbose,way)
    def testSample(self,x,y,way='max'):
        """ Returns true prediction is correct
        Takes prediction of tested sample using probabilities from
        forward propagation.
        way (string): (optional arg) comparison method for testing
                      valid options are 'max','thres'
        """
        return super().testSample(x,y,way)
    def predictProb(self,x):
        """Predicts an output vector for a given input vector x"""
        return super().predictProb(x)
    def lossFunction(self,x,y):
        """Computes the loss function for a given input vector and output vector"""
        return super().lossFunction(x,y)
    def printWeights(self):
        """Prints all of the weight matrices"""
        return super().printWeights()
    def printWeight(self,i):
        """Prints the weight matrix at index i of self.theta"""
        return super().printWeight(i)
#--- END NEURALNET API ------------------------------------------------------
def trainXor():
    """Training NeuralNet to learn the boolean XOR function.

    Demo: trains a 2-2-1 tanh network on the four XOR patterns, round-trips
    the learned weights through 'weights/XOR.txt', and re-tests them.
    """
    # Initialize Neural Network with tanh activation function,
    # with an input layer of size 2, one hidden layer of size 2,
    # and one output layer of size 1
    net = NeuralNetwork([2,2,1],'tanh')
    # XOR Training and Test Data
    X = [[0,0],[1,0],[0,1],[1,1]];
    Y = [[0],[1],[1],[0]]
    # Train with plots
    net.trainWithPlots(X,Y,learning_rate=0.2,intervals=2000,way='thres')
    # Store, load, print weights (exercise the persistence round trip)
    net.storeWeights('weights/XOR.txt',comment='XOR DATA')
    net.loadWeights('weights/XOR.txt')
    net.printWeights()
    # test XOR data
    net.testBatch(X,Y,verbose=True)
    # Predict Data
    net.predictProb([0,0]) # predict probability
def train10Instruments():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    Ten instruments are used in this example
    """
    # Preprocess Training Data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    #P.processData('preprocessed/instr_train_10.txt',directory='instr_train_10',way='mfcc',opt = [2048])
    P.loadData('preprocessed/instr_train_10.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Train Neural Net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    net.trainWithPlots(X,Y,learning_rate=1,intervals = 100,way='max')
    net.storeWeights('weights/instr_train_10.txt')
    #net.loadWeights('weights/instr_train_10.txt') # Load weights from file, since net has been trained
    # Preprocess Testing Data
    Q = PP.Preprocess()
    Q.processData('preprocessed/instr_test_10.txt',directory='instr_test_10',way='mfcc',opt=[2048])
    #Q.loadData('preprocessed/instr_test_10.txt') # Load weights from file, since net has been trained
    tX, tY = Q.getXY()
    # Test testing data
    net.testBatch(tX,tY)
def train6Instruments():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    Six instruments are used in this example
    """
    # Preprocess Training Data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    #P.processData('preprocessed/instr_train_06.txt',directory='instr_train_06',way='mfcc',opt = [2048])
    # NOTE(review): this loads the *test* set although the surrounding
    # comments say training data; harmless while training is commented out
    # (only input/output sizes are used below) — confirm intent.
    P.loadData('preprocessed/instr_test_06.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Train Neural Net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    #net.trainWithPlots(X,Y,learning_rate=0.1,intervals = 75,way='max')
    #net.storeWeights('weights/instr_train_06.txt')
    net.loadWeights('weights/instr_train_06.txt') # Load weights from file, since net has been trained
    # Preprocess Testing Data
    Q = PP.Preprocess()
    #Q.processData('preprocessed/instr_test_06.txt',directory='instr_test_06',way='mfcc',opt=[2048])
    Q.loadData('preprocessed/instr_test_06.txt') # Load weights from file, since net has been trained
    tX, tY = Q.getXY()
    # Test testing data
    net.testBatch(tX,tY)
def trainNotes():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    19 instruments were used to generate all the notes
    """
    # Preprocess Training Data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    P.processData('preprocessed/notes_train_19.txt',directory='notes_train_19',way='mfcc',opt = [2048])
    #P.loadData('preprocessed/notes_train_19.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Train Neural Net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    net.trainWithPlots(X,Y,learning_rate=1,intervals = 200,way='max')
    net.storeWeights('weights/notes_train_19.txt')
    #net.loadWeights('weights/notes_train_19.txt') # Load weights from file, since net has been trained
    # Preprocess Testing Data
    Q = PP.Preprocess()
    Q.processData('preprocessed/notes_test_19.txt',directory='notes_test_19',way='mfcc',opt=[2048])
    #Q.loadData('preprocessed/notes_test_19.txt') # Load weights from file, since net has been trained
    tX, tY = Q.getXY()
    # Test testing data
    net.testBatch(tX,tY)
def test6Instruments():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    Six instruments are used in this example
    """
    # Get preprocessed training data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    P.loadData('preprocessed/instr_test_06.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Load weights for neural net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    net.loadWeights('weights/instr_train_06.txt') # Load weights from file, since net has been trained
    # Test testing data
    print('Testing 6 Instruments Recognition')
    net.testBatch(X,Y)
def test10Instruments():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    Ten instruments are used in this example
    """
    # Get preprocessed training data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    P.loadData('preprocessed/instr_test_10.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Load weights for neural net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    net.loadWeights('weights/instr_train_10.txt') # Load weights from file, since net has been trained
    # Test testing data
    print('Testing 10 Instruments Recognition')
    net.testBatch(X,Y)
def testNotes():
    """Uses Preprocess to convert the audio data into mel-frequency cepstral coefficients.
    Feeds these coefficients into NeuralNet.
    19 instruments were used to generate all the notes
    """
    # Get preprocessed training data
    # BUG FIX: bare `Preprocess` raised NameError — this module only imports
    # `preprocess as PP`, so the class must be qualified as PP.Preprocess.
    P = PP.Preprocess()
    P.loadData('preprocessed/notes_test_19.txt') #Load preprocessed data from file, since net has been trained
    X, Y = P.getXY()
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    # Load weights for neural net
    net = NeuralNetwork([input_size,100,output_size],activeFn='sigmoid')
    net.loadWeights('weights/notes_train_19.txt') # Load weights from file, since net has been trained
    # Test testing data
    print('Testing Pitch Recognizition')
    net.testBatch(X,Y)
# Script entry point.
# NOTE(review): main() is not defined anywhere in this module — running it as
# a script raises NameError.  Presumably one of the demo functions above
# (e.g. trainXor) was intended; confirm.
if __name__ == '__main__':
    main()
| [
37811,
198,
30910,
40165,
198,
23416,
4935,
26491,
329,
42410,
46254,
10442,
198,
6719,
14681,
25,
3771,
14681,
274,
6597,
1366,
878,
7216,
284,
47986,
7311,
198,
8199,
1523,
26245,
25,
3611,
4007,
17019,
3127,
326,
460,
307,
973,
329,
... | 2.679527 | 6,428 |
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""libFuzzer Neural Smoothing - Argparse Options."""
import argparse
import bot.fuzzers.ml.gradientfuzz.constants as constants
def get_train_args():
  """
  Returns needed args specifying run name, training/dataset
  options, and model architecture for a full training run.
  For use in `train.py`.

  Args:
    N/A

  Returns:
    argparse.Namespace object with specified args.
  """
  parser = argparse.ArgumentParser()

  # Run name.
  parser.add_argument(
      '--run-name', help='Unique identifier for this run.', type=str)

  # Full-on configs.
  # --neuzz-config is a preset flag: it selects the NEUZZ model plus its
  # hyperparameters wholesale, overriding the individual options below.
  parser.add_argument(
      '--neuzz-config',
      help='Train NEUZZ model and hyperparams.',
      action='store_true')

  # Training options. All defaults come from gradientfuzz.constants.
  parser.add_argument(
      '--lr',
      help='learning rate (default: {}).'.format(constants.DEFAULT_LR),
      type=float,
      default=constants.DEFAULT_LR)
  parser.add_argument(
      '--epochs',
      help='number of epochs (default: {}).'.format(
          constants.DEFAULT_NUM_EPOCHS),
      type=int,
      default=constants.DEFAULT_NUM_EPOCHS)
  parser.add_argument(
      '--optimizer',
      help='Optimizer to use (Default: {}).'.format(
          constants.DEFAULT_OPTIMIZER),
      type=str,
      default=constants.DEFAULT_OPTIMIZER,
      choices=list(constants.OPTIMIZER_MAP.keys()))

  # Dataset options.
  parser.add_argument(
      '--dataset-name',
      help='Dataset name (Look under {}/).'.format(constants.DATASET_DIR),
      type=str)
  parser.add_argument(
      '--val-split',
      help='Proportion of dataset to use as validation set (default {}).'
      .format(constants.DEFAULT_VAL_SPLIT),
      type=float,
      default=constants.DEFAULT_VAL_SPLIT)
  parser.add_argument(
      '--batch-size',
      help='Batch size (default: {}).'.format(
          constants.DEFAULT_TRAIN_BATCH_SIZE),
      type=int,
      default=constants.DEFAULT_TRAIN_BATCH_SIZE)
  parser.add_argument(
      '--val-batch-size',
      help='Validation set batch size (default: {}).'.format(
          constants.DEFAULT_VAL_BATCH_SIZE),
      type=int,
      default=constants.DEFAULT_VAL_BATCH_SIZE)

  # Model options. --num-hidden is ignored by architectures that have no
  # hidden-width hyperparameter (see constants.ARCHITECTURE_MAP).
  parser.add_argument(
      '--architecture',
      help='Model architecture to use.',
      type=str,
      default=constants.NEUZZ_ONE_HIDDEN_LAYER_MODEL,
      choices=list(constants.ARCHITECTURE_MAP.keys()))
  parser.add_argument(
      '--num-hidden',
      help=('Hidden dimension size (feedforward and RNN models only. ' +
            'Default: {}).').format(constants.DEFAULT_HIDDEN_SIZE),
      type=int,
      default=constants.DEFAULT_HIDDEN_SIZE)

  args = parser.parse_args()
  return args
def check_train_args(args):
  """
  Ensures that all required args exist when building a new model from scratch.

  Args:
    args (argparse.Namespace): Arguments from get_train_args().

  Returns:
    boolean: True if required args are present, and False otherwise.
  """
  # Report only the first missing argument, mirroring a fail-fast check.
  error = None
  if args.architecture is None:
    error = 'Error: --architecture is required for new models!'
  elif args.dataset_name is None:
    error = ('Error: --dataset-name is required for new models! ' +
             '(Check {}/ directory).'.format(constants.DATASET_DIR))
  if error is None:
    return True
  print(error)
  return False
def get_gradient_gen_critical_locs_args():
  """
  Returns needed args specifying run name, seed directory,
  generated file directory, and location generation method.
  For use in `gradient_gen_critical_locs.py`.

  Args:
    N/A

  Returns:
    argparse.Namespace object with specified args.
  """
  parser = argparse.ArgumentParser()

  # For loading trained model.
  parser.add_argument(
      '--run-name',
      required=True,
      help=('Pre-trained model\'s run name. ' +
            'Should be under {}/[architecture]/ directory.'.format(
                constants.MODEL_DIR)))

  # For getting seed files + save dir.
  parser.add_argument(
      '--path-to-seeds', required=True, help='Path to seed file directory.')
  parser.add_argument(
      '--path-to-lengths',
      required=True,
      help='Path to file-to-input-length dictionary.')
  parser.add_argument(
      '--generation-name',
      required=True,
      help='Name of generated gradient files directory (to be saved under ' +
      '{}/[generation-name]/{}).'.format(constants.GENERATED_DIR,
                                        constants.GRADIENTS_DIR))

  # How to generate (which model outputs the gradients are taken w.r.t.).
  parser.add_argument(
      '--gradient-gen-method',
      required=True,
      help='Which outputs to generate gradients with respect to.',
      choices=constants.GRADIENT_OPTS)

  # Required mutation options for NEUZZ. These have defaults, but
  # check_gradient_gen_critical_locs_args() still verifies them for the
  # NEUZZ random-branches method.
  parser.add_argument(
      '--num-output-locs',
      help='Number of branches for which to generate gradients.',
      type=int,
      default=1)
  parser.add_argument(
      '--top-k',
      help='Keep [top-k] input gradient components.',
      type=int,
      default=500)

  args = parser.parse_args()
  return args
def check_gradient_gen_critical_locs_args(args):
  """
  Ensures that proper arguments are set for each gradient gen method.

  Args:
    args (argparse.Namespace): Arguments from
        `get_gradient_gen_critical_locs_args()`.

  Returns:
    A boolean indicating whether it's okay to continue.
  """
  # Only the NEUZZ random-branches method has extra required knobs.
  if args.gradient_gen_method != constants.NEUZZ_RANDOM_BRANCHES:
    return True
  if args.num_output_locs is None:
    print('Error: --num-output-locs must be specified in conjunction with {}.'
          .format(constants.NEUZZ_RANDOM_BRANCHES))
    return False
  if args.top_k is None:
    print('Error: --top-k must be specified in conjunction with {}.'.format(
        constants.NEUZZ_RANDOM_BRANCHES))
    return False
  return True
def get_gen_mutations_args():
  """
  Returns needed args specifying which directory to save mutated
  files under and which mutation generation method to use.
  For `gen_mutations.py`.

  Args:
    N/A

  Returns:
    argparse.Namespace object with specified args.
  """
  parser = argparse.ArgumentParser()

  # For running actual mutations from trained model.
  parser.add_argument(
      '--generation-name',
      required=True,
      help='Name of generated gradient files directory (gradients saved under '
      + '{}/[generation-name]/{}).'.format(constants.GENERATED_DIR,
                                          constants.GRADIENTS_DIR))
  parser.add_argument(
      '--mutation-name',
      required=True,
      help='Name of mutated inputs files directory (mutated files saved under '
      + '{}/[generation-name]/{}/[mutation-name]/).'.format(
          constants.GENERATED_DIR, constants.MUTATIONS_DIR))
  parser.add_argument(
      '--mutation-gen-method',
      required=True,
      help='Which mutation method to use.',
      choices=constants.MUTATION_OPTS)
  parser.add_argument(
      '--path-to-lengths',
      required=True,
      help='Path to file-to-input-length dictionary.')

  # TODO(ryancao): Mutation options for NEUZZ.

  # Mutation options for simple random.
  parser.add_argument(
      '--num-mutations',
      help='Number of mutations to perform for each file. (Default: {})'.format(
          constants.DEFAULT_NUM_MUTATIONS),
      type=int,
      default=constants.DEFAULT_NUM_MUTATIONS)

  # Mutation options for limited neighborhood: mutate bytes within
  # [--neighborhood-max-width] of each critical byte by a delta drawn from
  # [--arith-min, --arith-max]. Validated in check_gen_mutations_args().
  parser.add_argument(
      '--neighborhood-max-width',
      help='Max number of bytes to mutate (in either direction) of ' +
      'critical bytes. (Default: {})'.format(
          constants.NEIGHBORHOOD_DEFAULT_MAX_WIDTH),
      type=int,
      default=constants.NEIGHBORHOOD_DEFAULT_MAX_WIDTH)
  parser.add_argument(
      '--arith-min',
      help='Smallest byte delta to add to critical bytes. (Default: {})'.format(
          constants.ARITH_DEFAULT_MIN),
      type=int,
      default=constants.ARITH_DEFAULT_MIN)
  parser.add_argument(
      '--arith-max',
      help='Largest byte delta to add to critical bytes. (Default: {})'.format(
          constants.ARITH_DEFAULT_MAX),
      type=int,
      default=constants.ARITH_DEFAULT_MAX)

  args = parser.parse_args()
  return args
def check_gen_mutations_args(args):
  """
  Ensures that proper arguments are set for each mutation gen method.

  Args:
    args (argparse.Namespace): Arguments from `get_gen_mutations_args()`.

  Returns:
    A boolean indicating whether it's okay to continue.
  """
  method = args.mutation_gen_method

  # NEUZZ-style mutation currently has no extra constraints to validate.
  if method == constants.NEUZZ_MUTATION:
    return True

  if method == constants.SIMPLE_RANDOM:
    if args.num_mutations <= 0:
      print('Error: --num-mutations argument must be positive!')
      return False
    return True

  if method == constants.LIMITED_NEIGHBORHOOD:
    if args.num_mutations <= 0:
      print('Error: --num-mutations argument must be positive!')
      return False
    # The arithmetic delta range must straddle zero so both decrements and
    # increments of critical bytes are possible.
    if args.arith_min >= 0:
      print('Error: --arith-min must be negative!')
      return False
    if args.arith_max <= 0:
      print('Error: --arith-max must be positive!')
      return False
    if args.neighborhood_max_width < 0:
      print('Error: --neighborhood-max-width must be non-negative!')
      return False
    return True

  return True
| [
2,
15069,
12131,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 2.580696 | 3,823 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_envmap_bumpmap'
# Enumerant constants for the GL_ATI_envmap_bumpmap extension.
# NOTE(review): this module is autogenerated ("do not edit" header) --
# regenerate from the registry rather than hand-editing these values.
GL_BUMP_ENVMAP_ATI=_C('GL_BUMP_ENVMAP_ATI',0x877B)
GL_BUMP_NUM_TEX_UNITS_ATI=_C('GL_BUMP_NUM_TEX_UNITS_ATI',0x8777)
GL_BUMP_ROT_MATRIX_ATI=_C('GL_BUMP_ROT_MATRIX_ATI',0x8775)
GL_BUMP_ROT_MATRIX_SIZE_ATI=_C('GL_BUMP_ROT_MATRIX_SIZE_ATI',0x8776)
GL_BUMP_TARGET_ATI=_C('GL_BUMP_TARGET_ATI',0x877C)
GL_BUMP_TEX_UNITS_ATI=_C('GL_BUMP_TEX_UNITS_ATI',0x8778)
GL_DU8DV8_ATI=_C('GL_DU8DV8_ATI',0x877A)
GL_DUDV_ATI=_C('GL_DUDV_ATI',0x8779)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
| [
7061,
6,
16541,
519,
877,
515,
416,
35555,
62,
8612,
378,
4226,
11,
466,
407,
4370,
0,
7061,
6,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
26515,
198,
2,
6127,
5270,
3544,
428,
198,
6738,
30672,
13,
1831,
13,
8763,
1330,
480... | 1.974104 | 502 |
from RayTracer.renderable import SDFBox, SDFCylinder, SDFSphere, CSGObject
from RayTracer.material import Material
from RayTracer.camera import PinHoleCamera
from RayTracer.lights import DistantPointSource
from RayTracer.scene import Scene
objs = []
lights = []

# Materials used below. NOTE(review): the exact meaning of each argument
# (ambient/diffuse/specular triples, trailing scalars) is defined by
# RayTracer.material.Material -- confirm there before reuse. Also note the
# first tuple of `green` has FOUR components (0,1,3,0.3) while the others
# have three; presumably a typo for (0.1,3,0.3) -- verify against Material.
blue = Material((0.1,0.3, 3), (0.1,0.1,3), (0.1, 0.3, 3), (1,1,1), 0,0,1)
green = Material((0,1,3,0.3), (0.1,3,0.1), (0.1,3,0.3), (1,1,1),0,0,1)
red = Material((3,0.1,0.3), (3,0.1,0.3), (3,0.1,0.3), (1,1,1), 0,0,1)

# Three axis-aligned cylinders through the origin (length 2.0, radius 0.35),
# one per coordinate axis -- together they form a "cross" used to drill holes.
cylinder_x = SDFCylinder((0,0,0), (1,0,0), 2.0, 0.35, red)
cylinder_y = SDFCylinder((0,0,0), (0,1,0), 2.0, 0.35, red)
cylinder_z = SDFCylinder((0,0,0), (0,0,1), 2.0, 0.35, red)

# Unit box and a sphere slightly smaller than the box's circumscribed sphere.
box = SDFBox((0,0,0), 1,1,1, green)
sphere = SDFSphere((0,0,0), 0.65, blue)

# Union of the three cylinders -> the drilling tool.
cross = CSGObject()
cross.union(cylinder_x)
cross.union(cylinder_y)
cross.union(cylinder_z)

# Classic CSG demo: (box INTERSECT sphere) MINUS cylinder cross.
result = CSGObject()
result.union(box)
result.intersect(sphere)
result.subtract(cross)
objs.append(result)

# Camera, single distant light, then render and write the image to disk.
cam = PinHoleCamera((2.25, 2.25, 2.5), (-1,-1,2), (-1,1,0), 2400, 1800, 0.8)
light = DistantPointSource((1,1,1), (20,20,20), (40,40,40))
lights.append(light)
my_scene = Scene(objs, lights, cam, (200,200,200), (0.2,0.2,0.2))
my_scene.render(3)
cam.write_file("sdf.bmp")
6738,
7760,
2898,
11736,
13,
13287,
540,
1330,
311,
8068,
14253,
11,
9834,
4851,
2645,
5540,
11,
311,
8068,
38882,
11,
9429,
38,
10267,
198,
6738,
7760,
2898,
11736,
13,
33665,
1330,
14633,
198,
6738,
7760,
2898,
11736,
13,
25695,
1330,... | 2.056122 | 588 |
import json
from urllib.request import Request, urlopen
def post(url, body):
    """POST *body* as a JSON payload to *url* and return the decoded JSON reply.

    Fix: the original script called ``post(...)`` without ever defining or
    importing it (only ``Request``/``urlopen`` are imported above), which
    raised ``NameError`` at runtime. This helper implements the intended
    JSON POST using the already-imported urllib names.
    """
    request = Request(
        url,
        data=json.dumps(body).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urlopen(request) as response:
        return json.loads(response.read().decode("utf-8"))


# Load the metadata document exported to disk next to this script.
with open("metadata.json") as file:
    metadata = json.load(file)

# Replace the server's metadata wholesale with the on-disk version
# (Hasura v1 query API, `replace_metadata` action).
result = post("http://localhost:8080/v1/query", {
    "type": "replace_metadata",
    "args": metadata,
})
print(result)
| [
11748,
33918,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19390,
11,
19016,
9654,
198,
198,
4480,
1280,
7203,
38993,
13,
17752,
4943,
355,
2393,
25,
198,
220,
220,
220,
20150,
796,
33918,
13,
2220,
7,
7753,
8,
198,
220,
220,
220,
12... | 2.598039 | 102 |
from hashlib import sha1
from time import perf_counter
import pytest
from pow_calculator import calculate_pow
# Fixed random-looking alphanumeric payloads; each one is fed to the
# proof-of-work test below via @pytest.mark.parametrize.
test_data = [
    'NMJDwDLSGwiJRcdOiwyeyFpZSbnaFSZPJCoFNvrqFFRjjxGxufKEDKmtBrSULHCe',
    'bCTNzBWdmuzjrOsOHNwVMUoiKqreBZoXGAZOyDoHGaZtGFPArQeBNNSAOHphughq',
    'mmhxAuitDefBRtJXunJMbKyYUkacTLbQmtUklVrfxPJCQzhtqSAdBhxARDYbluiV',
    'xOLhHSZorlXVEIAEpDMEkiFyuZywXnDhHjSnIBsyMlwumDyLuUxwMlmpXajAoskT',
    'XDNLtzVOXjFXvAhJUguJuXThSALSrmBUEHyWbPLteLNnWupNsmGjjOSXhhyDXHNX',
    'yHYPpeDoZtausMLaqAhBKzkNXQOgTJZZFNFSLAyhVRwDKaLuUvCeXSgVqjMPUFSr',
    'YczpCSqUpliNTBcWSlsOLoDvBklPomLZidKGAfEFIRfqZIpQKdQTzfnyVkVWqzhc',
    'FgUcLPSDNSUpjsaxRposXIkPMgVtwOsURAoMNmHRSbgjUonPPKQpPivhvMldKwmw',
    'jucevuUgAvZJRCBDvImuBnnCptTvSnbAvUODWSKEPNhzqJByfGyqmPeIbGLrMAFG',
    'NPysmhIldJEvldCQdpqHNuuhWEjUGMuKiRnOIsxYnbIEZTlAMbnrldreGSUqQRpr',
]
@pytest.mark.parametrize("input_data", test_data)
| [
6738,
12234,
8019,
1330,
427,
64,
16,
198,
6738,
640,
1330,
23035,
62,
24588,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
7182,
62,
9948,
3129,
1352,
1330,
15284,
62,
79,
322,
628,
198,
9288,
62,
7890,
796,
685,
198,
220,
220,
220... | 1.590829 | 567 |
from greww.data.mysql import MysqlPen as M
# MySQL administration statement templates, filled in via str.format.
# For the lock/unlock/rights templates: {0}=user name, {1}=host.
# For GRANT/REVOKE: {0}=privileges, {1}=database, {2}=table, {3}=user, {4}=host.
# NOTE(review): values are interpolated directly into SQL text rather than
# bound as driver parameters -- callers must only pass trusted identifiers.
UNLOCK_USER = "ALTER USER '{0}'@'{1}' ACCOUNT UNLOCK;"
LOCK_USER = "ALTER USER '{0}'@'{1}' ACCOUNT LOCK;"
GRANT_POWER = "GRANT {0} ON {1}.{2} TO '{3}'@'{4}';"
REVOKE_POWER = "REVOKE {0} ON {1}.{2} FROM '{3}'@'{4}';"
USER_RIGHTS = "SHOW GRANTS FOR '{0}'@'{1}';"
| [
6738,
10536,
1383,
13,
7890,
13,
28744,
13976,
1330,
337,
893,
13976,
25553,
355,
337,
198,
198,
4944,
36840,
62,
29904,
796,
366,
1847,
5781,
1294,
1137,
705,
90,
15,
92,
6,
31,
6,
90,
16,
92,
6,
15859,
28270,
4725,
36840,
26033,
... | 1.871166 | 163 |
from django.db.models.query import QuerySet
from django.test import TestCase
from .models import ResultRow, ResultIdModel
from .queries import get_all_pids, save_pid, save_parsing_results, get_results_by_id
# Create your tests here.
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
22766,
1330,
43301,
7248,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
27530,
1330,
25414,
25166,
11,
25414,
7390,
17633,
198,
6738,
764,
421,
10640,
1330,
651,
62,
4... | 3.219178 | 73 |
from mongoengine import *
from spaceone.core.model.mongo_model import MongoModel
from spaceone.inventory.model.region_model import Region
from spaceone.inventory.model.collection_info_model import CollectionInfo
from spaceone.inventory.model.reference_resource_model import ReferenceResource
| [
6738,
285,
25162,
18392,
1330,
1635,
198,
198,
6738,
2272,
505,
13,
7295,
13,
19849,
13,
76,
25162,
62,
19849,
1330,
42591,
17633,
198,
6738,
2272,
505,
13,
24807,
13,
19849,
13,
36996,
62,
19849,
1330,
17718,
198,
6738,
2272,
505,
13... | 4.083333 | 72 |
import numpy as np
import cv2
import math
def rbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
    """Calculate the ious between each rotated bbox of bboxes1 and bboxes2.

    Each row is an axis-aligned rectangle (x1, y1, x2, y2) plus two trailing
    fractions from which an equivalent rotated rectangle
    (center, (w, h), angle) is derived for cv2.rotatedRectangleIntersection.

    Args:
        bboxes1(ndarray): shape (n, >=7); columns 0-3 are x1, y1, x2, y2,
            columns 5-6 are the fractional offsets (column 4 is not read
            here -- presumably a score/angle slot; confirm with callers).
        bboxes2(ndarray): shape (k, >=6); columns 0-3 are x1, y1, x2, y2,
            columns 4-5 are the fractional offsets.
        mode(str): accepted for signature parity with bbox_overlaps; the
            body computes plain IoU regardless of 'iou'/'iof'.
        eps: unused in this function.

    Returns:
        ious(ndarray): shape (n, k)
    """
    assert mode in ['iou', 'iof']
    bboxes1 = bboxes1.astype(np.float32)
    bboxes2 = bboxes2.astype(np.float32)
    # Note the transposed layout: rows index bboxes2, cols index bboxes1;
    # the final `.T` restores the documented (n, k) shape.
    rows = bboxes2.shape[0]
    cols = bboxes1.shape[0]
    ious = np.zeros((rows, cols), dtype=np.float32)
    if rows * cols == 0:
        return ious.T
    # ---- bboxes2 -> (center, size, angle) form ----
    w = bboxes2[:, 2] - bboxes2[:, 0]
    h = bboxes2[:, 3] - bboxes2[:, 1]
    x = (bboxes2[:, 2] + bboxes2[:, 0])/2
    y = (bboxes2[:, 3] + bboxes2[:, 1])/2
    # Offsets along the box edges defining the rotated rectangle's corner.
    l1 = w * bboxes2[:, 4]
    l2 = h * bboxes2[:, 5]
    # Side lengths of the inscribed rotated rectangle.
    wc = np.sqrt(l1 ** 2 + l2 ** 2)
    hc = np.sqrt((w - l1) ** 2 + (h - l2) ** 2)
    angle = np.arctan(l1 / l2)
    # If the first side is the longer one, shift by -90 degrees so the
    # (width, height, angle) triple matches cv2's convention.
    mask = wc>=hc
    angle = angle - mask * np.pi / 2.0
    angle = angle/np.pi * 180  # cv2 expects degrees
    # ---- bboxes1 -> (center, size, angle) form (same construction) ----
    wp = bboxes1[:, 2] - bboxes1[:, 0]
    hp = bboxes1[:, 3] - bboxes1[:, 1]
    xp = (bboxes1[:, 2] + bboxes1[:, 0])/2
    yp = (bboxes1[:, 3] + bboxes1[:, 1])/2
    l1p = wp * bboxes1[:, 5]
    l2p = hp * bboxes1[:, 6]
    wcp = np.sqrt(l1p ** 2 + l2p ** 2)
    hcp = np.sqrt((wp - l1p) ** 2 + (hp - l2p) ** 2)
    anglep = np.arctan(l1p / l2p)
    maskp = wcp>=hcp
    anglep = anglep - maskp * np.pi / 2.0
    anglep = anglep/np.pi * 180
    for i in range(bboxes2.shape[0]):
        r1 = ((x[i], y[i]), (wc[i], hc[i]), angle[i])
        area1 = wc[i] * hc[i]
        for j in range(bboxes1.shape[0]):
            area2 = wcp[j] * hcp[j]
            r2 = ((xp[j], yp[j]), (wcp[j], hcp[j]), anglep[j])
            # Intersection polygon of the two rotated rectangles
            # (None when they do not overlap at all).
            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is not None:
                order_pts = cv2.convexHull(int_pts, returnPoints=True)
                int_area = cv2.contourArea(order_pts)
                # Compute the IoU from the polygon area.
                ious[i][j] = int_area * 1.0 / (area1 + area2 - int_area)
            else:
                ious[i][j] = 0
    return ious.T
def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
    """Compute pairwise overlaps between two sets of axis-aligned boxes.

    Args:
        bboxes1(ndarray): shape (n, 4), boxes as (x1, y1, x2, y2).
        bboxes2(ndarray): shape (k, 4).
        mode(str): 'iou' (intersection over union) or 'iof' (intersection
            over the foreground, i.e. the bboxes1 areas).
        eps(float): lower bound on the denominator, guards against
            division by zero.

    Returns:
        ious(ndarray): shape (n, k)
    """
    assert mode in ['iou', 'iof']
    boxes_a = bboxes1.astype(np.float32)
    boxes_b = bboxes2.astype(np.float32)
    n_a = boxes_a.shape[0]
    n_b = boxes_b.shape[0]
    if n_a == 0 or n_b == 0:
        return np.zeros((n_a, n_b), dtype=np.float32)

    # Loop over the smaller set so each vectorised inner step spans the
    # larger one; remember whether the operands were swapped so both the
    # 'iof' denominator and the output orientation can be restored.
    swapped = n_a > n_b
    if swapped:
        boxes_a, boxes_b = boxes_b, boxes_a
    overlaps = np.zeros((boxes_a.shape[0], boxes_b.shape[0]),
                        dtype=np.float32)

    areas_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    areas_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    for idx in range(boxes_a.shape[0]):
        ix1 = np.maximum(boxes_a[idx, 0], boxes_b[:, 0])
        iy1 = np.maximum(boxes_a[idx, 1], boxes_b[:, 1])
        ix2 = np.minimum(boxes_a[idx, 2], boxes_b[:, 2])
        iy2 = np.minimum(boxes_a[idx, 3], boxes_b[:, 3])
        inter = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)
        if mode == 'iou':
            denom = areas_a[idx] + areas_b - inter
        else:
            # 'iof': denominator is the foreground (original bboxes1) area,
            # which lives on the other side after a swap.
            denom = areas_b if swapped else areas_a[idx]
        overlaps[idx, :] = inter / np.maximum(denom, eps)

    if swapped:
        overlaps = overlaps.T
    return overlaps
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
10688,
198,
198,
4299,
374,
3524,
62,
2502,
75,
1686,
7,
65,
29305,
16,
11,
275,
29305,
17,
11,
4235,
11639,
72,
280,
3256,
304,
862,
28,
16,
68,
12,
21,
2599,
... | 1.884956 | 2,034 |
from os.path import join, dirname
from unittest import TestCase
from werkzeug.datastructures import FileStorage, Headers
import fast_home_api.controllers.upload as upl
from fast_home_api.utils.enums import ControllerStatus
| [
6738,
28686,
13,
6978,
1330,
4654,
11,
26672,
3672,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
266,
9587,
2736,
1018,
13,
19608,
459,
1356,
942,
1330,
9220,
31425,
11,
7123,
364,
198,
198,
11748,
3049,
62,
11195,
62,... | 3.424242 | 66 |
"""
Time In Profile Pic.....
Command: `.bloom`
Hmmmm U need to config DOWNLOAD_PFP_URL_CLOCK var in Heroku with any telegraph image link
:::::Credit Time::::::
1) Coded By: @s_n_a_p_s
2) Ported By: @r4v4n4 (Noodz Lober)
3) End Game Help By: @spechide
4) Better Colour Profile Pic By @PhycoNinja13b
#curse: who ever edits this credit section will goto hell
⚠️DISCLAIMER⚠️
USING THIS PLUGIN CAN RESULT IN ACCOUNT BAN. WE DONT CARE ABOUT BAN, SO WE ARR USING THIS.
"""
from telethon import events
import asyncio
import time
from telethon.tl import functions
from telethon.errors import FloodWaitError
from userbot.utils import admin_cmd
from userbot import AUTONAME, CMD_HELP, DEFAULT_BIO
import random
import re
import pybase64
import os
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from pySmartDL import SmartDL
import shutil
# Fallback bio text when the DEFAULT_BIO env/config value is unset
# (decorative unicode letterforms spelling "WAITING LIKE TIME").
DEFAULTUSERBIO = str(DEFAULT_BIO) if DEFAULT_BIO else " ᗯᗩᏆᎢᏆᑎᏀ ᏞᏆᏦᗴ ᎢᏆᗰᗴ "
# Delay (seconds) used by the update loops below.
DEL_TIME_OUT = 60
# Display name fallback when AUTONAME is unset.
DEFAULTUSER = str(AUTONAME) if AUTONAME else "cat"
# Monospace font used to draw the clock onto the profile picture.
# NOTE(review): path assumes a Debian/Ubuntu-style fonts package -- confirm
# it exists on the deployment image.
FONT_FILE_TO_USE = "/usr/share/fonts/truetype/liberation/LiberationMono-Regular.ttf"
@borg.on(admin_cmd(pattern="autopic$"))
@borg.on(admin_cmd(pattern="digitalpfp"))
@borg.on(admin_cmd(pattern="bloom ?(.*)"))
@borg.on(admin_cmd(pattern="autoname")) # pylint:disable=E0602
@borg.on(admin_cmd(pattern="autobio")) # pylint:disable=E0602
# Frames for the animated "monkey bio": a pointer sweeping right across a
# progress bar, then back left, followed by monkey emoji; the whole
# 28-frame sequence appears twice.
BIO_STRINGS = [
    "👉⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️👉⬛️⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️👉⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️👉⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️👉⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️👉⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️👉⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉🔳",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉⬜️🔳",
    "⬜️⬜️⬜️⬜️⬜️⬜️👉⬜️⬜️🔳",
    "⬜️⬜️⬜️⬜️⬜️👉⬜️⬜️⬜️🔳",
    "⬜️⬜️⬜️⬜️👉⬜️⬜️⬜️⬜️🔳",
    "⬜️⬜️⬜️👉⬜️⬜️⬜️⬜️⬜️🔳",
    "⬜️⬜️👉⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "⬜️👉⬜️⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "👉⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "🐵",
    "🙈",
    "🙉",
    "🙊",
    "🐵",
    "🐵",
    "🙈",
    "🙉",
    "🙊",
    "🐵",
    "👉⬛️⬛️⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️👉⬛️⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️👉⬛️⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️👉⬛️⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️👉⬛️⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️👉⬛️⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️👉⬛️⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉⬛️🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉🔲",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉🔳",
    "⬜️⬜️⬜️⬜️⬜️⬜️⬜️👉⬜️🔳",
    "⬜️⬜️⬜️⬜️⬜️⬜️👉⬜️⬜️🔳",
    "⬜️⬜️⬜️⬜️⬜️👉⬜️⬜️⬜️🔳",
    "⬜️⬜️⬜️⬜️👉⬜️⬜️⬜️⬜️🔳",
    "⬜️⬜️⬜️👉⬜️⬜️⬜️⬜️⬜️🔳",
    "⬜️⬜️👉⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "⬜️👉⬜️⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "👉⬜️⬜️⬜️⬜️⬜️⬜️⬜️⬜️🔳",
    "🐵",
    "🙈",
    "🙉",
    "🙊",
    "🐵",
    "🐵",
    "🙈",
    "🙉",
    "🙊",
    "🐵",
]
@borg.on(admin_cmd(pattern="monkeybio")) # pylint:disable=E0602
# Register this plugin's help text under the "autoprofile" key.
# The literal below relies on backslash line-continuations INSIDE the
# string, so the embedded \n escapes (not source newlines) produce the
# displayed line breaks.
CMD_HELP.update({
    "autoprofile":
    "`.autopic`\
\n**USAGE:** Rotating image along with the time on it .\
\nfor working this you must set `DOWNLOAD_PFP_URL_CLOCK` in the heroku vars first with telegraph link of required image\
\n\n`.digitalpfp`\
\n**USAGE:**Your profile pic changes to digitaltime profile picutre \
\n\n`.bloom`\
\n**USAGE:**Random colour profile pics will be setted along with time on it.\
\nfor working this you must set `DOWNLOAD_PFP_URL_CLOCK` in the heroku vars first with telegraph link of required image\
\n\n`.autoname`\
\n**USAGE:**for time along name to work this you must set `AUTONAME`in the heroku vars first \
\n\n`.autobio`\
\n**USAGE:**for time along with your bio to work this you must set `DEFAULT_BIO` in the heroku vars first \
\n\n`.monkeybio`\
\n**USAGE:**set of funny monkey bio's\
\n\n for stoping these aby command you need to do `.restart` and change them manually\
"
})
| [
37811,
198,
7575,
554,
13118,
15085,
12359,
198,
21575,
25,
4600,
13,
2436,
4207,
63,
198,
39,
40133,
471,
761,
284,
4566,
30320,
35613,
62,
47,
5837,
62,
21886,
62,
5097,
11290,
1401,
287,
2332,
11601,
351,
597,
573,
16606,
2939,
279... | 1.306564 | 2,727 |
# Testing code used during development
from pygestalt import packets
from pygestalt import utilities
from pygestalt import core
from pygestalt import nodes
from pygestalt import config
from pygestalt import interfaces
from pygestalt import units
from pygestalt import geometry
from pygestalt import mechanics
import copy
import time
# Define Packets
# payloadTestPacket = packets.template('port5Default')
# payloadTestPacket = packets.template('payloadTestPacket',
# packets.unsignedInt('xPosition', 2),
# packets.signedInt('yPosition', 2),
# packets.fixedPoint('pressureSensor', 0,16),
# packets.pString('URL'))
#
# gestaltPacket = packets.template('gestaltPacketTemplate',
# packets.unsignedInt('_startByte_',1),
# packets.unsignedInt('_address_', 2),
# packets.unsignedInt('_port_', 1),
# packets.length('_length_'),
# packets.packet('_payload_'),
# packets.checksum('_checksum_'))
#
#
#
# payloadDict = {'xPosition':1025, 'yPosition':-1024, 'pressureSensor':-0.99999, 'URL':'www.fabunit.com'}
# encodedPayloadPacket = payloadTestPacket.encode(payloadDict)
# fixedPointTestPacket = packets.template('fixedTest',
# packets.fixedPoint('pressureSensor', 1,11))
# encodedPacket = fixedPointTestPacket.encode({'pressureSensor':0.95})
# decimalValue = utilities.bytesToUnsignedInteger(encodedPacket)
# print decimalValue
# print fixedPointTestPacket.decode(encodedPacket)
# bitFieldTestPacket = packets.template('bitFieldTest',
# packets.bitfield('myBitField', 8, (0,'bitO', True),
# (1,'bit1', True),
# (2,'bit2', True),
# (3,'bit3', True),
# (4,'bit4', False),
# (5,'bit5', False),
# (6,'bit6', False),
# (7,'bit7', False)))
# encodedPacket = bitFieldTestPacket.encode({'myBitField':{'bit0':True,'bit1':False, 'bit2':True,'bit3':False,'bit4':True, 'bit5':False,'bit6':True}})
# print encodedPacket
#
# print bitFieldTestPacket.decode(encodedPacket)
#
# gestaltDict = {'_startByte_':72, '_address_':1, '_port_':72, '_payload_':encodedPayloadPacket}
# gestaltDict = {'_startByte_':72, '_address_':1, '_port_':72, '_payload_':[]}
# encodedGestaltPacket = gestaltPacket.encode(gestaltDict)
# print encodedGestaltPacket[0:3]
# print gestaltPacket.decodeTokenInIncompletePacket('_port_', encodedGestaltPacket[0:4])
# exit()
# myEncodedPacket = [72, 1, 0, 72, 25, 1, 4, 0, 252, 1, 128, 119, 119, 119, 46, 102, 97, 98, 117, 110, 105, 116, 46, 99, 111, 109, 203]
# print gestaltPacket.validateChecksum('_checksum_', myEncodedPacket)
#
# decodedGestaltPacket, remainder = gestaltPacket.decode(encodedGestaltPacket)
#
# decodedPayloadPacket, remainder = payloadTestPacket.decode(decodedGestaltPacket['_payload_'])
# embeddedTestPacket = packets.template('embeddedTestPacket',
# packets.unsignedInt('zPosition',2),
# packets.packetTemplate('myTemplate', payloadTestPacket),
# packets.fixedPoint('temperatureSensor', 0, 15))
#
# embeddedDict = copy.copy(payloadDict)
#
# embeddedDict.update({'zPosition':7272, 'temperatureSensor':0.501})
#
# encodedEmbeddedPacket = embeddedTestPacket.encode(embeddedDict)
# gestaltDict['_payload_'] = encodedEmbeddedPacket
# encodedGestaltPacket = gestaltPacket.encode(gestaltDict)
#
# decodedGestaltPacket = gestaltPacket.decode(encodedGestaltPacket)[0]
# gestaltPayload = decodedGestaltPacket['_payload_']
#
# gestaltPayloadStartIndex, gestaltPayloadEndIndex, gestaltPayloadToken = gestaltPacket.findTokenPosition('_payload_', encodedGestaltPacket)
# searchedPayload = encodedGestaltPacket[gestaltPayloadStartIndex:gestaltPayloadEndIndex]
# print searchedPayload
#
# decodedEmbeddedPacket = embeddedTestPacket.decode(searchedPayload)[0]
#
# startIndex, endIndex, token = embeddedTestPacket.findTokenPosition('temperatureSensor', searchedPayload)
# print token.decode(searchedPayload[startIndex: endIndex])[0]
# class getTuna(core.actionObject):
# def init(self, name):
# self.name = name
# return self.classInfo
#
# getTuna.classInfo = "hello"
# x = getTuna('charlie')
# print x
# config.syntheticModeOn() #turn on synthetic mode
# # The code below tests whether actionObject classes are being copied
# serialInterface = interfaces.serialInterface()
# serialInterface.connect()
# gestaltInterface = interfaces.gestaltInterface('myGestaltInterface', interface = serialInterface)
#
#
# config.verboseDebugOn()
# myGestaltNode = nodes.arduinoGestaltNode(name = "myGestaltNode", port = "/dev/tty.usbmodem1451")
# myGestaltNode = nodes.gestaltNodeShell(name = "myGestaltNode", interface = serialInterface)
# myGestaltNode = nodes.gestaltNodeShell(name = "myGestaltNode")
# print myGestaltNode._virtualNode_
# myGestaltNode = nodes.arduinoGestaltVirtualNode(name = "myGestaltNode", port = "/dev/tty.usbmodem1451")
# myGestaltNode = nodes.arduinoGestaltNode(name = "myGestaltNode", port = "/dev/tty.usbmodem1451")
#
# print myGestaltNode.statusRequest()
#
# print myGestaltNode.bootWriteRequest(pageNumber = 0, data = range(128))
# print myGestaltNode.bootReadRequest(pageNumber = 127)
# print myGestaltNode.urlRequest()
# print myGestaltNode.setAddressRequest(1025)
# print myGestaltNode.synNodeAddress
# print myGestaltNode.identifyRequest()
# print myGestaltNode.resetRequest()
# print myGestaltNode
# class myGestaltNode(nodes.gestaltNode):
# def init(self):
# print "myGestaltNode init"
# def onLoad(self):
# print "myGestaltNode onLoad"
# --UNITS--
# myNumber = units.mm(1.243)
# myNumberTwo = units.mm(0.5)
# myNumberThree = units.mm(2)
#
# print myNumberThree * myNumberThree
# print myNumberThree ** 2
# baseUnits, conversion = units.getBaseUnits(units.yd)
# print baseUnits.fullName
# print conversion
# unitDict = units.unitDictionary({units.m:1, units.s:-2})
# unitDict2 = unitDict / {units.s:2, units.m:-1}
# print unitDict2
#
# unitDict3 = 1/unitDict
# print unitDict3
#
# unitDict4 = unitDict**3
# print unitDict4
# float1 = units.m(1.45)
# float2 = units.s(4.5)
# print (float1*float2)
#
# float3 = 1.423*units.N*units.m/units.s**2
# print float3
# float1 = 4.44*units.m
# print float1
# float2 = 7.77*units.s
# print float2
# print float1 / float2
# mySpeed = 100*units.m/units.s**2
#
# print mySpeed.convert(units.m/units.min**2)
# ---MECHANICS---
# myArray = geometry.array([[1,2,3],[4,5,6],[7,8,9]])
# print myArray[0,1]
# print type(myArray[0,:])
# print units.mm/units.rev
# myLeadscrew = mechanics.leadscrew(units.rev(10)/units.inch(1))
# print myLeadscrew.forwardTransform
# print myLeadscrew.forward(360*units.deg)
# print myLeadscrew.reverse(0.25*units.inch)
# print myLeadscrew.transform(360*units.deg)
# print myLeadscrew.transform(units.mm(2.54))
# myGearbox = mechanics.gear(5)
# print myGearbox.forward(units.rad(6.28*5))
# print myGearbox.reverse(units.deg(72))
# myTimingBelt = mechanics.timingBelt(18)
# print myTimingBelt.forward(units.rev(1))
# myStepperMotor = mechanics.stepperMotor(units.step(400)/units.rev(1))
# print myStepperMotor.forward(units.step(200))
# myLeadscrew = mechanics.leadscrew(units.mm(10)) #define a leascrew with a pitch of 10mm
# myGearbox = mechanics.gear(2) #define a gearbox with a ratio of 2:1
# myStepper = mechanics.stepper(units.step(200)/units.rev(1)) #define a stepper motor with 200 steps per rev
#
# xAxisChain = mechanics.chain(myStepper, myGearbox, myLeadscrew)
# yAxisChain = mechanics.chain(mechanics.stepper(1.8), mechanics.gear(2), mechanics.leadscrew(10))
#
# actuators = mechanics.stack(xAxisChain, yAxisChain)
#
# print xAxisChain.reverse(units.mm(10))
# print xAxisChain.forward(units.step(100))
# print yAxisChain.forward(units.step(200))
# print actuators.getSize()
# print actuators.forward([units.step(100), units.step(200)])
# print actuators.reverse([units.mm(2.5), units.mm(5)])
#
# testStack = mechanics.stack(xAxisChain)
# print testStack.forward(units.step(100))
# print testStack.reverse(units.mm(2.5))
#
# myCoreXY = mechanics.corexy()
# print myCoreXY.forward([1,1])
# print myCoreXY.reverse([1,0])
# aAxis = mechanics.chain( mechanics.stepper(1.8), mechanics.timingBelt(9.6))
# bAxis = mechanics.chain(mechanics.stepper(1.8), mechanics.timingBelt(9.6))
# cAxis = mechanics.chain(mechanics.stepper(1.8), mechanics.rack(5))
# coreXY = mechanics.corexy()
# kinematics = mechanics.chain(mechanics.stack(aAxis, bAxis, cAxis), mechanics.stack(mechanics.corexy(), mechanics.passThru(1)))
# print kinematics.reverse([units.mm(100), units.mm(100), units.mm(30)])
# outputOffset = mechanics.offset(3)
# kinematics2 = mechanics.chain(mechanics.stack(aAxis, bAxis, cAxis), mechanics.stack(mechanics.corexy(), mechanics.passThru(1)), mechanics.router([0,2,1]), outputOffset)
# print kinematics2.reverse([units.mm(100), units.mm(30), units.mm(100)])
# print kinematics2.reverse([units.mm(100), units.mm(100), units.mm(30)])
# print kinematics2.forward([units.step(1326), units.step(0), units.step(381)])
# outputOffset.adjust([-100, -30, -100])
# print kinematics2.forward([units.step(1326), units.step(0), units.step(381)])
## ----array addition and subtraction--------
# myArray1 = geometry.array([[1,2,3,4],[4,3,2,1]])
# myArray2 = geometry.array([[5,6,6,5],[8,7,6,5]])
#
# print geometry.arrayAddition(myArray1, myArray2)
# print geometry.arraySubtraction(myArray1, myArray2)
# print " "
# print myArray1 + myArray2
# print myArray1 + [[5,6,6,5], [8,7,6,5]]
# print [[5,6,6,5], [8,7,6,5]] + myArray1
# print myArray1 - [[5,6,6,5], [8,7,6,5]]
# print [[5,6,6,5], [8,7,6,5]] - myArray1
#
# matrix1 = geometry.matrix([[2,-2],[5,3]])
# matrix2 = geometry.matrix([[-1,4],[7,-6]])
# print matrix1
# print matrix1.transpose()
# print matrix1*matrix2
# print geometry.dotProduct(matrix1, matrix2)
# exit()
# myList = geometry.array([[1,2,3],[4,5,6],[7,8,9]])
# myList2 = myList[:,0]
# print isinstance(myList, list)
# print "size: " + str(myList.getSize())
# print "dimension: " + str(myList.getDimension())
# myMatrix = geometry.matrix([1,2,3])
# matrix1 = geometry.matrix([[2,-2],[5,3]])
# matrix4 = geometry.matrix([[units.mm(1)],[units.mm(1)]])
#
# print myMatrix[0]
#
# #
# print matrix1
# print matrix1.getSize()
# #
# #
# newMatrix = matrix1[0,:]
# print newMatrix
#
# exit()
#
# print newMatrix[0]
# matrix3 = matrix1*matrix4
# matrix3 = matrix3 * 10
# print matrix3[1,0]
# matrix4 = geometry.matrix([[3,8],[4,6]])
# x = geometry.testClass()
# x[1:,2:]
# print matrix4.determinant()
# print myList[1,1]
# time.sleep(2)
# myArray = geometry.array([[1,2,3],[4,5,6],[7,8,9]])
# print myArray.getSize()
# print myArray.getDimension()
# print geometry.indexIntoArray(myArray, (2,2))
# print myArray[0]
#
# myArray2 = geometry.array([[1,2,3]])
# print myArray2
# print myArray2[1:]
# matrix1 = geometry.matrix([[2,-2],[5,3]])
# matrix2 = geometry.matrix([[-1,4],[7,-6]])
# print matrix1
# print matrix1.transpose()
# print matrix1*matrix2
# print matrix1.concatenateRight(matrix2)
# print matrix1.concatenateLeft(matrix2)
# matrix5 = geometry.matrix([[units.mm(1),units.mm(2)],[units.mm(3),units.mm(4)]])
# print geometry.arrayMultiplyByScalar(matrix5, 2)
# print matrix5[:,:0]
# matrix6 = geometry.matrix(geometry.matrixInverse(matrix5))
# print matrix6
# print matrix6[0,0]
# matrix7 = geometry.matrix([[units.mm(4),units.mm(2),units.mm(8)],[units.mm(7),units.mm(3),units.mm(9)],[units.mm(2),units.mm(6),units.mm(7)]])
# print matrix7.inverse()[0,0]
# print geometry.dotProduct(matrix1, matrix2)
# matrix8 = geometry.matrix([[1,2,3],[4,5,6],[7,8,9]])
# matrix9 = matrix8 * units.mm(1.0)
# print matrix9[0,0]
##----- Persistence -----
# persistenceManager = utilities.persistenceManager(filename = "../persistenceTest.vmp", namespace = 'myName')
#
# persistenceManager['hello'] = [5,6,7]
# print persistenceManager['hello']
#
# persistenceManager.set('goodbye', 'wuzzup')
# print persistenceManager.get('goodbye')
##----- Function Distributor -----
# Smoke-test core.distributedFunctionCall with three receiver objects.
# NOTE(review): `tester` is not defined anywhere in the visible (non-commented)
# portion of this scratch file -- presumably it lives in the commented-out
# experiments above or elsewhere; confirm before running this script.
test1 = tester()
test2 = tester()
test3 = tester()
print(core.distributedFunctionCall('owner', [test1, test2, test3], 'test', core.syncToken, 'arg1', 'arg2', 1, arg4 = "arg4", arg5 = ("arg51", "arg52", "arg53")))
| [
2,
23983,
2438,
973,
1141,
2478,
198,
198,
6738,
12972,
3495,
2501,
1330,
24624,
198,
6738,
12972,
3495,
2501,
1330,
20081,
198,
6738,
12972,
3495,
2501,
1330,
4755,
198,
6738,
12972,
3495,
2501,
1330,
13760,
198,
6738,
12972,
3495,
2501,... | 2.308099 | 5,680 |
from characteristic import attributes
from eliot import Message, MessageType, Field
from effect import TypeDispatcher, ComposedDispatcher
from effect.twisted import (
make_twisted_dispatcher,
)
from effect.twisted import (
perform, deferred_performer)
from twisted.conch.endpoints import (
SSHCommandClientEndpoint,
# https://twistedmatrix.com/trac/ticket/7861
_NewConnectionHelper,
# https://twistedmatrix.com/trac/ticket/7862
_ReadFile, ConsoleUI,
)
from twisted.conch.client.knownhosts import KnownHostsFile
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.endpoints import UNIXClientEndpoint, connectProtocol
from twisted.internet.error import ConnectionDone
from twisted.protocols.basic import LineOnlyReceiver
from twisted.python.filepath import FilePath
import os
from flocker.testtools import loop_until
from ._model import (
Run, Sudo, Put, Comment, RunRemotely, perform_comment, perform_put,
perform_sudo)
from .._effect import dispatcher as base_dispatcher
from ._monkeypatch import patch_twisted_7672
RUN_OUTPUT_MESSAGE = MessageType(
message_type="flocker.provision.ssh:run:output",
fields=[
Field.for_types(u"line", [bytes], u"The output."),
],
description=u"A line of command output.",
)
@attributes([
"deferred",
"context",
])
class CommandProtocol(LineOnlyReceiver, object):
"""
Protocol that logs the lines of a remote command.
:ivar Deferred deferred: Deferred to fire when the command finishes
If the command finished successfully, will fire with ``None``.
Otherwise, errbacks with the reason.
:ivar Message context: The eliot message context to log.
"""
delimiter = b'\n'
def get_ssh_dispatcher(connection, context):
"""
:param Message context: The eliot message context to log.
:param connection: The SSH connection run commands on.
"""
@deferred_performer
return TypeDispatcher({
Run: perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: perform_comment,
})
def get_connection_helper(address, username, port):
"""
Get a :class:`twisted.conch.endpoints._ISSHConnectionCreator` to connect to
the given remote.
:param bytes address: The address of the remote host to connect to.
:param bytes username: The user to connect as.
:param int port: The port of the ssh server to connect to.
:return _ISSHConnectionCreator:
"""
try:
agentEndpoint = UNIXClientEndpoint(
reactor, os.environ["SSH_AUTH_SOCK"])
except KeyError:
agentEndpoint = None
return _NewConnectionHelper(
reactor, address, port, None, username,
keys=None,
password=None,
agentEndpoint=agentEndpoint,
knownHosts=KnownHostsFile.fromPath(FilePath("/dev/null")),
ui=ConsoleUI(lambda: _ReadFile(b"yes")))
@deferred_performer
@inlineCallbacks
| [
198,
6738,
16704,
1330,
12608,
198,
198,
6738,
1288,
5151,
1330,
16000,
11,
16000,
6030,
11,
7663,
198,
198,
6738,
1245,
1330,
5994,
7279,
8071,
2044,
11,
3082,
1335,
7279,
8071,
2044,
198,
6738,
1245,
13,
4246,
6347,
1330,
357,
198,
... | 2.864891 | 1,051 |
"""
Author(s): Carson Schubert (carson.schubert14@gmail.com)
Date Created: 04/11/19
Tests the ravenml local_cache module.
NOTE: If this test fails, it may cause cascades of failures in other tests.
"""
import pytest
import os
import re
from pathlib import Path
from click.testing import CliRunner
from ravenml.cli import cli
from ravenml.utils.local_cache import RMLCache
### SETUP ###
runner = CliRunner()
test_dir = Path(os.path.dirname(__file__))
test_data_dir = test_dir / Path('data')
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
test_cache = RMLCache()
def setup_module():
""" Sets up the module for testing.
"""
test_cache.path = test_dir / '.testing'
def teardown_module():
""" Tears down the module after testing.
"""
test_cache.clean()
### TESTS ###
def test_no_leaky_cache_creation():
"""Assues that simply running `ravenml` on the command line does not
create the local cache. Can indicate that somewhere a piece of code is
ensuring the existence of the local cache on import, which we DO NOT want.
"""
result = runner.invoke(cli)
assert result.exit_code == 0
assert not os.path.exists(test_cache.path)
| [
37811,
198,
13838,
7,
82,
2599,
220,
220,
220,
220,
220,
16787,
3059,
84,
4835,
357,
66,
12613,
13,
20601,
84,
4835,
1415,
31,
14816,
13,
785,
8,
198,
10430,
15622,
25,
220,
220,
8702,
14,
1157,
14,
1129,
198,
198,
51,
3558,
262,
... | 2.82783 | 424 |
import math
import time
import os
try: import cpickle as pickle
except: import pickle
import opts
import autolux
LUMA_BUCKET=500
LUMA_SPREAD=5000
# for a luma map, what we hold is:
# time of day -> luma -> [p1,p2,p3]
LUMA_MAP = {}
LUMA_OBS = []
LUMA_FILE=None
LUMA_DIR=os.path.expanduser("~/.config/autolux")
LUMA_FILE_DEFAULT = os.path.join(LUMA_DIR, "luma_map.p")
OLD_LUMA_FILE_DEFAULT = os.path.expanduser("~/.config/autolux.luma_map")
LUMA_FILE=None
CHANGES_FILE = os.path.join(LUMA_DIR, "brightness_changes.p")
try: os.makedirs(LUMA_DIR)
except: pass
if os.path.exists(OLD_LUMA_FILE_DEFAULT):
os.rename(OLD_LUMA_FILE_DEFAULT, LUMA_FILE_DEFAULT)
LAST_SAVE = None
SAVE_INTERVAL=1000
# TODO: nearest neighbors search here, instead of only looking for the current
# hour and current luma
MAX_LUMA_PTS=7
HOUR_SLICE=10
HOUR_SPREAD=60
LAST_CALIBRATE=int(time.time())
NEXT_CALIBRATE=4
LAST_OBSERVATION = 0
| [
11748,
10688,
198,
11748,
640,
198,
11748,
28686,
198,
28311,
25,
1330,
269,
27729,
293,
355,
2298,
293,
198,
16341,
25,
1330,
2298,
293,
198,
198,
11748,
2172,
82,
198,
11748,
1960,
349,
2821,
628,
198,
41596,
5673,
62,
33,
16696,
27... | 2.3175 | 400 |
import tkinter as tk # this is in standard library
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| [
11748,
256,
74,
3849,
355,
256,
74,
1303,
428,
318,
287,
3210,
5888,
628,
198,
198,
15763,
796,
256,
74,
13,
51,
74,
3419,
198,
1324,
796,
15678,
7,
9866,
28,
15763,
8,
198,
1324,
13,
12417,
26268,
3419,
198
] | 2.875 | 40 |
# This is a python implementation of the Solitaire Cipher, a key-generator encryption/decryption Algorithm based on a deck of french cards developed by Bruce Schneier
# Reference can be found at: https://www.schneier.com/academic/solitaire/
# This algorithm uses the bridge order of suits: clubs, diamonds, hearts and spades
# If the card is a club, it is the value shown
# If the card is a diamond, it is the value plus 13
# If it is a heart, it is the value plus 26
# If it is a spade, it is the value plus 39
# Either joker is a 53
# Therefore clubs are numbers from 1 to 13, diamonds are cards from 14 to 26, hearts are cards from 27 to 39 and spades are cards from 40 to 52
import random
from random import shuffle
# Examples:
# encryption(5)
decryption(5)
| [
2,
770,
318,
257,
21015,
7822,
286,
262,
28762,
44334,
11,
257,
1994,
12,
8612,
1352,
15835,
14,
12501,
13168,
978,
42289,
1912,
319,
257,
6203,
286,
48718,
4116,
4166,
416,
11088,
23343,
959,
220,
198,
2,
20984,
460,
307,
1043,
379,
... | 3.663507 | 211 |
# -*- coding:utf-8 -*-
import json
from django.core.exceptions import ValidationError
from django.http.response import JsonResponse
from django.utils.datastructures import MultiValueDictKeyError
from 臺灣言語資料庫.資料模型 import 種類表
from 臺灣言語平臺.項目模型 import 平臺項目表
from 臺灣言語平臺.介面.Json失敗回應 import Json失敗回應
from 臺灣言語資料庫.資料模型 import 來源表
from 臺灣言語平臺.tasks import 新文本自資料庫加入sheet
@加文本了愛加入sheet
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
4023,
13,
26209,
1330,
449,
1559,
31077,
198,
67... | 1.285714 | 301 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from cubes_lite.compat import py3k
if not py3k:
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
34896,
62,
36890,
13,
5589,
265,
1330,
12972,
18,
74,
... | 2.560976 | 82 |
from app.notify import NotifyMessage
from jira.Api import Api
| [
6738,
598,
13,
1662,
1958,
1330,
1892,
1958,
12837,
198,
6738,
474,
8704,
13,
32,
14415,
1330,
5949,
72,
628
] | 3.15 | 20 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.TextEncoder import TextEncoder
class DPQ(TextEncoder):
'''
This is the implementation for differentiable product quantization models (e.g., SPQ, DQN, DVSQ)
'''
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
4981,
13,
8206,
2719... | 3.184466 | 103 |
import random
from aalpy.automata import Onfsm, Mdp, StochasticMealyMachine
from aalpy.base import Oracle, SUL
automaton_dict = {Onfsm: 'onfsm', Mdp: 'mdp', StochasticMealyMachine: 'smm'}
class RandomWalkEqOracle(Oracle):
"""
Equivalence oracle where queries contain random inputs. After every step, 'reset_prob' determines the probability
that the system will reset and a new query asked.
"""
def __init__(self, alphabet: list, sul: SUL, num_steps=5000, reset_after_cex=True, reset_prob=0.09):
"""
Args:
alphabet: input alphabet
sul: system under learning
num_steps: number of steps to be preformed
reset_after_cex: if true, num_steps will be preformed after every counter example, else the total number
or steps will equal to num_steps
reset_prob: probability that the new query will be asked
"""
super().__init__(alphabet, sul)
self.step_limit = num_steps
self.reset_after_cex = reset_after_cex
self.reset_prob = reset_prob
self.random_steps_done = 0
self.automata_type = None | [
11748,
4738,
198,
198,
6738,
257,
282,
9078,
13,
2306,
296,
1045,
1330,
1550,
69,
5796,
11,
337,
26059,
11,
520,
5374,
3477,
44,
2287,
88,
37573,
198,
6738,
257,
282,
9078,
13,
8692,
1330,
18650,
11,
311,
6239,
198,
198,
2306,
296,
... | 2.469083 | 469 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetVmClusterPatchResult',
'AwaitableGetVmClusterPatchResult',
'get_vm_cluster_patch',
]
@pulumi.output_type
class GetVmClusterPatchResult:
"""
A collection of values returned by getVmClusterPatch.
"""
@property
@pulumi.getter(name="availableActions")
def available_actions(self) -> Sequence[str]:
"""
Actions that can possibly be performed using this patch.
"""
return pulumi.get(self, "available_actions")
@property
@pulumi.getter
def description(self) -> str:
"""
The text describing this patch package.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastAction")
def last_action(self) -> str:
"""
Action that is currently being performed or was completed last.
"""
return pulumi.get(self, "last_action")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
A descriptive text associated with the lifecycleState. Typically can contain additional displayable text.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="patchId")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the patch as a result of lastAction.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeReleased")
def time_released(self) -> str:
"""
The date and time that the patch was released.
"""
return pulumi.get(self, "time_released")
@property
@pulumi.getter
def version(self) -> str:
"""
The version of this patch package.
"""
return pulumi.get(self, "version")
@property
@pulumi.getter(name="vmClusterId")
# pylint: disable=using-constant-test
def get_vm_cluster_patch(patch_id: Optional[str] = None,
vm_cluster_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVmClusterPatchResult:
"""
This data source provides details about a specific Vm Cluster Patch resource in Oracle Cloud Infrastructure Database service.
Gets information about a specified patch package.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_vm_cluster_patch = oci.database.get_vm_cluster_patch(patch_id=oci_database_patch["test_patch"]["id"],
vm_cluster_id=oci_database_vm_cluster["test_vm_cluster"]["id"])
```
:param str patch_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the patch.
:param str vm_cluster_id: The VM cluster [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
"""
__args__ = dict()
__args__['patchId'] = patch_id
__args__['vmClusterId'] = vm_cluster_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getVmClusterPatch:getVmClusterPatch', __args__, opts=opts, typ=GetVmClusterPatchResult).value
return AwaitableGetVmClusterPatchResult(
available_actions=__ret__.available_actions,
description=__ret__.description,
id=__ret__.id,
last_action=__ret__.last_action,
lifecycle_details=__ret__.lifecycle_details,
patch_id=__ret__.patch_id,
state=__ret__.state,
time_released=__ret__.time_released,
version=__ret__.version,
vm_cluster_id=__ret__.vm_cluster_id)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.462253 | 1,722 |
from pyx import canvas, color, deco, path, style, text, unit
boxwidth = 3
height = 4
qi = 0.2*boxwidth
qf = 0.7*boxwidth
linecolor1 = color.rgb(0.8, 0, 0)
linecolor2 = color.rgb(0, 0, 0.8)
text.set(engine=text.LatexEngine)
text.preamble(r'''\usepackage[sfdefault,scaled=.85]{FiraSans}
\usepackage{newtxsf}''')
unit.set(vscale=1.2, wscale=1.3, xscale=1.5)
c = canvas.canvas()
c.fill(path.rect(0, 0, boxwidth, height), [color.rgb(0.92, 1, 0.92)])
for n in range(-1, 4):
c.stroke(path.line(n*boxwidth, 0, n*boxwidth, height),
[style.linewidth.THick, color.grey(0.4)])
poslinestyle = (style.linestyle.dashed, style.linewidth.Thick)
for n in range(-1, 2):
q = qf + 2*n*boxwidth
c.stroke(path.line(q, 0, q, height), [*poslinestyle, linecolor1])
c.stroke(path.line(q, height+1.1, q, height+1.5), [style.linewidth.thick])
for n in range(-1, 2):
q = -qf + (2*n+2)*boxwidth
c.stroke(path.line(q, 0, q, height), [*poslinestyle, linecolor2])
c.stroke(path.line(q, height+0.1, q, height+0.5), [style.linewidth.thick])
for n in range(0, 2):
c.stroke(path.line(-qf+2*n*boxwidth, height+0.3,
-qf+2*(n+1)*boxwidth, height+0.3),
[style.linewidth.thick, deco.barrow, deco.earrow])
c.text(-qf+(1+2*n)*boxwidth, height+0.4, '$2L$',
[text.halign.center])
c.stroke(path.line(qf-2*(n-1)*boxwidth, height+1.3,
qf-2*n*boxwidth, height+1.3),
[style.linewidth.thick, deco.barrow, deco.earrow])
c.text(qf-(2*n-1)*boxwidth, height+1.4, '$2L$',
[text.halign.center])
c.text(qf, -0.5, r'$q_\text{f}$', [text.halign.center])
c.text(2*boxwidth-qf, -0.5, r'$2L-q_\text{f}$', [text.halign.center])
c.writePDFfile()
| [
6738,
12972,
87,
1330,
21978,
11,
3124,
11,
875,
78,
11,
3108,
11,
3918,
11,
2420,
11,
4326,
198,
198,
3524,
10394,
796,
513,
198,
17015,
796,
604,
198,
40603,
796,
657,
13,
17,
9,
3524,
10394,
198,
80,
69,
796,
657,
13,
22,
9,
... | 1.929438 | 907 |
# 5. Longest Palindromic Substring | [
2,
642,
13,
5882,
395,
3175,
521,
398,
291,
3834,
8841
] | 3.090909 | 11 |
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 2.0
# Copyright (C) 2019 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
from __future__ import absolute_import
import pytest
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
from ansible_collections.community.general.tests.unit.compat.mock import MagicMock
import json
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
198,
2,
23617,
412,
9655,
4946,
5124,
496,
28038,
856,
3401,
5028,
198,
2,
10628,
362,
13,
15,
198,
2,
15069,
357,
34,
8,
13130,
23617,
3457,
13,
198,
198,
... | 3.284519 | 239 |
import json, os
def save_json_to_disk(dir_path: str, result: dict, rel_path: str):
"""Saves JSON object in a JSON file on disk.
:param dir_path: Path to directory where result needs to be saved.
:type dir_path: str
:param result: Data to be written in the JSON file.
:type result: dict
:param rel_path: Relative path to file.
:type rel_path: str
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(rel_path, "w") as f:
json.dump(result, f)
| [
11748,
33918,
11,
28686,
628,
198,
4299,
3613,
62,
17752,
62,
1462,
62,
39531,
7,
15908,
62,
6978,
25,
965,
11,
1255,
25,
8633,
11,
823,
62,
6978,
25,
965,
2599,
198,
220,
220,
220,
37227,
50,
3080,
19449,
2134,
287,
257,
19449,
2... | 2.539216 | 204 |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
input:
messages_filepath: The path of messages dataset.
categories_filepath: The path of categories dataset.
output:
df: The merged dataset
'''
disastermessages = pd.read_csv('disaster_messages.csv')
disastermessages.head()
# load categories dataset
disastercategories = pd.read_csv('disaster_categories.csv')
disastercategories.head()
df = pd.merge(disastermessages, disastercategories, left_on='id', right_on='id', how='outer')
return df
def clean_data(df):
'''
input:
df: The merged dataset in previous step.
output:
df: Dataset after cleaning.
'''
disastercategories = df.categories.str.split(';', expand = True)
# select the first row of the categories dataframe
row = disastercategories.iloc[0,:]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
disastercategory_colnames = row.apply(lambda x:x[:-2])
print(disastercategory_colnames)
disastercategories.columns = category_colnames
for column in disastercategories:
# set each value to be the last character of the string
disastercategories[column] = disastercategories[column].str[-1]
# convert column from string to numeric
disastercategories[column] = disastercategories[column].astype(np.int)
disastercategories.head()
df.drop('categories', axis = 1, inplace = True)
df = pd.concat([df, categories], axis = 1)
# drop the original categories column from `df`
df = df.drop('categories',axis=1)
df.head()
# check number of duplicates
print('Number of duplicated rows: {} out of {} samples'.format(df.duplicated().sum(),df.shape[0]))
df.drop_duplicates(subset = 'id', inplace = True)
return df
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
198,
4299,
3440,
62,
7890,
7,
37348,
1095,
62,
7753,
6978,
11,
9376,
62,
7753,
6978,
2599,
198,
220,
220,
220,
705,
7061,
... | 2.785714 | 896 |
import torch
import string
charrnn = torch.jit.load('CharRNN_pipeline.pt')
all_characters = string.printable
n_characters = len(all_characters)
hidden_size = 300
n_layers = 2
batch_size = 1
prediction_seq_length = 200
primer = torch.tensor([5]).to(dtype=torch.long)
hidden = torch.zeros(n_layers, batch_size, hidden_size)
output = charrnn(primer, hidden)
print(int2str(output))
| [
11748,
28034,
198,
11748,
4731,
198,
198,
354,
3258,
20471,
796,
28034,
13,
45051,
13,
2220,
10786,
12441,
49,
6144,
62,
79,
541,
4470,
13,
457,
11537,
198,
198,
439,
62,
10641,
19858,
796,
4731,
13,
4798,
540,
198,
77,
62,
10641,
1... | 2.641379 | 145 |
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.template import loader, Context
from django.http import HttpResponse, HttpResponseRedirect
from travels.models import *
from favourite.models import *
from accounts.models import *
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http import Http404
import time
from django.core.files.base import ContentFile
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.core.mail import send_mail
from django.utils import timezone
import datetime
from django.shortcuts import render_to_response,get_object_or_404
# Create your views here.
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
28243,
1330,
40213,
11,
30532,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
... | 3.528634 | 227 |
from Crypto.Cipher import AES | [
6738,
36579,
13,
34,
10803,
1330,
34329
] | 4.142857 | 7 |
from localground.apps.site.api.serializers.base_serializer import \
BaseSerializer, NamedSerializerMixin, ProjectSerializerMixin
from localground.apps.site.api.serializers.field_serializer import \
FieldSerializer
from django.conf import settings
from rest_framework import serializers
from localground.apps.site import models
| [
6738,
1957,
2833,
13,
18211,
13,
15654,
13,
15042,
13,
46911,
11341,
13,
8692,
62,
46911,
7509,
1330,
3467,
198,
220,
220,
220,
7308,
32634,
7509,
11,
34441,
32634,
7509,
35608,
259,
11,
4935,
32634,
7509,
35608,
259,
198,
6738,
1957,
... | 3.623656 | 93 |
# -*- coding: UTF-8 -*-
from .niux2_lazyload_helper import *
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
8461,
2821,
17,
62,
75,
12582,
2220,
62,
2978,
525,
1330,
1635,
628
] | 2.172414 | 29 |
from django.urls import path
from . import views
app_name = 'account'
urlpatterns = [
path('two_factor/disable/', views.disableTwoFactor, name='disable-2fa'),
path('logout/', views.logoutView, name='logout'),
path('manage/', views.manage, name='manage'),
path('sign-up/', views.signup, name='signup'),
path('revert-email-change/<str:revertcode>/', views.revertEmail, name='revert-email'),
path('verify-email/<str:verificationcode>/', views.verifyEmail, name='verify-email'),
path('support/', views.supportHome, name='support-home')
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
23317,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
11545,
62,
31412,
14,
40223,
14,
3256,
500... | 2.719807 | 207 |
import unittest
from powerpy.pattern.creational.singleton import Singleton, SingletonMeta
@Singleton
@Singleton
| [
11748,
555,
715,
395,
198,
198,
6738,
1176,
9078,
13,
33279,
13,
7513,
864,
13,
12215,
10565,
1330,
5573,
10565,
11,
5573,
10565,
48526,
628,
198,
31,
29974,
10565,
628,
198,
31,
29974,
10565,
628,
628,
198
] | 3.27027 | 37 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from .config import ClientConfig
from .notebook import Note, Paragraph
import time
import logging
class ZeppelinClient:
"""
Low leve of Zeppelin SDK, this is used to interact with Zeppelin in note/paragraph abstraction layer.
"""
def get_version(self):
"""
Return Zeppelin version
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/version")
self._check_response(resp)
return resp.json()['body']['version']
def login(self, user_name, password, knox_sso = None):
"""
Login to Zeppelin, use knox_sso if it is provided.
:param user_name:
:param password:
:param knox_sso:
:return:
"""
if knox_sso:
self.session.auth = (user_name, password)
resp = self.session.get(knox_sso + "?originalUrl=" + self.zeppelin_rest_url, verify=False)
if resp.status_code != 200:
raise Exception("Knox SSO login fails, status: {}, status_text: {}" \
.format(resp.status_code, resp.text))
resp = self.session.get(self.zeppelin_rest_url + "/api/security/ticket")
if resp.status_code != 200:
raise Exception("Fail to get ticket after Knox SSO, status: {}, status_text: {}" \
.format(resp.status_code, resp.text))
else:
resp = self.session.post(self.zeppelin_rest_url + "/api/login",
data = {'userName': user_name, 'password': password})
self._check_response(resp)
def create_note(self, note_path, default_interpreter_group = 'spark'):
"""
Create a new note with give note_path and default_interpreter_group
:param note_path:
:param default_interpreter_group:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook",
json = {'name' : note_path, 'defaultInterpreterGroup': default_interpreter_group})
self._check_response(resp)
return resp.json()['body']
def delete_note(self, note_id):
"""
Delete a note with give note_id
:param note_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/" + note_id)
self._check_response(resp)
def query_note_result(self, note_id):
"""
Query note result via Zeppelin rest api and convert the returned json to NoteResult
:param note_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id)
self._check_response(resp)
note_json = resp.json()['body']
return Note(note_json)
def execute_note(self, note_id, parameters = {}):
"""
Execute give note with parameters, block until note execution is finished.
:param note_id:
:param parameters:
:return:
"""
self.submit_note(note_id, parameters)
return self.wait_until_note_finished(note_id)
def submit_note(self, note_id, parameters = {}):
"""
Execute give note with parameters, return once submission is finished. It is non-blocking api,
won't wait for the completion of note execution.
:param note_id:
:param parameters:
:return:
"""
logging.info("Submitting note: " + note_id + " with parameters: " + str(parameters))
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/job/" + note_id,
params = {'blocking': 'false', 'isolated': 'true'},
json = {'params': parameters})
self._check_response(resp)
return self.query_note_result(note_id)
def wait_until_note_finished(self, note_id):
"""
Wait until note execution is finished.
:param note_id:
:return:
"""
while True:
note_result = self.query_note_result(note_id)
logging.info("note_is_running: " + str(note_result.is_running) + ", jobURL: " +
str(list(map(lambda p: p.job_urls, filter(lambda p: p.job_urls, note_result.paragraphs)))))
if not note_result.is_running:
return note_result
time.sleep(self.client_config.get_query_interval())
def get_note(self, note_id, reload = False):
"""
Get specified note.
:param note_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id)
self._check_response(resp)
return resp.json()['body']
def clone_note(self, note_id, dest_note_path):
"""
Clone specific note to another location.
:param note_id:
:param dest_note_path:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id, json = {'name': dest_note_path})
self._check_response(resp)
return resp.json()['body']
def add_paragraph(self, note_id, title, text):
"""
Add paragraph to specific note at the last paragraph
:param note_id:
:param title:
:param text:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph", json = {'title': title, 'text': text})
self._check_response(resp)
return resp.json()['body']
def update_paragraph(self, note_id, paragraph_id, title, text):
"""
update specified paragraph with given title and text
:param note_id:
:param paragraph_id:
:param title:
:param text:
:return:
"""
resp = self.session.put(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph/" + paragraph_id,
json = {'title' : title, 'text' : text})
self._check_response(resp)
def execute_paragraph(self, note_id, paragraph_id, parameters = {}, session_id = "", isolated = False):
"""
Blocking api, execute specified paragraph with given parameters
:param note_id:
:param paragraph_id:
:param parameters:
:param session_id:
:param isolated:
:return:
"""
self.submit_paragraph(note_id, paragraph_id, parameters, session_id, isolated)
return self.wait_until_paragraph_finished(note_id, paragraph_id)
def submit_paragraph(self, note_id, paragraph_id, parameters = {}, session_id = "", isolated = False):
"""
Non-blocking api, execute specified paragraph with given parameters.
:param note_id:
:param paragraph_id:
:param parameters:
:param session_id:
:param isolated:
:return:
"""
logging.info("Submitting paragraph: " + paragraph_id + " with parameters: " + str(parameters))
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/job/" + note_id + "/" + paragraph_id,
params = {'sessionId': session_id, 'isolated': isolated},
json = {'params': parameters})
self._check_response(resp)
return self.query_paragraph_result(note_id, paragraph_id)
def query_paragraph_result(self, note_id, paragraph_id):
"""
Query specified paragraph result.
:param note_id:
:param paragraph_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph/" + paragraph_id)
self._check_response(resp)
return Paragraph(resp.json()['body'])
def wait_until_paragraph_finished(self, note_id, paragraph_id):
"""
Wait until specified paragraph execution is finished
:param note_id:
:param paragraph_id:
:return:
"""
while True:
paragraph_result = self.query_paragraph_result(note_id, paragraph_id)
logging.info("paragraph_status: " + str(paragraph_result.status) + ", jobURL: " + str(paragraph_result.job_urls))
if paragraph_result.is_completed():
return paragraph_result
time.sleep(self.client_config.get_query_interval())
def cancel_paragraph(self, note_id, paragraph_id):
"""
Cancel specified paragraph execution.
:param note_id:
:param paragraph_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/job/" + note_id + "/" + paragraph_id)
self._check_response(resp)
def cancel_note(self, note_id):
"""
Cancel specified note execution.
:param note_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/job/" + note_id)
self._check_response(resp)
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/job/" + note_id)
self._check_response(resp)
def new_session(self, interpreter):
"""
Create new ZSession for specified interpreter
:param interpreter:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/session",
params = {'interpreter': interpreter})
self._check_response(resp)
return SessionInfo(resp.json()['body'])
def stop_session(self, session_id):
"""
Stop specified ZSession
:param session_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/session/" + session_id)
self._check_response(resp)
def get_session(self, session_id):
"""
Get SessionInfo of specified session_id
:param session_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/session/" + session_id)
if resp.status_code == 404:
raise Exception("No such session: " + session_id)
self._check_response(resp)
return SessionInfo(resp.json()['body'])
def next_session_paragraph(self, note_id, max_statement):
"""
Create a new paragraph for specified session.
:param note_id:
:param max_statement:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id +"/paragraph/next",
params= {'maxParagraph' : max_statement})
self._check_response(resp)
return resp.json()['message']
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
... | 2.318145 | 4,938 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
# Demonstration script for Python list basics: creation, comparison,
# indexing, slicing, reversal, deletion and rebinding.
L1 = ["John", 102, "USA",23.2,34,'pooja','xyz',80]
L2 = [1, 2, 3, 4, 5, 6,6,2,3,8]
print(type(L1))
print(type(L2))  # both report <class 'list'>
print(L1)  # print all elements of the list
print(L2)
print(id(L1))  # id() gives the object's identity (its address in CPython)
print(id(L2))
print(L1==L2)  # element-wise comparison: True only if same elements at same indexes
print("name : %s\t id : %d\tper : %f"%(L1[0],L1[1],L1[3]))  # access elements via %-formatting
print(L2[0],L2[1],L2[2])  # access elements by index
print(L1[1:4])  # slice: elements at indexes 1, 2, 3
print(L1[:])  # full copy of the list
print(L1[0:])
print(L1[:7])
print(L1[0:7:2])  # every second element from index 0 up to (not including) 7
print(L1[-1],L1[-3])  # negative indexes count from the end
print(L1[0:-4])
print(L1[-6:-1])
print(L1[-1:-4])  # empty: start is after stop with a positive step
L2.reverse()  # in-place reversal via the reverse() method
print(L2)
print(L1[::-1])  # reversed copy via slicing with a negative step
y=range(6)  # range object yields 0..5 lazily
for x in y:
    print(x)
print(type(y))
print(id(L1))
del L1[2]  # remove the element at index 2; identity is unchanged
print(L1)
print(id(L1))
L1=[3,4,5,6,7,78]  # rebinding creates a brand-new list object (new id)
print(id(L1))
print(L1)
| [
201,
198,
43,
16,
796,
14631,
7554,
1600,
15143,
11,
366,
14053,
1600,
1954,
13,
17,
11,
2682,
4032,
79,
2238,
6592,
41707,
5431,
89,
3256,
1795,
60,
220,
220,
220,
220,
201,
198,
43,
17,
796,
685,
16,
11,
362,
11,
513,
11,
604,... | 1.72028 | 715 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.forms import BaseModelFormSet
from .models import Jurisdiction, Entity, Location, Profile
from schedule.models import Event
# Bind the project's custom ModelAdmin classes to their models.
admin.site.register(Jurisdiction, JurisdictionAdmin)
# Replace django-schedule's stock Event admin with the project's version.
admin.site.unregister(Event)
admin.site.register(Event, EventAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Entity, EntityAdmin)
# Swap Django's default User admin for the customized one.
admin.site.unregister(User)
admin.site.register(User, CustomeUserAdmin)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
... | 3.295699 | 186 |
import typing

# BUG FIX: guard the entry point so importing this module does not run
# main() as a side effect; behaviour when executed as a script is unchanged.
if __name__ == "__main__":
    main()
| [
11748,
19720,
201,
198,
201,
198,
201,
198,
201,
198,
12417,
3419,
198
] | 2.153846 | 13 |
# pylint: disable=missing-docstring, redefined-outer-name
from unittest import TestCase
from unittest.mock import patch
import os
import warnings
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from faker import Faker
from kedro.framework.session import get_current_session
from tensorflow import keras
import pytest
from candystore import CandyStore
from tests.fixtures.fake_estimator import FakeEstimatorData, create_fake_pipeline
from augury.pipelines.match import nodes as match
from augury.pipelines.nodes import common
from augury.sklearn.models import EloRegressor, KerasClassifier
from augury.sklearn.preprocessing import (
CorrelationSelector,
TeammatchToMatchConverter,
ColumnDropper,
DataFrameConverter,
MATCH_INDEX_COLS,
)
from augury.sklearn.metrics import match_accuracy_scorer, bits_scorer, bits_objective
from augury.sklearn.model_selection import year_cv_split
from augury.settings import BASE_DIR
FAKE = Faker()
ROW_COUNT = 10
N_FAKE_CATS = 6
@pytest.fixture
@pytest.fixture
# We use FakeEstimator to generate predictions for #test_bits_scorer,
# and we don't care if it doesn't converge
@pytest.mark.filterwarnings("ignore:lbfgs failed to converge")
@pytest.mark.filterwarnings("ignore:lbfgs failed to converge")
@pytest.mark.parametrize("invalid_preds", [np.ones(5), np.zeros(5)])
| [
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
11,
2266,
18156,
12,
39605,
12,
3672,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
11748,
28686,
198,
11748,
1460... | 3.010846 | 461 |
import re
from nbconvert.preprocessors import Preprocessor
| [
11748,
302,
198,
198,
6738,
299,
65,
1102,
1851,
13,
3866,
14681,
669,
1330,
3771,
41341,
198
] | 3.529412 | 17 |
import json
import requests
| [
11748,
33918,
198,
11748,
7007,
628
] | 4.833333 | 6 |
import unittest
import matplotlib
matplotlib.use('Agg')
# Probe for smt's compiled surrogate models; they are optional, so fall
# back gracefully when the package (or its extensions) is missing.
try:
    from smt.surrogate_models import RMTB, RMTC
    compiled_available = True
# BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit and
# unrelated errors; only a failed import should disable the feature.
except ImportError:
    compiled_available = False
| [
11748,
555,
715,
395,
198,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
895,
83,
13,
11793,
3828,
378,
62,
27530,
1330,
371,
13752,
33,
11,
29820,
... | 2.784615 | 65 |
# Eulerian path.
# The nodes which have odd degrees (int and out) are the entrance or exit.
# In your example it's JFK and A.The nodes which have odd degrees (int and out) are the entrance or exit.
# If there are no nodes have odd degrees, we could follow any path without stuck until hit the last exit node
# The reason we got stuck is because that we hit the exit
# https://leetcode.com/problems/reconstruct-itinerary/discuss/78768/Short-Ruby-Python-Java-C++?page=5
# print(sorted([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]))
# [['ATL', 'JFK'], ['ATL', 'SFO'], ['JFK', 'ATL'], ['JFK', 'SFO'], ['SFO', 'ATL']]
# print(sorted([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]])[::-1])
# [['SFO', 'ATL'], ['JFK', 'SFO'], ['JFK', 'ATL'], ['ATL', 'SFO'], ['ATL', 'JFK']]
import collections
# Lexicographic order: when the first letters are equal, sort by the second letter, and so on.
# Exercise the solution on the sample itinerary from the LeetCode problem.
s = Solution()
s.findItinerary([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]])
2,
412,
18173,
666,
3108,
13,
198,
2,
383,
13760,
543,
423,
5629,
7370,
357,
600,
290,
503,
8,
389,
262,
10384,
393,
8420,
13,
198,
2,
554,
534,
1672,
340,
338,
35216,
290,
317,
13,
464,
13760,
543,
423,
5629,
7370,
357,
600,
29... | 2.28673 | 422 |
#!/usr/bin/env python
# Paralog filtering for subtractive analysis
'''This script works on CD-HIT output.
CD-Hit is an web server for homolog sequence clustering. One output file of
CD-Hit contains paralog sequence id in cluster. This script mine that output file,
find the unique sequence, and filter out those sequences from query in file.
Requriements:
Python 2.7
Running from commandline:
`python paralog_filtering.py input.fas.1.clstr.sorted CDHit_input.fasta`
Example:
CDHit analysis has several outputs. This file work on files with *.fas.1.clstr.sorted
extension. This output from CDHit report a cluster of fasta ids from fasta file
submited to CDHit. This code takes one sequence id from each paralog cluster, and then
extract the sequence of the selected id from fasta file submitted to CDHit.
The following code mines on CDHit_input.fasta using the sequence ids from
input.fas.1.clstr.sorted file and generate 1.no_paralogs.fasta file as output.
`python subtractive_analysis.py input.fas.1.clstr.sorted CDHit_input.fasta`
Arafat Rahman
July 15 2019
arafat@nstu.edu.bd
'''
from Bio import SeqIO
import sys
######################################
# Pick single sequence from a cluster#
######################################
def unique_in_cluster(filein):
    '''
    Pick one representative sequence id from each CD-HIT cluster.

    Input: sorted cluster file of CD-HIT output
    Algo: read the raw text, split it on '>Cluster', and extract the id of
          the representative sequence (the entry marked with '*') per cluster
    Output: list of representative sequence ids
    '''
    # BUG FIX: the original opened the file without ever closing it; a
    # context manager releases the handle deterministically.
    with open(filein) as cdhit_cluster:
        raw_data = cdhit_cluster.read()
    # List to hold cluster representative sequence ids
    clust_rep = []
    for chunk in raw_data.split('>Cluster'):
        clust_rep.append(chunk.split('*')[0].split('>')[-1].split('...')[0])
    # The text before the first '>Cluster' marker yields an empty entry.
    if '' in clust_rep:
        clust_rep.remove('')
    return clust_rep
# Filter sequences
def filter_seqs(file_in, seq_list):
    '''Collect the records of a fasta file whose ids appear in seq_list.

    Given a fasta file and a list of fasta descriptions, return a new list
    containing every sequence record whose id matches an entry of the list.'''
    return [record
            for record in SeqIO.parse(file_in, 'fasta')
            if record.id in seq_list]
if __name__ == '__main__':
    # argv[1]: sorted CD-HIT cluster file; argv[2]: fasta file given to CD-HIT.
    representatives = unique_in_cluster(sys.argv[1])
    fasta_in = sys.argv[2]
    selected_records = filter_seqs(fasta_in, representatives)
    SeqIO.write(selected_records, open('../output/1.no_paralogs.fasta', 'w'), 'fasta')
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
2547,
11794,
25431,
329,
34128,
425,
3781,
198,
198,
7061,
6,
1212,
4226,
2499,
319,
6458,
12,
39,
2043,
5072,
13,
198,
8610,
12,
17889,
318,
281,
3992,
4382,
329,
3488,
928,... | 2.814238 | 899 |
from mock import patch
from organize.actions import Shell
from organize.utils import Path
| [
6738,
15290,
1330,
8529,
198,
198,
6738,
16481,
13,
4658,
1330,
17537,
198,
6738,
16481,
13,
26791,
1330,
10644,
628,
628
] | 4.47619 | 21 |
from django.shortcuts import render
from api.models import *
from api.serializers import OwnerSerializer
from django.http import Http404, JsonResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.permissions import IsAuthenticated, DjangoModelPermissions
from rest_framework_simplejwt.authentication import JWTAuthentication
#encryptografia
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group
from api.custom_permissions import CustomPermissionsOwner
class RegisterOwner(APIView):
    """
    View to register company owners.

    No authentication required.
    """
class OwnerDetail(APIView):
    """
    Get, put and delete an Owner by pk.

    * Requires user permissions and authentication.
    """
    permission_classes = (IsAuthenticated, DjangoModelPermissions, CustomPermissionsOwner,)
    authentication_classes = (JWTAuthentication,)
    def get_queryset(self):
        """
        Queryset used when checking the user's model permissions.
        """
        # DjangoModelPermissions derives the required permissions from this
        # queryset's model.
        return Owner.objects.all()
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
40391,
13,
27530,
1330,
1635,
198,
6738,
40391,
13,
46911,
11341,
1330,
23853,
32634,
7509,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
11,
449,
1559,
31077,
198... | 2.973039 | 408 |
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from sklearn import metrics
from sklearn.manifold import TSNE
@torch.no_grad()
def predict(model, dataloader):
    """Run ``model`` over ``dataloader`` in eval mode without gradients.

    Args:
        model: classifier whose output has at least two columns; column 1
            is treated as the positive-class logit.
        dataloader: iterable yielding ``(inputs, label)`` batches.

    Returns:
        Tuple of numpy arrays ``(labels, probs)``: the concatenated true
        labels and the sigmoid of the positive-class logits.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    labels = []
    probs = []
    # FIX: the original used enumerate() but never read the batch index.
    for inputs, label in dataloader:
        inputs = inputs.to(device)
        label = label.to(device)
        labels.append(label)
        outputs = model(inputs)
        probs.append(torch.sigmoid(outputs[:, 1]))

    labels = torch.cat(labels).cpu().numpy()
    probs = torch.cat(probs).cpu().numpy()
    return labels, probs
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
1341,
35720,
13,
805,
361,
727,
... | 2.557692 | 312 |
import simpy
import sys
sys.path
import random
import numpy as np
import torch
import matplotlib.pyplot as plt
from tabulate import tabulate
import sequencing
class machine:
    '''
    1. downwards are functions that perform the simulation
    including production, starvation and breakdown
    '''
    # this function should be called after __init__ to avoid deadlock
    # after the creation of all machines and initial jobs
    # the initial jobs are allocated through job_creation module
    # The main function, simulates the production
    '''
    2. downwards are functions called before and after each operation
    to maintain some record, and transit the finished job to next workcenter or out of system
    '''
    # update lots information that will be used for calculating the rewards
    # transfer unfinished job to next workcenter, or delete finished job from record
    # and update the data of queuing jobs, EMA_tardiness etc.
    '''
    3. downwards are functions that related to information update and exchange
    especially the information that will be used by other agents on shop floor
    '''
    # call this function after the initialization, used for once
    # call this function before sequencing decision / after operation / after breakdown and starvation
    # available time is a bit tricky, jobs may come when the operation is ongoing
    # or when the machine is starving (availble time is earlier than now)
    # update the information of progression, eralized and expected tardiness to JOB_CREATOR !!!
    # update the information of the job that being processed to JOB_CREATOR !!!
    # MUST !!! call this after operation otherwise the record persists and lead to error
    # give ou the information related to sequencing decision
    '''
    4. downwards are functions related to the calculation of reward and construction of state
    only be called if the sequencing learning mode is activated
    the options of reward function are listed at bottom
    '''

    def _assign_queue_reward(self, slack_denominator, scale, square):
        """Shared body of the get_reward* variants.

        Computes one reward per machine on the finished job's path from the
        job's recorded waiting times and hands each reward back to the
        job_creator's incomplete experience at the matching decision point.

        :param slack_denominator: softening constant of the criticality
            factor ``1 - slack / (|slack| + denominator)``; ``None``
            disables the criticality weighting entirely.
        :param scale: divisor applied to the restructured waiting times
            before clipping to [0, 1].
        :param square: if True, square the clipped ratio; otherwise use it
            linearly.
        """
        # 1. retrieve the production record of the job
        job_record = self.job_creator.production_record[self.job_idx]
        path = job_record[1]
        queued_time = np.array(job_record[2])
        exposure = 0.2  # how much of waiting at succeeding machine is exposed to the agent
        # 2. calculate the reward for each agent: machines on the path are
        # punished only when the job is tardy and waiting time exists
        if self.tardiness and queued_time.sum():
            # shift a share of each machine's waiting time onto its predecessor
            restructured_wait = queued_time * (1 - exposure) + np.append(np.delete(queued_time * exposure, 0), 0)
            if slack_denominator is not None:
                slack = np.array(job_record[3])
                critical_factor = 1 - slack / (np.absolute(slack) + slack_denominator)
                restructured_wait *= critical_factor
            ratio = restructured_wait / scale
            if square:
                reward = - np.square(ratio).clip(0, 1)
            else:
                reward = - ratio.clip(0, 1)
            reward = torch.FloatTensor(reward)
        else:
            # no tardiness or no waiting: zero reward for every machine
            reward = torch.zeros(len(queued_time), dtype=torch.float)
        # 3. assign the reward to the incomplete experiences, making them
        # ready to be learned
        for i, m_idx in enumerate(path):
            r_t = reward[i]
            decision_point = job_record[0][i][0]
            try:
                self.job_creator.complete_experience(m_idx, decision_point, r_t)
            except:
                # best effort: the experience may already be completed or absent
                pass

    # testing reward function
    def get_reward1(self):
        """Squared penalty; criticality softened by 100; scaled by 128."""
        self._assign_queue_reward(slack_denominator=100, scale=128, square=True)

    def get_reward2(self):
        """Linear penalty; criticality softened by 80; scaled by 128."""
        self._assign_queue_reward(slack_denominator=80, scale=128, square=False)

    def get_reward3(self):
        """Squared penalty; criticality softened by 80; scaled by 128."""
        self._assign_queue_reward(slack_denominator=80, scale=128, square=True)

    def get_reward4(self):
        """Squared penalty; criticality softened by 90; scaled by 256."""
        self._assign_queue_reward(slack_denominator=90, scale=256, square=True)

    def get_reward5(self):
        """Linear penalty; criticality softened by 90; scaled by 256."""
        self._assign_queue_reward(slack_denominator=90, scale=256, square=False)

    def get_reward6(self):
        """Squared penalty without criticality weighting; scaled by 100."""
        self._assign_queue_reward(slack_denominator=None, scale=100, square=True)

    def get_reward7(self):
        """Identical to get_reward6 in the original code; the separate name
        is kept for backward compatibility with existing configurations."""
        self._assign_queue_reward(slack_denominator=None, scale=100, square=True)

    def get_reward8(self):
        """Squared penalty; criticality softened by 200; scaled by 128."""
        self._assign_queue_reward(slack_denominator=200, scale=128, square=True)
| [
11748,
985,
9078,
201,
198,
11748,
25064,
201,
198,
17597,
13,
6978,
201,
198,
11748,
4738,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
... | 2.288805 | 5,592 |
#load libraries
# to check unit root in time series
import datetime
from statsmodels.tsa.stattools import adfuller
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.model_selection import TimeSeriesSplit
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
from matplotlib import pyplot as mp
import seaborn as sns
import os
# Load the FRED-MD style macro dataset and align it with recession labels.
bigmacro = pd.read_csv("Macroeconomic_Variables.csv")
bigmacro = bigmacro.rename(columns={'sasdate': 'Date'})
Recession_periods = pd.read_csv('Recession_Periods.csv')
# assumes Recession_Periods.csv rows align 1:1 with the macro rows — TODO confirm
bigmacro.insert(loc=1, column="Regime",
                value=Recession_periods['Regime'].values)
# Drop columns with more than 10 missing observations.
missing_colnames = []
for i in bigmacro.drop(['Date', 'Regime'], axis=1):
    observations = len(bigmacro)-bigmacro[i].count()
    if (observations > 10):
        print(i+':'+str(observations))
        missing_colnames.append(i)
bigmacro = bigmacro.drop(labels=missing_colnames, axis=1)
# Drop any remaining rows with missing values.
bigmacro = bigmacro.dropna(axis=0)
# Add lagged copies (3, 6, 9, 12, 18 months) of every indicator.
for col in bigmacro.drop(['Date', 'Regime'], axis=1):
    for n in [3, 6, 9, 12, 18]:
        bigmacro['{} {}M lag'.format(col, n)] = bigmacro[col].shift(
            n).ffill().values
# Shift the regime label back one month: predict next month's regime.
bigmacro["Regime"] = bigmacro["Regime"].shift(-1)
bigmacro = bigmacro.dropna(axis=0)
# Difference any series that fails the ADF unit-root test at the 1% level.
threshold = 0.01  # significance level
for column in bigmacro.drop(['Date', 'Regime'], axis=1):
    result = adfuller(bigmacro[column])
    if result[1] > threshold:
        bigmacro[column] = bigmacro[column].diff()
bigmacro = bigmacro.dropna(axis=0)
# Standardize every feature to zero mean / unit variance.
features = bigmacro.drop(['Date', 'Regime'], axis=1)
col_names = features.columns
scaler = StandardScaler()
scaler.fit(features)
standardized_features = scaler.transform(features)
standardized_features.shape  # no-op notebook leftover; kept for parity
# Rebuild a labelled frame: Label is 1.0 for 'Normal', 0.0 for recession.
df = pd.DataFrame(data=standardized_features, columns=col_names)
df.insert(loc=0, column="Date", value=bigmacro['Date'].values)
df.insert(loc=1, column='Regime', value=bigmacro['Regime'].values)
Label = df["Regime"].apply(lambda regime: 1. if regime == 'Normal' else 0.)
df.insert(loc=2, column="Label", value=Label.values)
# Chronological split at December 1996: earlier rows train, later validate.
df_idx = df[df.Date == '12/1/96'].index[0]
df_targets = df['Label'].values
df_features = df.drop(['Regime', 'Date', 'Label'], axis=1)
df_training_features = df.iloc[:df_idx, :].drop(['Regime', 'Date', 'Label'], axis=1)
df_validation_features = df.iloc[df_idx:, :].drop(['Regime', 'Date', 'Label'], axis=1)
df_training_targets = df['Label'].values
df_training_targets = df_training_targets[:df_idx]
df_validation_targets = df['Label'].values
df_validation_targets = df_validation_targets[df_idx:]
print(len(df_training_features), len(df_training_targets), len(df_targets))
print(len(df_validation_features), len(df_validation_targets), len(df_features))
# --- L1-regularised feature selection --------------------------------------
scoring = "roc_auc"
kfold = model_selection.TimeSeriesSplit(n_splits=3)
seed = 8
# Create regularization hyperparameter space (C is the inverse penalty strength)
C = np.reciprocal([0.00000001, 0.00000005, 0.0000001, 0.0000005, 0.000001, 0.000005, 0.00001, 0.00005,
                   0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100, 500, 1000, 5000])
# Create hyperparameter options
hyperparameters = dict(C=C)
# BUG FIX: scikit-learn >= 0.22 defaults to the 'lbfgs' solver, which does
# not support an L1 penalty; 'liblinear' restores the intended behaviour.
model = LogisticRegression(max_iter=10000, penalty='l1', solver='liblinear')
LR_penalty = model_selection.GridSearchCV(estimator=model, param_grid=hyperparameters,
                                          cv=kfold, scoring=scoring).fit(X=df_features,
                                                                         y=df_targets).best_estimator_
# (removed a bare `LR_penalty` expression statement — a notebook echo with no effect)
# Fit an L1 model and keep only the features with non-zero coefficients.
X = df_features
y = df_targets
lr_l1 = LogisticRegression(C=0.1, max_iter=10000, penalty="l1", solver="liblinear").fit(X, y)
model = SelectFromModel(lr_l1, prefit=True)
feature_idx = model.get_support()
feature_name = X.columns[feature_idx]
X_new = model.transform(X)
df_2 = df[feature_name]
df_2.insert(loc=0, column="Date", value=df['Date'].values)
df_2.insert(loc=1, column="Regime", value=df['Regime'].values)
df_2.insert(loc=2, column="Label", value=df['Label'].values)
# Correlation heatmap of the selected features.
corr = df_2.drop(['Date', 'Regime', 'Label'], axis=1).corr()
plt.figure(figsize=(10, 8))
# BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the documented replacement.
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
            cmap=sns.diverging_palette(220, 10, as_cmap=True), square=True)
# Training Algorithms on Training Dataset (using only the selected features).
df = df_2
# Re-do the chronological split on the reduced frame.
df_idx = df[df.Date == '12/1/96'].index[0]
df_targets = df['Label'].values
df_features = df.drop(['Regime', 'Date', 'Label'], axis=1)
df_training_features = df.iloc[:df_idx, :].drop(['Regime', 'Date', 'Label'], axis=1)
df_validation_features = df.iloc[df_idx:, :].drop(['Regime', 'Date', 'Label'], axis=1)
df_training_targets = df['Label'].values
df_training_targets = df_training_targets[:df_idx]
df_validation_targets = df['Label'].values
df_validation_targets = df_validation_targets[df_idx:]
seed = 8
scoring = 'roc_auc'
kfold = model_selection.TimeSeriesSplit(n_splits=3)
# Candidate classifiers compared by cross-validated ROC-AUC.
models = []
models.append(('LR', LogisticRegression(C=1e09)))
# BUG FIX: the default 'lbfgs' solver rejects penalty='l1'; use 'liblinear'.
models.append(('LR_L1', LogisticRegression(penalty='l1', solver='liblinear')))
models.append(('LR_L2', LogisticRegression(penalty='l2')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('GB', GradientBoostingClassifier()))
models.append(('ABC', AdaBoostClassifier()))
models.append(('RF', RandomForestClassifier()))
models.append(('XGB', xgb.XGBClassifier()))
# Cross-validate each candidate, fit it on the training split, and overlay
# its training ROC curve on a shared plot.
results = []
names = []
lb = preprocessing.LabelBinarizer()
for name, model in models:
    cv_results = model_selection.cross_val_score(estimator=model, X=df_training_features,
                                                 y=lb.fit_transform(df_training_targets), cv=kfold, scoring=scoring)
    model.fit(df_training_features, df_training_targets)  # train the model
    fpr, tpr, thresholds = metrics.roc_curve(
        df_training_targets, model.predict_proba(df_training_features)[:, 1])
    # NOTE(review): this rebinds the name `auc` imported from sklearn.metrics,
    # and scores hard predictions rather than probabilities — verify intent.
    auc = metrics.roc_auc_score(
        df_training_targets, model.predict(df_training_features))
    plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % (name, auc))
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Reference diagonal (random classifier) and axis cosmetics.
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('1-Specificity(False Positive Rate)')
plt.ylabel('Sensitivity(True Positive Rate)')
plt.title('Receiver Operating Characteristic')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# Box plot of the per-model cross-validation score distributions.
fig = plt.figure()
fig.suptitle('Algorithm Comparison based on Cross Validation Scores')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# Evaluate Performances of the Algorithms on Validation Dataset
model = LogisticRegression(C=1e09)  # C=1e9: very weak regularization (C is the inverse penalty strength)
LR = model.fit(df_training_features, df_training_targets)
training_predictions = LR.predict(df_training_features)
prob_predictions = LR.predict_proba(df_training_features)
# Concatenate training and validation probabilities to cover the full sample.
prob_predictions = np.append(prob_predictions, LR.predict_proba(df_validation_features), axis=0)
# Define the NBER recession periods to shade on the plot.
rec_spans = []
#rec_spans.append([datetime.datetime(1957,8,1), datetime.datetime(1958,4,1)])
rec_spans.append([datetime.datetime(1960, 4, 1),datetime.datetime(1961, 2, 1)])
rec_spans.append([datetime.datetime(1969, 12, 1),datetime.datetime(1970, 11, 1)])
rec_spans.append([datetime.datetime(1973, 11, 1),datetime.datetime(1975, 3, 1)])
rec_spans.append([datetime.datetime(1980, 1, 1),datetime.datetime(1980, 6, 1)])
rec_spans.append([datetime.datetime(1981, 7, 1),datetime.datetime(1982, 10, 1)])
rec_spans.append([datetime.datetime(1990, 7, 1),datetime.datetime(1991, 2, 1)])
rec_spans.append([datetime.datetime(2001, 3, 1),datetime.datetime(2001, 10, 1)])
rec_spans.append([datetime.datetime(2007, 12,1), datetime.datetime(2009,5,1)])
# Monthly axis matching the full (training + validation) sample — assumes it
# lines up with prob_predictions row count; TODO confirm.
sample_range = pd.date_range(start='9/1/1960', end='9/1/2018', freq='MS')
plt.figure(figsize=(20, 5))
# Column 0 is the predicted probability of the recession class (Label 0).
plt.plot(sample_range.to_series().values, prob_predictions[:, 0])
for i in range(len(rec_spans)):
    plt.axvspan(rec_spans[i][0], rec_spans[i][len(rec_spans[i]) - 1], alpha=0.25, color='grey')
plt.axhline(y=0.5, color='r', ls='dashed', alpha=0.5)
plt.title('Recession Prediction Probabalities with Logistic Regression')
mp.savefig('plot1.png', bbox_inches='tight')
plt.show()
# Create regularization penalty space
penalty = ['l1', 'l2']
# Create regularization hyperparameter space
C = np.reciprocal([0.00000001, 0.00000005, 0.0000001, 0.0000005, 0.000001, 0.000005, 0.00001, 0.00005,
0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100, 500, 1000, 5000])
# Create hyperparameter options
hyperparameters = dict(C=C, penalty=penalty)
model = LogisticRegression(max_iter=10000)
LR_penalty = model_selection.GridSearchCV(estimator=model, param_grid=hyperparameters,
cv=kfold, scoring=scoring).fit(df_training_features,
df_training_targets).best_estimator_
training_predictions = LR_penalty.predict(df_training_features)
prob_predictions = LR_penalty.predict_proba(df_training_features)
prob_predictions = np.append(prob_predictions, LR_penalty.predict_proba(df_validation_features), axis=0)
sample_range = pd.date_range(start='9/1/1960', end='9/1/2018', freq='MS')
plt.figure(figsize=(20, 5))
plt.plot(sample_range.to_series().values, prob_predictions[:, 0])
for i in range(len(rec_spans)):
plt.axvspan(rec_spans[i][0], rec_spans[i][len(rec_spans[i]) - 1], alpha=0.25, color='grey')
plt.axhline(y=0.5, color='r', ls='dashed', alpha=0.5)
plt.title('Recession Prediction Probabalities with Regularized Logistic Regression')
mp.savefig('plot2.png', bbox_inches='tight')
plt.show()
# XGBoosting
xgboost = model_selection.GridSearchCV(estimator=xgb.XGBClassifier(),
param_grid={'booster': ['gbtree']},
scoring=scoring, cv=kfold).fit(df_training_features,
lb.fit_transform(df_training_targets)).best_estimator_
xgboost.fit(df_training_features, df_training_targets)
prob_predictions = xgboost.predict_proba(df_training_features)
prob_predictions = np.append(prob_predictions, xgboost.predict_proba(df_validation_features), axis=0)
sample_range = pd.date_range(start='9/1/1960', end='9/1/2018', freq='MS')
plt.figure(figsize=(20, 5))
plt.plot(sample_range.to_series().values, prob_predictions[:, 0])
for i in range(len(rec_spans)):
plt.axvspan(rec_spans[i][0], rec_spans[i]
[len(rec_spans[i]) - 1], alpha=0.25, color='grey')
plt.axhline(y=0.5, color='r', ls='dashed', alpha=0.5)
plt.title('Recession Prediction Probabalities with XGBoost')
mp.savefig('plot3.png', bbox_inches='tight')
plt.show()
# find feature importances
headers = df.drop(['Regime', 'Label', 'Date'], axis=1).columns.values.tolist()
xgboost_importances = pd.DataFrame(
xgboost.feature_importances_, index=headers, columns=['Relative Importance'])
_ = xgboost_importances.sort_values(
by=['Relative Importance'], ascending=False, inplace=True)
xgboost_importances = xgboost_importances[xgboost_importances['Relative Importance'] > 0].iloc[:20]
# display importances in bar-chart and pie-chart
fig = plt.figure(figsize=(6, 6))
plt.xticks(rotation='90')
plt.barh(y=np.arange(len(xgboost_importances)),
width=xgboost_importances['Relative Importance'], align='center', tick_label=xgboost_importances.index)
plt.gca().invert_yaxis()
mp.savefig('feature_importance.png', bbox_inches='tight')
plt.show()
# ROC AUC - Validation Targets
fpr, tpr, thresholds = metrics.roc_curve(df_validation_targets, LR.predict_proba(df_validation_features)[:, 1])
auc = metrics.roc_auc_score(df_validation_targets, LR.predict(df_validation_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('LR', auc))
fpr, tpr, thresholds = metrics.roc_curve(df_validation_targets, LR_penalty.predict_proba(df_validation_features)[:, 1])
auc = metrics.roc_auc_score(df_validation_targets, LR_penalty.predict(df_validation_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('LR_penalty', auc))
fpr, tpr, thresholds = metrics.roc_curve(df_validation_targets, xgboost.predict_proba(df_validation_features)[:, 1])
auc = metrics.roc_auc_score(df_validation_targets, xgboost.predict(df_validation_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('XGBoost', auc))
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('1-Specificity(False Positive Rate)')
plt.ylabel('Sensitivity(True Positive Rate)')
plt.title('Receiver Operating Characteristic (Validation Data)')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
mp.savefig('ROC1.png', bbox_inches='tight')
plt.show()
# ROC AUC - Actual Targets
fpr, tpr, thresholds = metrics.roc_curve(df_targets, LR.predict_proba(df_features)[:, 1])
auc = metrics.roc_auc_score(df_targets, LR.predict(df_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('LR', auc))
fpr, tpr, thresholds = metrics.roc_curve(df_targets, LR_penalty.predict_proba(df_features)[:, 1])
auc = metrics.roc_auc_score(df_targets, LR_penalty.predict(df_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('LR_penalty', auc))
fpr, tpr, thresholds = metrics.roc_curve(df_targets, xgboost.predict_proba(df_features)[:, 1])
auc = metrics.roc_auc_score(df_targets, xgboost.predict(df_features))
plt.plot(fpr, tpr, label='%s ROC (area = %0.2f)' % ('XGBoost', auc))
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('1-Specificity(False Positive Rate)')
plt.ylabel('Sensitivity(True Positive Rate)')
plt.title('Receiver Operating Characteristic (Whole period)')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
mp.savefig('ROC2.png', bbox_inches='tight')
plt.show()
| [
2,
2220,
12782,
201,
198,
2,
284,
2198,
4326,
6808,
287,
640,
2168,
201,
198,
11748,
4818,
8079,
201,
198,
6738,
9756,
27530,
13,
912,
64,
13,
301,
1078,
10141,
1330,
512,
12853,
263,
201,
198,
6738,
1341,
35720,
13,
3866,
36948,
13... | 2.333747 | 6,439 |
# Copyright (C) 2017 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements caching for file hashes."""
from pathlib import Path
import sqlite3
from mir import xdg
class HashCache:
"""Cache for file SHA-256 hashes.
Hashes are stored in a SQLite database along with the file's path,
mtime, and size. The file's mtime and size are checked
automatically.
HashCache implements a basic mapping API for access and a context
manager API for closing the database connection.
"""
@staticmethod
def _dbpath() -> Path:
"""Return the path to the user's hash cache database."""
return _cachedir() / 'hash.db'
| [
2,
15069,
357,
34,
8,
2177,
9659,
7455,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
9... | 3.549849 | 331 |
import typing
import collections
import decimal
import enum
import datetime
import inspect
from django import forms
from django.contrib.admin import widgets
from django.db import models
from django.forms import utils
from sane_finances.inspection.analyzers import FlattenedInstanceAnalyzer, InstanceAttributeInfo
from sane_finances.sources.base import DownloadParameterValuesStorage
from sane_finances.annotations import SupportsDescription
class FormFieldsManager:
""" Manager for form fields """
# order is important: last item wins.
# thus base classes have to locate at the beginning,
# more specialized classes (subclasses) have to locate at the ending.
# otherwise, base class will always be used.
field_type_mapping: typing.OrderedDict[typing.Any,
typing.Tuple[typing.Type[forms.Field], typing.Dict[str, typing.Any]]] = \
collections.OrderedDict({
str: (forms.CharField, {'widget': widgets.AdminTextInputWidget()}),
int: (forms.IntegerField, {'widget': widgets.AdminIntegerFieldWidget()}),
bool: (forms.BooleanField, {'required': False}),
float: (forms.FloatField, {}),
decimal.Decimal: (forms.DecimalField, {'max_digits': 50, 'decimal_places': 4}),
datetime.date: (forms.DateField, {'widget': widgets.AdminDateWidget()}),
datetime.datetime: (forms.SplitDateTimeField, {'widget': widgets.AdminSplitDateTime()}),
enum.Enum: (forms.ChoiceField, {})
})
@property
@property
@property
def materialize_choice_values(
self,
form_cleaned_data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
""" Convert stringified choice values to original enum values
"""
return {
field_name: (self._materialize_choice_value(field_value, self._choice_fields[field_name])
if field_name in self._choice_fields
else field_value)
for field_name, field_value
in form_cleaned_data.items()
}
class ReadonlyFormFieldsManager(FormFieldsManager):
""" Manager for form readonly fields
"""
field_type_mapping: typing.OrderedDict[typing.Any,
typing.Tuple[typing.Type[forms.Field], typing.Dict[str, typing.Any]]] = \
collections.OrderedDict({
str: (forms.CharField, {'disabled': True}),
int: (forms.IntegerField, {'disabled': True}),
bool: (forms.BooleanField, {'required': False, 'disabled': True}),
float: (forms.FloatField, {'disabled': True}),
decimal.Decimal: (forms.DecimalField, {'max_digits': 50, 'decimal_places': 4, 'disabled': True}),
datetime.date: (forms.DateField, {'disabled': True}),
datetime.datetime: (forms.SplitDateTimeField, {'disabled': True}),
enum.Enum: (forms.ChoiceField, {'disabled': True})
})
| [
11748,
19720,
198,
11748,
17268,
198,
11748,
32465,
198,
11748,
33829,
198,
11748,
4818,
8079,
198,
11748,
10104,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
1330,
40803,
198,
6738,
42625,
... | 2.433172 | 1,242 |
#1 DR Number 11 Victim Age 22 Crime Description
#2 Date Reported 12 Victim Sex 23 Crime Code 1
#3 Date Occurred 13 Victim Descent 24 Crime Code 2
#4 Time Occurred 14 Premise Code 25 Crime Code 3
#5 Area ID 15 Premise Description 26 Crime Code 4
#6 Area Name 16 Weapon Used Code 27 Address
#7 Reporting District 17 Weapon Description 28 Cross Street
#8 Crime Code 18 Status Code 29 Location
#9 Crime Description 20 Status Description
#10 MO Codes 21 Crime Code
import pandas as pd
from app.settings import setting
source = pd.read_csv(setting['file'],skiprows=1,header=None,nrows=setting['limit']);
| [
2,
16,
10560,
7913,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1367,
38901,
7129,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2534,
10003,
12489,
198,
2,
17,
7536,
30588,
220,
220,
220,
220,
220,
220,
220,
1105,
... | 2.512027 | 291 |
#!/usr/bin/env python
# encoding: utf-8
'''
@author: AnthonyZero
@file: __init__.py.py
@time: 2019/5/9 22:35
@desc:
''' | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
7061,
6,
198,
31,
9800,
25,
9953,
28667,
198,
31,
7753,
25,
11593,
15003,
834,
13,
9078,
13,
9078,
198,
31,
2435,
25,
13130,
14,
20,
14,
24,
... | 2.163636 | 55 |
import numpy as np
from spn.structure.leaves.piecewise.PiecewiseLinear import PiecewiseLinear
from spn.algorithms.Gradient import add_node_feature_gradient
| [
11748,
299,
32152,
355,
45941,
628,
198,
6738,
599,
77,
13,
301,
5620,
13,
293,
3080,
13,
12239,
3083,
13,
47,
8535,
3083,
14993,
451,
1330,
27053,
3083,
14993,
451,
198,
6738,
599,
77,
13,
282,
7727,
907,
13,
42731,
1153,
1330,
751... | 3.156863 | 51 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 16 10:46:14 2022
@author: Admin
"""
import csv
import numpy
import datetime
from datetime import date
import random
import sys
start = datetime.strftime("31-10-2019", "%d-%m-%Y")
today=date.today()
end=today.strftime("%d-%m-%Y")
date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days)]
all_list = []
a_list = []
f = open('random_num.csv', 'w')
or date in date_generated:
all_list = []
a_list.append(date.strftime("%d-%m-%Y"))
a_list.append(str(random.randint(100000,999999)))
for item in a:
for i in range(len(item)):
if i == 0:
f.write(str(item[i]))
else:
f.write(',' + str(item[i]))
f.write('\n')
f.close() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2365,
1467,
838,
25,
3510,
25,
1415,
33160,
201,
198,
201,
198,
31,
9800,
25,
32053,
201,
198,
37811,
201,
198,
11748,
269,
213... | 2.059126 | 389 |
"""melodi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout, login
from django.views.generic import TemplateView
from django.contrib.auth import views as auth_views
from django.core.urlresolvers import reverse_lazy
from browser.views import *
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'searchset', SearchSetViewSet, base_name="SearchSet")
urlpatterns = [
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
#url(r'^browser/', include('browser.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'', include('social_auth.urls')),
url(r'^about', about, name='about'),
url(r'^citation', citation, name='citation'),
url(r'^help', help, name='help'),
url(r'^contact', contact, name='contact'),
url(r'^logout/', logout, {'next_page': reverse_lazy('home')}, name='logout'),
url(r'^login/', login, name='login'),
#url(r'^login-error/$', TemplateView.as_view(template_name="login-error.html")),
url(r'^$', index, name='home'),
url(r'^jobs/', jobs, name='jobs'),
url(r'^results/(?P<num>[0-9a-z-]+)/$', results, name='results'),
url(r'^complete/(?P<backend>[^/]+)/$', AuthComplete.as_view()),
url(r'^login-error/$', LoginError.as_view()),
url(r'^database/$', OrderListJson.as_view(), name='order_list_json'),
url(r'^pubs/(?P<num>[0-9]+_[0-9])/$',pubDetails, name='pubs'),
url(r'^pubss/(?P<num>.*_[0-9]+_[0-9])/$',pubSingle, name='pubss'),
url(r'^ajax_searchset/$', ajax_searchset.as_view(), name='ajax_searchset'),
url(r'^ajax_compare/$', ajax_compare.as_view(), name='ajax_compare'),
url(r'^get_semmed_items/',get_semmed_items, name='get_semmed_items'),
url(r'^articles/(?P<num>[0-9]+)/$',articleDetails, name='articles'),
url(r'^ajax_overlap/$', ajax_overlap.as_view(), name='ajax_overlap'),
url(r'^dt_test_page/$', dt_test_page, name='dt_test_page'),
url(r'^ajax_graph_metrics/$', ajax_graph_metrics, name='ajax_graph_metrics'),
url(r'^ajax_share/$', ajax_share, name='ajax_share'),
url(r'^ajax_delete/$', ajax_delete, name='ajax_delete'),
url(r'^download_result/$', download_result, name='download_result'),
url(r'^download_filter/$', download_filter, name='download_filter'),
url(r'^upload_filter/$', upload_filter, name='upload_filter'),
url(r'^save_filter/$', save_filter, name='save_filter'),
url(r'^temmpo/$', temmpo, name='temmpo'),
url(r'^temmpo_res/$', temmpo_res, name='temmpo_res')
]
| [
37811,
17694,
23130,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
... | 2.484895 | 1,291 |
import datetime
from . import screen
from .utils import strw
| [
11748,
4818,
8079,
198,
6738,
764,
1330,
3159,
198,
6738,
764,
26791,
1330,
965,
86,
198,
220,
220,
220,
220,
198
] | 3.142857 | 21 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AttachedDatabaseConfigurationArgs', 'AttachedDatabaseConfiguration']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.598425 | 127 |
# coding: utf-8
"""
Fabric Orchestrator API
This is Fabric Orchestrator API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: kthare10@unc.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from setuptools import setup, find_packages # noqa: H301
from fabric_cf import __VERSION__
NAME = "fabric-orchestrator-client"
VERSION = __VERSION__
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as fh:
requirements = fh.read()
setup(
name=NAME,
version=VERSION,
description="Fabric Orchestrator API",
author_email="kthare10@unc.edu",
url="https://github.com/fabric-testbed/OrchestratorClient",
keywords=["Swagger", "Fabric Orchestrator API"],
install_requires=requirements,
packages=find_packages(),
include_package_data=True,
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.9',
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
37759,
30369,
2536,
1352,
7824,
628,
220,
220,
220,
770,
318,
37759,
30369,
2536,
1352,
7824,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
4946,
1761... | 2.688172 | 465 |
import json
import os
import cv2
import numpy as np
from typing import Callable
from typing import List
from typing import Optional
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from utils import compact
class CLEVRVideoFrameDataset(Dataset):
"""Dataset that loads one random frame from CLEVR video"""
def __getitem__(self, index: int):
"""Load one video and get only one frame from it"""
if self.is_video:
return self._get_video(index)
# since we take subseq of video frames
img_idx, frame_idx = self._get_idx(index)
image_path = self.files[img_idx]
cap = cv2.VideoCapture(image_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
imgs = []
for _ in range(self.sample_clip_num):
success, img = cap.read()
if not success:
cap.release()
return self._rand_another(index)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
imgs.append(img)
cap.release()
# return shape [sample_clip_num, 3, H, W]
return torch.stack([self.clevr_transforms(img) for img in imgs], dim=0)
class CATERVideoFrameDataset(CLEVRVideoFrameDataset):
"""Dataset that loads one random frame from CATER video"""
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
4889,
540,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
12972,
13165,
354,
62,
2971,
... | 2.369527 | 571 |
from math import sqrt
while True:
print("Program wyznaczający pierwiastki równiania kwadratowego ax^2 + bx + c")
first = int(input("Podaj wpołczynnik liczby a: "))
second = int(input("Podaj wpołczynnik liczby b: "))
third = int(input("Podaj wpołczynnik liczby c: "))
delta = (second ** 2) - (4*(first * third))
if first == 0:
print("Równanie liniowe jest postaci: ", str(second) + "x", "+", third)
resolve = ((0 - third) / second)
print("Rozwiązaniem równania liniowego jest x =", resolve)
else:
if delta < 0:
#Zapisujemy postać -b +/- sqrt.delta / 2a w postaci rzeczywistej i urojonej: -b/2a + +/- sqrt.delta / 2a
print("z1 = ", complex((float(-second) / (2.0 * float(first))), (abs(delta) ** (1/2) / (2.0 * first))))
print("z2 = ", complex((float(-second) / (2.0 * float(first))), -(abs(delta) ** (1/2) / (2.0 * first))))
else:
sqrt_delta = sqrt(delta)
x1 = ((-second + sqrt_delta) / 2 * first)
x2 = ((-second - sqrt_delta) / 2 * first)
if x1 == x2:
print("Pierwiastakami równania:", str(first) + "x^2", "+", str(second) + "x", "+", third, "to: ", x1)
else:
print("Pierwiastakami równania:", str(first) + "x^2", "+", str(second) + "x", "+", third, "to: ", x1, x2)
what_to_do = input("Co chcesz dalej zrobić? \n T - Kontynuacja programu \n N - Koniec działania programu \n >")
if what_to_do == "N":
break
| [
6738,
10688,
1330,
19862,
17034,
198,
4514,
6407,
25,
198,
220,
220,
220,
3601,
7203,
15167,
266,
45579,
77,
330,
89,
1228,
128,
227,
948,
17748,
37686,
459,
4106,
374,
10205,
675,
666,
544,
479,
86,
324,
10366,
322,
1533,
78,
7877,
... | 1.986928 | 765 |
from top2vec.Top2Vec import Top2Vec
__version__ = '1.0.16'
| [
6738,
1353,
17,
35138,
13,
9126,
17,
53,
721,
1330,
5849,
17,
53,
721,
198,
198,
834,
9641,
834,
796,
705,
16,
13,
15,
13,
1433,
6,
198
] | 2.142857 | 28 |
#
# @lc app=leetcode id=230 lang=python3
#
# [230] Kth Smallest Element in a BST
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Assuming the tree is balanced:
# We first found the lowest value that is a leaf
# Then we go back up, until we find the kth smallest value
# Time complexity is O(H + k), where H is the height of the tree, since
# we must iterate until we find a leaf, than k times til we find kth.
# If the tree is balanced, H = logN, where N is number of nodes.
# Space complexity is O(H + k), O(N + k) in worst case, O(logN + k) in the avg.
# Avg happens when the tree is balanced. Worst if the tree is like a linked list
# (all nodes in one side).
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
19214,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
19214,
60,
509,
400,
10452,
395,
11703,
287,
257,
44992,
198,
2,
198,
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
... | 2.882784 | 273 |
import os
import math
import numpy as np
import numpy.linalg as nplin
| [
11748,
28686,
201,
198,
11748,
10688,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
299,
32152,
13,
75,
1292,
70,
355,
299,
46982,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.342857 | 35 |
# Path to preprocssed BraTS training dataset (hdf5 file)
BRATS_PATH = '../../../dataset/Mydataset/processed/data_3D_train.hdf5'
# Path to preprocssed BraTS validation dataset (hdf5 file)
BRATS_VAL_PATH = '../../../dataset/Mydataset/processed/data_3D.hdf5'
#Define a location for checkpoints (with trailing slash)
CHECKPOINT_BASE_PATH = "../../exp/"
#Define a location for predictions (with trailing slash)
PREDICTIONS_BASE_PATH = "../../exp/prediction"
| [
201,
198,
2,
10644,
284,
662,
1676,
25471,
276,
9718,
4694,
3047,
27039,
357,
71,
7568,
20,
2393,
8,
201,
198,
11473,
33586,
62,
34219,
796,
705,
40720,
40720,
40720,
19608,
292,
316,
14,
3666,
19608,
292,
316,
14,
14681,
276,
14,
7... | 2.634831 | 178 |
from django.db import models
from django.core.urlresolvers import reverse
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198
] | 3.52381 | 21 |
from django import forms | [
6738,
42625,
14208,
1330,
5107
] | 4.8 | 5 |
__version__ = "0.9.03"
__version_comment__ = "add results_topic to json_mqtt output"
| [
834,
9641,
834,
796,
366,
15,
13,
24,
13,
3070,
1,
198,
834,
9641,
62,
23893,
834,
796,
366,
2860,
2482,
62,
26652,
284,
33918,
62,
76,
80,
926,
5072,
1,
198
] | 2.65625 | 32 |
#!/usr/bin/env python
"""
Decode incoming packets from weather station and save to db
"""
import argparse
import os
import serial
import sys
import time
from datetime import datetime
from chaac import packets
from serial_packet.serial_packet import decode_packet, encode_packet
parser = argparse.ArgumentParser()
parser.add_argument("--port", help="device to connect to")
parser.add_argument("uid", help="uid to reset")
args = parser.parse_args()
if args.port:
port = args.port
else:
port = os.environ.get('SERIAL_PORT')
if port is None:
raise ValueError("Invalid serial port!")
stream = serial.Serial(port, timeout=0.01)
stream.flushInput()
buff = bytearray()
print("Waiting for message from {}".format(int(args.uid)))
while True:
line = stream.read(1)
if len(line) > 0:
buff.append(line[0])
while decode_packet(buff, process_packet) is True:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
10707,
1098,
15619,
24624,
422,
6193,
4429,
290,
3613,
284,
20613,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
11389,
198,
11748,
25064,
198,
11748... | 2.84326 | 319 |
# Generated by Django 2.2.16 on 2020-11-12 10:32
from django.db import migrations, models
import django.db.models.deletion
import laundro.models
import laundro.vallidators
import uuid
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1433,
319,
12131,
12,
1157,
12,
1065,
838,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.952381 | 63 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 08:20:21 2019
@author: Kazuki
"""
import numpy as np
import pandas as pd
import utils
SUBMIT_FILE_PATH = '../output/0313-1.csv.gz'
COMMENT = 'mean(ireko*7) + mean(tosh*6) + mean(onodera*2) + mean(nyanp*3) + mean(nejumi*2)'
EXE_SUBMIT = True
usecols = ['HasDetections']
# =============================================================================
# def
# =============================================================================
sub_best = pd.read_csv(utils.SUB_BEST)
# =============================================================================
# ireko
# =============================================================================
ireko707 = pd.Series(np.load('../external/sub/ireko707/test_preds.npy'), name='ireko707')
ireko708 = pd.Series(np.load('../external/sub/ireko708/test_preds.npy'), name='ireko708')
ireko708_v2 = pd.Series(np.load('../external/sub/ireko708_v2/test_preds.npy'), name='ireko708_v2')
ireko709 = pd.Series(np.load('../external/sub/ireko709/test_preds.npy'), name='ireko709')
ireko710 = pd.Series(np.load('../external/sub/ireko710/test_preds.npy'), name='ireko710')
ireko708_lovasz = pd.Series(np.load('../external/sub/ireko708_lovasz/test_preds.npy'), name='ireko708_lovasz')
ireko710_v2 = pd.Series(np.load('../external/sub/ireko710_v2/test_preds.npy'), name='ireko710_v2')
ireko = get_rank([ireko707, ireko708, ireko708_v2, ireko709, ireko710, ireko708_lovasz, ireko710_v2])
del ireko707, ireko708, ireko708_v2, ireko709, ireko710, ireko708_lovasz, ireko710_v2
# =============================================================================
# tosh
# =============================================================================
tosh708 = pd.read_csv('../external/sub/subNN_v1722.csv.gz', usecols=usecols)['HasDetections']
tosh709 = 1-pd.read_csv('../external/sub/subNN_v176.csv.gz', usecols=usecols)['HasDetections']
tosh709_v2 = 1-pd.read_csv('../external/sub/subNN_v1763.csv.gz', usecols=usecols)['HasDetections']
tosh707 = 1-pd.read_csv('../external/sub/sub_inv_NN_v178.csv.gz', usecols=usecols)['HasDetections']
tosh708_v2 = 1-pd.read_csv('../external/sub/sub_inv_NN_v1764.csv.gz', usecols=usecols)['HasDetections']
tosh709_v3 = pd.read_csv('../external/sub/subNN_v182.csv.gz', usecols=usecols)['HasDetections']
tosh = get_rank([tosh708, tosh709, tosh709_v2, tosh707, tosh708_v2, tosh709_v3])
del tosh708, tosh709, tosh709_v2, tosh707, tosh708_v2, tosh709_v3
# =============================================================================
# onodera
# =============================================================================
onodera709 = 1-pd.read_csv('../output/0308-2.csv.gz', usecols=usecols)['HasDetections']
onodera_noised = 1-pd.read_csv('../output/0310-1.csv.gz', usecols=usecols)['HasDetections']
onodera = get_rank([onodera709, onodera_noised])
del onodera709, onodera_noised
# =============================================================================
# nyanp
# =============================================================================
nyanp709_wo = pd.read_csv('../external/sub/sub_wo1_x24.csv.gz', usecols=usecols)['HasDetections']
nyanp709_w = pd.read_csv('../external/sub/sub_w1_x24.csv.gz', usecols=usecols)['HasDetections']
nyanp709_v2 = pd.read_csv('../external/sub/sub_lgbm_te_01100101102103173613614615701_auc0.745.csv.csv.gz', usecols=usecols)['HasDetections']
nyanp = get_rank([nyanp709_wo, nyanp709_w, nyanp709_v2])
del nyanp709_wo, nyanp709_w, nyanp709_v2
# =============================================================================
# nejumi
# =============================================================================
nejumi712 = 1-pd.read_csv('../external/sub/1_m_nejumi_0098_seed71_test.csv.gz', usecols=usecols)['HasDetections']
nejumi714 = 1-pd.read_csv('../external/sub/1_m_bagging_lgbm_on_nn.csv.gz', usecols=usecols)['HasDetections']
nejumi = get_rank([nejumi712, nejumi714])
del nejumi712, nejumi714
# =============================================================================
#
# =============================================================================
y_pred = ireko + tosh + onodera + nyanp + nejumi
y_pred /= y_pred.max()
sub = pd.read_csv('../input/sample_submission.csv.zip')
sub['HasDetections'] = y_pred.values
#sub['HasDetections'] = 1 - sub['HasDetections']
pri_id = pd.read_pickle('../data/pri_id.pkl')['MachineIdentifier']
sub_pri = sub[sub.MachineIdentifier.isin(pri_id)]
sub_pub = sub[~sub.MachineIdentifier.isin(pri_id)]
sub_best_pri = sub_best[sub_best.MachineIdentifier.isin(pri_id)]
sub_best_pub = sub_best[~sub_best.MachineIdentifier.isin(pri_id)]
print(f'corr with {utils.SUB_BEST}')
print('with best(ALL):', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
print('with best(pub):', sub_pub['HasDetections'].corr( sub_best_pub['HasDetections'], method='spearman') )
print('with best(pri):', sub_pri['HasDetections'].corr( sub_best_pri['HasDetections'], method='spearman') )
# save
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
1526,
1511,
8487,
25,
1238,
25,
2481,
13130,
198,
198,
31,
9800,
25,
16385,
11308,
... | 2.841077 | 1,894 |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
print(dt)
#Give a look to the columns
print(dt.columns)
time_ranges=((0,1),(1,10),(10,50),(300,500))
plt.figure(figsize=(12,10))
for i,timer in enumerate(time_ranges):
tlow,tup=timer
plt.subplot(2,2,i+1)
#Filter only stars
idxt=(dt.BWorldtime>=tlow) & (dt.BWorldtime<=tup)
idx0 = (dt.Phase_0<7) & idxt
idx1 = (dt.Phase_1<7) & idxt
Luminosity = pd.concat([dt.Luminosity_0[idx0],dt.Luminosity_1[idx1]])
Temperature = pd.concat([dt.Temperature_0[idx0],dt.Temperature_1[idx1]])
plt.hexbin(np.log10(Temperature),np.log10(Luminosity),bins=100,cmap="plasma",mincnt=1)
cbar=plt.colorbar(pad=0)
cbar.ax.tick_params(axis='both', which='major', labelsize=16)
cbar.set_label(label="$N$",size=15)
plt.xlim(5.5,3.5)
plt.xlabel("$\log T/\mathrm{K}$",fontsize=18)
plt.ylabel("$\log L/\mathrm{L_\odot}$",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.title(f"{tlow}<Age/Myr<{tup}",fontsize=20)
plt.tight_layout()
plt.savefig("analysis0.png")
plt.show()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
8912,
2393,
198,
28664,
28,
30094,
13,
961,
62,
40664,
7203,
325,
85,
77,
62,
22915,
14,... | 2.038328 | 574 |
import cv2
import sys
img=cv2.imread("image.jpg")
if img is None:
sys.exit("Could not find the image")
cv2.imshow("Display image", img)
k= cv2.waitKey(0)
gray_img= cv2.cvtColor(img, cv2.COLOR_BGR2GREY)
cv2.imwrite("new image.png", gray_img)
cv2.imshow("new image grayscale", gray_img)
cv2.waitKey(0)
| [
11748,
269,
85,
17,
198,
11748,
25064,
198,
198,
9600,
28,
33967,
17,
13,
320,
961,
7203,
9060,
13,
9479,
4943,
198,
198,
361,
33705,
318,
6045,
25,
198,
197,
17597,
13,
37023,
7203,
23722,
407,
1064,
262,
2939,
4943,
198,
198,
3396... | 2.231884 | 138 |
from rest_framework import serializers
from ensembl_metadata.models.release import Release, Site
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
551,
4428,
75,
62,
38993,
13,
27530,
13,
20979,
1330,
13868,
11,
14413,
628,
198
] | 4.125 | 24 |
# Generated by Django 2.0.5 on 2018-05-21 13:05
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2713,
12,
2481,
1511,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Generated by Django 3.2.7 on 2021-11-09 11:46
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
1157,
12,
2931,
1367,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 1.785714 | 14 |
from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
env = Environment()
env.loader = FileSystemLoader('.')
my_val = {
"vrf_name": 'blue',
"rd_number": '100:1',
"ipv4_enabled": 1,
"ipv6_enabled": 0,
}
template_file = 'vrf_conf.j2'
template = env.get_template(template_file)
output = template.render(**my_val)
print(output)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
198,
6738,
474,
259,
6592,
17,
1330,
9220,
11964,
17401,
11,
520,
2012,
31319,
18156,
198,
6738,
474,
259,
6592,
17,
13,
38986,
1330,
9344,
198,
24330,
79... | 2.720497 | 161 |
from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_2_xsd.nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_2 import NistschemaSvIvAtomicUnsignedByteMinInclusive2
__all__ = [
"NistschemaSvIvAtomicUnsignedByteMinInclusive2",
]
| [
6738,
5072,
13,
27530,
13,
77,
396,
62,
7890,
13,
47116,
13,
43375,
62,
26327,
13,
15952,
2611,
62,
39098,
13,
77,
1023,
2395,
2611,
62,
21370,
62,
452,
62,
47116,
62,
43375,
62,
26327,
62,
1084,
62,
259,
5731,
62,
17,
62,
87,
2... | 2.577586 | 116 |
from .base import Base
from sqlalchemy import Integer, DateTime, Column, ForeignKey
from sqlalchemy.orm import relationship, backref
| [
6738,
764,
8692,
1330,
7308,
198,
6738,
44161,
282,
26599,
1330,
34142,
11,
7536,
7575,
11,
29201,
11,
8708,
9218,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
11,
736,
5420,
628,
198
] | 3.970588 | 34 |
from ...subscribe import models
| [
6738,
2644,
7266,
12522,
1330,
4981,
628,
628
] | 4.375 | 8 |
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import tempfile
from abc import ABC
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional
import requests
import vcr
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams.http import HttpStream
from requests.exceptions import HTTPError
cache_file = tempfile.NamedTemporaryFile()
class SemiIncrementalGithubStream(GithubStream):
"""
Semi incremental streams are also incremental but with one difference, they:
- read all records;
- output only new records.
This means that semi incremental streams read all records (like full_refresh streams) but do filtering directly
in the code and output only latest records (like incremental streams).
"""
cursor_field = "updated_at"
# This flag is used to indicate that current stream supports `sort` and `direction` request parameters and that
# we should break processing records if possible. If `sort` is set to `updated` and `direction` is set to `desc`
# this means that latest records will be at the beginning of the response and after we processed those latest
# records we can just stop and not process other record. This will increase speed of each incremental stream
# which supports those 2 request parameters.
is_sorted_descending = False
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
"""
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state
object and returning an updated state object.
"""
state_value = latest_cursor_value = latest_record.get(self.cursor_field)
if current_stream_state.get(self.repository, {}).get(self.cursor_field):
state_value = max(latest_cursor_value, current_stream_state[self.repository][self.cursor_field])
return {self.repository: {self.cursor_field: state_value}}
# Below are full refresh streams
# Below are semi incremental streams
# Below are incremental streams
| [
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
3701,
26327,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3... | 3.49945 | 909 |
import dsz
import dsz.cmd
import dsz.version
import dsz.script
import ops
import ops.cmd
import ops.db
import ops.project
import ops.files.dirs
import ops.system.registry
import ops.system.environment
import ntpath
from datetime import timedelta, datetime
PACKAGES_TAG = 'OPS_PACKAGES_TAG'
PROGRAM_FILES_32_TAG = 'OPS_32_BIT_PROGRAM_FILES_TAG'
PROGRAM_FILES_64_TAG = 'OPS_64_BIT_PROGRAM_FILES_TAG'
SOFTWARE_32_TAG = 'OPS_32_BIT_SOFTWARE_KEY_TAG'
SOFTWARE_64_TAG = 'OPS_64_BIT_SOFTWARE_KEY_TAG'
MAX_PACKAGES_CACHE_SIZE = 3
MAX_PROGRAMDIR_CACHE_SIZE = 3
MAX_SOFTWARE_KEY_CACHE_SIZE = 3
| [
201,
198,
11748,
288,
82,
89,
201,
198,
11748,
288,
82,
89,
13,
28758,
201,
198,
11748,
288,
82,
89,
13,
9641,
201,
198,
11748,
288,
82,
89,
13,
12048,
201,
198,
11748,
39628,
201,
198,
11748,
39628,
13,
28758,
201,
198,
11748,
39... | 2.295455 | 264 |
import pyautogui as pg
from time import sleep
sleep(3)
test_type = 0 #0 valid patient id with data,1 valid patient id without data, 2 invalid patient id, 3 non-existent patient id, 4 invalid type 2 missing digits in patient id
if not test_type:
national_id = "11111111111"
press_key('tab',8)
pg.write(national_id)
press_key('tab')
press_key('enter')
elif test_type==1:
national_id = "12345678910"
press_key('tab',8)
pg.write(national_id)
press_key('tab')
press_key('enter')
elif test_type==2:
national_id = "a1111111111"
press_key('tab',8)
pg.write(national_id)
press_key('tab')
press_key('enter')
elif test_type==3:
national_id = "11111111112"
press_key('tab',8)
pg.write(national_id)
press_key('tab')
press_key('enter')
elif test_type==4:
national_id = "1111111111"
press_key('tab',8)
pg.write(national_id)
press_key('tab')
press_key('enter') | [
11748,
12972,
2306,
519,
9019,
355,
23241,
198,
6738,
640,
1330,
3993,
198,
198,
42832,
7,
18,
8,
198,
198,
9288,
62,
4906,
796,
657,
1303,
15,
4938,
5827,
4686,
351,
1366,
11,
16,
4938,
5827,
4686,
1231,
1366,
11,
362,
12515,
5827,... | 2.412214 | 393 |
number=30
n=[10,11,12,13,14,17,18,19]
l=len(n)
i=0
m=[]
while i<l:
j=0
a=[]
while j<l:
if n[i]+n[j]==number and n[i]<n[j]:
a.append(n[i])
a.append(n[j])
m.append(a)
j=j+1
i=i+1
print(m) | [
17618,
28,
1270,
198,
77,
41888,
940,
11,
1157,
11,
1065,
11,
1485,
11,
1415,
11,
1558,
11,
1507,
11,
1129,
60,
198,
75,
28,
11925,
7,
77,
8,
198,
72,
28,
15,
198,
76,
28,
21737,
198,
4514,
1312,
27,
75,
25,
198,
220,
220,
2... | 1.405556 | 180 |
# Author: Yu-Chia Chen <yuchaz@uw.edu>
# LICENSE: Simplified BSD
from ._configure import setup_color_palettes, tqdm, color_hex
from .coord_search import projected_volume, greedy_coordinate_search
from .regu_path import zeta_search
from .utils import compute_radius_embeddings, compute_tangent_plane
from .plotter import regu_path_plot, discretize_x_ticks
from .plotter import (visualize_2d_embedding, visualize_3d_embedding,
visualize_4d_embedding)
from .data_generator import data_loader
| [
2,
6434,
25,
10605,
12,
1925,
544,
12555,
1279,
88,
794,
1031,
31,
84,
86,
13,
15532,
29,
198,
2,
38559,
24290,
25,
45157,
1431,
347,
10305,
198,
198,
6738,
47540,
11250,
495,
1330,
9058,
62,
8043,
62,
18596,
23014,
11,
256,
80,
3... | 2.782609 | 184 |
TRUE_VALS = set(["1", "t", "true", "on", "yes", "y"])
__unittest = True
class Config(object):
"""Configuration for a plugin or other entities.
Encapsulates configuration for a single plugin or other element.
Corresponds to a :class:`ConfigParser.Section` but provides an
extended interface for extracting items as a certain type.
"""
def as_bool(self, key, default=None):
"""Get key value as boolean
1, t, true, on, yes and y (case insensitive) are accepted as ``True``
values. All other values are ``False``.
"""
try:
val = self._mvd[key][0].strip()
except KeyError:
return default
except IndexError:
# setting = -> False
return False
return val.lower() in TRUE_VALS
def as_int(self, key, default=None):
"""Get key value as integer"""
return self._cast(key, int, default)
def as_float(self, key, default=None):
"""Get key value as float"""
return self._cast(key, float, default)
def as_str(self, key, default=None):
"""Get key value as str"""
return self._cast(key, str, default)
def as_list(self, key, default=None):
"""Get key value as list.
The value is split into lines and returned as a list. Lines
are stripped of whitespace, and lines beginning with # are
skipped.
"""
lines = []
try:
vlist = self[key]
except KeyError:
return default
for val in vlist:
lines.extend(
line.strip()
for line in val.splitlines()
if line.strip() and not line.strip().startswith("#")
)
return lines
def get(self, key, default=None):
"""Get key value"""
return self.as_str(key, default)
| [
5446,
8924,
62,
53,
23333,
796,
900,
7,
14692,
16,
1600,
366,
83,
1600,
366,
7942,
1600,
366,
261,
1600,
366,
8505,
1600,
366,
88,
8973,
8,
198,
834,
403,
715,
395,
796,
6407,
628,
198,
4871,
17056,
7,
15252,
2599,
628,
220,
220,
... | 2.330446 | 808 |
import sys
from typing import List, TextIO
PREAMBLE_SIZE = 25
if __name__ == "__main__":
print(handler(sys.stdin))
| [
11748,
25064,
198,
6738,
19720,
1330,
7343,
11,
8255,
9399,
198,
198,
47,
32235,
19146,
62,
33489,
796,
1679,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
30281,
7,
17597,
13,
... | 2.695652 | 46 |