function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def kurt(x, axis = 0):
    """Kurtosis of ``x`` along ``axis`` (thin wrapper around ``kurtosis``)."""
    return kurtosis(x, axis)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def pnorm(p, mean = 0, var = 1):
    """Normal CDF at ``p``; values above 1 are treated as percentages.

    NOTE(review): ``var`` is passed to ``scale=``, i.e. it is used as the
    standard deviation, not the variance — confirm the naming with callers.
    """
    quantile = p / 100 if p > 1 else p
    return scipy.stats.norm.cdf(quantile, loc=mean, scale=var)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def CI(q, data, method = "mean",U = True, L = True): if q > 1: q = q/100 norms = qnorm(q+(1-q)/2)*(std(data) / sqrt(len(data)) ) if method == "mean": u = mean(data) + norms; l = mean(data) - norms if U == L == True: return (l,u) elif U == True: return u else: return l
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def lower(x):
    """Lower-case every string element of a list (non-strings pass through).

    Non-list inputs are returned as a plain copy, unmodified.
    """
    result = copy(x)
    if type(x) is list:
        for idx in range(len(result)):
            try:
                result[idx] = result[idx].lower()
            except:
                pass
    return result
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def int(df, *args):
    """Best-effort integer coercion: DataFrame columns to int64 (falling
    back to ``np.floor``), scalars via ``np.int64`` — failures pass through.

    NOTE(review): shadows the builtin ``int`` for the whole module.
    """
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = out[name].astype("int64")
            except:
                try:
                    out[name] = np.floor(out[name])
                except:
                    pass
        return out
    try:
        return np.int64(df)
    except:
        try:
            return np.floor(df)
        except:
            return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def float(df, *args):
    """Best-effort float coercion: DataFrame columns to float64, scalars
    via ``np.float64`` — failures pass the input through unchanged.

    NOTE(review): shadows the builtin ``float`` for the whole module.
    """
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = out[name].astype("float64")
            except:
                pass
        return out
    try:
        return np.float64(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def max(x, axis = 0):
    """Maximum of ``x``: column-wise for tables, ``np.amax`` otherwise.

    NOTE(review): shadows the builtin ``max``; relies on the module's
    istable/conts/shape/matrix helpers.
    """
    if istable(x) == 1:
        return conts(x).max()
    if shape(matrix(x))[0] == 1:
        return np.amax(x, axis=axis)
    return np.amax(x)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def min(x, axis = 0):
    """Minimum of ``x``: column-wise for tables, ``np.amin`` otherwise.

    Fix: made the axis handling mirror ``max`` — the original applied
    ``axis`` only in the multi-row branch and ignored it for single-row
    input, the exact reverse of ``max``'s behaviour, which cannot both be
    right for a symmetric pair.
    NOTE(review): shadows the builtin ``min``.
    """
    if istable(x) == 1:
        return conts(x).min()
    if shape(matrix(x))[0] == 1:
        return np.amin(x, axis=axis)
    return np.amin(x)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def argcheck(df, args):
    """Resolve a ``*args`` tuple into a column list.

    Returns ``(copy_of_df, columns)``: all columns when no args were
    given, the first arg when it is already a list, else the args as a list.
    """
    if not args:
        selected = columns(df)
    elif type(args[0]) is not list:
        selected = list(args)
    else:
        selected = args[0]
    return copy(df), selected
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def abs(df, *args):
    """Absolute value, column-wise for DataFrames and element-wise otherwise.

    NOTE(review): shadows the builtin ``abs``.
    """
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.abs(out[name])
            except:
                pass
        return out
    try:
        return np.abs(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def log(df, *args, shift = 0):
    """Natural log of ``value + shift``, column-wise for DataFrames.

    Inputs that cannot be logged pass through unchanged.
    """
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.log(out[name] + shift)
            except:
                pass
        return out
    try:
        return np.log(df + shift)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def exp(df, *args, shift = 0):
    """Exponential ``exp(value) + shift`` (shift added AFTER exponentiation),
    column-wise for DataFrames; failures pass through unchanged."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.exp(out[name]) + shift
            except:
                pass
        return out
    try:
        return np.exp(df) + shift
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def sin(df, *args):
    """Sine, column-wise for DataFrames and element-wise otherwise."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.sin(out[name])
            except:
                pass
        return out
    try:
        return np.sin(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def cos(df, *args):
    """Cosine, column-wise for DataFrames and element-wise otherwise."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.cos(out[name])
            except:
                pass
        return out
    try:
        return np.cos(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def tan(df, *args):
    """Tangent, column-wise for DataFrames and element-wise otherwise.

    Fix: this row was a byte-identical second definition of ``cos``
    (see the previous row) — an obvious copy-paste slip where ``tan``
    was intended, completing the sin/cos/tan trio. Defining ``tan`` is
    backward-compatible: ``cos`` keeps its earlier definition.
    """
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.tan(out[name])
            except Exception:
                pass
        return out
    try:
        return np.tan(df)
    except Exception:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def sqrt(df, *args):
    """Square root, column-wise for DataFrames and element-wise otherwise."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.sqrt(out[name])
            except:
                pass
        return out
    try:
        return np.sqrt(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def floor(df, *args):
    """Floor, column-wise for DataFrames and element-wise otherwise."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.floor(out[name])
            except:
                pass
        return out
    try:
        return np.floor(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def ceiling(df, *args):
    """Ceiling (``np.ceil``), column-wise for DataFrames, element-wise otherwise."""
    if type(df) == pd.DataFrame:
        out, cols = argcheck(df, args)
        for name in cols:
            try:
                out[name] = np.ceil(out[name])
            except:
                pass
        return out
    try:
        return np.ceil(df)
    except:
        return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def sum(x, axis = 1):
    """Sum via the object's own ``.sum(axis=...)``; plain sequences fall
    back to ``np.nansum`` over axis 0.

    NOTE(review): the fallback ignores ``axis`` — presumably deliberate
    for 1-D lists, confirm. Shadows the builtin ``sum``.
    """
    try:
        return x.sum(axis=axis)
    except:
        return np.nansum(x, axis=0)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def array(*args):
    """Build an np.array; a single (n, 1)-shaped input is flattened to 1-D,
    multiple positional args become one array."""
    if len(args) == 1:
        arr = np.array(args[0])
        try:
            # Collapse a single-column 2-D array into a vector.
            if shape(arr)[1] == 1:
                arr = arr.T[0]
        except:
            pass
        return arr
    try:
        return np.array(args)
    except:
        return np.array([args])
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def T(x):
    # Transpose helper for arrays / DataFrames / lists.
    # NOTE(review): `type(x)==np.array` compares a type object against the
    # np.array *function*, so it is never true and the matrix(x).T branch
    # is dead; ndarrays fall through to `x.T`. Confirm whether routing
    # ndarrays through matrix() was intended (it would make 1-D vectors
    # transpose into row/column matrices instead of being a no-op).
    if type(x)==np.array: return matrix(x).T
    else:
        try: return x.T
        except: return array(x).T
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def inv(x):
    """Matrix inverse; prints a message and returns None for singular or
    non-square input instead of raising.

    Fix: the original bare ``except:`` would also swallow unrelated
    errors (even KeyboardInterrupt); ``np.linalg.inv`` signals the two
    documented failure modes with LinAlgError, so only that is caught.
    """
    try:
        return np.linalg.inv(x)
    except np.linalg.LinAlgError:
        print("Either det(x)=0 or not square matrix")
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def det(x):
    """Determinant; prints a message and returns None for non-square input.

    Fix: narrowed the original bare ``except:`` to np.linalg.LinAlgError,
    the exception ``np.linalg.det`` raises for non-square matrices.
    """
    try:
        return np.linalg.det(x)
    except np.linalg.LinAlgError:
        print("Not square matrix")
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def eye(x):
    """Identity matrix of order ``x`` (MATLAB-style alias for np.eye)."""
    return np.eye(x)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def ones(x, s = 1):
    """All-ones array: square x-by-x when ``s == 1``, else ``np.ones(x)``."""
    return np.ones((x, x)) if s == 1 else np.ones(x)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def J(x, s = 1):
    """Alias for :func:`ones` — J is the conventional all-ones matrix name."""
    return ones(x, s)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def zeros(x, s = 1):
    """All-zeros array: square x-by-x when ``s == 1``, else ``np.zeros(x)``."""
    return np.zeros((x, x)) if s == 1 else np.zeros(x)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def zeroes(x, s = 1):
    """British-spelling alias for :func:`zeros`."""
    return zeros(x, s)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def triu(matrix):
    """Upper-triangular part of ``matrix``.

    NOTE(review): the parameter name shadows the module's ``matrix`` helper.
    """
    return np.triu(matrix)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def trace(A):
    """Sum of the main diagonal of ``A``."""
    return np.trace(A)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def diag(A):
    """Main diagonal of ``A`` (read-only view via np.diagonal)."""
    return np.diagonal(A)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def repmat(A, *args):
    """MATLAB-style tile: ``repmat(A, rows, cols)`` or ``repmat(A, n)``.

    Any other arity prints "Error" and returns None.
    """
    if len(args) == 2:
        return np.tile(A, (args[0], args[1]))
    if len(args) == 1:
        return np.tile(A, args[0])
    print("Error")
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def read(x):
    # Read csv file(s) and pass the frame through clean().
    # NOTE(review): for a list input only the FIRST entry containing
    # "csv" is read (the return exits the loop); paths without "csv"
    # in the name fall through and the function returns None.
    if type(x) == list:
        for y in x:
            if "csv" in y: return clean(pd.read_csv(y))
    else:
        if "csv" in x: return clean(pd.read_csv(x))
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def string(dfx, *args):
    """Cast to string form: DataFrame columns / Series values get a
    trailing ``*`` marker; anything else goes through ``str``."""
    frame = copy(dfx)
    if type(dfx) == pd.DataFrame:
        out, cols = argcheck(frame, args)
        for name in cols:
            out[name] = out[name].astype("str") + "*"
        return out
    if type(dfx) == pd.Series:
        return frame.astype("str") + "*"
    return str(frame)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def clean(x, *args): def cleancol(x): if dtypes(x) == 'obj': c = x.str.replace(",","").str.replace(" ","").str.replace("-","").str.replace("%","").str.replace("#","") else: c = x
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def analyse(c, y = None, extra = ["skew"], dec = 2, colour = True, limit = True, graph = True): x = copy(c) if y is not None: if type(y) == str: x, y = drop(x, y) first = describe(x, extra = extra, clean = False); cols = columns(first) df = hcat(guess_importance(x,y), first) df.columns = append("Importance", cols) df = round(sort(df, by = ["Importance","FreqRatio","%Unique"], des = True),dec) if limit == True: df = df[df["Importance"]>0] if graph == True: plot(x = index(df)[0], y = index(df)[1], z = index(df)[2], hue = y, data = c) if colour == True: df = df.style.bar(align='mid', color=pd_colour, width = 80).set_properties(**{'max-width': '90px'}) return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def varcheck(x, freq = "mean", unq = 0.1, colour = True, limit = True, output = False): freqs = freqratio(x); unqs = punique(x) if freq == "mean": fd = (freqs>=CI(q=0.99,data =freqs,L=False))*1 else: fd = (freqs>freq)*1 df = hcat(freqs,fd,unqs,(unqs<=unq)*1,var(x)) df.columns = ["FreqRatio","BadFreq?","%Unique","BadUnq?","Var"] df["BadVar?"] = (df["Var"].fillna(1000)<=0.1)*1 df["BAD?"] = (df["BadFreq?"]+df["BadUnq?"]+df["BadVar?"])>0 df = round(sort(df, by =["BAD?","BadVar?","BadFreq?","BadUnq?","FreqRatio","%Unique","Var"], des = True),2)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def corr(x, table = False, limit = 20): if table == False: corrs = round(x.corr()*100) sortby = sort(sum(abs(corrs)-100),des=False) corrs = corrs[index(sortby)] corrs = T(T(corrs)[index(sortby)]) if shape(corrs)[0]>limit: corrs = T(T(corrs.iloc[0:limit]).iloc[0:limit]) corrs = T(reverse(T(reverse(corrs)))) cmap = sb.light_palette("black", as_cmap=True) show = abs(corrs).style.background_gradient(cmap).set_properties(**{'max-width': '50px', 'font-size': '8pt' ,'color':'black'}) return show else: try: return conts(x).corr() except: print("Error. No continuous data")
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def correlation(x, table = False):
    """Long-name alias for :func:`corr`."""
    return corr(x, table)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def cor(x, table = False):
    """R-style alias for :func:`corr`."""
    return corr(x, table)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def guess_importance(df, y): x = copy(df) if type(y) == str: try: y = x[y] except: print("No column for y") x = dummies(x) x_train, x_test, y_train, y_test = holdout(x, y, info = False);
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def lightmodel(x_train, x_test, y_train, y_test, reg, seed = 1234): try: import lightgbm as lgb except: print("Cannot install"); raise x_train = array(x_train); y_train = array(y_train); x_test = array(x_test); y_test = array(y_test) if reg == True: model = lgb.LGBMRegressor(objective='regression', num_leaves = 5, learning_rate = 0.1, n_estimators = 100, seed = seed) model.fit(x_train, y_train, early_stopping_rounds = 10, eval_metric='l2', eval_set=[(x_test, y_test)],verbose=False) return model
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def guess_imp(df, y):
    """Short alias for :func:`guess_importance`."""
    return guess_importance(df, y)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def remcor(x, limit = 0.9):
    """Drop one column of every pair whose correlation is >= ``limit``.

    Scans the lower triangle of the (pre-computed) correlation matrix and
    deletes the higher-indexed column of each offending pair.
    """
    dataset = copy(x)
    corr_matrix = dataset.corr()
    flagged = set()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if corr_matrix.iloc[i, j] >= limit:
                colname = corr_matrix.columns[i]
                flagged.add(colname)
                if colname in dataset.columns:
                    del dataset[colname]
    return dataset
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def independent(A):
    """Return only the linearly independent rows of ``A`` (via sympy rref)."""
    try:
        import sympy
    except:
        print("Cannot install")
        raise
    # Pivot columns of the transpose's RREF index the independent rows.
    _, pivot_rows = sympy.Matrix(A).T.rref()
    print("Lin Indp rows are: " + str(pivot_rows))
    return A[list(pivot_rows)]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def dummies(x, dummies = True, codes = False, freq = True, na = "inplace", nanew = True, col = None, ascending = True, cat = True, drop = True, ids = False): try: if dtypes(x)[0]==('int' or 'float') and type(x)==pd.Series: return x except: if dtypes(x)==('int' or 'float') and type(x)==pd.Series: return x if type(x)!=pd.DataFrame: x = table(x) df = copy(x) if ids == False: df = exc(df, isid(df)) if col is None: if cat == True: col = catcol(df) else: col = objcol(df) elif type(col)!=list: col = [col] if dummies == True: if "in" in na: for j in col: dummified = pd.get_dummies(x[j], dummy_na = nanew) dummified.columns = [str(j)+"_"+str(c) for c in columns(dummified)] if j in nacol(x): dummified.iloc[isnull(x[j]).index]=np.nan df = hcat(df, dummified) else: df = pd.get_dummies(x, dummy_na = nanew, columns = col) if drop == True: return notobj(zerodrop(df)) else: return zerodrop(df) else: if freq == True: code = {} for j in col: part = {}; try: i = min(df[j]); except: i = 0; if dtype(df[j])!=('int'or'float'): d = fsort(df, by = j)[j] else: d = sort(df, by = j)[j] for k in d: if pd.isnull(k)==False: try: part[k] except: part[k] = i; i+=1 code[j] = part df[j]=df[j].replace(part) del part,i,d,k else: code = {} for j in col: code[j] = reverse(dict(enumerate(df[j].astype("category").cat.categories))) df[j]=df[j].replace(code[j]) if drop == True: df = notobj(df) if shape(df)[1]==1: df = df[columns(df)[0]] if codes == True: return df,code else: return df
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def discretise(x, n = 4, smooth = True, codes = False):
    """Bin ``x`` into ``n`` buckets: quantile bins when ``smooth`` is True,
    equal-width bins otherwise.

    ``codes=True`` yields integer bin codes; ``codes=False`` the interval
    labels (pandas default).
    Fix: the quantile-failure fallback called ``pd.cut(x, q=n, ...)``, but
    ``pd.cut`` has no ``q`` keyword (that is ``pd.qcut``'s), so the
    fallback itself always raised TypeError; it now passes ``bins=n``.
    """
    # pandas convention: labels=None -> interval labels, labels=False -> codes.
    labels = False if codes else None
    if smooth == True:
        try:
            return pd.qcut(x, q=n, duplicates='drop', labels=labels)
        except Exception:
            return pd.cut(x, bins=n, labels=labels)
    return pd.cut(x, bins=n, labels=labels)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def qcut(x, n = 4, smooth = True, codes = False):
    """pandas-flavoured alias for :func:`discretise`."""
    return discretise(x, n, smooth, codes)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def topositive(y, info = False): x = copy(y); d = conts(x) notgood = ((np.min(d)<=0).index[np.min(d)<=0]).tolist() add = np.abs(np.min(d[notgood]))+1 d[notgood] = d[notgood]+add x[columns(d)] = d if info == False: return x else: return x,add
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def boxcox(x): if type(x) == pd.Series: k = (conts(x)+abs(min(conts(x)))+1) lm = scipy.stats.boxcox(k)[1] if lm == 0: return log(x), lm else: return ((k**lm)-1)/lm, lm else: df = []; lms = [] for col in contcol(x): k = (x[col]+abs(min(x[col]))+1) lm = scipy.stats.boxcox(k)[1] if lm == 0: df.append(log(x[col])); lms.append(lm) else: df.append(((k**lm)-1)/lm); lms.append(lm) return T(table(df)), array(lms)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def unskew(x, info = False): def show(q, df): if q == 0: return (df, "normal") elif q == 1: return (sqrt(df), "sqrt") else: return (boxcox(df)[0], "boxcox") original = copy(x) df = topositive(conts(x)) skews = np.abs(skew(df)) sqrted = sqrt(df) boxcoxed = boxcox(df)[0] comp = hcat(skew(df),skew(sqrted),skew(boxcoxed)); comp.columns = ["norm","sqrt","box"] res = np.abs(comp.T) r = []; out = [] for col in res: p = 0 for i in res[col]: if i == np.min(res[col]): f = show(p, df[col]); r.append(f[1]); out.append(f[0]); break else: p += 1 first = out[0] for c in out[1:]: first = hcat(first, c)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def outlier(df, method = "forest", poutlier = 0.025, sd = 3.5, iqr = 1.5, indicate = True, n_estimators = 100): x = copy(df) if "for" in method or "tree" in method: from sklearn.ensemble import IsolationForest df = dummies(x, na = "clear"); df = df.fillna(df[nullcol].median()) model = IsolationForest(n_estimators = n_estimators, n_jobs=-1, bootstrap = True, contamination = poutlier) model.fit(df); preds = model.predict(df) res = x.iloc[np.where(preds==-1)[0]] else: f = dummies(x, na = "clear"); df = topositive(f.fillna(f.median())) if "std" in method or "sd" in method: #https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data if len(shape(df)) == 1: df = df[:,None] df = unskew(df) meds = median(df, axis=0) diff = sum((df - meds)**2, axis=1) diff = sqrt(diff); mad = median(diff) z = 0.6745 * diff / mad out = (z>sd)==True where = out.index[out].tolist() res = x.iloc[where] elif "iqr" in method: first = percentile(df, p = 0.25) last = percentile(df, p = 0.75) iqrred = first-last where = sum((df>(last+iqr*last))|(df<(first-iqr*first)))!=0 res = x.iloc[where.index[where].tolist()]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def isoutlier(df, method = "forest", poutlier = 0.025, sd = 3.5, iqr = 1.5, indicate = False, n_estimators = 100):
    """Return only the rows :func:`outlier` flags (IsOutlier == 1).

    With ``indicate=False`` the helper "IsOutlier" column is stripped
    from the result via ``exc``.
    """
    flagged = outlier(df, method = method, poutlier = poutlier, sd = sd, iqr = iqr, indicate = True, n_estimators = n_estimators)
    mask = flagged["IsOutlier"] == 1
    rows = flagged.iloc[mask.index[mask]]
    if indicate == False:
        return exc(rows, "IsOutlier")
    return rows
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def zerodrop(x):
    """Drop the columns that are 100% zeros (as reported by ``pzero``)."""
    allzero = pzero(x) == 100
    return exc(x, allzero.index[allzero].tolist())
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def fillobj(x, method): data = copy(clean(x)) missed = nacol(data[objcol(data)]); missdf = data[missed] if method in ["mode","freq","frequency"]: data[missed] = data[missed].fillna(mode(missdf)) elif method in ["zero","missing","none"]: data[missed] = data[missed].fillna("Missing_Data") elif method in ["mix","half","halved"]: ins = (count(x)<0.75*len(x)).index[count(x)<0.75*len(x)] data[ins] = data[ins].fillna("Missing_Data") other = diff(columns(x), ins) data[other] = data[other].fillna(mode(x[other])) return data
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def fillcont(x, method): data = copy(clean(x)) missed = nacol(conts(data)); missdf = data[missed] if method in ["mean","avg","average"]: data[missed] = data[missed].fillna(mean(missdf)) elif method in ["median"]: data[missed] = data[missed].fillna(median(missdf)) elif method in ["mode","freq","frequency"]: data[missed] = data[missed].fillna(mode(missdf)) return data
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def complete(df, method = None, objects = None, continuous = None, knn = 5, max_unique = 20, epoch = 100, mice = "forest", ids = False): x = copy(df); imputation = ["bpca","pca","knn","mice","svd"]; imped = 0 if ids == False: x = exc(x, isid(x)) if method is not None: meth = method.lower() else: meth = "a" if method is None and objects is None and continuous is None: meth = 'knn'
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def matching(method, objects, continuous, thingo):
    """1 when ``thingo`` is selected by ``method`` (if given) or appears in
    either the ``objects`` or ``continuous`` lists, else 0."""
    if method is not None:
        return 1 if thingo in method else 0
    return 1 if (thingo in objects or thingo in continuous) else 0
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def bpca_complete(x, epochs = 100): decimals = 4 y = copy(x); cols = y.columns.tolist() maximum = np.int(np.max(y.max())*999) means = round(y.mean(),decimals); sd = round(y.std(),decimals); y = round((y-means)/sd,decimals) y[missingcol(y)] = y[missingcol(y)].fillna(maximum) mat = float(np.matrix(y)) N,d = mat.shape; q = d-1 yest = np.copy(mat); yest[yest==maximum]=0 missidx = {}; bad = np.where(mat==maximum) for a in bad[0]: missidx[a] = [] for a in range(len(bad[0])): missidx[bad[0][a]].append(bad[1][a]) nomissidx = {}; good = np.where(mat!=maximum) for a in good[0]: nomissidx[a] = [] for a in range(len(good[0])): nomissidx[good[0][a]].append(good[1][a]) gmiss = list(set(bad[0])) gnomiss = list(set(good[0])) covy = np.cov(yest.T) U, S, V = np.linalg.svd(np.matrix(covy)) U = (U.T[0:q]).T; S = S[0:q]*np.eye(q); V = (V.T[0:q]).T mu = np.copy(mat); mu[mu==maximum]=np.nan; mu = np.nanmean(mu, 0) W = U*np.sqrt(S); tau = 1/ (np.trace(covy)-np.trace(S)); taumax = 1e20; taumin = 1e-20; tau = np.amax([np.amin([tau,taumax]),taumin]) galpha0 = 1e-10; balpha0 = 1; alpha = (2*galpha0 + d)/(tau*np.diag(W.T*W)+2*galpha0/balpha0) gmu0 = 0.001; btau0 = 1; gtau0 = 1e-10; SigW = eye(q) tauold = 1000 for epoch in range(epochs): Rx = np.eye(q)+tau*W.T*W+SigW; Rxinv = np.linalg.inv(Rx) idx = gnomiss; n = len(idx) dy = mat[idx,:] - np.tile(mu,(n,1)); x = tau * Rxinv * W.T * dy.T Td = dy.T*x.T; trS = np.sum(np.multiply(dy,dy)) for n in range(len(gmiss)): i = gmiss[n] dyo = np.copy(mat)[i,nomissidx[i]] - mu[nomissidx[i]] Wm = W[missidx[i],:]; Wo = W[nomissidx[i],:] Rxinv = np.linalg.inv( Rx - tau*Wm.T*Wm ); ex = tau * Wo.T * np.matrix(dyo).T; x = Rxinv * ex dym = Wm * x; dy = np.copy(mat)[i,:] dy[nomissidx[i]] = dyo; dy[missidx[i]] = dym.T yest[i,:] = dy + mu Td = Td + np.matrix(dy).T*x.T; Td[missidx[i],:] = Td[missidx[i],:] + Wm * Rxinv trS = trS + dy*np.matrix(dy).T + len(missidx[i])/tau + np.trace( Wm * Rxinv * Wm.T ) Td = Td/N; trS = trS/N; Rxinv = np.linalg.inv(Rx); Dw = Rxinv + 
tau*Td.T*W*Rxinv + np.diag(alpha)/N; Dwinv = np.linalg.inv(Dw); W = Td * Dwinv; tau = (d+2*gtau0/N)/(trS-np.trace(Td.T*W) + (mu*np.matrix(mu).T*gmu0+2*gtau0/btau0)/N)[0,0]; SigW = Dwinv*(d/N); alpha = (2*galpha0 + d)/ (tau*np.diag(W.T*W)+np.diag(SigW)+2*galpha0/balpha0).T if np.abs(np.log10(tau)-np.log10(tauold)) < 1e-4: break; tauold = tau out = table(yest) out.columns = cols out = (out*sd)+means return out
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def mice_complete(res, strings, epochs = 10, impute_method = "forest"): x = copy(clean(res)); original = copy(x) filled = fillcont(original, method = "median") from sklearn.cross_validation import train_test_split
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def squeeze(df, lower = 0, upper = 1):
    """Clip the values of ``df`` into [lower, upper], returning a copy."""
    clipped = copy(df)
    clipped[clipped < lower] = lower
    clipped[clipped > upper] = upper
    return clipped
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def lightgbm(x_train, x_test, y_train, y_test, noclass = None, lr = 0.05, method = "dart", gpu = False, trees = 100, metric = None, depth = -1, splits=2, leaves=31.123, min_weight=20.123, features=1, bins=5.123, impurity=1e-3+0.000001, jobs=-1, state=None, bagging = 0.1, stop = 10, l1 = 0, l2 = 1, dropout = 0.1, skipdrop = 0.5, verbose = False, info = True):
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def randomforest(x_train, x_test, y_train, y_test, noclass = None, lr = 0.05, method = "dart", gpu = False, trees = 100, metric = None, depth = -1, splits=2, leaves=31.123, min_weight=20, features=1, bins=5.123, impurity=1e-3+0.000001, jobs=-1, state=None, bagging = 0.1, stop = 10, l1 = 0, l2 = 1, dropout = 0.1, skipdrop = 0.5, verbose = False, info = True, addon = False):
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def standardise(data, output = True, method = "robust"): if method == "robust": from sklearn.preprocessing import RobustScaler as scaler elif method == "standard": from sklearn.preprocessing import StandardScaler as scaler elif "min" in method or "max" in method: from sklearn.preprocessing import MinMaxScaler as scaler elif "abs" in method: from sklearn.preprocessing import MaxAbsScaler as scaler
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def normalise(data, output = True, method = "l2"): from sklearn.preprocessing import Normalizer
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def preprocess(train, target, hold = 0.2, dummy = True, impute = "bpca", mice = "boost",remove_outlier = 0, scale = "robust", transform = 0, norm = False, output = True):
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def prefit(test, processor): alldf = reset(vcat(processor['impute'][1],test), drop = True) df = complete(alldf, method = processor['impute'][0], ids = True, mice = processor['impute'][2]) test = df[len(processor['impute'][1]):] if processor['dummies'] == -2: test = dummies(test, dummies = True, ids = True) a = set(processor['columns']) b = set(columns(test)) matching = set.intersection(a,b) not_matching = a.symmetric_difference(matching) test = test[list(matching)]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def holdout(x, y, test = 0.2, seed = 1234, info = True):
    # Thin wrapper over sklearn's train_test_split with a fixed seed.
    from sklearn.model_selection import train_test_split
    # Remind the caller of the 4-tuple unpacking order before returning it.
    if info == True: print("--------------------------------\nx_train, x_test, y_train, y_test")
    return train_test_split(x, y, test_size = test, random_state = seed)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def metrics(noclass, model = "lightgbm"): from sklearn.metrics import mean_squared_error, cohen_kappa_score, r2_score
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def toclasses(preds, classes):
    # Snap continuous predictions onto discrete class labels: round, clip
    # into [min(classes), max(classes)], then integer-cast.
    # NOTE(review): int/min/max/squeeze here are the module's own helpers
    # that shadow the builtins — intentional in this codebase.
    preds = np.round(preds)
    preds = int(squeeze(preds, lower = min(classes), upper = max(classes)))
    return preds
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def predict(test, model, processor):
    # Predict with a fitted model; when the preprocessor recorded discrete
    # target classes (processor['target'] != -1) snap predictions onto them.
    preds = model.predict(array(test))
    if processor['target'] != -1: return toclasses(preds, classes = processor['target'])
    else: return preds
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def plot(x = None, y = None, z = None, hue = None, size = 8, data = None, color = 'afmhot', smooth = True, n = 4): dfdf = copy(data) import matplotlib.pyplot as plt if data is None and x is not None: print("Need to specify data"); return try: if type(x)==str: xlabel = x; x = dfdf[xlabel]; x = dummies(x, dummies = False) except: pass; try: if type(y)==str: ylabel = y; y = dfdf[ylabel]; y = dummies(y, dummies = False) except: pass; try: if type(z)==str: zlabel = z; z = dfdf[zlabel]; z = dummies(z, dummies = False) except: pass; try: if type(hue)==str: huelabel = hue; hue = dfdf[huelabel]; hue = dummies(hue, dummies = False) except: pass; try: xlabel = columns(x)[0]; if xlabel is None: xlabel = "X" except: pass; try: ylabel = columns(y)[0]; if ylabel is None: ylabel = "Y" except: pass; try: zlabel = columns(z)[0]; if zlabel is None: zlabel = "Z" except: pass; try: huelabel = columns(hue)[0]; if huelabel is None: huelabel = "Hue" except: pass; if x is not None and y is not None and z is not None: from mpl_toolkits.mplot3d import Axes3D import matplotlib fig = plt.figure(figsize=(size,size)) ax = Axes3D(fig) if hue is not None: cm = plt.get_cmap(color) try: cNorm = matplotlib.colors.Normalize(vmin=np.min(hue)[0], vmax=np.max(hue)[0]) except: cNorm = matplotlib.colors.Normalize(vmin=np.min(hue), vmax=np.max(hue)) scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cm) ax.scatter(array(x),array(y),array(z),c=scalarMap.to_rgba(array(hue)),s=size*5) ax.set_xlabel(xlabel); ax.set_ylabel(ylabel); ax.set_zlabel(zlabel) scalarMap.set_array(hue) fig.colorbar(scalarMap, pad=0, orientation = "h", shrink = .8) plt.show() else: import matplotlib ax.scatter(x,y,z,s=size*5) ax.set_xlabel(xlabel); ax.set_ylabel(ylabel); ax.set_zlabel(zlabel) plt.show()
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def highlight_larger(s):
    """Pandas styler helper: colour the cells above the one-sided 99% CI."""
    above_ci = s > CI(99, s, L=False)
    return ['background-color: ' + pd_colour if hit else '' for hit in above_ci]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def highlight_one(s):
    """Pandas styler helper: colour the cells whose value equals 1."""
    flags = s == 1
    return ['background-color: ' + pd_colour if hit else '' for hit in flags]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def mean_line(x, **kwargs): ls = {"0":"--"} plt.axvline(mean(x), linestyle =ls[kwargs.get("label","0")], color = kwargs.get("color", "brown"), linewidth=2) txkw = dict(size=12, color = kwargs.get("color", "brown")) plt.text(mean(x),0.03, "MEAN", **txkw)
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def special_statistic(x):
    """Robust centre estimate: (2*median + mean) / 3, ignoring NaNs."""
    med = np.nanmedian(x)
    avg = np.nanmean(x)
    return (2 * med + avg) / 3
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def check_type(x): ctd = nunique(x); parts = (((ctd<=15)&(len(x)>15))|((ctd<len(x)*0.01)&(ctd<=20)&(dtypes(x)=='int'))|((dtypes(x)=='str')&(ctd<=15))) if dtypes(x) != 'str': if parts == True: return 'cat' else: return 'cont' else: if parts == False: return 'str' else: return 'cat'
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def getfunction(c, args, now): if "split" in c: if "ex" in c: expanding = True; else: expanding = False if "letter" in args[c].lower() or "word" in args[c].lower() or "digit" in args[c].lower() or "number" in args[c].lower(): how = '' for j in args[c].split(","): if "letter" in j: how = how+"([a-z])" elif "Letter" in j: how = how+"([a-zA-Z])" elif "LETTER" in j: how = how+"([A-Z])" elif "word" in j: how = how+"([a-z]+)" elif "Word" in j: how = how+"([a-zA-Z]+)" elif "WORD" in j: how = how+"([A-Z]+)" elif "digit" in j.lower(): how = how+"([0-9])" elif "number" in j.lower(): how = how+"([0-9]+)" elif "symbol" in j.lower(): how+'[^\w]+' now = now.str.extract(how, expand = expanding) else: now = now.str.split(args[c], expand = expanding) elif "col" in c or "loc" in c: try: if "le" in args[c]: now = now.str[0:-1] elif "ri" in args[c]: now = now.str[-1:1] except: if type(now) == pd.Series: now = now.str[args[c]] else: now = now[args[c]] elif "not" in c: now = now.str.contains(args[c]); now = reverse(now) elif "has" in c: now = now.str.contains(args[c]) elif "rep" in c: if "symbol" in args[c]: now = now.replace(r'[^\w]',args[c][1]) else: now = now.str.replace(args[c][0], args[c][1]) elif "rem" in c or "strip" in c: if "all" in args[c]: for j in [".",",","+","=","-","_","(",")","[","]","*","$","?","<",">",'"',"'","/","<",">","%"]: now = now.str.replace(j,"") elif "symbol" in args[c]: now = now.replace(r'[^\w]','') else: now = now.str.replace(args[c][0], "") elif "len" in c: if args[c] == 1: now = now.str.len() elif "low" in c: if args[c] == 1: now = now.str.lower() elif "up" in c: if args[c] == 1: now = now.str.upper() elif "count" in c: if args[c] == ".": now = now.str.count(r"(\.)") elif args[c] == "(": now = now.str.count(r"(\()") elif args[c] == ")": now = now.str.count(r"(\))") elif args[c] == "[": now = now.str.count(r"(\[)") elif args[c] == "]": now = now.str.count(r"(\])") elif args[c] == "{": now = now.str.count(r"(\{)") elif args[c] == "}": now = 
now.str.count(r"(\})") elif 'symbol' in args[c]: now = now.str.count(r'[^\w]') elif 'sym' in args[c]: now = now.str.count(r'[\w]') elif 'num' in args[c] or 'dig' in args[c]: now = now.str.count(r'[\d]') else: now = now.str.count(args[c]) elif "df" in c or "table" in c or "series" in c: now = now.apply(pd.Series) return now
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def get(x, **args):
    """Apply each keyword transformation in ``args`` to a copy of ``x``,
    chaining the results through :func:`getfunction` in keyword order.

    Fix: removed an unused ``import re`` — the regex work happens inside
    ``getfunction`` through pandas string methods, and ``re`` was never
    referenced here.
    """
    now = copy(x)
    for key in args:
        now = getfunction(key, args, now)
    return now
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def flatten(y, split = " ", dropna = True, symbols = False, lower = True):
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def col_split(x, split, dropna, symbols, lower):
    """Flatten a column into a list, optionally splitting each value and
    stripping symbols / lower-casing via :func:`get`.

    NOTE(review): the ``dropna`` flag is accepted but never used here.
    """
    if split is None:
        return list(x.fillna(np.nan).values.flatten())
    if symbols == False:
        if lower == True:
            pieces = get(x, lower = True, rem = "all", splitex = split)
        else:
            pieces = get(x, rem = "all", splitex = split)
    else:
        pieces = get(x, splitex = split)
    return list(pieces.fillna(np.nan).values.flatten())
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def wordfreq(x, hist = True, first = 15, separate = True): if separate == False or type(x) == pd.Series: df = reset(table(cunique(flatten(x))))[0:first] df.columns = ["Word","Count"]
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def getwords(y, first = 10): x = copy(y) df = wordfreq(x, first = first, hist = False) for col in objcol(x): cols = get(x[col], lower = True, rem = "all", table = True) for j in df[df["Column"]==col]["Word"]: x["Count="+str(j)] = get(cols[0], count = j) return x
danielhanchen/sciblox
[ 48, 1, 48, 1, 1500443500 ]
def __init__(self, driver, **kwargs):
    # Store configuration options and the selenium driver handle.
    self.options = kwargs
    self.driver = driver
    # Set viewport position, size
    self.driver.set_window_position(0, 0)
    self.set_viewport()
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def _create_dir(directory): """Recursively create a directory .. note:: From needle https://github.com/python-needle/needle/blob/master/needle/cases.py#L125 :param str directory: Directory path to create :return: """ try: os.makedirs(directory) except OSError as err: if err.errno == EEXIST and os.path.isdir(directory): return raise err
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def _get_element_dimensions(element):
    """Returns an element's position and size as a dict.

    :param WebElement element: Element to get dimensions for
    :return:
    :raises ValueError: when *element* is not a WebElement
    """
    if not isinstance(element, WebElement):
        raise ValueError("element must be a WebElement")
    position = element.location
    dims = element.size
    return {
        'top': int(position['y']),
        'left': int(position['x']),
        'width': int(dims['width']),
        'height': int(dims['height'])
    }
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def _get_ratio(image_size, window_size): return max(( math.ceil(image_size[0] / float(window_size[0])), math.ceil(image_size[1] / float(window_size[1])) ))
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def baseline_dir(self): """Return baseline image path :return: :rtype: str """ return self.options.get('baseline_dir', DEFAULT_BASELINE_DIR)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def baseline_dir(self, value):
    """Set baseline image directory

    :param str value: File path
    :return:
    """
    # NOTE(review): `basestring` is Python 2 only — this NameErrors on
    # Python 3 unless a compatibility shim provides it; confirm the
    # supported interpreter range.
    assert isinstance(value, basestring)
    self.options['baseline_dir'] = value
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def cleanup_on_success(self): """Returns True, if cleanup on success flag is set :return: :rtype: bool """ return self.options.get('cleanup_on_success', False)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def cleanup_on_success(self, value):
    """Set cleanup on success flag

    :param bool value: Cleanup on success flag
    :return:
    """
    self.options['cleanup_on_success'] = True if value else False
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def engine(self): """Return image processing engine :return: """ return import_from_string(self.engine_class)()
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def engine_class(self): """Return image processing engine name :return: :rtype: str """ return self.ENGINES.get(self.options.get('needle_engine', 'pil').lower(), DEFAULT_ENGINE)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def engine_class(self, value):
    """Set image processing engine name

    :param str value: Image processing engine name (pil, imagemagick, perceptualdiff)
    :return:
    """
    normalized = value.lower()
    assert normalized in self.ENGINES
    self.options['needle_engine'] = normalized
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def get_screenshot_as_image(self, element=None, exclude=None): """ :param WebElement element: Crop image to element (Optional) :param list exclude: Elements to exclude :return: """ image = self.get_screenshot(element) # Mask elements in exclude if element is not included if isinstance(exclude, (list, tuple)) and exclude and not element: # Gather all elements to exclude elements = [self._find_element(element) for element in exclude] elements = [element for element in elements if element] canvas = ImageDraw.Draw(image) window_size = self._get_window_size() image_size = image.size ratio = self._get_ratio(image_size, window_size) for ele in elements: canvas.rectangle([point * ratio for point in self._get_element_rect(ele)], fill=ImageColor.getrgb('black')) del canvas return image
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def output_dir(self): """Return output image path :return: :rtype: str """ return self.options.get('output_dir', DEFAULT_OUTPUT_DIR)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def output_dir(self, value):
    """Set output image directory

    :param str value: File path
    :return:
    """
    # NOTE(review): `basestring` is Python 2 only — this NameErrors on
    # Python 3 unless a compatibility shim provides it; confirm the
    # supported interpreter range.
    assert isinstance(value, basestring)
    self.options['output_dir'] = value
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def save_baseline(self): """Returns True, if save baseline flag is set :return: :rtype: bool """ return self.options.get('save_baseline', False)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def save_baseline(self, value):
    """Set save baseline flag

    :param bool value: Save baseline flag
    :return:
    """
    self.options['save_baseline'] = True if value else False
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]
def viewport_size(self): """Return setting for browser window size :return: :rtype: str """ return self.options.get('viewport_size', DEFAULT_VIEWPORT_SIZE)
jlane9/pytest-needle
[ 16, 8, 16, 7, 1495224710 ]