Unnamed: 0 (int64): 0 to 15.9k
cleaned_code (stringlengths): 67 to 124k
cleaned_prompt (stringlengths): 168 to 30.3k
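Each record below pairs the integer index (Unnamed: 0) with a cleaned_code string and the cleaned_prompt it corresponds to. As a rough sketch of how such an export could be inspected (the file name here is a placeholder, not part of the dataset):

import pandas as pd

# hypothetical local export of the table shown below
df = pd.read_csv("cleaned_code_prompt_pairs.csv")   # placeholder path
print(df.columns.tolist())                          # expected: ['Unnamed: 0', 'cleaned_code', 'cleaned_prompt']
row = df.iloc[0]
print(len(row["cleaned_code"]), len(row["cleaned_prompt"]))   # per-record string lengths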
13,600
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np from random import randint from pandas import Series,DataFrame import seaborn as sns import matplotlib.pyplot as plt import matplotlib import math %matplotlib inline plt.rcParams['figure.figsize'] = (8, 6) plt.rcParams['font.size'] = 14 from sklearn.datasets import make_classification df = pd.DataFrame(columns=['X0','X1','Y']) X, Y = make_classification(n_samples=1000, n_features=2, class_sep=0.7, hypercube=True, \ n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1) train_test_split = 0.7 train_size = int(X.shape[0]*train_test_split) test_size = X.shape[0] - train_size X_train,Y_train,X_test,Y_test = X[0:train_size],Y[0:train_size],X[train_size:],Y[train_size:] df['X0'],df['X1'],df['Y'] = X[:, 0],X[:, 1],Y df['type'] = 'train' df.loc[train_size:,'type'] = 'test' #plt.title("four clusters", fontsize='small') #plt.scatter(X[:, 0], X[:, 1], marker='o', c=Y) sns.lmplot(x='X0',y='X1',data=df,hue='Y',fit_reg=False, col='type', size=6) from math import log def entropy(Y): if (len(Y) == 0): return 0 p = np.count_nonzero(Y)*1.0/Y.shape[0] if(p == 0 or p == 1): return 0 else: return -p*log(p,2) -(1-p)*log(1-p,2) def find_cutoff(X,Y, split_size): # search tree, try 5 cutoff points, each axis X_min = np.min(X) X_max = np.max(X) E_root = entropy(Y) E_X_best = E_root X_cutoff_best = 0.0 print 'X_min = {:.2f},X_max = {:.2f}'.format(X_min,X_max) print 'root entropy = {:.3f}'.format(E_root) for i in range(0,split_size): X_cutoff_point = round(X_min + (X_max - X_min)/(split_size+1)*(i+1),3) index_lower = (X < X_cutoff_point) index_higer = np.logical_not(index_lower) len_lower,len_higher = len(Y[index_lower]),len(Y[index_higer]) len_all = len_lower + len_higher E_lower = entropy(Y[index_lower]) E_higher = entropy(Y[index_higer]) E_X = 1.0*len_lower/len_all*E_lower + 1.0*len_higher/len_all*E_higher if(E_X < E_X_best): E_X_best = E_X X_cutoff_best = X_cutoff_point print 'Chosen X cut_off = {:.2f}, E = {:.3f}'.format(X_cutoff_best,E_X_best) print '' return X_cutoff_best,E_X_best # search tree, try 5 cutoff points, each axis # Test X0 first print 'X0' L0_cut,E_L0 = find_cutoff(X_train[:,0],Y_train,5) index_left = (X_train[:,0] < L0_cut) index_right = np.logical_not(index_left) #X0 left side, Test X1 print 'X1 for X0 left side' L1_left_cut,E_L1_left = find_cutoff(X_train[index_left,1],Y_train[index_left],5) print 'X1 for X0 right side' L1_right_cut,E_L1_right = find_cutoff(X_train[index_right,1],Y_train[index_right],5) len_left,len_right = len(Y_train[index_left]),len(Y_train[index_right]) len_all = len_left + len_right E_fin1 = 1.0*len_left/len_all*E_L1_left + 1.0*len_right/len_all*E_L1_right cut = [L0_cut,L1_left_cut,L1_right_cut] print 'final entropy = {:.3f}'.format(E_fin1) # search tree, try 5 cutoff points, each axis # Test X1 print 'X1' L0_cut,E_L0 = find_cutoff(X_train[:,0],Y_train,5) index_left = (X_train[:,1] < L0_cut) index_right = np.logical_not(index_left) #X0 left side, Test X1 print 'X0 for X1 left side' L1_left_cut,E_L1_left = find_cutoff(X_train[index_left,0],Y_train[index_left],5) print 'X0 for X1 right side' L1_right_cut,E_L1_right = find_cutoff(X_train[index_right,0],Y_train[index_right],5) len_left,len_right = len(Y_train[index_left]),len(Y_train[index_right]) len_all = len_left + len_right E_fin2 = 1.0*len_left/len_all*E_L1_left + 1.0*len_right/len_all*E_L1_right print 'final entropy = {:.3f}'.format(E_fin2) var = [0,1] if E_fin2 < E_fin1: var = [1,0] cut = [L0_cut,L1_left_cut,L1_right_cut] E_fin = 
min(E_fin1,E_fin1) i_v0_l = (X_train[:,var[0]] < cut[0]) i_v0_h = np.logical_not(i_v0_l) i_v1_ll = (X_train[:,var[1]] < cut[1]) & i_v0_l i_v1_lh = np.logical_not(i_v1_ll) & i_v0_l i_v1_hl = (X_train[:,var[1]] < cut[2]) & i_v0_h i_v1_hh = np.logical_not(i_v1_hl) & i_v0_h p_v1_ll = np.argmax(np.bincount(Y_train[i_v1_ll])) #find mode p_v1_lh = np.argmax(np.bincount(Y_train[i_v1_lh])) p_v1_hl = np.argmax(np.bincount(Y_train[i_v1_hl])) p_v1_hh = np.argmax(np.bincount(Y_train[i_v1_hh])) print 'Start pop. class 0 and 1 = {}/{}'.format(len(Y_train) - np.sum(Y_train),np.sum(Y_train)) print '=> X{}, cutoff at < {:.2f}, pop. = {}/{}'.format( var[0],cut[0], \ len(Y_train[i_v0_l]) - np.sum(Y_train[i_v0_l]),np.sum(Y_train[i_v0_l])) print '===> X{}, cutoff at < {:.2f}, pop. = {}/{}, predict {}'.format(var[1],cut[1], \ len(Y_train[i_v1_ll]) - np.sum(Y_train[i_v1_ll]),np.sum(Y_train[i_v1_ll]),p_v1_ll) print '===> X{}, cutoff at >= {:.2f}, pop. = {}/{}, predict {}'.format(var[1],cut[1], \ len(Y_train[i_v1_lh]) - np.sum(Y_train[i_v1_lh]),np.sum(Y_train[i_v1_lh]),p_v1_lh) print '=> X{}, cutoff at >= {:.2f}, pop. = {}/{}'.format( var[0],cut[0], \ len(Y_train[i_v0_h]) - np.sum(Y_train[i_v0_h]),np.sum(Y_train[i_v0_h])) print '===> X{}, cutoff at < {:.2f}, pop. = {}/{}, predict {}'.format(var[1],cut[2], \ len(Y_train[i_v1_hl]) - np.sum(Y_train[i_v1_hl]),np.sum(Y_train[i_v1_hl]),p_v1_hl) print '===> X{}, cutoff at >= {:.2f}, pop. = {}/{}, predict {}'.format(var[1],cut[2], \ len(Y_train[i_v1_hh]) - np.sum(Y_train[i_v1_hh]),np.sum(Y_train[i_v1_hh]),p_v1_hh) #calculate Y_pred Y_pred = np.zeros(shape=Y_test.shape) t_l = (X_test[:,var[0]] < cut[0]) t_h = np.logical_not(t_l) t_ll = (X_test[:,var[1]] < cut[1]) & t_l t_lh = np.logical_not(t_ll) & t_l t_hl = (X_test[:,var[1]] < cut[2]) & t_h t_hh = np.logical_not(t_hl) & t_h Y_pred[t_ll] = p_v1_ll Y_pred[t_lh] = p_v1_lh Y_pred[t_hl] = p_v1_hl Y_pred[t_hh] = p_v1_hh from sklearn.metrics import confusion_matrix cm_mat = confusion_matrix(Y_test,Y_pred) accuracy = np.trace(cm_mat)*100.0/len(Y_test) print 'Test set Accuracy = {:.2f}%'.format(accuracy) df_temp = pd.DataFrame(cm_mat.flatten()[np.newaxis].T,columns = ['values']) df_temp = pd.DataFrame(cm_mat.flatten()[np.newaxis].T,columns = ['values']) plt.figure(figsize = (6,4),dpi=600) sns.heatmap(cm_mat.T, cbar=True ,annot=True, fmt=',.0f') plt.xlabel('Truth') plt.ylabel('Predicted') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Make Random dataset. Step2: Start with X0 split, then X1 split. Step3: Now try out X1 first then X0.
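The record above picks each cut-off by scanning a small grid of candidate points and keeping the one with the lowest weighted child entropy. A condensed Python 3 sketch of that split-selection step (the 5-candidate grid and the entropy definition mirror the record; everything else is simplified):

import numpy as np

def entropy(y):
    if len(y) == 0:
        return 0.0
    p = np.count_nonzero(y) / len(y)
    if p == 0.0 or p == 1.0:
        return 0.0
    return -p * np.log2(p) - (1 - p) * np.log2(1 - p)

def best_cutoff(x, y, n_candidates=5):
    # evenly spaced candidate cut-offs between min(x) and max(x)
    best_e, best_cut = entropy(y), None
    for i in range(1, n_candidates + 1):
        cut = x.min() + (x.max() - x.min()) * i / (n_candidates + 1)
        lower = x < cut
        e = (lower.sum() * entropy(y[lower]) + (~lower).sum() * entropy(y[~lower])) / len(y)
        if e < best_e:
            best_e, best_cut = e, cut
    return best_cut, best_e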
13,601
<ASSISTANT_TASK:> Python Code: from beampy import * # Remove quiet=True to get beampy compilation outputs doc = document(quiet=True) with slide('Animation from svg files'): animatesvg("./ressources/svg_anims/*.svg", width="600") display_matplotlib(gcs()) import pylab as p with slide('Animation from matplotlib figures'): anim_figs = [] for i in range(20): fig = p.figure() x = p.linspace(0,2*p.pi) p.plot(x, p.sin(x+i)) p.plot(x, p.sin(x+i+p.pi)) p.close(fig) anim_figs += [fig] animatesvg(anim_figs) save('./examples_html_outputs/animatesvg.html') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: From matplotlib figure list
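A stripped-down version of the matplotlib branch of the record above, using the same beampy calls but fewer frames and an arbitrary sine-wave animation:

import pylab as p
from beampy import document, slide, animatesvg, save

doc = document(quiet=True)
with slide('Animation from matplotlib figures'):
    frames = []
    for i in range(10):              # arbitrary frame count
        fig = p.figure()
        x = p.linspace(0, 2 * p.pi)
        p.plot(x, p.sin(x + i))      # shift the curve a little each frame
        p.close(fig)
        frames.append(fig)
    animatesvg(frames)               # animatesvg accepts a list of figures, as in the record
save('./animatesvg_minimal.html')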
13,602
<ASSISTANT_TASK:> Python Code: %load_ext autoreload %autoreload 2 %matplotlib inline import matplotlib.pyplot as plt import numpy as np import algos import features import parametric import policy import chicken from agents import OffPolicyAgent, OnPolicyAgent from rlbench import * # define the experiment num_states = 8 num_features = 8 # set up environment env = chicken.Chicken(num_states) # set up policy pol_pi = policy.FixedPolicy({s: {0: 1} for s in env.states}) # set feature mapping phi = features.RandomBinary(num_features, num_features // 2, random_seed=101011) # phi = features.Int2Unary(num_states) # run the algorithms for enough time to get reliable convergence num_steps = 20000 # state-dependent gamma gm_dct = {s: 0.9 for s in env.states} gm_dct[0] = 0 gm_func = parametric.MapState(gm_dct) gm_p_func = parametric.MapNextState(gm_dct) # the TD(1) solution should minimize the mean-squared error update_params = { 'gm': gm_func, 'gm_p': gm_p_func, 'lm': 1.0, } lstd_1 = OnPolicyAgent(algos.LSTD(phi.length), pol_pi, phi, update_params) run_episode(lstd_1, env, num_steps) mse_values = lstd_1.get_values(env.states) # the TD(0) solution should minimize the MSPBE update_params = { 'gm': gm_func, 'gm_p': gm_p_func, 'lm': 0.0, } lstd_0 = OnPolicyAgent(algos.LSTD(phi.length), pol_pi, phi, update_params) run_episode(lstd_0, env, num_steps) mspbe_values = lstd_0.get_values(env.states) # Plot the states against their target values xvals = list(sorted(env.states)) y_mse = [mse_values[s] for s in xvals] y_mspbe = [mspbe_values[s] for s in xvals] # Mean-square error optimal values plt.bar(xvals, y_mse) plt.show() # MSPBE optimal values plt.bar(xvals, y_mspbe) plt.show() y_mse y_mspbe algos.algo_registry # set up algorithm parameters update_params = { 'alpha': 0.02, 'beta': 0.002, 'gm': 0.9, 'gm_p': 0.9, 'lm': 0.0, 'lm_p': 0.0, 'interest': 1.0, } # Define the target policy pol_pi = policy.FixedPolicy({s: {0: 1} for s in env.states}) # Define the behavior policy pol_mu = policy.FixedPolicy({s: {0: 1} if s < 4 else {0: 0.5, 1: 0.5} for s in env.states}) # Run all available algorithms max_steps = 50000 for name, alg in algos.algo_registry.items(): # Set up the agent, run the experiment, get state-values agent = OffPolicyAgent(alg(phi.length), pol_pi, pol_mu, phi, update_params) mse_lst = run_errors(agent, env, max_steps, mse_values) mspbe_lst = run_errors(agent, env, max_steps, mspbe_values) # Plot the errors xdata = np.arange(max_steps) plt.plot(xdata, mse_lst) plt.plot(xdata, mspbe_lst) # plt.plot(xdata, np.log(mse_lst)) # plt.plot(xdata, np.log(mspbe_lst)) # Format and label the graph plt.ylim(0, 2) plt.title(name) plt.xlabel('Timestep') plt.ylabel('Error') plt.show() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Assessing Learning Algorithms Step2: What do the target values look like? Step3: Actual Testing Step4: These algorithms are given to OffPolicyAgent, which also takes care of the function approximation and manages the parameters given to the learning algorithm.
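The assessment in the record comes down to measuring how far an agent's value estimates drift from fixed target values (the lambda=1 and lambda=0 LSTD solutions). A small sketch of one such error measure, assuming state values come back as a state-to-value mapping like get_values returns in the record:

import numpy as np

def value_error(estimated, target):
    # root-mean-squared difference over the states in the target mapping
    states = sorted(target)
    diffs = np.array([estimated.get(s, 0.0) - target[s] for s in states])
    return np.sqrt(np.mean(diffs ** 2))

# e.g. value_error(lstd_0.get_values(env.states), mse_values)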
13,603
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np from math import radians, cos, sin, asin, sqrt import itertools from sklearn import neighbors from sklearn import preprocessing from sklearn import ensemble from sklearn.model_selection import LeaveOneGroupOut, LeavePGroupsOut import inversion import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('../facies_vectors.csv') def estimate_dphi(df): return ((4*(df['PHIND']**2) - (df['DeltaPHI']**2))**0.5 - df['DeltaPHI']) / 2 def estimate_rhob(df): return (2.71 - (df['DPHI_EST']/100) * 1.71) def estimate_nphi(df): return df['DPHI_EST'] + df['DeltaPHI'] def compute_rhomaa(df): return (df['RHOB_EST'] - (df['PHIND'] / 100)) / (1 - df['PHIND'] / 100) def compute_umaa(df): return ((df['PE'] * df['RHOB_EST']) - (df['PHIND']/100 * 0.398)) / (1 - df['PHIND'] / 100) df['DPHI_EST'] = df.apply(lambda x: estimate_dphi(x), axis=1).astype(float) df['RHOB_EST'] = df.apply(lambda x: estimate_rhob(x), axis=1) df['NPHI_EST'] = df.apply(lambda x: estimate_nphi(x), axis=1) df['RHOMAA_EST'] = df.apply(lambda x: compute_rhomaa(x), axis=1) pe = df.dropna() PE = pe['PE'].values wells = pe['Well Name'].values drop_list_pe = ['Formation', 'Well Name', 'Facies', 'Depth', 'PE', 'RELPOS'] fv_pe = pe.drop(drop_list_pe, axis=1).values X_pe = preprocessing.StandardScaler().fit(fv_pe).transform(fv_pe) y_pe = PE reg = neighbors.KNeighborsRegressor(n_neighbors=40, weights='distance') logo = LeaveOneGroupOut() f1knn_pe = [] for train, test in logo.split(X_pe, y_pe, groups=wells): well_name = wells[test[0]] reg.fit(X_pe[train], y_pe[train]) score = reg.fit(X_pe[train], y_pe[train]).score(X_pe[test], y_pe[test]) print("{:>20s} {:.3f}".format(well_name, score)) f1knn_pe.append(score) print("-Average leave-one-well-out F1 Score: %6f" % (np.mean(f1knn_pe))) reg.fit(X_pe, y_pe) fv_apply = df.drop(drop_list_pe, axis=1).values X_apply = preprocessing.StandardScaler().fit(fv_apply).transform(fv_apply) df['PE_EST'] = reg.predict(X_apply) df.PE = df.PE.combine_first(df.PE_EST) df['UMAA_EST'] = df.apply(lambda x: compute_umaa(x), axis=1) df[df.GR < 125].plot(kind='scatter', x='UMAA_EST', y='RHOMAA_EST', c='GR', figsize=(8,6)) plt.ylim(3.1, 2.2) plt.xlim(0.0, 17.0) plt.plot([4.8, 9.0, 13.8, 4.8], [2.65, 2.87, 2.71, 2.65], c='r') plt.plot([4.8, 11.9, 13.8, 4.8], [2.65, 3.06, 2.71, 2.65], c='g') plt.scatter([4.8], [2.65], s=50, c='r') plt.scatter([9.0], [2.87], s=50, c='r') plt.scatter([13.8], [2.71], s=50, c='r') plt.scatter([11.9], [3.06], s=50, c='g') plt.text(2.8, 2.65, 'Quartz', backgroundcolor='w') plt.text(14.4, 2.71, 'Calcite', backgroundcolor='w') plt.text(9.6, 2.87, 'Dolomite', backgroundcolor='w') plt.text(12.5, 3.06, 'Illite', backgroundcolor='w') plt.text(7.0, 2.55, "gas effect", ha="center", va="center", rotation=-55, size=8, bbox=dict(boxstyle="larrow,pad=0.3", fc="pink", ec="red", lw=2)) plt.text(15.0, 2.78, "barite?", ha="center", va="center", rotation=0, size=8, bbox=dict(boxstyle="rarrow,pad=0.3", fc="yellow", ec="orange", lw=2)) # QTZ-CAL-CLAY ur1 = inversion.UmaaRhomaa() ur1.set_dol_uma(11.9) ur1.set_dol_rhoma(3.06) # QTZ-CAL-DOL ur2 = inversion.UmaaRhomaa() df['UR_QTZ'] = np.nan df['UR_CLY'] = np.nan df['UR_CAL'] = np.nan df['UR_DOL'] = np.nan df.ix[df.GR >= 40, 'UR_QTZ'] = df.ix[df.GR >= 40].apply(lambda x: ur1.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR >= 40, 'UR_CLY'] = df.ix[df.GR >= 40].apply(lambda x: ur1.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR >= 40, 'UR_CAL'] = df.ix[df.GR >= 
40].apply(lambda x: ur1.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR >= 40, 'UR_DOL'] = 0 df.ix[df.GR < 40, 'UR_QTZ'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR < 40, 'UR_DOL'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR < 40, 'UR_CAL'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1) df.ix[df.GR < 40, 'UR_CLY'] = 0 facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] fms = df.Formation.unique() fig, ax = plt.subplots(int(len(fms) / 2), 2, sharey=True, sharex=True, figsize=(5,10)) for i, fm in enumerate(fms): facies_counts = df[df.Formation == fm]['Facies'].value_counts().sort_index() colors = [facies_colors[i-1] for i in facies_counts.index] ax[int(i/2), i%2].bar(facies_counts.index, height=facies_counts, color=colors) ax[int(i/2), i%2].set_title(fm, size=8) fm_groups = [['A1 SH', 'B1 SH', 'B2 SH', 'B3 SH', 'B4 SH'], ['B5 SH', 'C SH'], ['A1 LM', 'C LM'], ['B1 LM', 'B3 LM', 'B4 LM'], ['B2 LM', 'B5 LM']] fm_group_dict = {fm:i for i, l in enumerate(fm_groups) for fm in l} df['FM_GRP'] = df.Formation.map(fm_group_dict) df = pd.get_dummies(df, prefix='FM_GRP', columns=['FM_GRP']) def archie(df): return np.sqrt(0.08 / ((df.PHIND ** 2) * (10 ** df.ILD_log10))) df['SW'] = df.apply(lambda x: archie(x), axis=1) # modified from jesper latlong = pd.DataFrame({"SHRIMPLIN": [37.978076, -100.987305], # "ALEXANDER D": [37.6747257, -101.1675259], # "SHANKLE": [38.0633799, -101.3920543], # "LUKE G U": [37.4499614, -101.6121913], # "KIMZEY A": [37.12289, -101.39697], # "CROSS H CATTLE": [37.9105826, -101.6464517], # "NOLAN": [37.7866294, -101.0451641], #? "NEWBY": [37.3172442, -101.3546995], # "CHURCHMAN BIBLE": [37.3497658, -101.1060761], #? "STUART": [37.4857262, -101.1391063], # "CRAWFORD": [37.1893654, -101.1494994], #? 
"Recruit F9": [0,0]}) def haversine(lon1, lat1, lon2, lat2): Calculate the great circle distance between two points on the earth (specified in decimal degrees) # convert decimal degrees to radians lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 c = 2 * asin(sqrt(a)) km = 6367 * c return km def get_lat(df): return latlong[df['Well Name']][0] def get_long(df): return latlong[df['Well Name']][1] df['LAT'] = df.apply(lambda x: get_lat(x), axis=1) df['LON'] = df.apply(lambda x: get_long(x), axis=1) dist_dict = {} for k in latlong: dict_name = k + '_DISTANCES' k_dict = {} lat1 = latlong[k][0] lon1 = latlong[k][1] for l in latlong: lat2 = latlong[l][0] lon2 = latlong[l][1] if l == 'Recruit F9': dist = haversine(0, 0, 0, 0) elif k == "Recruit F9": dist = haversine(0, 0, 0, 0) else: dist = haversine(lon1, lat1, lon2, lat2) k_dict[l] = dist dist_dict[dict_name] = k_dict for i in dist_dict: df[i] = np.nan for j in dist_dict[i]: df.loc[df['Well Name'] == j, i] = dist_dict[i][j] df0 = df[(df.PHIND <= 40) & (df['Well Name'] != 'CROSS H CATTLE')] facies = df0['Facies'].values wells = df0['Well Name'].values keep_list0 = ['GR', 'ILD_log10', 'PHIND', 'PE', 'NM_M', 'RELPOS', 'RHOB_EST', 'UR_CLY', 'UR_CAL'] fv0 = df0[keep_list0].values clf0 = neighbors.KNeighborsClassifier(n_neighbors=56, weights='distance') X0 = preprocessing.StandardScaler().fit(fv0).transform(fv0) y0 = facies clf0.fit(X0, y0) X1 = preprocessing.StandardScaler().fit(df[keep_list0].values).transform(df[keep_list0].values) knn_pred = clf0.predict(X1) df['KNN_FACIES'] = knn_pred df1 = df.dropna() df1 = df1[(df1['Well Name'] != 'CROSS H CATTLE') & (df.PHIND < 40.0)] facies = df1['Facies'].values wells = df1['Well Name'].values drop_list = ['Formation', 'Well Name', 'Facies', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI', 'UMAA_EST', 'UR_QTZ', 'PE_EST', 'Recruit F9_DISTANCES', 'KIMZEY A_DISTANCES', 'NEWBY_DISTANCES', 'ALEXANDER D_DISTANCES', 'NOLAN_DISTANCES', 'FM_GRP_3'] fv = df1.drop(drop_list, axis=1).values X = preprocessing.StandardScaler().fit(fv).transform(fv) y = facies ne_grid = [150] mf_grid = [10] md_grid = [20] msl_grid = [5] mss_grid = [20] keys = ['n_estimators', 'max_features', 'max_depth', 'min_samples_leaf', 'min_samples_split'] param_sets = itertools.product(ne_grid, mf_grid, md_grid, msl_grid, mss_grid) param_grid = [dict(zip(keys, i)) for i in param_sets] clf_list = [] for i, d in enumerate(param_grid): clf = ensemble.RandomForestClassifier(n_estimators=d['n_estimators'], class_weight='balanced', min_samples_leaf=d['min_samples_leaf'], min_samples_split=d['min_samples_split'], max_features=d['max_features'], max_depth=d['max_depth'], n_jobs=-1) lpgo = LeavePGroupsOut(n_groups=2) f1rfc = [] for train, test in lpgo.split(X, y, groups=wells): clf.fit(X[train], y[train]) score = clf.fit(X[train], y[train]).score(X[test], y[test]) f1rfc.append(score) print("Average leave-two-wells-out F1 Score: %6f" % (np.mean(f1rfc))) clf_list.append((clf, np.mean(f1rfc))) np.max([i[1] for i in clf_list]) list(zip(df1.drop(drop_list, axis=1).columns, clf.feature_importances_)) # refit model to entire training set clf.fit(X, y) # load validation data vd = pd.read_csv('../validation_data_nofacies.csv') # compute extra log data features vd['DPHI_EST'] = vd.apply(lambda x: estimate_dphi(x), axis=1).astype(float) vd['RHOB_EST'] = vd.apply(lambda x: estimate_rhob(x), axis=1) vd['NPHI_EST'] = vd.apply(lambda x: 
estimate_nphi(x), axis=1) vd['RHOMAA_EST'] = vd.apply(lambda x: compute_rhomaa(x), axis=1) vd['UMAA_EST'] = vd.apply(lambda x: compute_umaa(x), axis=1) # Estimate lithology using Umaa Rhomaa solution vd['UR_QTZ'] = np.nan vd['UR_CLY'] = np.nan vd['UR_CAL'] = np.nan vd['UR_DOL'] = np.nan vd.ix[vd.GR >= 40, 'UR_QTZ'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR >= 40, 'UR_CLY'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR >= 40, 'UR_CAL'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR >= 40, 'UR_DOL'] = 0 vd.ix[vd.GR < 40, 'UR_QTZ'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR < 40, 'UR_DOL'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR < 40, 'UR_CAL'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1) vd.ix[vd.GR < 40, 'UR_CLY'] = 0 # Formation grouping vd['FM_GRP'] = vd.Formation.map(fm_group_dict) vd = pd.get_dummies(vd, prefix='FM_GRP', columns=['FM_GRP']) # Water saturation vd['SW'] = vd.apply(lambda x: archie(x), axis=1) # Lat-long features vd['LAT'] = vd.apply(lambda x: get_lat(x), axis=1) vd['LON'] = vd.apply(lambda x: get_long(x), axis=1) for i in dist_dict: vd[i] = np.nan for j in dist_dict[i]: vd.loc[vd['Well Name'] == j, i] = dist_dict[i][j] # Compute first guess at facies with KNN X2 = preprocessing.StandardScaler().fit(vd[keep_list0].values).transform(vd[keep_list0].values) vd['KNN_FACIES'] = clf0.predict(X2) # Apply final model drop_list1 = ['Formation', 'Well Name', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI', 'UMAA_EST', 'UR_QTZ', 'Recruit F9_DISTANCES', 'KIMZEY A_DISTANCES', 'NEWBY_DISTANCES', 'ALEXANDER D_DISTANCES', 'NOLAN_DISTANCES', 'FM_GRP_3'] fv_vd1 = vd.drop(drop_list1, axis=1).values X_vd1 = preprocessing.StandardScaler().fit(fv_vd1).transform(fv_vd1) vd_predicted_facies = clf.predict(X_vd1) vd['Facies'] = vd_predicted_facies vd.to_csv('RFC_submission_4_predictions.csv') vd_predicted_facies <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load training data Step2: Build features Step3: Because solving the sum of squares equation involved the quadratic formula, in some cases imaginary numbers result due to porosities being negative, which is what the warning below is about. Step4: Regress missing PE values Step5: Apply regression model to missing PE values and merge back into dataframe Step6: Compute UMAA for lithology model Step7: Umaa Rhomaa plot Step8: Here I use matrix inversion to "solve" the ternary plot for each lithologic component. Essentially each datapoint is a mix of the three components defined by the ternary diagram, with abundances of each defined by the relative distances from each endpoint. I use a GR cutoff of 40 API to determine when to use either the QTZ-CAL-DOL or QTZ-CAL-CLAY ternary solutions. In other words, it is assumed that below 40 API, there is 0% clay, and above 40 API there is 0% dolomite, and also that these four lithologic components are the only components in these rocks. Admittedly it's not a great assumption, especially since the ternary plot indicates other stuff is going on. For example the high Umaa datapoints near the Calcite endpoint may indicate some heavy minerals (e.g., pyrite) or even barite-weighted mud. The "pull" of datapoints to the northwest quadrant probably reflects some gas effect, so my lithologies in those gassy zones will be skewed. Step9: Plot facies by formation to see if the Formation feature will be useful Step10: Group formations by similar facies distributions Step11: Make dummy variables from the categorical Formation feature Step12: Compute Archie water saturation Step14: Get distances between wells Step15: Add latitude and longitude as features, add distances to every other well as features Step16: First guess at facies using KNN Step17: Fit RandomForect model and apply LeavePGroupsOut test Step18: Apply model to validation dataset
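The inversion.UmaaRhomaa helper used in the record is an external module and not shown, but the idea in Step 8 can be written out directly: each (Umaa, Rhomaa) point is treated as a mixture of three endpoint minerals whose fractions sum to one, which gives a 3x3 linear system. A sketch using the quartz/calcite/dolomite endpoints quoted on the crossplot above (this stands in for, and is not, the actual inversion module):

import numpy as np

# endpoint (Umaa, Rhomaa) values taken from the crossplot in the record
endpoints = {
    "quartz":   (4.8, 2.65),
    "calcite":  (13.8, 2.71),
    "dolomite": (9.0, 2.87),
}

def mineral_fractions(umaa, rhomaa):
    names = list(endpoints)
    # columns are the endpoints; the last row forces the fractions to sum to 1
    A = np.array([[endpoints[n][0] for n in names],
                  [endpoints[n][1] for n in names],
                  [1.0, 1.0, 1.0]])
    b = np.array([umaa, rhomaa, 1.0])
    return dict(zip(names, np.linalg.solve(A, b)))

# points plotting outside the triangle give fractions outside [0, 1]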
13,604
<ASSISTANT_TASK:> Python Code: %matplotlib inline path = "data/dogscats/" #path = "data/dogscats/sample/" from __future__ import division,print_function import os, json from glob import glob import numpy as np np.set_printoptions(precision=4, linewidth=100) from matplotlib import pyplot as plt import utils; reload(utils) from utils import plots # As large as you can, but no larger than 64 is recommended. # If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this. batch_size=64 # Import our class, and instantiate import vgg16; reload(vgg16) from vgg16 import Vgg16 vgg = Vgg16() # Grab a few images at a time for training and validation. # NB: They must be in subdirectories named based on their category batches = vgg.get_batches(path+'train', batch_size=batch_size) val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2) vgg.finetune(batches) vgg.fit(batches, val_batches, nb_epoch=1) vgg = Vgg16() batches = vgg.get_batches(path+'train', batch_size=4) imgs,labels = next(batches) plots(imgs, titles=labels) vgg.predict(imgs, True) vgg.classes[:4] batch_size=64 batches = vgg.get_batches(path+'train', batch_size=batch_size) val_batches = vgg.get_batches(path+'valid', batch_size=batch_size) vgg.finetune(batches) vgg.fit(batches, val_batches, nb_epoch=1) from numpy.random import random, permutation from scipy import misc, ndimage from scipy.ndimage.interpolation import zoom import keras from keras import backend as K from keras.utils.data_utils import get_file from keras.models import Sequential, Model from keras.layers.core import Flatten, Dense, Dropout, Lambda from keras.layers import Input from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D from keras.optimizers import SGD, RMSprop from keras.preprocessing import image FILES_PATH = 'http://www.platform.ai/models/'; CLASS_FILE='imagenet_class_index.json' # Keras' get_file() is a handy function that downloads files, and caches them for re-use later fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models') with open(fpath) as f: class_dict = json.load(f) # Convert dictionary with string indexes into an array classes = [class_dict[str(i)][1] for i in range(len(class_dict))] classes[5:] def ConvBlock(layers, model, filters): for i in range(layers): model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(filters, 3, 3, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) def FCBlock(model): model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) # Mean of each channel as provided by VGG researchers vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1)) def vgg_preprocess(x): x = x - vgg_mean # subtract mean return x[:, ::-1] # reverse axis bgr->rgb def VGG_16(): model = Sequential() model.add(Lambda(vgg_preprocess, input_shape=(3,224,224))) ConvBlock(2, model, 64) ConvBlock(2, model, 128) ConvBlock(3, model, 256) ConvBlock(3, model, 512) ConvBlock(3, model, 512) model.add(Flatten()) FCBlock(model) FCBlock(model) model.add(Dense(1000, activation='softmax')) return model model = VGG_16() fpath = get_file('vgg16.h5', FILES_PATH+'vgg16.h5', cache_subdir='models') model.load_weights(fpath) batch_size = 4 def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=batch_size, class_mode='categorical'): return gen.flow_from_directory(path+dirname, target_size=(224,224), class_mode=class_mode, shuffle=shuffle, batch_size=batch_size) batches = get_batches('train', batch_size=batch_size) val_batches = 
get_batches('valid', batch_size=batch_size) imgs,labels = next(batches) # This shows the 'ground truth' plots(imgs, titles=labels) def pred_batch(imgs): preds = model.predict(imgs) idxs = np.argmax(preds, axis=1) print('Shape: {}'.format(preds.shape)) print('First 5 classes: {}'.format(classes[:5])) print('First 5 probabilities: {}\n'.format(preds[0, :5])) print('Predictions prob/class: ') for i in range(len(idxs)): idx = idxs[i] print (' {:.4f}/{}'.format(preds[i, idx], classes[idx])) pred_batch(imgs) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Define path to data Step2: A few basic libraries that we'll need for the initial exercises Step3: We have created a file most imaginatively called 'utils.py' to store any little convenience functions we'll want to use. We will discuss these as we use them. Step4: Use a pretrained VGG model with our Vgg16 class Step5: The code above will work for any image recognition task, with any number of categories! All you have to do is to put your images into one folder per category, and run the code above. Step6: Vgg16 is built on top of Keras (which we will be learning much more about shortly!), a flexible, easy to use deep learning library that sits on top of Theano or Tensorflow. Keras reads groups of images and labels in batches, using a fixed directory structure, where images from each category for training must be placed in a separate folder. Step7: (BTW, when Keras refers to 'classes', it doesn't mean python classes - but rather it refers to the categories of the labels, such as 'pug', or 'tabby'.) Step8: As you can see, the labels for each image are an array, containing a 1 in the first position if it's a cat, and in the second position if it's a dog. This approach to encoding categorical variables, where an array containing just a single 1 in the position corresponding to the category, is very common in deep learning. It is called one hot encoding. Step9: We can now pass the images to Vgg16's predict() function to get back probabilities, category indexes, and category names for each image's VGG prediction. Step10: The category indexes are based on the ordering of categories used in the VGG model - e.g here are the first four Step11: (Note that, other than creating the Vgg16 object, none of these steps are necessary to build a model; they are just showing how to use the class to view imagenet predictions.) Step12: Calling finetune() modifies the model such that it will be trained based on the data in the batches provided - in this case, to predict either 'dog' or 'cat'. Step13: Finally, we fit() the parameters of the model using the training data, reporting the accuracy on the validation set after every epoch. (An epoch is one full pass through the training data.) Step14: That shows all of the steps involved in using the Vgg16 class to create an image recognition model using whatever labels you are interested in. For instance, this process could classify paintings by style, or leaves by type of disease, or satellite photos by type of crop, and so forth. Step15: Let's import the mappings from VGG ids to imagenet category ids and descriptions, for display purposes later. Step16: Here's a few examples of the categories we just imported Step17: Model creation Step18: ...and here's the fully-connected definition. Step19: When the VGG model was trained in 2014, the creators subtracted the average of each of the three (R,G,B) channels first, so that the data for each channel had a mean of zero. Furthermore, their software that expected the channels to be in B,G,R order, whereas Python by default uses R,G,B. We need to preprocess our data to make these two changes, so that it is compatible with the VGG model Step20: Now we're ready to define the VGG model architecture - look at how simple it is, now that we have the basic blocks defined! Step21: We'll learn about what these different blocks do later in the course. 
For now, it's enough to know that Step22: As well as the architecture, we need the weights that the VGG creators trained. The weights are the part of the model that is learnt from the data, whereas the architecture is pre-defined based on the nature of the problem. Step23: Getting imagenet predictions Step24: Keras provides functionality to create batches of data from directories containing images; all we have to do is to define the size to resize the images to, what type of labels to create, whether to randomly shuffle the images, and how many images to include in each batch. We use this little wrapper to define some helpful defaults appropriate for imagenet data Step25: From here we can use exactly the same steps as before to look at predictions from the model. Step26: The VGG model returns 1,000 probabilities for each image, representing the probability that the model assigns to each possible imagenet category for each image. By finding the index with the largest probability (with np.argmax()) we can find the predicted label.
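Step 8 of this walkthrough mentions one hot encoding; a minimal NumPy illustration of what those label arrays look like, independent of the Keras generators used in the record:

import numpy as np

labels = np.array([0, 1, 1, 0])    # e.g. 0 = cat, 1 = dog
one_hot = np.eye(2)[labels]        # one row per sample, a single 1 in the class position
print(one_hot)
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]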
13,605
<ASSISTANT_TASK:> Python Code: import pp gap = 0.2 wg_width = 0.5 length = 10 yaml = f instances: sl: component: coupler_symmetric settings: gap: {gap} wg_width: {wg_width} sr: component: coupler_symmetric settings: gap: {gap} wg_width: {wg_width} cs: component: coupler_straight settings: gap: {gap} width: {wg_width} length: {length} placements: cs: x: 100 y: 0 connections: sl,W0: cs,W0 sr,W0: cs,E0 ports: w0: sl,E0 w1: sl,E1 e0: sr,E0 e1: sr,E1 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) gap = 0.2 wg_width = 0.5 length = 10 yaml = f instances: sl: component: coupler_symmetric settings: gap: {gap} wg_width: {wg_width} sr: component: coupler_symmetric settings: gap: {gap} wg_width: {wg_width} cs: component: coupler_straight settings: gap: {gap} width: {wg_width} length: {length} placements: sl: x: 100 y: 0 rotation: 180 connections: cs,W0: sl,W0 sr,W0: cs,E0 ports: w0: sl,E0 w1: sl,E1 e0: sr,E0 e1: sr,E1 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) import pp yaml = instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: x: 100 y: 100 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) import pp yaml = instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: x: 100 y: 100 routes: mmi_short,E1: mmi_long,W0 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) import pp yaml = instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: rotation: 180 x: 100 y: 100 routes: mmi_short,E1: mmi_long,E0 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) import pp yaml = instances: mmi_long: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 10 mmi_short: component: mmi1x2 settings: width_mmi: 4.5 length_mmi: 5 placements: mmi_long: rotation: 180 x: 100 y: 100 routes: mmi_short,E1: mmi_long,E0 ports: E0: mmi_short,W0 W0: mmi_long,W0 c = pp.component_from_yaml(yaml) pp.show(c) pp.plotgds(c) r = c.routes['mmi_short,E1:mmi_long,E0'] r r.parent.length c.instances c.routes sample_2x2_connections_problem = name: connections_2x2_problem instances: mmi_bottom: component: mmi2x2 mmi_top: component: mmi2x2 placements: mmi_top: x: 100 y: 100 routes: mmi_bottom,E0: mmi_top,W0 mmi_bottom,E1: mmi_top,W1 def test_connections_2x2_problem(): c = pp.component_from_yaml(sample_2x2_connections_problem) return c c = test_connections_2x2_problem() pp.qp(c) pp.show(c) import pp sample_2x2_connections_solution = name: connections_2x2_problem instances: mmi_bottom: component: mmi2x2 mmi_top: component: mmi2x2 placements: mmi_top: x: 100 y: 100 bundle_routes: mmis: mmi_bottom,E0: mmi_top,W0 mmi_bottom,E1: mmi_top,W1 def test_connections_2x2_solution(): c = pp.component_from_yaml(sample_2x2_connections_solution) return c c = test_connections_2x2_solution() pp.qp(c) pp.show(c) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: YAML component connections Step5: While this one has the sbend_left_coupler sl centered at (100, 0) Step7: You can rotate an instance by specifying the angle in degrees Step9: You can also define ports for the component Step11: Routes problem Step13: Routes Solution
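The record repeats one pattern throughout: a YAML string with instances / placements / routes / ports keys handed to pp.component_from_yaml. A minimal, readable version of the routed-MMI example from the record (component names and settings copied from it):

import pp

yaml = """
instances:
    mmi_long:
      component: mmi1x2
      settings:
        width_mmi: 4.5
        length_mmi: 10
    mmi_short:
      component: mmi1x2
      settings:
        width_mmi: 4.5
        length_mmi: 5

placements:
    mmi_long:
        x: 100
        y: 100

routes:
    mmi_short,E1: mmi_long,W0
"""

c = pp.component_from_yaml(yaml)
pp.show(c)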
13,606
<ASSISTANT_TASK:> Python Code: import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') survey_data = pd.read_csv("data/surveys.csv") survey_data.head() # %load _solutions/case2_observations_processing1.py datasetname = "Ecological Archives E090-118-D1." # %load _solutions/case2_observations_processing2.py # %load _solutions/case2_observations_processing3.py survey_data = survey_data.rename(columns={'sex_char': 'verbatimSex'}) # %load _solutions/case2_observations_processing4.py # %load _solutions/case2_observations_processing5.py survey_data["sex"].unique() # %load _solutions/case2_observations_processing6.py survey_data["species"].unique() survey_data.head(10) example = survey_data.loc[7:10, "species"] example example.str.split("and") example_split = example.str.split("and").explode() example_split example_split.iloc[1], example_split.iloc[2] example_split.str.strip() def solve_double_field_entry(df, keyword="and", column="verbatimEventDate"): Split on keyword in column for an enumeration and create extra record Parameters ---------- df: pd.DataFrame DataFrame with a double field entry in one or more values keyword: str word/character to split the double records on column: str column name to use for the decoupling of the records df = df.copy() # copy the input DataFrame to avoid editing the original df[column] = df[column].str.split(keyword) df = df.explode(column) df[column] = df[column].str.strip() # remove white space around the words return df # %load _solutions/case2_observations_processing7.py survey_data_decoupled["species"].unique() survey_data_decoupled.head(11) np.arange(1, len(survey_data_decoupled) + 1, 1) survey_data_decoupled["occurrenceID"] = np.arange(1, len(survey_data_decoupled) + 1, 1) survey_data_decoupled = survey_data_decoupled.drop(columns="record_id") survey_data_decoupled.head(10) # pd.to_datetime(survey_data_decoupled[["year", "month", "day"]]) # uncomment the line and test this statement sum(pd.to_datetime(survey_data_decoupled[["year", "month", "day"]], errors='coerce').isna()) # %load _solutions/case2_observations_processing8.py trouble_makers.head() trouble_makers["day"].unique() trouble_makers["month"].unique() trouble_makers["year"].unique() # %load _solutions/case2_observations_processing9.py survey_data_decoupled["eventDate"] = \ pd.to_datetime(survey_data_decoupled[["year", "month", "day"]]) # %load _solutions/case2_observations_processing10.py # %load _solutions/case2_observations_processing11.py survey_data_decoupled.head() survey_data_decoupled["eventDate"].dtype survey_data_decoupled.eventDate.dt #add a dot (.) 
and press TAB to explore the date options it provides # %load _solutions/case2_observations_processing12.py # %load _solutions/case2_observations_processing13.py survey_data_decoupled["eventDate"] = survey_data_decoupled["eventDate"].dt.strftime('%Y-%m-%d') survey_data_decoupled["eventDate"].head() survey_data_decoupled = survey_data_decoupled.drop(columns=["day", "month", "year"]) # %load _solutions/case2_observations_processing14.py species_data.head() # %load _solutions/case2_observations_processing15.py # %load _solutions/case2_observations_processing16.py len(survey_data_species) # check length after join operation survey_data_species.head() survey_data_species = survey_data_species.drop(["species_x", "species_id"], axis=1) survey_data_species = survey_data_species.rename(columns={"species_y": "species"}) survey_data_species.head() len(survey_data_species) # %load _solutions/case2_observations_processing17.py plot_data.head() from pyproj import Transformer transformer = Transformer.from_crs("EPSG:32612", "epsg:4326") transformer.transform(681222.131658, 3.535262e+06) # %load _solutions/case2_observations_processing18.py # %load _solutions/case2_observations_processing19.py # %load _solutions/case2_observations_processing20.py # %load _solutions/case2_observations_processing21.py plot_data.head() # %load _solutions/case2_observations_processing22.py # %load _solutions/case2_observations_processing23.py survey_data_plots.head() survey_data_plots = survey_data_plots.rename(columns={'plot': 'verbatimLocality'}) survey_data_plots.to_csv("interim_survey_data_species.csv", index=False) import requests species_name = 'Alcedo atthis' base_string = 'http://api.gbif.org/v1/species/match?' request_parameters = {'verbose': False, 'strict': True, 'name': species_name} message = requests.get(base_string, params=request_parameters).json() message genus_name = "Callipepla" species_name = "squamata" name_to_match = '{} {}'.format(genus_name, species_name) base_string = 'http://api.gbif.org/v1/species/match?' request_parameters = {'strict': True, 'name': name_to_match} # use strict matching(!) message = requests.get(base_string, params=request_parameters).json() message # %load _solutions/case2_observations_processing24.py genus_name = "Callipepla" species_name = "squamata" name_match(genus_name, species_name, strict=True) genus_name = "Lizard" species_name = "sp." name_match(genus_name, species_name, strict=True) # %load _solutions/case2_observations_processing25.py len(unique_species) # %load _solutions/case2_observations_processing26.py len(unique_species) # %load _solutions/case2_observations_processing27.py unique_species.head() # this will take a bit as we do a request to gbif for each individual species species_annotated = {} for key, row in unique_species.iterrows(): species_annotated[key] = name_match(row["genus"], row["species"], strict=True) #species_annotated # uncomment to see output # %load _solutions/case2_observations_processing28.py df_species_annotated.head() # %load _solutions/case2_observations_processing29.py df_species_annotated_subset.head() # %load _solutions/case2_observations_processing30.py unique_species_annotated.head() # %load _solutions/case2_observations_processing31.py len(survey_data_completed) survey_data_completed.head() survey_data_completed.to_csv("survey_data_completed_.csv", index=False) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Scenario Step2: <div class="alert alert-success"> Step3: Adding the data source information as static column Step4: Adding this static value as a new column datasetName Step5: Cleaning the sex_char column into a DwC called sex column Step6: So, apparently, more information is provided in this column, whereas according to the metadata information, the sex information should be either M (male) or F (female). We will create a column, named sex and convert the symbols to the corresponding sex, taking into account the following mapping of the values (see metadata for more details) Step7: <div class="alert alert-success"> Step8: Checking the current frequency of values of the resulting sex column (this should result in the values male, female and nan) Step9: To check what the frequency of occurrences is for male/female of the categories, a bar chart is a possible representation Step10: <div class="alert alert-warning"> Step11: There apparently exists a double entry Step12: Using the split method on strings, we can split the string using a given character, in this case the word and Step13: The explode method will create a row for each element in the list Step14: Hence, the DM and SH are now enlisted in separate rows. Other rows remain unchanged. The only remaining issue is the spaces around the characters Step15: Which we can solve again using the string method strip, removing the spaces before and after the characters Step17: To make this reusable, let's create a dedicated function to combine these steps, called solve_double_field_entry Step18: The function takes a DataFrame as input, splits the record into separate rows and returns an updated DataFrame. We can use this function to get an update of the DataFrame, with an additional row (observation) added by decoupling the specific field. Let's apply this new function. Step19: Create new occurrence identifier Step20: To create a new column with header occurrenceID with the values 1 -> 35550 as field values Step21: To overcome the confusion on having both a record_id and occurrenceID field, we will remove the record_id term Step22: Hence, columns can be drop-ped out of a DataFrame Step23: Converting the date values Step24: This is not working, not all dates can be interpreted... We should get some more information on the reason of the errors. By using the option coerce, the problem makers will be labeled as a missing value NaT. We can count the number of dates that can not be interpreted Step25: <div class="alert alert-success"> Step26: Checking some charactersitics of the trouble_makers Step27: The issue is the presence of day 31 during the months April and September of the year 2000. At this moment, we would have to recheck the original data in order to know how the issue could be solved. Apparently, - for this specific case - there has been a data-entry problem in 2000, making the 31 days during this period should actually be 30. It would be optimal to correct this in the source data set, but for the exercise, we will correct it here. 
Step28: Now, we do the parsing again to create a proper eventDate field, containing the dates Step29: <div class="alert alert-success"> Step30: Currently, the dates are stored in a python specific date format Step31: This is great, because it allows for many functionalities using the .dt accessor Step32: <div class="alert alert-success"> Step33: We actually do not need the day, month, year columns anymore, but feel free to use what suits you best. Step34: When saving the information to a file (e.g. CSV-file), this data type will be automatically converted to a string representation. However, we could also decide to explicitly provide the string format the dates are stored (losing the date type functionalities), in order to have full control on the way these dates are formatted Step35: For the remainder, let's remove the day/year/month columns. Step36: 2. Add species names to dataset Step37: Fix a wrong acronym naming Step38: Merging surveys and species Step39: The join is ok, but we are left with some redundant columns and wrong naming Step40: We do not need the columns species_x and species_id column anymore, as we will use the scientific names from now on Step41: The column species_y could just be named species Step42: 3. Add coordinates from the plot locations Step43: Transforming to other coordinate reference system Step44: The reprojection can be done by the function transform of the projection toolkit, providing the coordinate systems and a set of x, y coordinates. For example, for a single coordinate, this can be applied as follows Step45: Such a transformation is a function not supported by Pandas itself (it is in https Step46: The above function transform_utm_to_wgs you have created is a very specific function that knows the structure of the DataFrame you will apply it to (it assumes the 'xutm' and 'yutm' column names). We could also make a more generic function that just takes a X and Y coordinate and returns the Series of converted coordinates (transform_utm_to_wgs2(X, Y)). Step47: <div class="alert alert-success"> Step48: The plot locations need to be stored with the variable name verbatimLocality indicating the identifier as integer value of the plot Step49: Let's now save our clean data to a csv file, so we can further analyze the data in a following notebook Step50: (OPTIONAL SECTION) 4. Using a API service to match the scientific names Step51: Example matching with Alcedo Atthis Step52: From which we get a dictionary containing more information about the taxonomy of the Alcedo atthis. Step53: To apply this on our species data set, we will have to do this request for each of the individual species/genus combination. As, this is a returning functionality, we will write a small function to do this Step54: <div class="alert alert-info"> Step55: However, the matching won't provide an answer for every search Step56: Match each of the species names of the survey data set Step57: <div class="alert alert-success"> Step58: <div class="alert alert-success"> Step59: To perform the matching for each of the combination, different options do exist (remember apply?) Step60: We can now transform this to a pandas DataFrame Step61: Select relevant information and add this to the survey data Step62: <div class="alert alert-success"> Step63: <div class="alert alert-success"> Step64: Congratulations! You did a great cleaning job, save your result
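The splitting/exploding steps and the date-parsing steps described above lean on two recurring pandas moves: exploding a double entry such as 'DM and SH' into two rows, and letting errors='coerce' flag impossible calendar dates as NaT. A self-contained sketch of both (the toy values are invented for illustration):

import pandas as pd

df = pd.DataFrame({"species": ["DM", "DM and SH", "PF"],
                   "year": [2000, 2000, 2000],
                   "month": [4, 4, 9],
                   "day": [12, 31, 31]})

# split-and-explode the double field entry, then strip stray spaces
df["species"] = df["species"].str.split("and")
df = df.explode("species")
df["species"] = df["species"].str.strip()

# invalid dates such as April/September 31 become NaT instead of raising
df["eventDate"] = pd.to_datetime(df[["year", "month", "day"]], errors="coerce")
print(df)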
13,607
<ASSISTANT_TASK:> Python Code: %matplotlib inline import numpy as np import scipy as sp import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt import pandas as pd pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) import seaborn as sns sns.set_style("whitegrid") sns.set_context("poster") #CITATION: This is the data from National Center for Education Statistics on Schools #Schools for all 50 states and Washington, D.C. #http://nces.ed.gov/ccd/elsi/ #ELSI Root Data Source: U.S. Department of Education National Center for Education Statistics Common Core of Data (CCD) "Public Elementary/Secondary School Universe Survey" 2012-13 v.1a 2013-14 v.1a. #KEY: #† indicates that the data are not applicable. #– indicates that the data are missing. #‡ indicates that the data do not meet NCES data quality standards. schoolinformation = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Information Tab.csv", dtype=np.str) schoolcharacteristicsa = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL CharacteristicsA Tab.csv", dtype=np.str) schoolcharacteristicsb = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL CharacteristicsB Tab.csv", dtype=np.str) schoolenrollment = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Enrollments Tab.csv", dtype=np.str) schoolenrollmentdetails = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Enrollment Details Tab.csv", dtype=np.str) #schoolenrollmentK3 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity, and Gender Tab PreK-3.csv", dtype=np.str) #schoolenrollment48 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity and Gender Tab 4-8.csv", dtype=np.str) #schoolenrollment912 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity and Gender Tab 9-Ungraded.csv", dtype=np.str) schoolteacherstaff = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Teacher and Staff Tab.csv", dtype=np.str) print len(schoolinformation) print len(schoolcharacteristicsa) print len(schoolcharacteristicsb) print len(schoolenrollment) print len(schoolenrollmentdetails) #print len(schoolenrollmentK3) #print len(schoolenrollment48) #print len(schoolenrollment912) print len(schoolteacherstaff) schoolcharacteristicsa = schoolcharacteristicsa.drop(schoolcharacteristicsa.columns[[0, 1, 20]], 1) schoolcharacteristicsb = schoolcharacteristicsb.drop(schoolcharacteristicsb.columns[[0, 1]], 1) schoolenrollment = schoolenrollment.drop(schoolenrollment.columns[[0, 1]], 1) schoolenrollmentdetails = schoolenrollmentdetails.drop(schoolenrollmentdetails.columns[[0, 1]], 1) #schoolenrollmentK3 = schoolenrollmentK3.drop(schoolenrollmentK3.columns[[0, 1]], 1) #schoolenrollment48 = schoolenrollment48.drop(schoolenrollment48.columns[[0, 1]], 1) #schoolenrollment912 = schoolenrollment912.drop(schoolenrollment912.columns[[0, 1, 72]], 1) schoolinformation.head() joinedschool = schoolteacherstaff.join([schoolcharacteristicsa, schoolcharacteristicsb, schoolenrollment, schoolenrollmentdetails]) joinedschool = schoolinformation.merge(joinedschool, 'left', 'School ID - NCES Assigned [Public School] Latest available year', suffixes=('', '_DEL')) #Need to get rid of Excel syntax ="" from some of the columns for i, col in enumerate(joinedschool.columns): joinedschool[col] = joinedschool[col].map(lambda x: str(x).lstrip('="').rstrip('"')) #If by chance any rows have NaN, replace with the ELSI standard for missing data '–' joinedschool = 
joinedschool.fillna('–') joinedschool = joinedschool.replace('nan', '–') # Replacing Missing Data / NA / Bad Quality data with blank, later to be turned into NaN for float columns # CITATIOIN : http://pandas.pydata.org/pandas-docs/version/0.15.2/missing_data.html joinedschool = joinedschool.replace('\xe2\x80\x93', '') # Replace "-" (Missing Data) with blank joinedschool = joinedschool.replace('\xe2\x80\xa0', '') # Replace "†" (Not Applicable) with blank joinedschool = joinedschool.replace('\xe2\x80\xa1', '') # Replace "‡" (Bad Quality) with blank joinedschool['i_agency_type_regional_education_services'] = np.where(joinedschool['Agency Type [District] 2009-10']=='4-Regional education services agency', 1, 0) joinedschool['i_agency_type_local_school_district'] = np.where(joinedschool['Agency Type [District] 2009-10']=='1-Local school district', 1, 0) joinedschool['i_lgo_PK'] = np.where(joinedschool['Lowest Grade Offered [Public School] 2009-10']=='Prekindergarten', 1, 0) joinedschool['i_lgo_K'] = np.where(joinedschool['Lowest Grade Offered [Public School] 2009-10']=='Kindergarten', 1, 0) joinedschool['Black Students [Public School] 2009-10'] = joinedschool['Black Students [Public School] 2009-10'].replace('', np.nan) joinedschool['Grades 9-12 Students [Public School] 2009-10'] = joinedschool['Grades 9-12 Students [Public School] 2009-10'].replace('', np.nan) joinedschool['Total Students [Public School] 2009-10'] = joinedschool['Total Students [Public School] 2009-10'].replace('', np.nan) joinedschool['Black Students [Public School] 2009-10'] = joinedschool['Black Students [Public School] 2009-10'].astype(float) joinedschool['Grades 9-12 Students [Public School] 2009-10'] = joinedschool['Grades 9-12 Students [Public School] 2009-10'].astype(float) joinedschool['Total Students [Public School] 2009-10'] = joinedschool['Total Students [Public School] 2009-10'].astype(float) joinedschool['r_stud_re_B'] = joinedschool['Black Students [Public School] 2009-10']/joinedschool['Total Students [Public School] 2009-10'] joinedschool['r_stud_912'] = joinedschool['Grades 9-12 Students [Public School] 2009-10']/joinedschool['Total Students [Public School] 2009-10'] joinedschool['r_st_TS'] = joinedschool['Pupil/Teacher Ratio [Public School] 2009-10'] joinedschool = joinedschool[['School Name [Public School] 2009-10', 'State Name [Public School] Latest available year', 'School ID - NCES Assigned [Public School] Latest available year', 'Agency ID - NCES Assigned [Public School] Latest available year', 'i_agency_type_regional_education_services', 'i_agency_type_local_school_district', 'i_lgo_PK', 'i_lgo_K', 'r_stud_re_B', 'r_stud_912', 'r_st_TS']] joinedschool.head() joinedschool=joinedschool.replace([np.inf, -np.inf], np.nan) joinedschool.fillna(value=0,inplace=True) joinedschool.head() joinedschool.to_csv("data/finaldata/tableauschools.csv") for col in joinedschool.columns: print col <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Each of the datasheets downloaded from ELSI had download metadata on the top of them and total and key information on the bottom of them that were not data rows. This metadata, total, and key information was manually deleted before import. Some of the files had ="0" instead of 0 in the cells. This was found/replaced before import using the sed -i '' 's/="0"/0/g' *.csv command from the terminal. Step2: Check the lengths of the datasets to see if we have a row for every school. Step3: Drop all of the duplicate columns. Step4: Join all of the school datasets. Step5: This is the full list of all column names in the schools dataframe.
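The record strips the Excel-style ="..." wrappers with a per-cell map over Python strings; the same cleanup can also be written with pandas' vectorized string methods. A sketch on a toy column, not the actual ELSI data:

import pandas as pd

s = pd.Series(['="0"', '="123"', '456'])
cleaned = s.str.lstrip('="').str.rstrip('"')
print(cleaned.tolist())   # ['0', '123', '456']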
13,608
<ASSISTANT_TASK:> Python Code: from pprint import pprint import numpy as np import contextlib @contextlib.contextmanager def printoptions(*args, **kwargs): original = np.get_printoptions() np.set_printoptions(*args, **kwargs) yield np.set_printoptions(**original) ## compact print (numpy array) def cprint(x): with printoptions(precision=3, suppress=True, linewidth=120): print(x) # test x = np.random.random(10) cprint(x) #examples taken from here: http://stackoverflow.com/a/1750187 doc_list = ['Julie loves me more than Linda loves me', 'Jane likes me more than Julie loves me', 'He likes basketball more than baseball'] bags = [] from collections import Counter # we use lower() here so that the sorted vocabulary is easier to see. bags = [Counter(doc.lower().split()) for doc in doc_list] for bag in bags: print(bag) import itertools vocabulary = sorted(list(set(itertools.chain(*[list(b) for b in bags])))) vocabulary def print_vocabulary_vertically(voc, leading_str = '', spacing=2, align=1): # align = 0: align top; otherwise, align bottom max_len = max([len(v) for v in voc]) for i in range(max_len): if align == 0: line = [v[i] if i < len(v) else ' ' for v in voc] else: line = [' ' if i < max_len - len(v) else v[i-max_len] for v in voc] print('{}{}'.format(leading_str, (' '*spacing).join(line))) print_vocabulary_vertically(vocabulary, align=0) for doc in doc_list: print(doc) print() vec_list = [] print_vocabulary_vertically(vocabulary, leading_str=' ') print('-'*70) for bag in bags: vec = [bag[v] for v in vocabulary] # Counter['non-existing-key'] = 0 vec_list.append(vec) print(vec) from math import log def normalize_tf(tf): if tf == 0: return 0.0 else: return 1.0 + log(tf) tf_vec_list = [] for vec in vec_list: tf_vec_list.append([normalize_tf(val) for val in vec]) print_vocabulary_vertically(vocabulary, leading_str=' ', spacing=6) print('-'*80) cprint(np.matrix(tf_vec_list)) from math import log def idf(cnt, ndocs): # here we use ln(). The base of the log does not matter much. 
return 1.0 + log(ndocs/(cnt+1)) ndocs = len(doc_list) # voc = [ (v, [b[v] for b in bags]) for v in vocabulary] # if you want to see the individual counts voc = [(v, sum([b[v] for b in bags])) for v in vocabulary] pprint(voc) voc = [(v, idf( sum([b[v] for b in bags]), ndocs)) for v in vocabulary] idf_dict = dict(voc) pprint(voc) print(idf_dict['he']) import numpy as np def build_idf_matrix(idf_vector): idf_mat = np.zeros((len(idf_vector), len(idf_vector))) np.fill_diagonal(idf_mat, idf_vector) return idf_mat idf_matrix = build_idf_matrix([v[1] for v in voc]) cprint(idf_matrix) import math def l2_normalizer(vec): denom = np.sum([el**2 for el in vec]) return [(el / math.sqrt(denom)) for el in vec] doc_term_matrix_tfidf = [] #performing tf-idf matrix multiplication for vec in tf_vec_list: doc_term_matrix_tfidf.append(np.dot(vec, idf_matrix)) cprint(np.matrix(doc_term_matrix_tfidf)) # np.matrix() just to make it easier to look at #normalizing print('\nAfter normalization ...') doc_term_matrix_tfidf_l2 = [] for tf_vector in doc_term_matrix_tfidf: doc_term_matrix_tfidf_l2.append(l2_normalizer(tf_vector)) cprint(np.matrix(doc_term_matrix_tfidf_l2)) import numpy as np def normalize(v): norm = np.linalg.norm(v) if norm == 0: return v return v / norm def cosine(u, v): a = normalize(u) b = normalize(v) return np.inner(a, b) cosine(doc_term_matrix_tfidf[0], doc_term_matrix_tfidf[1]) cosine(doc_term_matrix_tfidf[0], doc_term_matrix_tfidf[2]) cosine(doc_term_matrix_tfidf[1], doc_term_matrix_tfidf[2]) q = 'Linda likes me' def encode(q, voc): c = Counter(q.lower().split()) return [normalize_tf(c[v]) if (v in c) else normalize_tf(0) for v in voc] vec_q = encode(q, vocabulary) print_vocabulary_vertically(vocabulary, leading_str=' ', spacing=4) vec_q for i, vec in enumerate(vec_list): print('score of doc[{}] = {}'.format(i, cosine(doc_term_matrix_tfidf[i], vec_q))) def topk(query, k): v_q = encode(query, vocabulary) out = [] for i, vec in enumerate(vec_list): out.append((i, cosine(doc_term_matrix_tfidf[i], v_q))) answer = sorted(out, key=lambda r: r[1], reverse=True)[:k] return answer topk('loves baseball', 3) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Now we construct the vocabulary and the doc-term matrix. Step2: Now we transform raw tf to tf by $1 + \log(tf)$. Step3: Adding idf Step4: We will eventually record in voc the idf values. You can look at the raw df value and the resulting idf values from the outputs of the cells above and below. Step5: Now we can look at the final weighted vectors of the documents. Step6: Look at the matrix m before normalization. Step7: Modelling a Query as a Document Step8: Scoring Documents
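For reference, the weighting implemented in the code above (the normalize_tf and idf functions) is a sublinear tf with a smoothed idf, where $N$ is the number of documents and $\mathrm{cnt}(t)$ is the total count of term $t$ across the collection:

$$\mathrm{tf}'(t,d) = \begin{cases} 0 & \text{if } \mathrm{tf}(t,d) = 0 \\ 1 + \ln \mathrm{tf}(t,d) & \text{otherwise} \end{cases} \qquad \mathrm{idf}(t) = 1 + \ln\frac{N}{\mathrm{cnt}(t) + 1} \qquad w_{t,d} = \mathrm{tf}'(t,d)\,\mathrm{idf}(t)$$

Documents and queries are then compared with cosine similarity, $\cos(u, v) = \dfrac{u \cdot v}{\lVert u \rVert \, \lVert v \rVert}$, which is exactly what the cosine function above computes after normalizing both vectors.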
13,609
<ASSISTANT_TASK:> Python Code: from keras.datasets import imdb (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000) max([max(sequence) for sequence in train_data]) # word_index is a dictionary mapping words to an integer index word_index = imdb.get_word_index() # We reverse it, mapping integer indices to words reverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) # We decode the review; note that our indices were offset by 3 # because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown". decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]]) # show decoded_review import numpy as np def vectorize_sequences(sequences, dimension=10000): # Create an all-zero matrix of shape (len(sequences), dimension) results = np.zeros((len(sequences), dimension)) for i, sequence in enumerate(sequences): results[i, sequence] = 1. # set specific indices of results[i] to 1s return results # Our vectorized training data x_train = vectorize_sequences(train_data) # Our vectorized test data x_test = vectorize_sequences(test_data) # Our vectorized labels y_train = np.asarray(train_labels).astype('float32') y_test = np.asarray(test_labels).astype('float32') from keras import models from keras import layers model = models.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10000,))) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) x_val = x_train[:10000] partial_x_train = x_train[10000:] y_val = y_train[:10000] partial_y_train = y_train[10000:] history = model.fit(partial_x_train, partial_y_train, epochs=100, batch_size=512, validation_data=(x_val, y_val)) history_dict = history.history history_dict.keys() #@title import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'bo', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.clf() # clear figure acc_values = history_dict['accuracy'] val_acc_values = history_dict['val_accuracy'] plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: num_words=10000 means we only keep the 10,000 most frequently occurring words; in the labels, 0 indicates a negative review and 1 indicates a positive review. Step2: We can also use the word-index dictionary to reconstruct the review text from the data. Step3: Preprocess the data Step4: Normalize the result labels Step5: Build the network architecture Step6: Set the optimizer to rmsprop, use binary_crossentropy as the loss function, and compile the network Step7: Train the model Step8: Start training the model Step9: The training process stores the relevant information in history; analysing this information afterwards can help us tune the parameters. Step10: Using the method above we can retrieve the information contained in the training History and then plot it as charts, as follows:
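For reference, the binary_crossentropy loss mentioned in Step6 is, for a label $y \in \{0, 1\}$ and the sigmoid output's predicted probability $\hat{y}$,

$$L(y, \hat{y}) = -\big[\, y \ln \hat{y} + (1 - y) \ln (1 - \hat{y}) \,\big]$$

averaged over the examples in each batch.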
13,610
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np from IPython.display import display, HTML CSS = .output { flex-direction: row; } patient_data = pd.read_csv("../data/Exercises_Summary_Statistics_Data.csv") patient_data.head() patients = # Subtet patient_data to include only patients control = # Subset patient_data to include only control patient_mean = # Calculate the mean of the patients control_mean = # Calculate the mean of the controls print("The patient mean age is:", patient_mean, "and the control mean age is:", control_mean, "\t") patient_median = # Calculate the median of the patients control_median = # Calculate the median of the controls print("The patient median age is:", patient_median, "and the control median age is:", control_median, "\t") patient_std = # Standard Deviation of the patients control_std = # Standard Deviation of the controls print("The patient std is:", patient_std, "and the control std is:", control_std, "\t") patient_quantiles = # Patient quantiles control_quantiles = # Control quantiles print("Patients:\f") display(pd.DataFrame(patient_quantiles)) print("Control:\f") display(pd.DataFrame(control_quantiles)) HTML('<style>{}</style>'.format(CSS)) num_male_patients = # Get the number of male patients num_female_patients = # Get the number of female patients print("The number of male patients is:", num_male_patients, \ "\nThe number of female patients is:", num_female_patients, \ "\nAnd the percentage of males is:", num_male_patients / (num_male_patients + num_female_patients), "\t") num_male_control = # Get the number of male control num_female_control = # Get the number of female control print("The number of male control patients is:", num_male_control, \ "\nThe number of female control patients is:", num_female_control, \ "\nAnd the percentage of males is:", num_male_control / (num_male_control + num_female_control), "\t") gene1_patients = patients.Gene1 gene1_control = control.Gene1 # Mean mean_gene1_patients = # Gene1 mean for patients mean_gene1_control = # Gene1 mean for control # Median median_gene1_patients = # Gene1 median for patients median_gene1_control = # Gene1 median for control # Std std_gene1_patients = # Gene1 std for patents std_gene1_control = # Gene1 std for control print("Patients: Mean =", mean_gene1_patients, "Median =", median_gene1_patients, "Std =", std_gene1_patients, "\t") print("Control: Mean =", mean_gene1_control, "Median =", median_gene1_control, "Std =", std_gene1_control, "\t") gene2_patients = patients.Gene2 gene2_control = control.Gene2 # Mean mean_gene2_patients = # Gene2 mean for patients mean_gene2_control = # Gene2 mean for control # Median median_gene2_patients = # Gene2 median for patients median_gene2_control = # Gene2 median for control # Std std_gene2_patients = # Gene2 std for patents std_gene2_control = # Gene2 std for control print("Patients: Mean =", mean_gene2_patients, "Median =", median_gene2_patients, "Std =", std_gene2_patients, "\t") print("Control: Mean =", mean_gene2_control, "Median =", median_gene2_control, "Std =", std_gene2_control, "\t") gene6_patients = patients.Gene6 gene6_control = control.Gene6 # Mean mean_gene6_patients = # Gene6 mean for patients mean_gene6_control = # Gene6 mean for control # Median median_gene6_patients = # Gene6 median for patients median_gene6_control = # Gene6 median for control # Std std_gene6_patients = # Gene6 std for patents std_gene6_control = # Gene6 std for control print("Patients: Mean =", mean_gene6_patients, "Median =", 
median_gene6_patients, "Std =", std_gene6_patients, "\t") print("Control: Mean =", mean_gene6_control, "Median =", median_gene6_control, "Std =", std_gene6_control, "\t") gene_names = ["Gene1", "Gene2", "Gene3", "Gene4", "Gene5", "Gene6"] display(# Get the summary of the gene columns for PATIENTS) display(# Get the summary of the gene columns for CONTROL) display(# Mean of the PATIENT genes / # Mean of the CONTROL genes) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Summary Statistics - Exercises Step2: Exercise 1 - Let's get a quick look at the groups Step3: Find out the Age means for each of the groups Step4: Find the Median of each group Step5: Results - Mean / Median Step6: Find the quantiles Step7: Results - Interval Statistics Step8: Number of male control patients Step9: Results - Percentage of the sexes Step10: Gene 2 Step11: I will just ask for one more gene, since the process is entirely the same! Step12: Results - Genes 1, 2 and 6 Step13: What if we want a measure of difference for each gene?
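One possible way to fill in the blanks left in the exercise code above, as a minimal sketch only: it assumes the CSV distinguishes the two groups with a column named 'Group' taking the values 'Patient' and 'Control', plus 'Age', 'Sex' and 'Gene1'..'Gene6' columns (the real column names are not shown in the exercise, so adjust accordingly).

```python
# Minimal sketch for the exercise blanks; the 'Group' column name and its labels are assumptions.
patients = patient_data[patient_data['Group'] == 'Patient']
control = patient_data[patient_data['Group'] == 'Control']

# First-order statistics on Age
patient_mean, control_mean = patients['Age'].mean(), control['Age'].mean()
patient_median, control_median = patients['Age'].median(), control['Age'].median()
patient_std, control_std = patients['Age'].std(), control['Age'].std()

# Quartiles (interval statistics)
patient_quantiles = patients['Age'].quantile([0.25, 0.5, 0.75])
control_quantiles = control['Age'].quantile([0.25, 0.5, 0.75])

# Counts by sex
num_male_patients = (patients['Sex'] == 'Male').sum()
num_female_patients = (patients['Sex'] == 'Female').sum()
num_male_control = (control['Sex'] == 'Male').sum()
num_female_control = (control['Sex'] == 'Female').sum()

# Per-gene summaries and a simple patient/control ratio of means
gene_names = ['Gene1', 'Gene2', 'Gene3', 'Gene4', 'Gene5', 'Gene6']
print(patients[gene_names].describe())
print(control[gene_names].describe())
print(patients[gene_names].mean() / control[gene_names].mean())
```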
13,611
<ASSISTANT_TASK:> Python Code: import numpy as np import pandas as pd from owslib.csw import CatalogueServiceWeb from owslib import fes from pprint import pprint fmt = '{:*^64}'.format def fes_date_filter(start, stop, constraint='overlaps'): Take datetime-like objects and returns a fes filter for date range (begin and end inclusive). NOTE: Truncates the minutes!!! Examples -------- >>> from datetime import datetime, timedelta >>> stop = datetime(2010, 1, 1, 12, 30, 59).replace(tzinfo=pytz.utc) >>> start = stop - timedelta(days=7) >>> begin, end = fes_date_filter(start, stop, constraint='overlaps') >>> begin.literal, end.literal ('2010-01-01 12:00', '2009-12-25 12:00') >>> begin.propertyoperator, end.propertyoperator ('ogc:PropertyIsLessThanOrEqualTo', 'ogc:PropertyIsGreaterThanOrEqualTo') >>> begin, end = fes_date_filter(start, stop, constraint='within') >>> begin.literal, end.literal ('2009-12-25 12:00', '2010-01-01 12:00') >>> begin.propertyoperator, end.propertyoperator ('ogc:PropertyIsGreaterThanOrEqualTo', 'ogc:PropertyIsLessThanOrEqualTo') start = start.strftime('%Y-%m-%d %H:00') stop = stop.strftime('%Y-%m-%d %H:00') if constraint == 'overlaps': propertyname = 'apiso:TempExtent_begin' begin = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname, literal=stop) propertyname = 'apiso:TempExtent_end' end = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname, literal=start) elif constraint == 'within': propertyname = 'apiso:TempExtent_begin' begin = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname, literal=start) propertyname = 'apiso:TempExtent_end' end = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname, literal=stop) else: raise NameError('Unrecognized constraint {}'.format(constraint)) return begin, end def run_ngdc_csw(filter_list): csw = CatalogueServiceWeb('http://www.ngdc.noaa.gov/geoportal/csw', timeout=60) csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full') print(fmt(' Catalog information ')) print("CSW version: {}".format(csw.version)) print("Number of datasets available: {}".format(len(csw.records.keys()))) return csw def service_urls(csw): df = [] for key, rec in csw.records.items(): df.append(pd.DataFrame(rec.references)) df = pd.concat(df, ignore_index=True) df['scheme'] = [scheme.split(':')[-2] for scheme in df['scheme']] return df.set_index('scheme').sort_index().stack() # IOOS NGDC collection uuid's # uuidscsv_url = 'https://raw.githubusercontent.com/ioos/registry/master/uuid.csv' # The separator used is messy! It's ',\t' in most cases, # except OceanSITES # That's too messy! 
I've created my own, cleaned-up copy, for now: uuidscsv_url = 'https://raw.githubusercontent.com/emiliom/stuff/master/ioos_ngdc_registry_uuid.csv' uuids_df = pd.read_csv(uuidscsv_url) uuids_df.set_index('MetadataCollection', inplace=True) uuids = uuids_df.to_dict()['UUID'] uuids bbox = [-127, 43, -123.75, 48] from datetime import datetime, timedelta dt = 5 now = datetime.utcnow() start = now - timedelta(days=dt) stop = now + timedelta(days=dt) sos_name = 'sea_water_temperature' name_list = ['sea_water_temperature', 'sea_surface_temperature', 'sea_water_potential_temperature', 'equivalent_potential_temperature', 'sea_water_conservative_temperature', 'pseudo_equivalent_potential_temperature'] # Not includeing 'Water Temperature' begin, end = fes_date_filter(start, stop) kw = dict(wildCard='*', escapeChar='\\', singleChar='?', propertyname='apiso:AnyText') variable_names_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw) for val in name_list]) filter_list = [fes.And([begin, end, fes.BBox(bbox), variable_names_filt])] csw = run_ngdc_csw(filter_list) csw.records.keys() collection_uuids = fes.Or( [fes.PropertyIsEqualTo(propertyname='dc:source', literal=uuids[collection]) for collection in ['NANOOS', 'NOS/CO-OPS', 'NDBC']] ) filter_list = [fes.And([fes.BBox(bbox), variable_names_filt, collection_uuids])] csw = run_ngdc_csw(filter_list) csw.records.keys() uuid_selection = ['NANOOS', 'NOS/CO-OPS'] for collection in uuid_selection: collection_uuid = fes.PropertyIsEqualTo(propertyname='dc:source', literal=uuids[collection]) filter_list = [collection_uuid] print(fmt(' Collection: %s ' % collection)) csw = run_ngdc_csw(filter_list) pprint(csw.records.keys()) services = service_urls(csw) print('---------- OPeNDAP services:') pprint(set(services['odp'].values.tolist())) print('---------- SOS services:') pprint(set(services['sos'].values.tolist())) collection_uuid = fes.PropertyIsEqualTo(propertyname='dc:source', literal=uuids['NOS/CO-OPS']) filter_list = [collection_uuid] csw = run_ngdc_csw(filter_list) print(csw.records['NOAA.NOS.CO-OPS SOS'].xml) kw = dict(wildCard='*', escapeChar='\\', singleChar='?') kw['propertyname'] = 'apiso:Subject' filt_nanoos = fes.PropertyIsLike(literal=('*%s*' % 'NANOOS'), **kw) filter_list = [filt_nanoos] csw = run_ngdc_csw(filter_list) csw.records.keys() kw['propertyname'] = 'apiso:AnyText' filt_nanoos = fes.PropertyIsLike(literal=('*%s*' % 'NANOOS'), **kw) filter_list = [filt_nanoos] csw = run_ngdc_csw(filter_list) csw.records.keys() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: IOOS CSW queries Step2: GeoPortal uuid's for NGDC IOOS Collections Step3: Query 1 Step4: Run CSW query Step5: Note that the COOPS SOS is not returned. This is probably due to an improper setup for the temporal extent in the COOPS SOS' ISO metadata record. Step6: Query 2 Step7: NOTE for NANOOS Collection Step8: Query 3 Step9: In keywords, apiso Step10: In apiso
13,612
<ASSISTANT_TASK:> Python Code: import pandas as pd df = pd.read_csv('olympics.csv', index_col=0, skiprows=1) for col in df.columns: if col[:2]=='01': df.rename(columns={col:'Gold'+col[4:]}, inplace=True) if col[:2]=='02': df.rename(columns={col:'Silver'+col[4:]}, inplace=True) if col[:2]=='03': df.rename(columns={col:'Bronze'+col[4:]}, inplace=True) if col[:1]=='№': df.rename(columns={col:'#'+col[1:]}, inplace=True) names_ids = df.index.str.split('\s\(') # split the index by '(' df.index = names_ids.str[0] # the [0] element is the country name (new index) df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that) df = df.drop('Totals') df.head() # You should write your whole answer within the function provided. The autograder will call # this function and compare the return value against the correct solution value def answer_zero(): # This function returns the row for Afghanistan, which is a Series object. The assignment # question description will tell you the general format the autograder is expecting return df.iloc[0] # You can examine what your function returns by calling it in the cell. If you have questions # about the assignment formats, check out the discussion forums for any FAQs answer_zero() def answer_one(): return df[df['Gold'] == max(df['Gold'])].iloc[0].name answer_one() def answer_two(): return df.loc[(df['Gold'] - df['Gold.1']).idxmax()].name answer_two() def answer_three(): df_1 = df[(df['Gold']>=1) & (df['Gold.1']>=1)] return df_1.loc[(abs(df_1['Gold'].astype('f') - df_1['Gold.1'].astype('f'))/df_1['Gold.2'].astype('f')).idxmax()].name abs(-1) answer_three() def answer_four(): points = df['Gold.2']*3 + df['Silver.2']*2 + df['Bronze.2']*1 points.rename('Points', inplace=True) return points answer_four() census_df = pd.read_csv('census.csv') census_df.head() def answer_five(): census_df_50 = census_df[census_df['SUMLEV'] == 50] #census_df_50 = census_df_50.reset_index() #census_df_50 = census_df_50.set_index(['STNAME']) census_df_50 = census_df_50.groupby(['STNAME']).sum() return census_df_50.loc[census_df_50['COUNTY'].idxmax()].name answer_five() def answer_six(): census_df_50 = census_df[census_df['SUMLEV'] == 50] census_df_50 = census_df_50.groupby(['STNAME'])['CENSUS2010POP'].nlargest(3) census_df_50 = census_df_50.reset_index() census_df_50 = census_df_50.groupby(['STNAME']).sum() census_df_50= census_df_50.sort(['CENSUS2010POP'], ascending=False)[:3] return list(census_df_50.index) answer_six() def answer_seven(): census_df_50 = census_df[census_df['SUMLEV'] == 50] #census_df_50 = census_df_50.reset_index() #census_df_50 = census_df_50.set_index(['STNAME']) col_list = ['POPESTIMATE2010', 'POPESTIMATE2011','POPESTIMATE2012','POPESTIMATE2013','POPESTIMATE2014', 'POPESTIMATE2015'] census_df_50 = census_df_50.groupby(['CTYNAME']).sum() census_df_50['POPE_DIFF_ABS'] = census_df_50[col_list].max(axis=1) - census_df_50[col_list].min(axis=1) #census_df_50 = census_df_50.sort(['POPE_DIFF_ABS'], ascending=False).iloc[0] return census_df_50.loc[census_df_50['POPE_DIFF_ABS'].idxmax()].name answer_seven() def answer_eight(): census_df_50 = census_df[census_df['SUMLEV'] == 50] census_df_50 = census_df_50[(census_df_50['REGION'] == 1) |(census_df_50['REGION'] == 2 )] census_df_50 = census_df_50[census_df_50['CTYNAME'].str.startswith('Washington')] census_df_50 = census_df_50[census_df_50['POPESTIMATE2015'] > census_df_50['POPESTIMATE2014'] ] return census_df_50.filter(items=['STNAME', 'CTYNAME']) answer_eight() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Question 0 (Example) Step2: Question 1 Step3: Question 2 Step4: Question 3 Step5: Question 4 Step6: Part 2 Step7: Question 6 Step8: Question 7 Step9: Question 8
13,613
<ASSISTANT_TASK:> Python Code: # 1. Input model parameters and print # 2. Compute the steady state of the model directly # 3. Define a function that evaluates the equilibrium conditions def equilibrium_equations(variables_forward,variables_current,parameters): # Parameters p = parameters # Variables fwd = variables_forward cur = variables_current # Resource constraint resource = cur.a*cur.k**p.alpha + (1-p.delta)* cur.k - fwd.k - cur.c # Exogenous tfp # Euler equation # Stack equilibrium conditions into a numpy array return np.array([ ]) # 4. Initialize the model model = ls.model(equations = equilibrium_equations, nstates=, varNames=[], # Any order as long as the state variables are named first shockNames=[], # Name a shock for each state variable *even if there is no corresponding shock in the model* parameters = parameters) # 5. Set the steady state of the model directly. model.set_ss([]) # 6. Find the log-linear approximation around the non-stochastic steady state and solve model.approximate_and_solve() # 7. Print the approximated model in terms of log-deviations from the stady state print(model.approximated()) # 8(a) Compute impulse responses and print the computed impulse responses model.impulse(T=41,t0=5,shock=None) print(model.irs['eA'].head(10)) # 8(b) Plot the computed impulse responses to a TFP shock fig = plt.figure(figsize=(12,4)) ax1 = fig.add_subplot(1,2,1) model.irs['eA'][['a','k','c']].plot(lw='5',alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=3) ax2 = fig.add_subplot(1,2,2) model.irs['eA'][['eA','a']].plot(lw='5',alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2) # 9(a) Compute stochastic simulation and print the simulated values model.stoch_sim(seed=192,covMat= [[parameters['sigma']**2,0],[0,0]]) print(model.simulated.head(10)) # 9(b) Plot the computed stochastic simulation fig = plt.figure(figsize=(12,4)) ax1 = fig.add_subplot(1,2,1) model.simulated[['a','c','k']].plot(lw=5,alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=3) ax2 = fig.add_subplot(1,2,2) model.simulated[['eA','a']].plot(lw=5,alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2) # 1. Compute the steady state values of Y and I # 2. Define a function that evaluates the equilibrium conditions # 3. Initialize the model # 4. Set the steady state of the model directly. # 5. Find the log-linear approximation around the non-stochastic steady state and solve # 6(a) Compute stochastic simulation and print the simulated values model.stoch_sim(seed=192,covMat= [[parameters['sigma'],0],[0,0]]) print(model.simulated.head(10)) # 6(b) Plot the computed stochastic simulation # Compute the standard deviations of A, Y, C, and I # Compute the coefficients of correlation for A, Y, C, and I <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Add Output and Investment Step2: Evaluation
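The equilibrium conditions that the exercise above leaves blank are not spelled out in the description; for a standard stochastic growth model consistent with the resource constraint already present in the code, they would typically be the following (the usual textbook formulation, not necessarily the exact specification the notebook intends):

$$C_t + K_{t+1} = A_t K_t^{\alpha} + (1 - \delta) K_t$$

$$\frac{1}{C_t} = \beta \, E_t\!\left[ \frac{1}{C_{t+1}} \left( \alpha A_{t+1} K_{t+1}^{\alpha - 1} + 1 - \delta \right) \right]$$

$$\ln A_{t+1} = \rho \ln A_t + \epsilon_{t+1}$$

with output and investment defined as $Y_t = A_t K_t^{\alpha}$ and $I_t = K_{t+1} - (1 - \delta) K_t$.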
13,614
<ASSISTANT_TASK:> Python Code: %matplotlib inline import numpy as np from scipy import ndimage import matplotlib.pyplot as plt import matplotlib.cm as cm from model import OrderExtend img = ndimage.imread('images/boat.jpeg', flatten=True) img /= np.max(img) #normalize image [0,1] plt.imshow(img, cmap = cm.Greys_r) nx,ny = img.shape r = 50 p = 0.3 max_iter = 100 theta = 1 # mask of the observed pixels (randomly chosen) sigma = np.random.binomial(1, p, size=img.shape) model = OrderExtend(img, sigma, r, theta=theta) order = model.init() r_img, x, y = model.fit(order, max_iter=max_iter) plt.imshow(r_img, cmap = cm.Greys_r) print(np.max(r_img),np.min(r_img)) r_img, x, y = model.fit(order, max_iter=1000) plt.imshow(r_img, cmap = cm.Greys_r) print(np.max(r_img),np.min(r_img)) #set the threshold to 3 theta = 3 nx,ny = img.shape r = 50 p = 0.3 max_iter = 5000 # mask of the observed pixels (randomly chosen) sigma = np.random.binomial(1, p, size=img.shape) # initialize another model with new theta model = OrderExtend(img, sigma, r, theta) order = model.init() r_img, x, y = model.fit(order, max_iter=max_iter) plt.imshow(r_img, cmap = cm.Greys_r) print(np.max(r_img), np.min(r_img)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Prepare the model. Step2: Now we set the iteration number to 1000, and rerun the model. Step3: The model cannot solve any dimension along the $x$-axis except the dimensions used in initialization, because it decides that any $r \times r$ submatrix of $y$ is too unstable to solve the subsequent linear systems, even with the stabilize routine. In other words, the local condition number of the initial matrix is greater than the threshold $\theta$. Now let's increase the threshold to 3.
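For context, the condition number that the threshold $\theta$ is compared against is, in the usual 2-norm sense,

$$\kappa(A) = \lVert A \rVert_2 \, \lVert A^{-1} \rVert_2 = \frac{\sigma_{\max}(A)}{\sigma_{\min}(A)}$$

so a larger $\theta$ simply lets the solver accept more poorly conditioned $r \times r$ subsystems. How the OrderExtend class computes its local condition number exactly is defined in the model module, which is not shown here.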
13,615
<ASSISTANT_TASK:> Python Code: import copy import pandas as pd import numpy as np import matplotlib.pyplot as plt from transformers import AutoTokenizer, AutoModelForSequenceClassification import shap.benchmark as benchmark import shap import scipy as sp import nlp import torch pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) pd.set_option('max_colwidth', None) train, test = nlp.load_dataset("emotion", split = ["train", "test"]) data={'text':train['text'], 'emotion':train['label']} data = pd.DataFrame(data) tokenizer = AutoTokenizer.from_pretrained("nateraw/bert-base-uncased-emotion",use_fast=True) model = AutoModelForSequenceClassification.from_pretrained("nateraw/bert-base-uncased-emotion") # set mapping between label and id id2label = model.config.id2label label2id = model.config.label2id labels = sorted(label2id, key=label2id.get) def f(x): tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=128,truncation=True) for v in x]) attention_mask = (tv!=0).type(torch.int64) outputs = model(tv,attention_mask=attention_mask)[0].detach().numpy() scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T val = sp.special.logit(scores) return val explainer = shap.Explainer(f,tokenizer,output_names=labels) shap_values = explainer(data['text'][0:20]) sort_order = 'positive' perturbation = 'keep' sequential_perturbation = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation) xs, ys, auc = sequential_perturbation.model_score(shap_values, data['text'][0:20]) sequential_perturbation.plot(xs, ys, auc) sort_order = 'negative' perturbation = 'keep' sequential_perturbation = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation) xs, ys, auc = sequential_perturbation.model_score(shap_values, data['text'][0:20]) sequential_perturbation.plot(xs, ys, auc) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load Data and Model Step2: Class Label Mapping Step3: Define Score Function Step4: Create Explainer Object Step5: Run SHAP Explanation Step6: Define Metrics (Sort Order & Perturbation Method) Step7: Benchmark Explainer
13,616
<ASSISTANT_TASK:> Python Code: import numpy as np np.random.seed(29384924) data = np.random.randint(10, size = 100) print(data) print("Number of data points: {}".format(data.shape[0])) print("Largest value: {}".format(data.max())) print("Smallest value: {}".format(data.min())) print(data.mean()) # Mean outlier = np.array([1, 1, 2, 3, 2, 1, 3, 2, 38]) # Note the outlier of 38 at the end. print(outlier.mean()) print(data) print(np.median(data)) print(outlier) print(np.median(outlier)) print(data) print(data.var()) print(np.sqrt(data.var())) print(data.std()) print(np.percentile(data, 75) - np.percentile(data, 25)) %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns # Observe 100 data points from a Gaussian random variable with mean = 0.0 and variance = 1.0. observations = np.random.normal(size = 100) plt.hist(observations) # Observe **1000** data points from a Gaussian random variable with mean = 0.0 and variance = 1.0. observations = np.random.normal(size = 1000) plt.hist(observations) # Observe **** 10,000 **** data points from a Gaussian random variable with mean = 0.0 and variance = 1.0. observations = np.random.normal(size = 10000) _, _, _ = plt.hist(observations, bins = 25) print(observations) print("Mean: {:.2f}".format(observations.mean())) print("Variance: {:.2f}".format(observations.var())) from scipy.stats import norm xs = np.linspace(-5, 5, 100) plt.plot(xs, norm.pdf(xs, loc = 0, scale = 1), '-', label = "mean=0, var=1") plt.plot(xs, norm.pdf(xs, loc = 0, scale = 2), '--', label = "mean=0, var=2") plt.plot(xs, norm.pdf(xs, loc = 0, scale = 0.5), ':', label = "mean=0, var=0.5") plt.plot(xs, norm.pdf(xs, loc = -1, scale = 1), '-.', label = "mean=-1, var=1") plt.legend(loc = 0) plt.title("Various Normal Distributions") <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Some very straightforward statistics are the number of data points, the largest value, and the smallest value. These shouldn't be immediately ignored, but they are of limited utility. Step2: Mean Step3: The mean is very simple to compute Step4: The mean is sensitive to outliers, meaning one or two data points that lie well beyond all the others can disproportionately affect the value of the mean. In the above simple example, the lone outlier of 38 pulls the mean to be larger than all the other data points except for 38; not exactly a representative sample! Step5: The median is computed by sorting all your data and picking out the middle value (or averaging the two middle data points, if you have an even amount of data). In this way, the median does not directly depend on the vast majority of your data; just on whatever value(s) happen to be in the middle. It is also not trivial to compute Step6: Second-order statistics Step7: The variance is computed by subtracting each individual data point from the average of the whole data set, squaring this difference, and summing all these differences together before finally dividing by the number of data points. Step8: Like the mean, variance (and standard deviation) uses all the data points to compute, and is therefore sensitive to outliers. Step9: This, like the median, is robust to outliers. But also like the median, it relies on sorting the data first, then picking out the value 1/4 of the way down the dataset and subtracting it from the value 3/4 of the way down the dataset. This can be expensive in large datasets. Step10: It's tough to see, isn't it? Let's try 1000 observations. Step11: That looks a little better! Maybe 10,000 data points, just for grins? Step12: There's the bell curve we know and love! Step13: This could be any old dataset! In fact, forget for a moment that we generated this dataset ourselves, and instead think that this could be a dataset we picked up from the web. Step14: You'll notice the mean is very close to 0, and the variance is likewise very close to 1. Since we ourselves set the mean and variance for the random number generator, we know that these are very close to the true mean and true variance, but in general we wouldn't necessarily know that.
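For reference, the second-order statistics described above are, for data $x_1, \dots, x_N$ with mean $\bar{x}$,

$$\sigma^2 = \frac{1}{N} \sum_{i=1}^{N} (x_i - \bar{x})^2, \qquad \sigma = \sqrt{\sigma^2}, \qquad \mathrm{IQR} = Q_{75} - Q_{25}$$

which matches the NumPy defaults used in the code: var() and std() divide by $N$ (ddof=0), and the interquartile range is the difference between np.percentile at 75 and at 25.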
13,617
<ASSISTANT_TASK:> Python Code: %matplotlib inline import numpy as np from scipy import integrate from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import cnames from matplotlib import animation import seaborn as sns import butter_lowpass_filter as blf def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0): def lorenz_deriv((x, y, z), t0, sigma=sigma, beta=beta, rho=rho): Compute the time-derivative of a Lorentz system. return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z] # Choose random starting points, uniformly distributed from -15 to 15 np.random.seed(1) x0 = -15 + 30 * np.random.random((N, 3)) # Solve for the trajectories t = np.linspace(0, max_time, int(250*max_time)) x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t) for x0i in x0]) # choose a different color for each trajectory colors = plt.cm.jet(np.linspace(0, 1, N)) # plot the results sns.set_style("white") fig = plt.figure(figsize = (8,8)) ax = fig.add_axes([0, 0, 1, 1], projection='3d') #ax.axis('off') # prepare the axes limits ax.set_xlim((-25, 25)) ax.set_ylim((-35, 35)) ax.set_zlim((5, 55)) for i in range(N): x, y, z = x_t[i,:,:].T lines = ax.plot(x, y, z, '-', c=colors[i]) plt.setp(lines, linewidth=1) ax.view_init(30, angle) ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.set_zlabel('Z axis') plt.show() return t, x_t t, x_t = solve_lorenz(max_time=10.0) def forced_lorenz(N=3, fnot=2.5, theta=0, max_time=100.0, sigma=10.0, beta=8./3, rho=28.0): def lorenz_deriv((x, y, z), t0, sigma=sigma, beta=beta, rho=rho): Compute the time-derivative of a forced Lorentz system. c = 2*np.pi/360 return [sigma * (y - x) + fnot*np.cos(theta*c), x * (rho - z) - y + fnot*np.sin(theta*c), x * y - beta * z] # Choose random starting points, uniformly distributed from -15 to 15 np.random.seed(1) x0 = -15 + 30 * np.random.random((N, 3)) # Solve for the trajectories t = np.linspace(0, max_time, int(25*max_time)) x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t) for x0i in x0]) return t, x_t sns.set_style("darkgrid") sns.set_palette("Dark2") t, x_t = forced_lorenz(fnot = 0, theta = 50,max_time = 100.00) xv = x_t[0,:,:] # time filter lab = 'X', 'Y', 'Z' col = sns.color_palette("Paired") fig = plt.figure(figsize = (8,8)) xl = np.empty(xv.shape) for k in range(3): xl[:,k] = blf.filter(xv[:,k],0.5,fs=25) plt.plot(t,xv[:,k],color=col[k*2]) #plt.plot(t,xl[:,k],color=col[k*2+1],lw=3.0) plt.legend(lab) plt.show() # Timeseries PLOT sns.set_palette("Dark2") t, x_t = forced_lorenz(fnot = 0, theta = 50,max_time = 1000.00) xv = x_t[0,:,:] # time filter lab = 'X', 'lowpass-filtered X', 'Y', 'lowpass-filtered Y', 'Z','lowpass-filtered Z' col = sns.color_palette("Paired") fig = plt.figure(figsize = (8,8)) xl = np.empty(xv.shape) for k in range(3): xl[:,k] = blf.filter(xv[:,k],0.5,fs=25) plt.plot(t,xv[:,k],color=col[k*2]) plt.plot(t,xl[:,k],color=col[k*2+1],lw=3.0) plt.legend(lab) plt.xlim(0,100) # Be patient... this could take a few seconds to complete. 
cmap = sns.cubehelix_palette(light=1, as_cmap=True) skip = 10 sns.jointplot(xl[0::skip,0],xl[0::skip,1], kind="kde", color="#4CB391") def plot_lorenzPDF(fnot, theta, max_time = 1000.00, skip = 10): t, x_t = forced_lorenz(fnot = fnot, theta = theta,max_time = max_time) xv = x_t[0,:,:]; xl = np.empty(xv.shape) for k in range(3): xl[:,k] = blf.filter(xv[:,k],0.5,fs=25) g = sns.jointplot(xl[0::skip,0],xl[0::skip,1], kind="kde", color="#4CB391") return g theta = 50; f0 = 2.5 # assign values of f0 and theta g = plot_lorenzPDF(fnot = f0, theta = theta) g.ax_joint.arrow(0, 0, 2*f0*np.cos(theta*np.pi/180), f0*np.sin(theta*np.pi/180), head_width=0.5, head_length=0.5, lw=3.0, fc='r', ec='r') ## (BE PATIENT THIS COULD TAKE UP TO A MINUTE) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Let's write a quick Lorenz solver Step3: Let's plot this solution Step5: Very pretty. If you are curious, you can even change the plot angle within the function, and examine this strange attractor under all angles. There are several key properties to this attractor Step6: Let's start by plotting the solutions for X, Y and Z when $f_0 = 0$ (no perturbation) Step7: What happened to X? Well in this case X and Y are so close to each other that they plot on top of each other. Step8: (Once again, Y is on top of X.) Step9: Question 1 Step10: Now let's have some fun
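The system integrated by forced_lorenz above is the Lorenz system with a constant forcing of magnitude $f_0$ applied at angle $\theta$ (in degrees) in the $x$-$y$ plane:

$$\dot{x} = \sigma (y - x) + f_0 \cos\theta, \qquad \dot{y} = x(\rho - z) - y + f_0 \sin\theta, \qquad \dot{z} = xy - \beta z$$

with the default parameters $\sigma = 10$, $\beta = 8/3$, $\rho = 28$; setting $f_0 = 0$ recovers the classic unforced Lorenz attractor.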
13,618
<ASSISTANT_TASK:> Python Code: import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt sns.set_context('talk') import csv import urllib2 import StringIO %matplotlib inline ceb = pd.read_csv('http://data.princeton.edu/wws509/datasets/ceb.dat', sep='\s+') ceb.head() ceb_int = pd.read_csv('http://data.princeton.edu/wws509/datasets/ceb.raw', sep='\s+', names = ['index'] + list(ceb.columns[:-1]), index_col=0) ceb_int.head() plt.figure(figsize=(9,6)) ct = pd.crosstab(ceb_int['dur'], ceb_int['educ'], values=ceb_int['n'], aggfunc= np.sum).sort_index(ascending = False) sns.heatmap(ct, annot = True) plt.yticks(ceb_int['dur'].drop_duplicates().values - .5, ceb['dur'].drop_duplicates().values) plt.xticks(ceb_int['educ'].drop_duplicates().values - .5, ceb['educ'].drop_duplicates().values) plt.ylabel('duration of marriage (years)') plt.xlabel('level of education') plt.title('heatmap of marriage duration by level of education') response = urllib2.urlopen('http://stanford.edu/class/psych252/_downloads/caffeine.csv') html = response.read() caf = pd.read_csv(StringIO.StringIO(html[:-16])) caf.head() caf.describe() from microscopes.models import gp as gamma_poisson <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Children Ever Born is a dataset of birthrates in Fiji from the World Fertility Survey with the following columns Step2: With these columns encoded, we can now represent them as integers Step3: We can map these orderings of dur and educ to produce a crosstab heatmap of n, the number of women Step4: Since dur and education are ordinal valued, the columns assume a small number of integer values Step5: Based on the characteristics of each column, coffee and numprob easily fit into the category of count data appropriate to a gamma-Poisson distribution Step6: Note that while integer valued data with high values is sometimes modeled with a gamma-Poisson distribution, remember that the Poisson distribution has equal mean and variance $\lambda$
13,619
<ASSISTANT_TASK:> Python Code: from sklearn.linear_model import LogisticRegression from sklearn.datasets import make_classification %matplotlib inline import matplotlib.pyplot as plt import numpy as np X, y = make_classification(n_features=1, n_informative=1, n_redundant=0, n_clusters_per_class=1, class_sep=0.9, random_state=27) plt.scatter(X, y, alpha=0.4) plt.xlabel('X') plt.ylabel('Probabilidad') lr = LogisticRegression() lr.fit(X, y) plt.scatter(X, y, alpha=0.4, label='real') plt.plot(np.sort(X, axis=0), lr.predict_proba(np.sort(X, axis=0))[:,1], color='r', label='sigmoide') plt.legend(loc=2) plt.xlabel('X') plt.ylabel('Probabilidad') plt.scatter(X, y, alpha=0.4, label='real') plt.plot(np.sort(X, axis=0), lr.predict(np.sort(X, axis=0)), color='r', label='categoría') plt.legend(loc=2) plt.xlabel('X') plt.ylabel('Probabilidad') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: With scikit-learn's make_classification function, we create a dataset for classification. To start, we will use only one attribute or feature and two classes or categories. The categories will be separated, but we will allow a certain degree of overlap through the class_sep parameter; that way, probabilistic classification makes more sense. Step2: In logistic regression, what we are going to do is compute the probabilities $p(C_k|x)$. The logistic or sigmoid function will let us define those probabilities, and it is defined as Step3: By default it will print the parameters the model was trained with. Once trained, we can predict the probabilities of belonging to each category. To do so, as already mentioned, we will use the predict_proba() function, which takes the attributes X as input. Step4: The S-shaped curve of the logistic function, which is what we were looking for, is clearly visible. This tells us that a point with $x=0$ has roughly a 50% probability of belonging to either of the two categories.
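Completing the definition that Step2 refers to, the logistic (sigmoid) function and the class probability it induces are

$$\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad p(C_1 \mid x) = \sigma(w^{\top} x + b), \qquad p(C_0 \mid x) = 1 - p(C_1 \mid x)$$

which is exactly the curve drawn from predict_proba in the plot above.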
13,620
<ASSISTANT_TASK:> Python Code: import oommfc as oc import discretisedfield as df %matplotlib inline print(df.__file__) system = oc.System(name="first_notebook") A = 1e-12 # exchange energy constant (J/m) H = (5e6, 0, 0) # external magnetic field in x-direction (A/m) system.hamiltonian = oc.Exchange(A=A) + oc.Demag() + oc.Zeeman(H=H) gamma = 2.211e5 # gamma parameter (m/As) alpha = 0.2 # Gilbert damping system.dynamics = oc.Precession(gamma=gamma) + oc.Damping(alpha=alpha) L = 100e-9 # cubic sample edge length (m) d = 5e-9 # discretisation cell size (m) mesh = oc.Mesh(p1=(0, 0, 0), p2=(L, L, L), cell=(d, d, d)) Ms = 8e6 # saturation magnetisation (A/m) system.m = df.Field(mesh, value=(0, 1, 0), norm=Ms) mesh system.hamiltonian system.dynamics system.m.plot_plane("z"); md = oc.MinDriver() md.drive(system) # centre of the system is assumed for plane to be plotted system.m.plot_plane("z"); # plane can be chosen manually as well system.m.plot_plane(z=10e-9); system.m.average <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We create a system object and provide Step2: Our Hamiltonian should only contain exchange, demagnetisation, and Zeeman energy terms. We will apply the external magnetic field in the $x$ direction for the purpose of this demonstration Step3: The dynamics of the system is governed by the LLG equation containing precession and damping terms Step4: We initialise the system in the positive $y$ direction, i.e. (0, 1, 0), which is different from the equilibrium state we expect for the external Zeeman field applied in the $x$ direction Step5: We can check the characteristics of the system we defined by asking objects to represent themselves Step6: We can also visualise the current magnetisation field Step7: After the system object is created, we can minimise its energy (relax it) using the Minimisation Driver (MinDriver). Step8: The system is now relaxed, and we can plot its slice and compute its average magnetisation.
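The LLG equation referred to in Step3, written here in one common Gilbert convention (sign and prefactor conventions vary between references), is

$$\frac{\partial \mathbf{m}}{\partial t} = -\gamma \, \mathbf{m} \times \mathbf{H}_{\mathrm{eff}} + \alpha \, \mathbf{m} \times \frac{\partial \mathbf{m}}{\partial t}$$

where $\mathbf{m}$ is the unit magnetisation, $\mathbf{H}_{\mathrm{eff}}$ the effective field derived from the Hamiltonian, $\gamma$ the gyromagnetic ratio and $\alpha$ the Gilbert damping, matching the gamma and alpha parameters passed to oc.Precession and oc.Damping above.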
13,621
<ASSISTANT_TASK:> Python Code: def load_data(): path = 'data/cifar-10-batches-py' num_train_samples = 50000 x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8') y_train = np.zeros((num_train_samples,), dtype='uint8') for i in range(1, 6): data, labels = load_batch(os.path.join(path, 'data_batch_' + str(i))) x_train[(i - 1) * 10000: i * 10000, :, :, :] = data y_train[(i - 1) * 10000: i * 10000] = labels x_test, y_test = load_batch(os.path.join(path, 'test_batch')) y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test) (x_train, y_train), (x_test, y_test) = load_data() plt.imshow(x_train[1]) x_train = x_train/255. x_test = x_test/255. def relu(x): return Activation('relu')(x) def dropout(x, p): return Dropout(p)(x) if p else x def bn(x): return BatchNormalization()(x) # Keras 2 (axix=-1 is default for TensorFlow image dim ordering) def relu_bn(x): return relu(bn(x)) def conv(x, nf, sz, wd, p): x = Conv2D(nf, (sz, sz), kernel_initializer='he_uniform', padding='same', # Keras 2 kernel_regularizer=l2(wd))(x) return dropout(x,p) def conv_block(x, nf, bottleneck=False, p=None, wd=0): x = relu_bn(x) if bottleneck: x = relu_bn(conv(x, nf * 4, 1, wd, p)) return conv(x, nf, 3, wd, p) def dense_block(x, nb_layers, growth_rate, bottleneck=False, p=None, wd=0): if bottleneck: nb_layers //= 2 for i in range(nb_layers): b = conv_block(x, growth_rate, bottleneck=bottleneck, p=p, wd=wd) x = concatenate([x,b]) # Keras 2 return x def transition_block(x, compression=1.0, p=None, wd=0): nf = int(x.get_shape().as_list()[-1] * compression) x = relu_bn(x) x = conv(x, nf, 1, wd, p) return AveragePooling2D((2, 2), strides=(2, 2))(x) def create_dense_net(nb_classes, img_input, depth=40, nb_block=3, growth_rate=12, nb_filter=16, bottleneck=False, compression=1.0, p=None, wd=0, activation='softmax'): assert activation == 'softmax' or activation == 'sigmoid' assert (depth - 4) % nb_block == 0 nb_layers_per_block = int((depth - 4) / nb_block) nb_layers = [nb_layers_per_block] * nb_block x = conv(img_input, nb_filter, 3, wd, 0) for i,block in enumerate(nb_layers): x = dense_block(x, block, growth_rate, bottleneck=bottleneck, p=p, wd=wd) if i != len(nb_layers)-1: x = transition_block(x, compression=compression, p=p, wd=wd) x = relu_bn(x) x = GlobalAveragePooling2D()(x) return Dense(nb_classes, activation=activation, kernel_regularizer=l2(wd))(x) # Keras 2 input_shape = (32,32,3) img_input = Input(shape=input_shape) x = create_dense_net(10, img_input, depth=100, nb_filter=16, compression=0.5, bottleneck=True, p=0.2, wd=1e-4) model = Model(img_input, x) model.compile(loss='sparse_categorical_crossentropy', optimizer=keras.optimizers.SGD(0.1, 0.9, nesterov=True), metrics=["accuracy"]) parms = {'verbose': 2, 'callbacks': [TQDMNotebookCallback()]} K.set_value(model.optimizer.lr, 0.1) model.fit(x_train, y_train, 64, 20, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.01) model.fit(x_train, y_train, 64, 4, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.1) model.fit(x_train, y_train, 64, 20, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.01) model.fit(x_train, y_train, 64, 40, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.001) model.fit(x_train, y_train, 64, 20, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.01) 
model.fit(x_train, y_train, 64, 10, validation_data=(x_test, y_test), **parms) K.set_value(model.optimizer.lr, 0.001) model.fit(x_train, y_train, 64, 20, validation_data=(x_test, y_test), **parms) %time model.save_weights('models/93.h5') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Here's an example of CIFAR-10 Step2: We want to normalize pixel values (0-255) to unit interval. Step3: Densenet Step4: Convolutional layer Step5: Define ConvBlock as sequence Step6: Now we can define the dense block Step7: As typical for CV architectures, we'll do some pooling after computation. Step8: Build the DenseNet model Step9: Train Step10: This will likely need to run overnight + lr annealing... Step11: And we're able to replicate their state-of-the-art results!
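One detail worth tracing through the code above: each dense block concatenates growth_rate new feature maps per layer onto its input, and each transition block then multiplies the channel count by compression. A rough bookkeeping sketch using the same parameter values as the model built above (illustrative only):

```python
# Channel-count bookkeeping for the DenseNet defined above (illustrative sketch).
depth, nb_block, growth_rate, nb_filter, compression = 100, 3, 12, 16, 0.5
nb_layers_per_block = (depth - 4) // nb_block      # 32 layers per block
nb_layers_per_block //= 2                          # halved because bottleneck=True
channels = nb_filter                               # after the initial 3x3 conv
for block in range(nb_block):
    channels += nb_layers_per_block * growth_rate  # concatenation grows the channels
    if block != nb_block - 1:
        channels = int(channels * compression)     # transition block compresses
print(channels)                                    # 340 feature maps before global pooling
```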
13,622
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, activation="softmax", name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Preprocess the data (these are NumPy arrays) x_train = x_train.reshape(60000, 784).astype("float32") / 255 x_test = x_test.reshape(10000, 784).astype("float32") / 255 y_train = y_train.astype("float32") y_test = y_test.astype("float32") # Reserve 10,000 samples for validation x_val = x_train[-10000:] y_val = y_train[-10000:] x_train = x_train[:-10000] y_train = y_train[:-10000] model.compile( optimizer=keras.optimizers.RMSprop(), # Optimizer # Loss function to minimize loss=keras.losses.SparseCategoricalCrossentropy(), # List of metrics to monitor metrics=[keras.metrics.SparseCategoricalAccuracy()], ) print("Fit model on training data") history = model.fit( x_train, y_train, batch_size=64, epochs=2, # We pass some validation for # monitoring validation loss and metrics # at the end of each epoch validation_data=(x_val, y_val), ) history.history # Evaluate the model on the test data using `evaluate` print("Evaluate on test data") results = model.evaluate(x_test, y_test, batch_size=128) print("test loss, test acc:", results) # Generate predictions (probabilities -- the output of the last layer) # on new data using `predict` print("Generate predictions for 3 samples") predictions = model.predict(x_test[:3]) print("predictions shape:", predictions.shape) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) model.compile( optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) def get_uncompiled_model(): inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, activation="softmax", name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) return model def get_compiled_model(): model = get_uncompiled_model() model.compile( optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) return model def custom_mean_squared_error(y_true, y_pred): return tf.math.reduce_mean(tf.square(y_true - y_pred)) model = get_uncompiled_model() model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error) # We need to one-hot encode the labels to use MSE y_train_one_hot = tf.one_hot(y_train, depth=10) model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1) class CustomMSE(keras.losses.Loss): def 
__init__(self, regularization_factor=0.1, name="custom_mse"): super().__init__(name=name) self.regularization_factor = regularization_factor def call(self, y_true, y_pred): mse = tf.math.reduce_mean(tf.square(y_true - y_pred)) reg = tf.math.reduce_mean(tf.square(0.5 - y_pred)) return mse + reg * self.regularization_factor model = get_uncompiled_model() model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE()) y_train_one_hot = tf.one_hot(y_train, depth=10) model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1) class CategoricalTruePositives(keras.metrics.Metric): def __init__(self, name="categorical_true_positives", **kwargs): super(CategoricalTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name="ctp", initializer="zeros") def update_state(self, y_true, y_pred, sample_weight=None): y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1)) values = tf.cast(y_true, "int32") == tf.cast(y_pred, "int32") values = tf.cast(values, "float32") if sample_weight is not None: sample_weight = tf.cast(sample_weight, "float32") values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives def reset_states(self): # The state of the metric will be reset at the start of each epoch. self.true_positives.assign(0.0) model = get_uncompiled_model() model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[CategoricalTruePositives()], ) model.fit(x_train, y_train, batch_size=64, epochs=3) class ActivityRegularizationLayer(layers.Layer): def call(self, inputs): self.add_loss(tf.reduce_sum(inputs) * 0.1) return inputs # Pass-through layer. inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) # Insert activity regularization as a layer x = ActivityRegularizationLayer()(x) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) # The displayed loss will be much higher than before # due to the regularization component. model.fit(x_train, y_train, batch_size=64, epochs=1) class MetricLoggingLayer(layers.Layer): def call(self, inputs): # The `aggregation` argument defines # how to aggregate the per-batch values # over each epoch: # in this case we simply average them. self.add_metric( keras.backend.std(inputs), name="std_of_activation", aggregation="mean" ) return inputs # Pass-through layer. inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) # Insert std logging as a layer. 
x = MetricLoggingLayer()(x) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) model.fit(x_train, y_train, batch_size=64, epochs=1) inputs = keras.Input(shape=(784,), name="digits") x1 = layers.Dense(64, activation="relu", name="dense_1")(inputs) x2 = layers.Dense(64, activation="relu", name="dense_2")(x1) outputs = layers.Dense(10, name="predictions")(x2) model = keras.Model(inputs=inputs, outputs=outputs) model.add_loss(tf.reduce_sum(x1) * 0.1) model.add_metric(keras.backend.std(x1), name="std_of_activation", aggregation="mean") model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), ) model.fit(x_train, y_train, batch_size=64, epochs=1) class LogisticEndpoint(keras.layers.Layer): def __init__(self, name=None): super(LogisticEndpoint, self).__init__(name=name) self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) self.accuracy_fn = keras.metrics.BinaryAccuracy() def call(self, targets, logits, sample_weights=None): # Compute the training-time loss value and add it # to the layer using `self.add_loss()`. loss = self.loss_fn(targets, logits, sample_weights) self.add_loss(loss) # Log accuracy as a metric and add it # to the layer using `self.add_metric()`. acc = self.accuracy_fn(targets, logits, sample_weights) self.add_metric(acc, name="accuracy") # Return the inference-time prediction tensor (for `.predict()`). return tf.nn.softmax(logits) import numpy as np inputs = keras.Input(shape=(3,), name="inputs") targets = keras.Input(shape=(10,), name="targets") logits = keras.layers.Dense(10)(inputs) predictions = LogisticEndpoint(name="predictions")(logits, targets) model = keras.Model(inputs=[inputs, targets], outputs=predictions) model.compile(optimizer="adam") # No loss argument! data = { "inputs": np.random.random((3, 3)), "targets": np.random.random((3, 10)), } model.fit(data) model = get_compiled_model() model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1) model = get_compiled_model() # First, let's create a training Dataset instance. # For the sake of our example, we'll use the same MNIST data as before. train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) # Shuffle and slice the dataset. train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) # Now we get a test dataset. test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) test_dataset = test_dataset.batch(64) # Since the dataset already takes care of batching, # we don't pass a `batch_size` argument. model.fit(train_dataset, epochs=3) # You can also evaluate or predict on a dataset. 
print("Evaluate") result = model.evaluate(test_dataset) dict(zip(model.metrics_names, result)) model = get_compiled_model() # Prepare the training dataset train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) # Only use the 100 batches per epoch (that's 64 * 100 samples) model.fit(train_dataset, epochs=3, steps_per_epoch=100) model = get_compiled_model() # Prepare the training dataset train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) # Prepare the validation dataset val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_dataset = val_dataset.batch(64) model.fit(train_dataset, epochs=1, validation_data=val_dataset) model = get_compiled_model() # Prepare the training dataset train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) # Prepare the validation dataset val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_dataset = val_dataset.batch(64) model.fit( train_dataset, epochs=1, # Only run validation using the first 10 batches of the dataset # using the `validation_steps` argument validation_data=val_dataset, validation_steps=10, ) import numpy as np class_weight = { 0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, # Set weight "2" for class "5", # making this class 2x more important 5: 2.0, 6: 1.0, 7: 1.0, 8: 1.0, 9: 1.0, } print("Fit with class weight") model = get_compiled_model() model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1) sample_weight = np.ones(shape=(len(y_train),)) sample_weight[y_train == 5] = 2.0 print("Fit with sample weight") model = get_compiled_model() model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1) sample_weight = np.ones(shape=(len(y_train),)) sample_weight[y_train == 5] = 2.0 # Create a Dataset that includes sample weights # (3rd element in the return tuple). train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight)) # Shuffle and slice the dataset. 
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) model = get_compiled_model() model.fit(train_dataset, epochs=1) image_input = keras.Input(shape=(32, 32, 3), name="img_input") timeseries_input = keras.Input(shape=(None, 10), name="ts_input") x1 = layers.Conv2D(3, 3)(image_input) x1 = layers.GlobalMaxPooling2D()(x1) x2 = layers.Conv1D(3, 3)(timeseries_input) x2 = layers.GlobalMaxPooling1D()(x2) x = layers.concatenate([x1, x2]) score_output = layers.Dense(1, name="score_output")(x) class_output = layers.Dense(5, name="class_output")(x) model = keras.Model( inputs=[image_input, timeseries_input], outputs=[score_output, class_output] ) keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True) model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], ) model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], metrics=[ [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], [keras.metrics.CategoricalAccuracy()], ], ) model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={ "score_output": keras.losses.MeanSquaredError(), "class_output": keras.losses.CategoricalCrossentropy(), }, metrics={ "score_output": [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], "class_output": [keras.metrics.CategoricalAccuracy()], }, ) model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={ "score_output": keras.losses.MeanSquaredError(), "class_output": keras.losses.CategoricalCrossentropy(), }, metrics={ "score_output": [ keras.metrics.MeanAbsolutePercentageError(), keras.metrics.MeanAbsoluteError(), ], "class_output": [keras.metrics.CategoricalAccuracy()], }, loss_weights={"score_output": 2.0, "class_output": 1.0}, ) # List loss version model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[None, keras.losses.CategoricalCrossentropy()], ) # Or dict loss version model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss={"class_output": keras.losses.CategoricalCrossentropy()}, ) model.compile( optimizer=keras.optimizers.RMSprop(1e-3), loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()], ) # Generate dummy NumPy data img_data = np.random.random_sample(size=(100, 32, 32, 3)) ts_data = np.random.random_sample(size=(100, 20, 10)) score_targets = np.random.random_sample(size=(100, 1)) class_targets = np.random.random_sample(size=(100, 5)) # Fit on lists model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1) # Alternatively, fit on dicts model.fit( {"img_input": img_data, "ts_input": ts_data}, {"score_output": score_targets, "class_output": class_targets}, batch_size=32, epochs=1, ) train_dataset = tf.data.Dataset.from_tensor_slices( ( {"img_input": img_data, "ts_input": ts_data}, {"score_output": score_targets, "class_output": class_targets}, ) ) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) model.fit(train_dataset, epochs=1) model = get_compiled_model() callbacks = [ keras.callbacks.EarlyStopping( # Stop training when `val_loss` is no longer improving monitor="val_loss", # "no longer improving" being defined as "no better than 1e-2 less" min_delta=1e-2, # "no longer improving" being further defined as "for at least 2 epochs" patience=2, verbose=1, ) ] model.fit( x_train, y_train, epochs=20, batch_size=64, callbacks=callbacks, validation_split=0.2, ) class 
LossHistory(keras.callbacks.Callback): def on_train_begin(self, logs): self.per_batch_losses = [] def on_batch_end(self, batch, logs): self.per_batch_losses.append(logs.get("loss")) model = get_compiled_model() callbacks = [ keras.callbacks.ModelCheckpoint( # Path where to save the model # The two parameters below mean that we will overwrite # the current checkpoint if and only if # the `val_loss` score has improved. # The saved model name will include the current epoch. filepath="mymodel_{epoch}", save_best_only=True, # Only save a model if `val_loss` has improved. monitor="val_loss", verbose=1, ) ] model.fit( x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2 ) import os # Prepare a directory to store all the checkpoints. checkpoint_dir = "./ckpt" if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def make_or_restore_model(): # Either restore the latest model, or create a fresh one # if there is no checkpoint available. checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)] if checkpoints: latest_checkpoint = max(checkpoints, key=os.path.getctime) print("Restoring from", latest_checkpoint) return keras.models.load_model(latest_checkpoint) print("Creating a new model") return get_compiled_model() model = make_or_restore_model() callbacks = [ # This callback saves a SavedModel every 100 batches. # We include the training loss in the saved model name. keras.callbacks.ModelCheckpoint( filepath=checkpoint_dir + "/ckpt-loss={loss:.2f}", save_freq=100 ) ] model.fit(x_train, y_train, epochs=1, callbacks=callbacks) initial_learning_rate = 0.1 lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True ) optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule) keras.callbacks.TensorBoard( log_dir="/full_path_to_your_logs", histogram_freq=0, # How often to log histogram visualizations embeddings_freq=0, # How often to log embedding visualizations update_freq="epoch", ) # How often to write logs (default: once per epoch) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Training and evaluation with the built-in methods Step2: Getting started Step3: A typical end-to-end workflow consists of the following. Step4: Specify the training configuration (optimizer, loss, metrics). Step5: Call fit() to train the model by slicing the data into "batches" of size "batch_size" and repeatedly iterating over the entire dataset for a given number of "epochs". Step6: The returned "history" object holds a record of the loss values and metric values during training. Step7: Evaluate the model on the test data via evaluate(). Step8: Now, let's review each piece of this workflow in detail. Step9: The metrics argument should be a list -- your model can have any number of metrics. Step10: We'll put the model definition and compile step into functions so we can reuse them later; they are called several times across the examples in this guide. Step11: Many built-in optimizers, losses, and metrics are available Step12: If you need a loss function that takes in parameters besides y_true and y_pred, you can subclass the tf.keras.losses.Loss class and implement the following two methods. Step13: Custom metrics Step14: Handling losses and metrics that don't fit the standard signature Step15: You can do the same for logging metric values, using add_metric(). Step16: In the Functional API, you can also call model.add_loss(loss_tensor) or model.add_metric(metric_tensor, name, aggregation). Step17: Since the loss is passed via add_loss(), the model already has a loss, so you can call compile() without a loss function. Step18: It can be used in a model with two inputs (input data and targets), compiled without a loss argument, like this. Step19: For more information about training multi-input models, see the section Passing data to multi-input, multi-output models. Step20: Training and evaluation from tf.data Datasets Step21: The dataset is reset at the end of each epoch, so it can be reused in the next epoch. Step22: Using a validation dataset Step23: At the end of each epoch, the model will iterate over the validation dataset and compute the validation loss and validation metrics. Step24: The validation dataset is reset after each use, so you will always be evaluating on the same samples from epoch to epoch. Step25: Sample weights Step26: Here's a matching Dataset example. Step27: Passing data to multi-input, multi-output models Step28: You can plot this model to see clearly what we're doing here (the shapes shown in the plot are batch shapes, rather than per-sample shapes). Step29: You can specify different losses for different outputs by passing the loss functions as a list at compile time. Step30: If we only passed a single loss function to the model, the same loss function would be applied to every output (which is not appropriate here). Step31: Since we gave names to our output layers, we can also specify per-output losses and metrics via a dict. Step32: We recommend the use of explicit names and dicts if you have more than two outputs. Step33: You can also choose not to compute a loss for certain outputs, if these outputs are meant for prediction but not for training. Step34: Passing data to a multi-input or multi-output model in fit() works in a similar way as specifying a loss function in compile: you can pass lists of NumPy arrays (with 1:1 mapping to the outputs that received a loss function). Step35: Here's the Dataset use case: similarly to what we did for NumPy arrays, the Dataset should return a tuple of tuples. Step36: Using callbacks Step37: Many built-in callbacks are available Step38: Checkpointing models Step39: The ModelCheckpoint callback can be used to implement fault tolerance: the ability to restart training from the last saved state of the model in case training gets randomly interrupted. Here's a basic example. Step40: You can also write your own callback for saving and restoring models. Step41: Several built-in schedules are available, such as ExponentialDecay, PiecewiseConstantDecay, PolynomialDecay, and InverseTimeDecay.
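Step12 above refers to subclassing tf.keras.losses.Loss, but the corresponding cell is not visible in this excerpt. A minimal sketch of such a subclass is given here; the class name CustomMSE and the regularization_factor argument are illustrative assumptions, not code taken from the original notebook.
import tensorflow as tf
from tensorflow import keras

class CustomMSE(keras.losses.Loss):
    def __init__(self, regularization_factor=0.1, name="custom_mse"):
        # Extra constructor argument beyond y_true/y_pred, as described in Step12.
        super().__init__(name=name)
        self.regularization_factor = regularization_factor

    def call(self, y_true, y_pred):
        # Mean-squared error plus a small illustrative regularization term.
        mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
        reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
        return mse + reg * self.regularization_factor

# Hypothetical usage: model.compile(optimizer="adam", loss=CustomMSE())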
13,623
<ASSISTANT_TASK:> Python Code: %load_ext sql %%sql mysql://admin:admin@172.20.101.81/pidata DROP TABLE if exists temps3; CREATE TABLE temps3 ( device varchar(20) DEFAULT NULL, datetime datetime DEFAULT NULL, temp float DEFAULT NULL, hum float DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; %sql show tables; %%sql mysql://admin:admin@172.20.101.81 DROP USER IF EXISTS piuser3; CREATE USER 'piuser3'@'%' IDENTIFIED BY 'logger'; GRANT SELECT, INSERT, DELETE, UPDATE ON pidata.temps3 TO 'piuser3'@'%'; FLUSH PRIVILEGES; %sql select * from mysql.user; %%sql mysql://piuser3:logger@172.20.101.81/pidata select * from temps3; for x in range(10): %sql INSERT INTO temps3 (device,datetime,temp,hum) VALUES('pi-003',date(now()),73.2,22.0); %sql SELECT * from temps3; %sql DELETE FROM temps3; %sql SELECT * FROM temps3; <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Connect to the MySQL database instance using an account that has admin access and run SQL to drop/create the table Step2: Check to see that the table was created Step3: Exercise 2 Step4: Check to be sure the new user has access to the new table Step5: Exercise 3 Step6: Now we can check to see if the data was inserted as expected Step7: Next we will empty the table so it will be ready for live sensor data
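The notebook above drives MySQL entirely through the ipython-sql %sql magic. As a rough sketch of the same insert/select cycle from plain Python, the snippet below uses the pymysql driver; the driver choice is an assumption (any DB-API driver would work), and the host and credentials simply reuse the illustrative values already shown in the notebook.
# Sketch only: assumes pymysql is installed; host/user/password mirror the notebook above.
import pymysql

conn = pymysql.connect(host='172.20.101.81', user='piuser3',
                       password='logger', database='pidata')
try:
    with conn.cursor() as cur:
        # Parameterized insert of one sensor reading into temps3.
        cur.execute(
            "INSERT INTO temps3 (device, datetime, temp, hum) "
            "VALUES (%s, NOW(), %s, %s)",
            ('pi-003', 73.2, 22.0))
    conn.commit()
    with conn.cursor() as cur:
        cur.execute("SELECT * FROM temps3")
        print(cur.fetchall())
finally:
    conn.close()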
13,624
<ASSISTANT_TASK:> Python Code: %pylab inline from IPython.display import HTML # Import from local directory # import sys # sys.path.insert(0, '../pypdb') # from pypdb import * # Import from installed package from pypdb import * %load_ext autoreload %autoreload 2 found_pdbs = Query("ribosome").search() print(found_pdbs[:10]) found_pdbs = Query(27499440, "PubmedIdQuery").search() print(found_pdbs[:10]) found_pdbs = Query('6239', 'TreeEntityQuery').search() #TaxID for C elegans print(found_pdbs[:5]) found_pdbs = Query('SOLID-STATE NMR', query_type='ExpTypeQuery').search() print(found_pdbs[:10]) found_pdbs = Query('2E8D', query_type="structure").search() print(found_pdbs[:10]) found_pdbs = Query('Perutz, M.F.', query_type='AdvancedAuthorQuery').search() print(found_pdbs) q = Query("Dictyostelium", query_type="OrganismQuery") print(q.search()[:10]) matching_papers = find_papers('crispr', max_results=10) print(list(matching_papers)[:10]) pdb_file = get_pdb_file('4lza', filetype='cif', compression=False) print(pdb_file[:400]) all_info = get_info('4LZA') print(list(all_info.keys())) q = Query("VLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHGKKVADALTAVAHVDDMPNAL", query_type="sequence", return_type="polymer_entity") print(q.search()) pfam_info = Query("PF00008", query_type="pfam").search() print(pfam_info[:5]) from pypdb.clients.search.search_client import perform_search from pypdb.clients.search.search_client import ReturnType from pypdb.clients.search.operators import text_operators search_operator = text_operators.DefaultOperator(value="ribosome") return_type = ReturnType.ENTRY results = perform_search(search_operator, return_type) print(results[:10]) search_operator = text_operators.ExactMatchOperator(value="Mus musculus", attribute="rcsb_entity_source_organism.taxonomy_lineage.name") return_type = ReturnType.POLYMER_ENTITY results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.InOperator(values=["Mus musculus", "Homo sapiens"], attribute="rcsb_entity_source_organism.taxonomy_lineage.name") return_type = ReturnType.NON_POLYMER_ENTITY results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.ContainsWordsOperator(value="actin-binding protein", attribute="struct.title") return_type = ReturnType.POLYMER_INSTANCE results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.ContainsPhraseOperator(value="actin-binding protein", attribute="struct.title") return_type = ReturnType.ASSEMBLY results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.ComparisonOperator( value="2019-01-01T00:00:00Z", attribute="rcsb_accession_info.initial_release_date", comparison_type=text_operators.ComparisonType.GREATER) return_type = ReturnType.ENTRY results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.RangeOperator( from_value="2019-01-01T00:00:00Z", to_value="2020-01-01T00:00:00Z", include_lower=True, include_upper=False, attribute="rcsb_accession_info.initial_release_date") return_type = ReturnType.ENTRY results = perform_search(search_operator, return_type) print(results[:5]) from pypdb.clients.search.search_client import perform_search_with_graph, SearchService, ReturnType from pypdb.clients.search.operators import text_operators cell_a_operator = text_operators.RangeOperator( attribute='cell.length_a', from_value=80, to_value=84, include_upper=True ) results 
= perform_search_with_graph( query_object=cell_a_operator, return_type=ReturnType.ENTRY ) print(results[:5]) search_operator = text_operators.ComparisonOperator( value=4, attribute="rcsb_entry_info.resolution_combined", comparison_type=text_operators.ComparisonType.LESS) return_type = ReturnType.ENTRY results = perform_search(search_operator, return_type) print(results[:5]) search_operator = text_operators.ExistsOperator( attribute="rcsb_accession_info.initial_release_date") return_type = ReturnType.ENTRY results = perform_search(search_operator, return_type) print(results[:5]) from pypdb.clients.search.search_client import perform_search_with_graph from pypdb.clients.search.search_client import ReturnType from pypdb.clients.search.search_client import QueryGroup, LogicalOperator from pypdb.clients.search.operators import text_operators # SearchOperator associated with structures with under 4 Angstroms of resolution under_4A_resolution_operator = text_operators.ComparisonOperator( value=4, attribute="rcsb_entry_info.resolution_combined", comparison_type=text_operators.ComparisonType.GREATER) # SearchOperator associated with entities containing 'Mus musculus' lineage is_mus_operator = text_operators.ExactMatchOperator( value="Mus musculus", attribute="rcsb_entity_source_organism.taxonomy_lineage.name") # SearchOperator associated with entities containing 'Homo sapiens' lineage is_human_operator = text_operators.ExactMatchOperator( value="Homo sapiens", attribute="rcsb_entity_source_organism.taxonomy_lineage.name") # QueryGroup associated with being either human or `Mus musculus` is_human_or_mus_group = QueryGroup( queries = [is_mus_operator, is_human_operator], logical_operator = LogicalOperator.OR ) # QueryGroup associated with being ((Human OR Mus) AND (Under 4 Angstroms)) is_under_4A_and_human_or_mus_group = QueryGroup( queries = [is_human_or_mus_group, under_4A_resolution_operator], logical_operator = LogicalOperator.AND ) return_type = ReturnType.ENTRY results = perform_search_with_graph( query_object=is_under_4A_and_human_or_mus_group, return_type=return_type) print("\n", results[:10]) # Huzzah <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Search functions that return lists of PDB IDs Step2: Search by PubMed ID Number Step3: Search by source organism using NCBI TaxId Step4: Search by a specific experimental method Step5: Search by protein structure similarity Step6: Search by Author Step7: Search by organism Step8: Information Search functions Step9: Functions that return information about single PDB IDs Step10: Get a general description of the entry's metadata Step11: Run a Sequence search Step12: Search by PFAM number Step13: New API for advanced search Step14: Search for all entries that mention the word 'ribosome' Step15: Search for polymers from 'Mus musculus' Step16: Search for non-polymers from 'Mus musculus' or 'Homo sapiens' Step17: Search for polymer instances whose titles contain "actin" or "binding" or "protein" Step18: Search for assemblies that contain the words "actin binding protein" Step19: Search for entries released in 2019 or later Step20: Search for entries released only in 2019 Step21: Search by cell length Step22: Search for structures under 4 angstroms of resolution Step23: Search for structures with a given attribute. Step24: Search for 'Mus musculus' or 'Homo sapiens' structures after 2019 using graph search
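Each step above is backed by one cell of the code block. As a small end-to-end usage sketch that only reuses calls already demonstrated there (Query and get_pdb_file), the snippet below runs a keyword search and downloads the first few hits as mmCIF text; the keyword and the number of hits are arbitrary choices for illustration.
# Usage sketch combining two pypdb calls shown above (the notebook imports them via `from pypdb import *`).
from pypdb import Query, get_pdb_file

hits = Query("crispr").search()
for pdb_id in hits[:3]:
    cif_text = get_pdb_file(pdb_id, filetype='cif', compression=False)
    print(pdb_id, len(cif_text), "characters of mmCIF")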
13,625
<ASSISTANT_TASK:> Python Code: ### START CODE HERE ### (≈ 1 line of code) test = "Hello World" ### END CODE HERE ### print ("test: " + test) # GRADED FUNCTION: basic_sigmoid import math def basic_sigmoid(x): Compute sigmoid of x. Arguments: x -- A scalar Return: s -- sigmoid(x) ### START CODE HERE ### (≈ 1 line of code) s = 1 / (1 + math.exp(-x)) ### END CODE HERE ### return s basic_sigmoid(3) ### One reason why we use "numpy" instead of "math" in Deep Learning ### x = [1, 2, 3] basic_sigmoid(x) # you will see this give an error when you run it, because x is a vector. import numpy as np # example of np.exp x = np.array([1, 2, 3]) print(np.exp(x)) # result is (exp(1), exp(2), exp(3)) # example of vector operation x = np.array([1, 2, 3]) print (x + 3) # GRADED FUNCTION: sigmoid import numpy as np # this means you can access numpy functions by writing np.function() instead of numpy.function() def sigmoid(x): Compute the sigmoid of x Arguments: x -- A scalar or numpy array of any size Return: s -- sigmoid(x) ### START CODE HERE ### (≈ 1 line of code) s = 1 / (1 + np.exp(-x)) ### END CODE HERE ### return s x = np.array([1, 2, 3]) sigmoid(x) # GRADED FUNCTION: sigmoid_derivative def sigmoid_derivative(x): Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x. You can store the output of the sigmoid function into variables and then use it to calculate the gradient. Arguments: x -- A scalar or numpy array Return: ds -- Your computed gradient. ### START CODE HERE ### (≈ 2 lines of code) s = sigmoid(x) ds = s * (1 - s) ### END CODE HERE ### return ds x = np.array([1, 2, 3]) print ("sigmoid_derivative(x) = " + str(sigmoid_derivative(x))) # GRADED FUNCTION: image2vector def image2vector(image): Argument: image -- a numpy array of shape (length, height, depth) Returns: v -- a vector of shape (length*height*depth, 1) ### START CODE HERE ### (≈ 1 line of code) v = image.reshape(image.shape[0] * image.shape[1] * image.shape[2], 1) ### END CODE HERE ### return v # This is a 3 by 3 by 2 array, typically images will be (num_px_x, num_px_y,3) where 3 represents the RGB values image = np.array([[[ 0.67826139, 0.29380381], [ 0.90714982, 0.52835647], [ 0.4215251 , 0.45017551]], [[ 0.92814219, 0.96677647], [ 0.85304703, 0.52351845], [ 0.19981397, 0.27417313]], [[ 0.60659855, 0.00533165], [ 0.10820313, 0.49978937], [ 0.34144279, 0.94630077]]]) print ("image2vector(image) = " + str(image2vector(image))) # GRADED FUNCTION: normalizeRows def normalizeRows(x): Implement a function that normalizes each row of the matrix x (to have unit length). Argument: x -- A numpy matrix of shape (n, m) Returns: x -- The normalized (by row) numpy matrix. You are allowed to modify x. ### START CODE HERE ### (≈ 2 lines of code) # Compute x_norm as the norm 2 of x. Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True) x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True) # Divide x by its norm. x = x / x_norm ### END CODE HERE ### return x x = np.array([ [0, 3, 4], [1, 6, 4]]) print("normalizeRows(x) = " + str(normalizeRows(x))) # GRADED FUNCTION: softmax def softmax(x): Calculates the softmax for each row of the input x. Your code should work for a row vector and also for matrices of shape (n, m). Argument: x -- A numpy matrix of shape (n,m) Returns: s -- A numpy matrix equal to the softmax of x, of shape (n,m) ### START CODE HERE ### (≈ 3 lines of code) # Apply exp() element-wise to x. Use np.exp(...). 
x_exp = np.exp(x) # Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True). x_sum = np.sum(x_exp, axis=1, keepdims=True) # Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting. s = x_exp / x_sum ### END CODE HERE ### return s x = np.array([ [9, 2, 5, 0, 0], [7, 5, 0, 0 ,0]]) print("softmax(x) = " + str(softmax(x))) import time x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0] x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0] ### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ### tic = time.process_time() dot = 0 for i in range(len(x1)): dot+= x1[i]*x2[i] toc = time.process_time() print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC OUTER PRODUCT IMPLEMENTATION ### tic = time.process_time() outer = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros for i in range(len(x1)): for j in range(len(x2)): outer[i,j] = x1[i]*x2[j] toc = time.process_time() print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC ELEMENTWISE IMPLEMENTATION ### tic = time.process_time() mul = np.zeros(len(x1)) for i in range(len(x1)): mul[i] = x1[i]*x2[i] toc = time.process_time() print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ### W = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array tic = time.process_time() gdot = np.zeros(W.shape[0]) for i in range(W.shape[0]): for j in range(len(x1)): gdot[i] += W[i,j]*x1[j] toc = time.process_time() print ("gdot = " + str(gdot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0] x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0] ### VECTORIZED DOT PRODUCT OF VECTORS ### tic = time.process_time() dot = np.dot(x1,x2) toc = time.process_time() print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### VECTORIZED OUTER PRODUCT ### tic = time.process_time() outer = np.outer(x1,x2) toc = time.process_time() print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### VECTORIZED ELEMENTWISE MULTIPLICATION ### tic = time.process_time() mul = np.multiply(x1,x2) toc = time.process_time() print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") ### VECTORIZED GENERAL DOT PRODUCT ### tic = time.process_time() dot = np.dot(W,x1) toc = time.process_time() print ("gdot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms") # GRADED FUNCTION: L1 def L1(yhat, y): Arguments: yhat -- vector of size m (predicted labels) y -- vector of size m (true labels) Returns: loss -- the value of the L1 loss function defined above ### START CODE HERE ### (≈ 1 line of code) loss = np.sum(np.abs(y - yhat)) ### END CODE HERE ### return loss yhat = np.array([.9, 0.2, 0.1, .4, .9]) y = np.array([1, 0, 0, 1, 1]) print("L1 = " + str(L1(yhat,y))) # GRADED FUNCTION: L2 def L2(yhat, y): Arguments: yhat -- vector of size m (predicted labels) y -- vector of size m (true labels) Returns: loss -- the value of the L2 loss function defined above ### START CODE HERE ### (≈ 1 line of code) loss = np.sum(np.dot((y - yhat), (y - yhat))) ### END CODE HERE ### return loss yhat = np.array([.9, 0.2, 0.1, .4, .9]) y = np.array([1, 0, 0, 1, 1]) print("L2 = " + str(L2(yhat,y))) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Expected output Step3: Expected Output Step4: In fact, if $ x = (x_1, x_2, ..., x_n)$ is a row vector then $np.exp(x)$ will apply the exponential function to every element of x. The output will thus be Step5: Furthermore, if x is a vector, then a Python operation such as $s = x + 3$ or $s = \frac{1}{x}$ will output s as a vector of the same size as x. Step7: Any time you need more info on a numpy function, we encourage you to look at the official documentation. Step9: Expected Output Step11: Expected Output Step13: Expected Output Step15: Expected Output Step16: Expected Output Step18: As you may have noticed, the vectorized implementation is much cleaner and more efficient. For bigger vectors/matrices, the differences in running time become even bigger. Step20: Expected Output
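For reference, the graded functions in this row implement the following standard definitions (written out here for clarity; they are not quoted from the notebook text):
$$\sigma(x) = \frac{1}{1 + e^{-x}}, \qquad \sigma'(x) = \sigma(x)\bigl(1 - \sigma(x)\bigr)$$
$$\mathrm{softmax}(x)_{ij} = \frac{e^{x_{ij}}}{\sum_{k} e^{x_{ik}}} \quad \text{(applied row-wise)}$$
$$L_1(\hat{y}, y) = \sum_{i} \lvert y_i - \hat{y}_i \rvert, \qquad L_2(\hat{y}, y) = \sum_{i} (y_i - \hat{y}_i)^2$$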
13,626
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'atmoschem') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/mixing ratio for gas" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Operator splitting" # "Integrated" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.transport_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Soil" # "Sea surface" # "Anthropogenic" # "Biomass burning" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Aircraft" # "Biomass burning" # "Lightning" # "Volcanos" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HOx" # "NOy" # "Ox" # "Cly" # "HSOx" # "Bry" # "VOCs" # "isoprene" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Cly" # "Bry" # "NOy" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule))" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon/soot" # "Polar stratospheric ice" # "Secondary organic aerosols" # "Particulate organic matter" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline (clear sky)" # "Offline (with clouds)" # "Online" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 1.3. Chemistry Scheme Scope Step7: 1.4. Basic Approximations Step8: 1.5. Prognostic Variables Form Step9: 1.6. Number Of Tracers Step10: 1.7. Family Approach Step11: 1.8. Coupling With Chemical Reactivity Step12: 2. Key Properties --&gt; Software Properties Step13: 2.2. Code Version Step14: 2.3. Code Languages Step15: 3. Key Properties --&gt; Timestep Framework Step16: 3.2. Split Operator Advection Timestep Step17: 3.3. Split Operator Physical Timestep Step18: 3.4. Split Operator Chemistry Timestep Step19: 3.5. Split Operator Alternate Order Step20: 3.6. Integrated Timestep Step21: 3.7. Integrated Scheme Type Step22: 4. Key Properties --&gt; Timestep Framework --&gt; Split Operator Order Step23: 4.2. Convection Step24: 4.3. Precipitation Step25: 4.4. Emissions Step26: 4.5. Deposition Step27: 4.6. Gas Phase Chemistry Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry Step30: 4.9. Photo Chemistry Step31: 4.10. Aerosols Step32: 5. Key Properties --&gt; Tuning Applied Step33: 5.2. Global Mean Metrics Used Step34: 5.3. Regional Metrics Used Step35: 5.4. Trend Metrics Used Step36: 6. Grid Step37: 6.2. Matches Atmosphere Grid Step38: 7. Grid --&gt; Resolution Step39: 7.2. Canonical Horizontal Resolution Step40: 7.3. Number Of Horizontal Gridpoints Step41: 7.4. Number Of Vertical Levels Step42: 7.5. Is Adaptive Grid Step43: 8. Transport Step44: 8.2. Use Atmospheric Transport Step45: 8.3. Transport Details Step46: 9. Emissions Concentrations Step47: 10. Emissions Concentrations --&gt; Surface Emissions Step48: 10.2. Method Step49: 10.3. Prescribed Climatology Emitted Species Step50: 10.4. Prescribed Spatially Uniform Emitted Species Step51: 10.5. Interactive Emitted Species Step52: 10.6. Other Emitted Species Step53: 11. Emissions Concentrations --&gt; Atmospheric Emissions Step54: 11.2. Method Step55: 11.3. Prescribed Climatology Emitted Species Step56: 11.4. Prescribed Spatially Uniform Emitted Species Step57: 11.5. Interactive Emitted Species Step58: 11.6. Other Emitted Species Step59: 12. Emissions Concentrations --&gt; Concentrations Step60: 12.2. Prescribed Upper Boundary Step61: 13. Gas Phase Chemistry Step62: 13.2. Species Step63: 13.3. Number Of Bimolecular Reactions Step64: 13.4. Number Of Termolecular Reactions Step65: 13.5. Number Of Tropospheric Heterogenous Reactions Step66: 13.6. Number Of Stratospheric Heterogenous Reactions Step67: 13.7. Number Of Advected Species Step68: 13.8. Number Of Steady State Species Step69: 13.9. Interactive Dry Deposition Step70: 13.10. Wet Deposition Step71: 13.11. Wet Oxidation Step72: 14. Stratospheric Heterogeneous Chemistry Step73: 14.2. Gas Phase Species Step74: 14.3. Aerosol Species Step75: 14.4. Number Of Steady State Species Step76: 14.5. Sedimentation Step77: 14.6. Coagulation Step78: 15. Tropospheric Heterogeneous Chemistry Step79: 15.2. Gas Phase Species Step80: 15.3. Aerosol Species Step81: 15.4. Number Of Steady State Species Step82: 15.5. Interactive Dry Deposition Step83: 15.6. Coagulation Step84: 16. Photo Chemistry Step85: 16.2. Number Of Reactions Step86: 17. Photo Chemistry --&gt; Photolysis Step87: 17.2. Environmental Conditions
13,627
<ASSISTANT_TASK:> Python Code: #@title Copyright 2020 The Earth Engine Community Authors { display-mode: "form" } # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ee # Trigger the authentication flow. ee.Authenticate() # Initialize the library. ee.Initialize() import matplotlib.pyplot as plt import numpy as np from scipy.stats import norm, gamma, f, chi2 import IPython.display as disp %matplotlib inline # Import the Folium library. import folium # Define a method for displaying Earth Engine image tiles to folium map. def add_ee_layer(self, ee_image_object, vis_params, name): map_id_dict = ee.Image(ee_image_object).getMapId(vis_params) folium.raster_layers.TileLayer( tiles = map_id_dict['tile_fetcher'].url_format, attr = 'Map Data &copy; <a href="https://earthengine.google.com/">Google Earth Engine</a>', name = name, overlay = True, control = True ).add_to(self) # Add EE drawing method to folium. folium.Map.add_ee_layer = add_ee_layer geoJSON = { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": {}, "geometry": { "type": "Polygon", "coordinates": [ [ [ 8.473892211914062, 49.98081240937428 ], [ 8.658599853515625, 49.98081240937428 ], [ 8.658599853515625, 50.06066538593667 ], [ 8.473892211914062, 50.06066538593667 ], [ 8.473892211914062, 49.98081240937428 ] ] ] } } ] } coords = geoJSON['features'][0]['geometry']['coordinates'] aoi = ee.Geometry.Polygon(coords) ffa_db = ee.Image(ee.ImageCollection('COPERNICUS/S1_GRD') .filterBounds(aoi) .filterDate(ee.Date('2020-08-01'), ee.Date('2020-08-31')) .first() .clip(aoi)) ffa_fl = ee.Image(ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT') .filterBounds(aoi) .filterDate(ee.Date('2020-08-01'), ee.Date('2020-08-31')) .first() .clip(aoi)) ffa_db.bandNames().getInfo() url = ffa_db.select('VV').getThumbURL({'min': -20, 'max': 0}) disp.Image(url=url, width=800) location = aoi.centroid().coordinates().getInfo()[::-1] # Make an RGB color composite image (VV,VH,VV/VH). rgb = ee.Image.rgb(ffa_db.select('VV'), ffa_db.select('VH'), ffa_db.select('VV').divide(ffa_db.select('VH'))) # Create the map object. m = folium.Map(location=location, zoom_start=12) # Add the S1 rgb composite to the map object. m.add_ee_layer(rgb, {'min': [-20, -20, 0], 'max': [0, 0, 2]}, 'FFA') # Add a layer control panel to the map. m.add_child(folium.LayerControl()) # Display the map. 
display(m) geoJSON = { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": {}, "geometry": { "type": "Polygon", "coordinates": [ [ [ 8.534317016601562, 50.021637833966786 ], [ 8.530540466308594, 49.99780882512238 ], [ 8.564186096191406, 50.00663576154257 ], [ 8.578605651855469, 50.019431940583104 ], [ 8.534317016601562, 50.021637833966786 ] ] ] } } ] } coords = geoJSON['features'][0]['geometry']['coordinates'] aoi_sub = ee.Geometry.Polygon(coords) start = 0 end = 500 step = 0.5 bucket_width = int((end - start)/step) print('bucket_width: ', bucket_width) hist = ffa_fl.select('VV').reduceRegion( ee.Reducer.fixedHistogram(0, 0.5, 500),aoi_sub).get('VV').getInfo() mean = ffa_fl.select('VV').reduceRegion( ee.Reducer.mean(), aoi_sub).get('VV').getInfo() variance = ffa_fl.select('VV').reduceRegion( ee.Reducer.variance(), aoi_sub).get('VV').getInfo() a = np.array(hist) print('a.shape: ', a.shape) # fetching data x = a[:, 0] # array of bucket edge positions y = a[:, 1]/np.sum(a[:, 1]) # normalized array of bucket contents # Plotting plt.grid() plt.plot(x, y, '.') plt.show() Gamma_params = gamma.fit(x) print('Gamma_params: ', Gamma_params) fit_alpha, fit_loc, fit_beta = Gamma_params if any([x <0 for x in Gamma_params]): print('Choosing an alterate solution for fittin the gamma distribution') beta = variance/mean alpha = mean/beta loc = 0 print('alpha: {0}, beta: {1}'.format(alpha, beta)) Gamma_params = (alpha, loc, beta) y_expected = gamma.pdf(x, *Gamma_params) plt.grid() plt.plot(x, y, '.', label='data') plt.plot(x, y_expected/bucket_width, '-r', label='gamma') plt.legend() plt.show() def X(n): return np.sum(np.cos(4*np.pi*(np.random.rand(n)-0.5)))/np.sqrt(n/2) n= 10000 Xs = [X(n) for i in range(10000)] y, x = np.histogram(Xs, 100, range=[-5,5]) plt.plot(x[:-1], y/1000, 'b.', label='simulated data') plt.plot(x, norm.pdf(x), '-r', label='normal distribution') plt.grid() plt.legend() plt.show() mean ** 2 / variance <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Detecting Changes in Sentinel-1 Imagery (Part 1) Step2: Datasets and Python modules Step3: And in order to make use of interactive maps, we import the folium package Step4: A Sentinel-1 image Step5: Note that the last and first corners are identical, indicating closure of the polygon. We have to bore down into the GeoJSON structure to get the geometry coordinates, then create an ee.Geometry() object Step6: Next, we filter the S1 archive to get an image over the aoi acquired sometime in August, 2020. Any old image will do fine, so we won't bother to specify the orbit number or whether we want the ASCENDING or DESCENDING node. If we don't specify the instrument mode or resolution, we get IW (interferometric wide swath) mode and $10\times 10\ m^2$ pixels by default. For convenience we grab both decibel and float versions Step7: Notice that we have clipped the images to our aoi so as not to work with the entire swath. To confirm that we have an image, we list its band names, fetching the result from the GEE servers with the getInfo() class method Step8: and display the VV band of the decibel version using the getThumbURL() method and IPython's display module. The float intensities $I$ are generally between 0 and 1, so we stretch the decibel image $10\log_{10}(I)$ from $-20$ to $0$ Step9: This is fine, but a little boring. We can use folium to project onto a map for geographical context. The folium Map() constructor wants its location keyword in long-lat rather than lat-long, so we do a list reverse in the first line Step10: Pixel distributions Step11: Using standard reducers from the GEE library we can easily calculate a histogram and estimate the first two moments (mean and variance) of the pixels in the polygon aoi_sub , again retrieving the results from the servers with getInfo() . Step12: Here is a plot of the (normalized) histogram using numpy and matplotlib Step13: The above histogram is in fact a gamma probability density distribution Step14: In order to understand just why this is the case, let's take a step back and consider how the pixels were generated. Step15: Furthermore, $x$ and $y$ are uncorrelated since, in the expression for covariance of $x$ and $y$, the sums of products of cosine and sine terms cancel to zero. This means that $x + {\bf i}y$, and hence the observed single look complex signal $S_{vv}$ (see Eq. (1.8)), has a complex normal distribution .
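Steps 13-15 above describe the gamma model for the SAR intensity histogram without writing it out. For clarity, the density being fitted and the moment matching used in the code (beta = variance/mean, alpha = mean/beta, and the final mean**2/variance expression) correspond to the standard relations
$$p(x) = \frac{1}{\beta^{\alpha}\,\Gamma(\alpha)}\, x^{\alpha-1} e^{-x/\beta}, \qquad \mathbb{E}[x] = \alpha\beta, \qquad \mathrm{Var}[x] = \alpha\beta^{2},$$
so that
$$\beta = \frac{\mathrm{Var}[x]}{\mathbb{E}[x]}, \qquad \alpha = \frac{\mathbb{E}[x]}{\beta} = \frac{\mathbb{E}[x]^{2}}{\mathrm{Var}[x]}.$$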
13,628
<ASSISTANT_TASK:> Python Code: import os import requests import gzip from six.moves import cPickle as pkl import time import numpy import theano import theano.tensor as T from theano.tensor.nnet import categorical_crossentropy from theano import config from fuel.datasets import TextFile from fuel.streams import DataStream from fuel.schemes import ConstantScheme from fuel.transformers import Batch, Padding if not os.path.exists('traindata.txt'): r = requests.get('http://www-etud.iro.umontreal.ca/~brakelp/traindata.txt.gz') with open('traindata.txt.gz', 'wb') as data_file: data_file.write(r.content) with gzip.open('traindata.txt.gz', 'rb') as data_file: with open('traindata.txt', 'w') as out_file: out_file.write(data_file.read()) if not os.path.exists('valdata.txt'): r = requests.get('http://www-etud.iro.umontreal.ca/~brakelp/valdata.txt.gz') with open('valdata.txt.gz', 'wb') as data_file: data_file.write(r.content) with gzip.open('valdata.txt.gz', 'rb') as data_file: with open('valdata.txt', 'w') as out_file: out_file.write(data_file.read()) if not os.path.exists('dictionary.pkl'): r = requests.get('http://www-etud.iro.umontreal.ca/~brakelp/dictionary.pkl') with open('dictionary.pkl', 'wb') as data_file: data_file.write(r.content) def gauss_weight(rng, ndim_in, ndim_out=None, sd=.005): if ndim_out is None: ndim_out = ndim_in W = rng.randn(ndim_in, ndim_out) * sd return numpy.asarray(W, dtype=config.floatX) def index_dot(indices, w): return w[indices.flatten()] class LstmLayer: def __init__(self, rng, input, mask, n_in, n_h): # Init params self.W_i = theano.shared(gauss_weight(rng, n_in, n_h), 'W_i', borrow=True) self.W_f = theano.shared(gauss_weight(rng, n_in, n_h), 'W_f', borrow=True) self.W_c = theano.shared(gauss_weight(rng, n_in, n_h), 'W_c', borrow=True) self.W_o = theano.shared(gauss_weight(rng, n_in, n_h), 'W_o', borrow=True) self.U_i = theano.shared(gauss_weight(rng, n_h), 'U_i', borrow=True) self.U_f = theano.shared(gauss_weight(rng, n_h), 'U_f', borrow=True) self.U_c = theano.shared(gauss_weight(rng, n_h), 'U_c', borrow=True) self.U_o = theano.shared(gauss_weight(rng, n_h), 'U_o', borrow=True) self.b_i = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_i', borrow=True) self.b_f = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_f', borrow=True) self.b_c = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_c', borrow=True) self.b_o = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_o', borrow=True) self.params = [self.W_i, self.W_f, self.W_c, self.W_o, self.U_i, self.U_f, self.U_c, self.U_o, self.b_i, self.b_f, self.b_c, self.b_o] outputs_info = [T.zeros((input.shape[1], n_h)), T.zeros((input.shape[1], n_h))] rval, updates = theano.scan(self._step, sequences=[mask, input], outputs_info=outputs_info) # self.output is in the format (length, batchsize, n_h) self.output = rval[0] def _step(self, m_, x_, h_, c_): i_preact = (index_dot(x_, self.W_i) + T.dot(h_, self.U_i) + self.b_i) i = T.nnet.sigmoid(i_preact) f_preact = (index_dot(x_, self.W_f) + T.dot(h_, self.U_f) + self.b_f) f = T.nnet.sigmoid(f_preact) o_preact = (index_dot(x_, self.W_o) + T.dot(h_, self.U_o) + self.b_o) o = T.nnet.sigmoid(o_preact) c_preact = (index_dot(x_, self.W_c) + T.dot(h_, self.U_c) + self.b_c) c = T.tanh(c_preact) c = f * c_ + i * c c = m_[:, None] * c + (1. - m_)[:, None] * c_ h = o * T.tanh(c) h = m_[:, None] * h + (1. 
- m_)[:, None] * h_ return h, c def sequence_categorical_crossentropy(prediction, targets, mask): prediction_flat = prediction.reshape(((prediction.shape[0] * prediction.shape[1]), prediction.shape[2]), ndim=2) targets_flat = targets.flatten() mask_flat = mask.flatten() ce = categorical_crossentropy(prediction_flat, targets_flat) return T.sum(ce * mask_flat) class LogisticRegression(object): def __init__(self, rng, input, n_in, n_out): W = gauss_weight(rng, n_in, n_out) self.W = theano.shared(value=numpy.asarray(W, dtype=theano.config.floatX), name='W', borrow=True) # initialize the biases b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # compute vector of class-membership probabilities in symbolic form energy = T.dot(input, self.W) + self.b energy_exp = T.exp(energy - T.max(energy, axis=2, keepdims=True)) pmf = energy_exp / energy_exp.sum(axis=2, keepdims=True) self.p_y_given_x = pmf self.params = [self.W, self.b] batch_size = 100 n_epochs = 40 n_h = 50 DICT_FILE = 'dictionary.pkl' TRAIN_FILE = 'traindata.txt' VAL_FILE = 'valdata.txt' # Load the datasets with Fuel dictionary = pkl.load(open(DICT_FILE, 'r')) # add a symbol for unknown characters dictionary['~'] = len(dictionary) reverse_mapping = dict((j, i) for i, j in dictionary.items()) train = TextFile(files=[TRAIN_FILE], dictionary=dictionary, unk_token='~', level='character', preprocess=str.lower, bos_token=None, eos_token=None) train_stream = DataStream.default_stream(train) # organize data in batches and pad shorter sequences with zeros train_stream = Batch(train_stream, iteration_scheme=ConstantScheme(batch_size)) train_stream = Padding(train_stream) # idem dito for the validation text val = TextFile(files=[VAL_FILE], dictionary=dictionary, unk_token='~', level='character', preprocess=str.lower, bos_token=None, eos_token=None) val_stream = DataStream.default_stream(val) # organize data in batches and pad shorter sequences with zeros val_stream = Batch(val_stream, iteration_scheme=ConstantScheme(batch_size)) val_stream = Padding(val_stream) # Set the random number generator' seeds for consistency rng = numpy.random.RandomState(12345) x = T.lmatrix('x') mask = T.matrix('mask') # Construct an LSTM layer and a logistic regression layer recurrent_layer = LstmLayer(rng=rng, input=x, mask=mask, n_in=111, n_h=n_h) logreg_layer = LogisticRegression(rng=rng, input=recurrent_layer.output[:-1], n_in=n_h, n_out=111) # define a cost variable to optimize cost = sequence_categorical_crossentropy(logreg_layer.p_y_given_x, x[1:], mask[1:]) / batch_size # create a list of all model parameters to be fit by gradient descent params = logreg_layer.params + recurrent_layer.params # create a list of gradients for all model parameters grads = T.grad(cost, params) learning_rate = 0.1 updates = [ (param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads) ] update_model = theano.function([x, mask], cost, updates=updates) evaluate_model = theano.function([x, mask], cost) x_t = T.iscalar() h_p = T.vector() c_p = T.vector() h_t, c_t = recurrent_layer._step(T.ones(1), x_t, h_p, c_p) energy = T.dot(h_t, logreg_layer.W) + logreg_layer.b energy_exp = T.exp(energy - T.max(energy, axis=1, keepdims=True)) output = energy_exp / energy_exp.sum(axis=1, keepdims=True) single_step = theano.function([x_t, h_p, c_p], [output, h_t, c_t]) def speak(single_step, prefix='the meaning of life is ', n_steps=450): try: h_p = numpy.zeros((n_h,), dtype=config.floatX) c_p = 
numpy.zeros((n_h,), dtype=config.floatX) sentence = prefix for char in prefix: x_t = dictionary[char] prediction, h_p, c_p = single_step(x_t, h_p.flatten(), c_p.flatten()) # Renormalize probability in float64 flat_prediction = prediction.flatten() flat_pred_sum = flat_prediction.sum(dtype='float64') if flat_pred_sum > 1: flat_prediction = flat_prediction.astype('float64') / flat_pred_sum sample = numpy.random.multinomial(1, flat_prediction) for i in range(n_steps): x_t = numpy.argmax(sample) prediction, h_p, c_p = single_step(x_t, h_p.flatten(), c_p.flatten()) # Renormalize probability in float64 flat_prediction = prediction.flatten() flat_pred_sum = flat_prediction.sum(dtype='float64') if flat_pred_sum > 1: flat_prediction = flat_prediction.astype('float64') / flat_pred_sum sample = numpy.random.multinomial(1, flat_prediction) sentence += reverse_mapping[x_t] return sentence except ValueError as e: print 'Something went wrong during sentence generation: {}'.format(e) start_time = time.clock() iteration = 0 for epoch in range(n_epochs): print 'epoch:', epoch for x_, mask_ in train_stream.get_epoch_iterator(): iteration += 1 cross_entropy = update_model(x_.T, mask_.T) # Generate some text after each 20 minibatches if iteration % 40 == 0: sentence = speak(single_step, prefix='the meaning of life is ', n_steps=450) print print 'LSTM: "' + sentence + '"' print print 'epoch:', epoch, ' minibatch:', iteration val_scores = [] for x_val, mask_val in val_stream.get_epoch_iterator(): val_scores.append(evaluate_model(x_val.T, mask_val.T)) print 'Average validation CE per sentence:', numpy.mean(val_scores) end_time = time.clock() print('Optimization complete.') print('The code ran for %.2fm' % ((end_time - start_time) / 60.)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The Model Step2: The next block contains some code that computes cross-entropy for masked sequences and a stripped down version of the logistic regression class from the deep learning tutorials which we will need later. Step3: Processing the Data Step4: The Theano Graph Step5: We can now compile the function that updates the gradients. We also added a function that computes the cost without updating for monitoring purposes. Step6: Generating Sequences
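The masked cross-entropy and temperature sampling described in Steps 2 and 6 are easier to see outside the symbolic Theano graph. Here is a hedged numpy-only sketch of the same two ideas (illustrative toy values, not the original implementation):

import numpy as np

def masked_cross_entropy(probs, targets, mask):
    # probs: (timesteps, vocab); targets: (timesteps,) int labels;
    # mask: (timesteps,) 1.0 for real characters, 0.0 for padding.
    ce = -np.log(probs[np.arange(len(targets)), targets] + 1e-12)
    return np.sum(ce * mask)

def sample_with_temperature(preds, temperature=1.0):
    # Re-weight a probability vector and draw a single character index.
    logits = np.log(preds + 1e-12) / temperature
    p = np.exp(logits - logits.max())
    p /= p.sum()
    return int(np.argmax(np.random.multinomial(1, p)))

probs = np.array([[0.7, 0.1, 0.1, 0.1],
                  [0.2, 0.5, 0.2, 0.1],
                  [0.25, 0.25, 0.25, 0.25]])
targets = np.array([0, 1, 0])
mask = np.array([1.0, 1.0, 0.0])        # last timestep is padding
print(masked_cross_entropy(probs, targets, mask))
print(sample_with_temperature(probs[0], temperature=0.5))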
13,629
<ASSISTANT_TASK:> Python Code: import numpy as np import math import random import string from scipy import optimize %matplotlib inline import matplotlib.pyplot as plt from IPython.html.widgets import interact from sklearn.datasets import load_digits digits = load_digits() print(digits.data.shape) Neron for for detrminig what is put in and the weights of certin aspects of the input def __init__(self, n_inputs ): self.n_inputs = n_inputs self.set_weights( [random.uniform(0,1) for x in range(0,n_inputs+1)] ) def sum(self, inputs ): return sum(val*self.weights[i] for i,val in enumerate(inputs)) def set_weights(self, weights ): self.weights = weights def str_(self): return ( str(self.weights[:-1]),str(self.weights[-1]) ) def init(self, n_neurons, n_inputs): self.n_neurons = n_neurons self.neurons = [Neuron( n_inputs ) for _ in range(0,self.n_neurons)] def str_(self): return 'Layer:\n\t'+'\n\t'.join([str(neuron) for neuron in self.neurons])+'' def learn(self, X, y, learning_rate=0.2, epochs=10000): X = np.atleast_2d(X) temp = np.ones([X.shape[0], X.shape[1]+1]) temp[:, 0:-1] = X X = temp y = np.array(y) for i in range(epochs): k = np.random.randint(X.shape[0]) a = [X[i]] for j in range(len(self.weights)): a.append(self.activation(np.dot(a[j], self.weights[j]))) error = y[i] - a[-1] deltas = [error * self.activation_deriv(a[-1])] for i in range(len(a) - 2, 0, -1): deltas.append(deltas[-1].dot(self.weights[i].T)*self.activation_deriv(a[i])) deltas.reverse() for i in range(len(self.weights)): layer = np.atleast_2d(a[i]) delta = np.atleast_2d(deltas[i]) self.weights[i] += learning_rate * layer.T.dot(delta) return self.weights[i] def show_digit(i): plt.matshow(digits.images[i]); print (show_digit(0)) print (show_digit(1)) print (show_digit(2)) print (show_digit(3)) print (show_digit(4)) print (show_digit(5)) print (show_digit(6)) print (show_digit(7)) print (show_digit(8)) print (show_digit(9)) interact(show_digit, i=(0,100)); <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Core Algorithms Step3: Neuron Layer Step4: Training Set Step5: Visualizations
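The class methods in the code above appear without their enclosing class statements, so as a hedged, self-contained illustration of the same neuron/layer idea, here is a minimal sigmoid layer in plain numpy (layer sizes are arbitrary assumptions, chosen to match the 8x8 digits data):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_deriv(y):
    # derivative of the sigmoid expressed in terms of its output y
    return y * (1.0 - y)

class DenseLayer:
    def __init__(self, n_inputs, n_neurons, seed=0):
        rng = np.random.default_rng(seed)
        # small random weights, with one extra row acting as the bias
        self.weights = rng.normal(0.0, 0.1, size=(n_inputs + 1, n_neurons))

    def forward(self, x):
        x = np.append(x, 1.0)            # append the bias input
        return sigmoid(x @ self.weights)

layer = DenseLayer(n_inputs=64, n_neurons=10)    # 8x8 digit image -> 10 classes
out = layer.forward(np.zeros(64))
print(out.shape, sigmoid_deriv(out).shape)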
13,630
<ASSISTANT_TASK:> Python Code: # Ensure that the github-issues-data volume is mounted in /mnt !ls -la /mnt # Set path for data dir %env DATA_DIR=/mnt/github-issues-data # Download the github-issues.zip training data to /mnt/github-issues-data !wget --directory-prefix=${DATA_DIR} https://storage.googleapis.com/kubeflow-examples/github-issue-summarization-data/github-issues.zip # Unzip the file into /mnt/github-issues-data directory !unzip ${DATA_DIR}/github-issues.zip -d ${DATA_DIR} # Create a symlink from <current_directory>/github-issues-data to /mnt/github-issues-data !ln -sf ${DATA_DIR} github-issues-data # Make sure that the github-issues-data symlink is created !ls -lh github-issues-data/github_issues.csv data_file='github-issues-data/github_issues.csv' # read in data sample 2000 rows (for speed of tutorial) # Set this to False to train on the entire dataset use_sample_data=True if use_sample_data: training_data_size=2000 traindf, testdf = train_test_split(pd.read_csv(data_file).sample(n=training_data_size), test_size=.10) else: traindf, testdf = train_test_split(pd.read_csv(data_file),test_size=.10) #print out stats about shape of data print(f'Train: {traindf.shape[0]:,} rows {traindf.shape[1]:,} columns') print(f'Test: {testdf.shape[0]:,} rows {testdf.shape[1]:,} columns') # preview data traindf.head(3) train_body_raw = traindf.body.tolist() train_title_raw = traindf.issue_title.tolist() #preview output of first element train_body_raw[0] %reload_ext autoreload %autoreload 2 from ktext.preprocess import processor %%time # Clean, tokenize, and apply padding / truncating such that each document length = 70 # also, retain only the top 8,000 words in the vocabulary and set the remaining words # to 1 which will become common index for rare words body_pp = processor(keep_n=8000, padding_maxlen=70) train_body_vecs = body_pp.fit_transform(train_body_raw) print('\noriginal string:\n', train_body_raw[0], '\n') print('after pre-processing:\n', train_body_vecs[0], '\n') # Instantiate a text processor for the titles, with some different parameters # append_indicators = True appends the tokens '_start_' and '_end_' to each # document # padding = 'post' means that zero padding is appended to the end of the # of the document (as opposed to the default which is 'pre') title_pp = processor(append_indicators=True, keep_n=4500, padding_maxlen=12, padding ='post') # process the title data train_title_vecs = title_pp.fit_transform(train_title_raw) print('\noriginal string:\n', train_title_raw[0]) print('after pre-processing:\n', train_title_vecs[0]) import dill as dpickle import numpy as np # Save the preprocessor with open('body_pp.dpkl', 'wb') as f: dpickle.dump(body_pp, f) with open('title_pp.dpkl', 'wb') as f: dpickle.dump(title_pp, f) # Save the processed data np.save('train_title_vecs.npy', train_title_vecs) np.save('train_body_vecs.npy', train_body_vecs) from seq2seq_utils import load_decoder_inputs, load_encoder_inputs, load_text_processor encoder_input_data, doc_length = load_encoder_inputs('train_body_vecs.npy') decoder_input_data, decoder_target_data = load_decoder_inputs('train_title_vecs.npy') num_encoder_tokens, body_pp = load_text_processor('body_pp.dpkl') num_decoder_tokens, title_pp = load_text_processor('title_pp.dpkl') %matplotlib inline from keras.models import Model from keras.layers import Input, LSTM, GRU, Dense, Embedding, Bidirectional, BatchNormalization from keras import optimizers #arbitrarly set latent dimension for embedding and hidden units latent_dim = 300 ##### Define 
Model Architecture ###### ######################## #### Encoder Model #### encoder_inputs = Input(shape=(doc_length,), name='Encoder-Input') # Word embeding for encoder (ex: Issue Body) x = Embedding(num_encoder_tokens, latent_dim, name='Body-Word-Embedding', mask_zero=False)(encoder_inputs) x = BatchNormalization(name='Encoder-Batchnorm-1')(x) # Intermediate GRU layer (optional) #x = GRU(latent_dim, name='Encoder-Intermediate-GRU', return_sequences=True)(x) #x = BatchNormalization(name='Encoder-Batchnorm-2')(x) # We do not need the `encoder_output` just the hidden state. _, state_h = GRU(latent_dim, return_state=True, name='Encoder-Last-GRU')(x) # Encapsulate the encoder as a separate entity so we can just # encode without decoding if we want to. encoder_model = Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model') seq2seq_encoder_out = encoder_model(encoder_inputs) ######################## #### Decoder Model #### decoder_inputs = Input(shape=(None,), name='Decoder-Input') # for teacher forcing # Word Embedding For Decoder (ex: Issue Titles) dec_emb = Embedding(num_decoder_tokens, latent_dim, name='Decoder-Word-Embedding', mask_zero=False)(decoder_inputs) dec_bn = BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb) # Set up the decoder, using `decoder_state_input` as initial state. decoder_gru = GRU(latent_dim, return_state=True, return_sequences=True, name='Decoder-GRU') decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=seq2seq_encoder_out) x = BatchNormalization(name='Decoder-Batchnorm-2')(decoder_gru_output) # Dense layer for prediction decoder_dense = Dense(num_decoder_tokens, activation='softmax', name='Final-Output-Dense') decoder_outputs = decoder_dense(x) ######################## #### Seq2Seq Model #### #seq2seq_decoder_out = decoder_model([decoder_inputs, seq2seq_encoder_out]) seq2seq_Model = Model([encoder_inputs, decoder_inputs], decoder_outputs) seq2seq_Model.compile(optimizer=optimizers.Nadam(lr=0.001), loss='sparse_categorical_crossentropy') from seq2seq_utils import viz_model_architecture seq2seq_Model.summary() viz_model_architecture(seq2seq_Model) from keras.callbacks import CSVLogger, ModelCheckpoint script_name_base = 'tutorial_seq2seq' csv_logger = CSVLogger('{:}.log'.format(script_name_base)) model_checkpoint = ModelCheckpoint('{:}.epoch{{epoch:02d}}-val{{val_loss:.5f}}.hdf5'.format(script_name_base), save_best_only=True) batch_size = 1200 epochs = 7 history = seq2seq_Model.fit([encoder_input_data, decoder_input_data], np.expand_dims(decoder_target_data, -1), batch_size=batch_size, epochs=epochs, validation_split=0.12, callbacks=[csv_logger, model_checkpoint]) #save model seq2seq_Model.save('seq2seq_model_tutorial.h5') from seq2seq_utils import Seq2Seq_Inference seq2seq_inf = Seq2Seq_Inference(encoder_preprocessor=body_pp, decoder_preprocessor=title_pp, seq2seq_model=seq2seq_Model) # this method displays the predictions on random rows of the holdout set seq2seq_inf.demo_model_predictions(n=50, issue_df=testdf) #convenience function that generates predictions on holdout set and calculates BLEU Score bleu_score = seq2seq_inf.evaluate_model(holdout_bodies=testdf.body.tolist(), holdout_titles=testdf.issue_title.tolist(), max_len_title=12) print(f'BLEU Score (avg of BLUE 1-4) on Holdout Set: {bleu_score * 100}') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Process Data Step2: Convert to lists in preparation for modeling Step3: Pre-Process Data For Deep Learning Step4: Look at one example of processed issue bodies Step5: Look at one example of processed issue titles Step6: Serialize all of this to disk for later use Step7: Define Model Architecture Step8: Define Model Architecture Step9: Examine Model Architecture Summary Step10: Train Model Step11: See Example Results On Holdout Set Step12: Evaluate Model
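Seq2Seq_Inference.evaluate_model above reports a BLEU score but hides how it is computed. As a hedged illustration (NLTK-based, not the library's own implementation), a corpus-level BLEU over tokenized predicted and reference titles looks like this; the example titles are made up:

from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

references = [[["fix", "crash", "on", "startup"]],
              [["add", "unit", "tests", "for", "parser"]]]
hypotheses = [["fix", "startup", "crash"],
              ["add", "tests", "for", "parser"]]

smooth = SmoothingFunction().method1
print("corpus BLEU: {:.3f}".format(corpus_bleu(references, hypotheses, smoothing_function=smooth)))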
13,631
<ASSISTANT_TASK:> Python Code: import sys sys.path.insert(0, "..") import sqlite3 import datetime import os import osmdigest.sqlite as sq # Delete the database if it already exists. try: os.remove("demo.db") except FileNotFoundError: pass import os filename = os.path.join("//media", "disk", "OSM_Data", "isle-of-man-latest.osm.bz2") #filename = os.path.join("..", "..", "..", "Data", "isle-of-man-latest.osm.bz2") start = datetime.datetime.now() sq.convert(filename, "demo.db") print(datetime.datetime.now() - start) db = sq.OSM_SQLite("demo.db") for i, node in zip(range(10), db.nodes()): print(node) for i, node in zip(range(10), db.ways()): print(node) for i, node in zip(range(2), db.relations()): print(node) way = next(db.ways()) print("Normal way:") print(way) print("And with full node details:") print(db.complete_way(way)) all_relations = db.relations() for _ in range(4): relation = next(all_relations) relation = db.complete_relation(relation) relation.members[0], relation.complete_members[0] os.stat("demo.db").st_size / 1024 / 1024 import collections collections.Counter(len(node.tags) for node in db.nodes()) # Sanity check: this agrees, and is (a bit) slower import osmdigest.digest as digest gen = digest.parse(filename) next(gen), next(gen) collections.Counter(len(element.tags) for element in gen if element.name == "node") collections.Counter(len(node.tags) for node in db.ways()) collections.Counter(len(node.tags) for node in db.relations()) collections.Counter(len(way.nodes) for way in db.ways()) def make_new_db(name): try: os.remove(name) except Exception as e: print("Failed to delete {} because {}".format(name, e)) return sqlite3.connect(name) def close_connection(connection, name): connection.commit() connection.close() connection = sqlite3.connect(name, isolation_level=None) connection.execute("vacuum") connection.close() return os.stat(name).st_size / 1024 / 1024 connection = make_new_db("nodes.db") connection.execute("create table nodes(osm_id integer primary key, longitude integer, latitude integer)") for res in db.connection.execute("select * from nodes"): connection.execute("insert into nodes values(?,?,?)", tuple(res)) close_connection(connection, "nodes.db") connection = make_new_db("node_tags.db") connection.execute("create table node_tags(osm_id integer, key text, value text)") connection.execute("create index node_tags_osm_id_idx on node_tags(osm_id)") for res in db.connection.execute("select * from node_tags"): connection.execute("insert into node_tags values(?,?,?)", tuple(res)) close_connection(connection, "node_tags.db") connection = make_new_db("ways.db") connection.execute("create table ways(osm_id integer, position integer, noderef integer)") connection.execute("create index ways_idx on ways(osm_id, position)") for res in db.connection.execute("select * from ways"): connection.execute("insert into ways values(?,?,?)", tuple(res)) close_connection(connection, "ways.db") connection = make_new_db("way_tags.db") connection.execute("create table way_tags(osm_id integer, key text, value text)") connection.execute("create index way_tags_osm_id_idx on way_tags(osm_id)") for res in db.connection.execute("select * from way_tags"): connection.execute("insert into way_tags values(?,?,?)", tuple(res)) close_connection(connection, "way_tags.db") connection = make_new_db("relations.db") connection.execute("create table relations(osm_id integer, member text, memberref integer, role text)") connection.execute("create index relations_idx on relations(osm_id)") for res in 
db.connection.execute("select * from relations"): connection.execute("insert into relations values(?,?,?,?)", tuple(res)) close_connection(connection, "relations.db") connection = make_new_db("relation_tags.db") connection.execute("create table relation_tags(osm_id integer, key text, value text)") connection.execute("create index relation_tags_osm_id_idx on relation_tags(osm_id)") for res in db.connection.execute("select * from relation_tags"): connection.execute("insert into relation_tags values(?,?,?)", tuple(res)) close_connection(connection, "relation_tags.db") def chunk(nodes, length): i = 0 out = [] part = [] while i < len(nodes): part.append(nodes[i]) i += 1 if i == len(nodes): while len(part) < length: part.append(None) out.append(part) break if len(part) == length: out.append(part) part = [] return out assert(chunk([1,2,3,4,5,6], 2) == [[1,2],[3,4],[5,6]]) assert(chunk([1,2,3,4,5], 2) == [[1,2],[3,4],[5,None]]) for length in range(1,20): connection = make_new_db("ways_trial.db") create_string = ("create table ways(osm_id integer, position integer," + ", ".join(["noderef"+str(i)+" integer" for i in range(length)]) + ")" ) insert_string = ("insert into ways values (?,?," + ",".join(["?"]*length) + ")" ) connection.execute(create_string) connection.execute("create index ways_idx on ways(osm_id, position)") for way in db.ways(): for pos, part in enumerate(chunk(way.nodes, length)): data = (way.osm_id, pos, *part) connection.execute(insert_string, data) size = close_connection(connection, "ways_trial.db") print("{} -> {}".format(length, size)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Similarly for relations. It is worth noting something a little surprising about the input data here. The first relation returned is "Scotland". This is because, in OSM data, the boundary for Scotland forms part of the boundary of the Isle of Man. If you investigate further, you'll find that some of the nodes and ways in this relation are not included in the XML file, because e.g. the label node for Scotland is not part of the Isle of Man. Step2: The library also supports reading full way and relation information, basically doing the necessary "joins" for you... For relations, as noted above, some of the members may not be documented in the XML file extract. Step3: Experiment with file sizes Step4: Let's look at the average number of tags etc. Step5: Size of each DB component Step6: Change the schema for ways Step7: Storing around 10 to 15 node references in each row would be more efficient...
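To make the "joins" mentioned in Step2 concrete, here is a hedged sqlite3 sketch that resolves a way's ordered node references into coordinates. It assumes demo.db exposes the same columns as the copies created above, i.e. ways(osm_id, position, noderef) and nodes(osm_id, longitude, latitude); the way id used is a placeholder.

import sqlite3

def way_coordinates(db_path, way_osm_id):
    # Join the way's node references, in order, against the nodes table.
    with sqlite3.connect(db_path) as conn:
        rows = conn.execute(
            "SELECT w.position, n.longitude, n.latitude "
            "FROM ways AS w JOIN nodes AS n ON n.osm_id = w.noderef "
            "WHERE w.osm_id = ? ORDER BY w.position",
            (way_osm_id,)
        ).fetchall()
    return [(lon, lat) for _, lon, lat in rows]

# coords = way_coordinates("demo.db", 12345)   # placeholder osm_id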
13,632
<ASSISTANT_TASK:> Python Code: %%capture # Installing the required libraries: !pip install matplotlib pandas scikit-learn tensorflow pyarrow tqdm !pip install google-cloud-bigquery google-cloud-bigquery-storage !pip install flake8 pycodestyle pycodestyle_magic # Python Builtin Libraries from datetime import datetime # Third Party Libraries from google.cloud import bigquery # Configurations %matplotlib inline try: from google.colab import auth print("Authenticating in Colab") auth.authenticate_user() print("Authenticated") except: # noqa print("This notebook is not running on Colab.") print("Please make sure to follow the authentication steps.") # ENTER THE GCP PROJECT HERE gcp_project = "YOUR-GCP-PROJECT" print(f"gcp_project is set to {gcp_project}") def helper_function(): Add a description about what this function does. return None query = SELECT created_date, category, complaint_type, neighborhood, latitude, longitude FROM `bigquery-public-data.san_francisco_311.311_service_requests` LIMIT 1000; bqclient = bigquery.Client(project=gcp_project) dataframe = bqclient.query(query).result().to_dataframe() print(dataframe.shape) dataframe.head() # Convert the datetime to date dataframe['created_date'] = dataframe['created_date'].apply(datetime.date) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Authentication Step3: Configurations Step5: Data Preparation Step6: Check the Dataframe Step7: Process the Dataframe
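After the dataframe is built, a quick sanity check with plain pandas helps confirm the query returned sensible 311 data. This is a hedged example of what the processing step might continue with, not part of the original notebook:

# Top categories and a per-day request count from the sample pulled above.
print(dataframe['category'].value_counts().head(10))

daily_counts = (dataframe
                .groupby('created_date')
                .size()
                .rename('n_requests')
                .sort_index())
daily_counts.plot(kind='line', title='311 requests per day (sample)')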
13,633
<ASSISTANT_TASK:> Python Code: %matplotlib inline from medpy.io import load import matplotlib.pyplot as plt import matplotlib.cm as cm i, h = load("flair.nii.gz") plt.imshow(i, cmap = cm.Greys_r); print(h) from medpy.io import header print header.get_pixel_spacing(h) print header.get_offset(h) header.set_pixel_spacing(h, (0.8, 1.2)) print header.get_pixel_spacing(h) from medpy.io import save save(i, "flair_distorted.nii.gz", h, force=True) j, hj = load("flair_distorted.nii.gz") print header.get_pixel_spacing(h) print(h.header['qform_code']) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Now let's take a look at the header. Step2: That is quite a lot of information, and the header appears to be of class 'nibabel.nifti1.Nifti1Image'. The reason behind this is that MedPy relies on third-party libraries to save and load images. To keep compatibility high and the maintenance requirements at a minimum, MedPy does not possess a dedicated header object, but instead returns the third-party library's image object as a pseudo-header (don't worry, the image data is not kept twice). Step3: And correspondingly for the offset. Step4: Both of these values can also be set. Step5: Saving the array with the modified header, the new meta-data are stored alongside the image. Step6: Further meta-data from the headers are largely incompatible between formats. If you require access to additional header attributes, you can do this by querying the image header object directly. In the above case of a NiBabel class, you can, for example, query the infamous 'qform_code' of the NIfTI format.
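One practical use of the header accessors shown above is turning a pixel count into a physical extent. The sketch below only uses medpy.io.load and header.get_pixel_spacing as in the code, and assumes the spacing is in millimetres; the intensity threshold is an arbitrary illustration. For a 2-D slice the result is an area (mm^2); for a 3-D volume the same product would give mm^3.

import numpy as np
from medpy.io import load, header

image_data, image_header = load("flair.nii.gz")
spacing = header.get_pixel_spacing(image_header)

pixel_size = np.prod(spacing)                     # physical size of one pixel/voxel
foreground = np.count_nonzero(image_data > 0)     # arbitrary threshold, illustrative
print("approx. foreground extent: {:.1f}".format(foreground * pixel_size))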
13,634
<ASSISTANT_TASK:> Python Code: # We need scipy for .wav file IO. !pip install tensorflowjs==2.1.0 scipy==1.4.1 # TensorFlow 2.3.0 is required due to https://github.com/tensorflow/tensorflow/issues/38135 # TODO: Switch to 2.3.0 final release when it comes out. !pip install tensorflow-cpu==2.3.0 !mkdir -p /tmp/tfjs-sc-model !curl -o /tmp/tfjs-sc-model/metadata.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/metadata.json !curl -o /tmp/tfjs-sc-model/model.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/model.json !curl -o /tmp/tfjs-sc-model/group1-shard1of2 -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard1of2 !curl -o /tmp/tfjs-sc-model/group1-shard2of2 -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard2of2 import json import tensorflow as tf import tensorflowjs as tfjs # Specify the path to the TensorFlow.js Speech Commands model, # either original or transfer-learned on https://teachablemachine.withgoogle.com/) tfjs_model_json_path = '/tmp/tfjs-sc-model/model.json' # This is the main classifier model. model = tfjs.converters.load_keras_model(tfjs_model_json_path) !curl -o /tmp/tfjs-sc-model/sc_preproc_model.tar.gz -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/conversion/sc_preproc_model.tar.gz !cd /tmp/tfjs-sc-model && tar xzvf ./sc_preproc_model.tar.gz # Load the preprocessing layer (wrapped in a tf.keras Model). preproc_model_path = '/tmp/tfjs-sc-model/sc_preproc_model' preproc_model = tf.keras.models.load_model(preproc_model_path) preproc_model.summary() # From the input_shape of the preproc_model, we can determine the # required length of the input audio snippet. input_length = preproc_model.input_shape[-1] print("Input audio length = %d" % input_length) # Construct the new non-browser model by combining the preprocessing # layer with the main classifier model. combined_model = tf.keras.Sequential(name='combined_model') combined_model.add(preproc_model) combined_model.add(model) combined_model.build([None, input_length]) combined_model.summary() !curl -o /tmp/tfjs-sc-model/audio_sample_one_male_adult.wav -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/conversion/audio_sample_one_male_adult.wav # Listen to the audio sample. wav_file_path = '/tmp/tfjs-sc-model/audio_sample_one_male_adult.wav' import IPython.display as ipd ipd.Audio(wav_file_path) # Play the .wav file. # Read the wav file and truncate it to the an input length # suitable for the model. from scipy.io import wavfile # fs: sample rate in Hz; xs: the audio PCM samples. fs, xs = wavfile.read(wav_file_path) if len(xs) >= input_length: xs = xs[:input_length] else: raise ValueError("Audio from .wav file is too short") # Try running some examples through the combined model. input_tensor = tf.constant(xs, shape=(1, input_length), dtype=tf.float32) / 32768.0 # The model outputs the probabilties for the classes (`probs`). probs = combined_model.predict(input_tensor) # Read class labels of the model. metadata_json_path = '/tmp/tfjs-sc-model/metadata.json' with open(metadata_json_path, 'r') as f: metadata = json.load(f) class_labels = metadata["words"] # Get sorted probabilities and their corresponding class labels. probs_and_labels = list(zip(probs[0].tolist(), class_labels)) # Sort the probabilities in descending order. 
probs_and_labels = sorted(probs_and_labels, key=lambda x: -x[0]) probs_and_labels # len(probs_and_labels) # Print the top-5 labels: print('top-5 class probabilities:') for i in range(5): prob, label = probs_and_labels[i] print('%20s: %.4e' % (label, prob)) # Save the model as a tflite file. tflite_output_path = '/tmp/tfjs-sc-model/combined_model.tflite' converter = tf.lite.TFLiteConverter.from_keras_model(combined_model) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] with open(tflite_output_path, 'wb') as f: f.write(converter.convert()) print("Saved tflite file at: %s" % tflite_output_path) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Below we download the files of the original or transfer-learned TF.js Speech Commands model. Step2: As a required step, we download the audio preprocessing layer that replicates Step3: In order to quickly test that the converted model works, let's download a sample .wav file.
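The notebook stops after writing combined_model.tflite. As a hedged check that the converted file behaves like the Keras model, it can be run through the TensorFlow Lite interpreter, reusing the xs samples, input_length and class_labels prepared above; note the converter enabled SELECT_TF_OPS, so the interpreter must come from a TensorFlow build with Flex ops available.

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='/tmp/tfjs-sc-model/combined_model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# Same scaling as used for the Keras prediction above.
audio = (np.array(xs[:input_length], dtype=np.float32) / 32768.0).reshape(1, -1)
interpreter.set_tensor(input_details['index'], audio)
interpreter.invoke()
tflite_probs = interpreter.get_tensor(output_details['index'])[0]
print('top tflite prediction:', class_labels[int(np.argmax(tflite_probs))])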
13,635
<ASSISTANT_TASK:> Python Code: import cufflinks as cf # The colors module includes a pre-defined set of commonly used colors cf.colors.cnames # HEX to RGB cf.colors.hex_to_rgb('red') # RGB to HEX cf.colors.rgb_to_hex('rgb(219, 64, 82)') # RGB or HEX to RGBA (transparency) cf.colors.to_rgba('#3780bf',.5), cf.colors.to_rgba('rgb(219, 64, 82)',.4) # RGBA to RGB (flatten transparency) # By default assumes that the transparency color is *white*, however this can be also passed as a parameter. cf.colors.rgba_to_rgb('rgba(219, 64, 82, 0.4)','white') # Cufflinks.colors.normalize will always return the an hex value for all types of colors colors=['#f08','rgb(240, 178, 185)','rgba(219, 64, 82, 0.4)','green'] [cf.colors.normalize(c) for c in colors] # 10 different tones of pink cf.colors.color_range('pink',10) # Displaying a table of defined colors (list) colors=['#f08', 'rgb(240, 178, 185)', 'blue' , '#32ab60'] cf.colors.color_table(colors) # Generating 15 shades of orange cf.colors.color_table('orange',15) # Create a generator using 3 defined base colors colors=['green','orange','blue'] gen=cf.colors.colorgen(colors) outputColors=[next(gen) for _ in range(15)] cf.colors.color_table(outputColors) # Create a generator with default set of colors gen=cf.colors.colorgen() outputColors=[next(gen) for _ in range(15)] cf.colors.color_table(outputColors) # We can see all available scales with cf.get_scales() # Other color scales can be also seen here cf.colors.scales() colorscale=cf.colors.get_scales('accent') cf.colors.color_table(colorscale) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Colors can be represented as strings Step2: Normalization Step3: Color Ranges Step4: Color Tables Step5: Color Generators
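Most of the helpers above are simple string conversions. As a hedged, dependency-free sketch of what hex_to_rgb and rgb_to_hex do (this is not cufflinks' own implementation), a round trip can be written in a few lines:

def hex_to_rgb(hex_color):
    hex_color = hex_color.lstrip('#')
    if len(hex_color) == 3:                          # expand short form such as 'f08'
        hex_color = ''.join(c * 2 for c in hex_color)
    r, g, b = (int(hex_color[i:i + 2], 16) for i in (0, 2, 4))
    return 'rgb({}, {}, {})'.format(r, g, b)

def rgb_to_hex(rgb_color):
    nums = rgb_color.strip('rgb() ').split(',')
    return '#{:02x}{:02x}{:02x}'.format(*(int(n) for n in nums))

print(hex_to_rgb('#f08'))               # -> rgb(255, 0, 136)
print(rgb_to_hex('rgb(219, 64, 82)'))   # -> #db4052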
13,636
<ASSISTANT_TASK:> Python Code: %pylab inline import LakeModel alpha = 0.012 lamb = 0.2486 b = 0.001808 d = 0.0008333 g = b-d N0 = 100. e0 = 0.92 u0 = 1-e0 T = 50 LM0 = LakeModel.LakeModel(lamb,alpha,b,d) x0 = LM0.find_steady_state()# initial conditions print "Initial Steady State: ", x0 LM1 = LakeModel.LakeModel(0.2,alpha,b,d) xbar = LM1.find_steady_state() # new steady state X_path = vstack(LM1.simulate_stock_path(x0*N0,T)) # simulate stocks x_path = vstack(LM1.simulate_rate_path(x0,T)) # simulate rates print "New Steady State: ", xbar figure(figsize=[10,9]) subplot(3,1,1) plot(X_path[:,0]) title(r'Employment') subplot(3,1,2) plot(X_path[:,1]) title(r'Unemployment') subplot(3,1,3) plot(X_path.sum(1)) title(r'Labor Force') figure(figsize=[10,6]) subplot(2,1,1) plot(x_path[:,0]) hlines(xbar[0],0,T,'r','--') title(r'Employment Rate') subplot(2,1,2) plot(x_path[:,1]) hlines(xbar[1],0,T,'r','--') title(r'Unemployment Rate') bhat = 0.003 T_hat = 20 LM1 = LakeModel.LakeModel(lamb,alpha,bhat,d) X_path1 = vstack(LM1.simulate_stock_path(x0*N0,T_hat)) # simulate stocks x_path1 = vstack(LM1.simulate_rate_path(x0,T_hat)) # simulate rates X_path2 = vstack(LM0.simulate_stock_path(X_path1[-1,:2],T-T_hat+1)) # simulate stocks x_path2 = vstack(LM0.simulate_rate_path(x_path1[-1,:2],T-T_hat+1)) # simulate rates x_path = vstack([x_path1,x_path2[1:]]) # note [1:] to avoid doubling period 20 X_path = vstack([X_path1,X_path2[1:]]) # note [1:] to avoid doubling period 20 figure(figsize=[10,9]) subplot(3,1,1) plot(X_path[:,0]) title(r'Employment') subplot(3,1,2) plot(X_path[:,1]) title(r'Unemployment') subplot(3,1,3) plot(X_path.sum(1)) title(r'Labor Force') figure(figsize=[10,6]) subplot(2,1,1) plot(x_path[:,0]) hlines(x0[0],0,T,'r','--') title(r'Employment Rate') subplot(2,1,2) plot(x_path[:,1]) hlines(x0[1],0,T,'r','--') title(r'Unemployment Rate') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Now construct the class containing the initial conditions of the problem Step2: New legislation changes $\lambda$ to $0.2$ Step3: Now plot stocks Step4: And how the rates evolve Step5: We see that it takes 20 periods for the economy to converge to its new steady-state levels Step6: We simulate for 20 periods at the new parameters Step7: Now, using the state after 20 periods as the new initial conditions, we simulate for the additional 30 periods Step8: Finally we combine these two paths and plot Step9: And the rates
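For readers without the LakeModel module, the law of motion it iterates can be written out directly. This is a hedged reconstruction using the standard lake-model dynamics (hire rate lambda, separation rate alpha, birth rate b, death rate d) with the parameter values from the code above; the implementation inside LakeModel.py may differ in detail.

import numpy as np

lamb, alpha, b, d = 0.2486, 0.012, 0.001808, 0.0008333
g = b - d                                   # net labor-force growth

# Stock dynamics: (E_{t+1}, U_{t+1}) = A @ (E_t, U_t); rates divide by (1 + g).
A = np.array([[(1 - d) * (1 - alpha), (1 - d) * lamb],
              [(1 - d) * alpha + b,   (1 - d) * (1 - lamb) + b]])
A_hat = A / (1 + g)

x = np.array([0.92, 0.08])                  # initial (employment, unemployment) rates
for _ in range(100):
    x = A_hat @ x
print("steady-state employment/unemployment rates:", x)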
13,637
<ASSISTANT_TASK:> Python Code: prglngth_cdf = st.Cdf(live.prglngth, label='Pregnancy length') prglngth_cdf.plot() firsts_cdf = st.Cdf(firsts.prglngth, label='first babies') others_cdf = st.Cdf(others.prglngth, label='other babies') fig = st.multiplot([firsts_cdf, others_cdf], title='CDF of Pregnancy Length') import numpy as np sample_exp = np.random.exponential(2, 300) exp_cdf = st.Cdf(sample_exp, label='exp sample') exp_cdf.plot() exp_cdf.plot(complement=True, yscale='log') # another way is to use the transform argument # cdf.plot(transform='exponential') # calculate mean and std pmf_weights = st.Pmf(live.birthwgt_kg, label='Weights') print(pmf_weights.mean(), pmf_weights.std()) # Make a nornal cdf with the same values for mean and std xs, ps = st.utils.data_generators.render_normal_cdf(pmf_weights.mean(), pmf_weights.std(), 0, 6, n=1000) cdf_normal = st.Cdf(xs, ps, label='Normal') # Generate cdf and plot both on same axis cdf_weights = pmf_weights.to_cdf() fig = st.multiplot([cdf_weights, cdf_normal], plt_kwds={'linewidth':1.5}) norm1 = np.random.normal(1, 2, 200) norm2 = np.random.normal(1, 5, 200) norm3 = np.random.normal(3, 2, 200) exp1 = np.random.exponential(5, 200) c1 = st.Cdf(norm1, label='Norm($\mu=1$, $\sigma=2$)') c2 = st.Cdf(norm2, label='Norm($\mu=1$, $\sigma=5$)') c3 = st.Cdf(norm3, label='Norm($\mu=3$, $\sigma=2$)') c4 = st.Cdf(exp1, label='Exp($\lambda=5$)') fig = st.normal_probability_plot([c1, c2, c3], linewidth=2) fig = st.normal_probability_plot([c1, c4], linewidth=2) # Sample data from Behavioral Risk Factor Surveillance System (BRFSS) brfss = pd.read_csv('../data/BRFSS.csv') weights = brfss.wtkg2.dropna() brfss_weights_cdf = st.Cdf(weights , label='Weights kg') # Make a nornal cdf with the same values for mean and std xs, ps = st.utils.data_generators.render_normal_cdf(weights.mean(), weights.std(), 0, 200, n=1000) norm_cdf = st.Cdf(xs, ps, label='Normal CDF') # Use subplots to generate all axes objects fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,6)) # These can then be passed to multiplot, or any other obj.plot() method with the 'axes' keyword norm_cdf.label = 'Normal CDF' fig = st.multiplot([brfss_weights_cdf, norm_cdf], plt_kwds={'linewidth':1.5, 'title': 'Normal CDF Comparison', 'xlabel':'Weight kg'}, axes=ax1) norm_cdf.label = 'Lognormal CDF' fig = st.multiplot([brfss_weights_cdf, norm_cdf], plt_kwds={'linewidth':1.5, 'xscale': 'log', 'title': 'Lognormal CDF Comparison', 'xlabel':'log(Weight kg)'}, axes=ax2) fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,6)) fig = st.normal_probability_plot(weights, labels=['weights'], axes=ax1, ylabel='Sample Weights kg') fig = st.normal_probability_plot(np.log(weights), labels=['log(weights)'], axes=ax2, ylabel='Sample log(Weights) kg') import scipy xs = scipy.stats.pareto.rvs(5, 3, size=1000) cdf = st.Cdf(xs, label='pareto') cdf.plot(xscale='log', yscale='log', complement=True) # or equivilantly with # cdf.plot(transform='pareto') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Methods Step2: The plot function for cdfs can take a complement argument, and a yscale argument Step3: Testing if a distribution is Normal Step4: render_normal_cdf takes parameters mean, std, low, high and n. Step5: The Normal Probability Plot Step6: normal_probability_plot takes a single sequence or cdf object, or alternatively a list of sequences or cdf objects. All are plotted on the same axis, but the fit line is only for the first object/sequence in the given list. A list of labels can optionally be given to override the defaults. Step7: Testing if a distribution is log Normal Step8: From comparing the standard cdfs, the difference is not so obvious, though if x is now plotted with a log scale on the x axis, the normal model fits slightly better Step9: This difference is more obvious when visualised as a normal probability plot Step10: Testing Whether a Sample is Pareto Distributed
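The normal probability plots in Steps 5-9 only compare sample quantiles with standard-normal quantiles, which can be reproduced with scipy alone. A hedged sketch, independent of the Cdf library used above and with synthetic stand-in data:

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

sample = np.random.lognormal(mean=4.3, sigma=0.2, size=2000)   # stand-in for adult weights

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
stats.probplot(sample, dist="norm", plot=ax1)
ax1.set_title("sample vs normal quantiles")
stats.probplot(np.log(sample), dist="norm", plot=ax2)
ax2.set_title("log(sample) vs normal quantiles")
plt.show()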
13,638
<ASSISTANT_TASK:> Python Code: tot_moves = 0 all_moves = [] num_runs = 10000 for i in range(num_runs): game.run() moves = SL.count_moves(game.records) tot_moves += moves all_moves.append(moves) print(tot_moves/num_runs) max(all_moves) df = pd.DataFrame({'moves': all_moves}) df.describe() type(df['moves'].value_counts()) move_counts = df['moves'].value_counts() df.hist(bins=max(move_counts.keys())-min(move_counts.keys())) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We want as many bins as there are integers between the lowest and highest number of moves found in the data!
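A slightly more explicit way to get exactly one bin per integer is to pass half-integer bin edges instead of a bin count, which avoids any off-by-one ambiguity. A hedged variant of the call above, reusing df and move_counts:

import numpy as np

lo, hi = min(move_counts.keys()), max(move_counts.keys())
edges = np.arange(lo - 0.5, hi + 1.5, 1.0)   # one bin centred on every integer
df.hist(bins=edges)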
13,639
<ASSISTANT_TASK:> Python Code: # for plotting purposes %matplotlib inline from matplotlib.pylab import plt from matplotlib import rcParams dark_colors = ["#A51C30", "#808080", (0.8509803921568627, 0.37254901960784315, 0.00784313725490196), (0.4588235294117647, 0.4392156862745098, 0.7019607843137254), (0.9058823529411765, 0.1607843137254902, 0.5411764705882353), (0.4, 0.6509803921568628, 0.11764705882352941), (0.9019607843137255, 0.6705882352941176, 0.00784313725490196), (0.6509803921568628, 0.4627450980392157, 0.11372549019607843), (0.4, 0.4, 0.4)] # dark_colors = ['#A51C30', '#808080'] rcParams['figure.figsize'] = (13, 6) rcParams['figure.dpi'] = 150 rcParams['axes.color_cycle'] = dark_colors rcParams['lines.linewidth'] = 2 rcParams['axes.facecolor'] = "white" rcParams['axes.titlesize'] = 20 rcParams['axes.labelsize'] = 17.5 rcParams['xtick.labelsize'] = 15 rcParams['ytick.labelsize'] = 15 rcParams['legend.fontsize'] = 17.5 rcParams['patch.edgecolor'] = 'none' rcParams['grid.color']="gray" rcParams['grid.linestyle']="-" rcParams['grid.linewidth'] = 0.3 rcParams['grid.alpha']=1 rcParams['text.color'] = "444444" rcParams['axes.labelcolor'] = "444444" rcParams['ytick.color'] = "444444" rcParams['xtick.color'] = "444444" from __future__ import print_function import nilmtk from nilmtk.utils import print_dict from nilmtk import DataSet # import dataset dataset = DataSet('./redd_data/redd.h5') dataset.set_window("2011-04-01", "2011-05-01") # look at house 2 BUILDING = 2 elec = dataset.buildings[BUILDING].elec # plot mains elec.mains().plot(kind = 'sum'); elec['fridge'].plot(); elec.submeters().appliances corr = elec.correlation_of_sum_of_submeters_with_mains() print print('------------------------------------------------------------------------------') print('the correlation between the sum of the submeters and the main meter is:', corr) print('------------------------------------------------------------------------------') dataset = DataSet('./redd_data/redd.h5') dataset.set_window("2011-04-19", "2011-04-20") dataset.buildings[2].elec.mains().plot(kind= 'sum'); plt.figure(figsize = (16,8)) dataset.buildings[2].elec.submeters().plot(kind = 'sum'); elec['fridge'].get_activations()[0].plot(); elec['microwave'].get_activations()[0].plot(); dataset.buildings[2].elec.plot_when_on() # here are the code that perform the preprocessing of the mini batches import nilmtk from nilmtk.utils import print_dict from nilmtk import DataSet from neuralnilm.data.loadactivations import load_nilmtk_activations from neuralnilm.data.syntheticaggregatesource import SyntheticAggregateSource from neuralnilm.data.realaggregatesource import RealAggregateSource from neuralnilm.data.stridesource import StrideSource from neuralnilm.data.datapipeline import DataPipeline from neuralnilm.data.processing import DivideBy, IndependentlyCenter # ------------ # create dictionary with train, unseen_house, unseen_appliance # ------------ def select_windows(train_buildings, unseen_buildings): windows = {fold: {} for fold in DATA_FOLD_NAMES} def copy_window(fold, i): windows[fold][i] = WINDOWS[fold][i] for i in train_buildings: copy_window('train', i) copy_window('unseen_activations_of_seen_appliances', i) for i in unseen_buildings: copy_window('unseen_appliances', i) return windows def filter_activations(windows, activations): new_activations = { fold: {appliance: {} for appliance in APPLIANCES} for fold in DATA_FOLD_NAMES} for fold, appliances in activations.iteritems(): for appliance, buildings in appliances.iteritems(): 
required_building_ids = windows[fold].keys() required_building_names = [ 'UK-DALE_building_{}'.format(i) for i in required_building_ids] for building_name in required_building_names: try: new_activations[fold][appliance][building_name] = ( activations[fold][appliance][building_name]) except KeyError: pass return activations NILMTK_FILENAME = './redd_data/redd.h5' SAMPLE_PERIOD = 6 STRIDE = None APPLIANCES = ['fridge'] WINDOWS = { 'train': { 1: ("2011-04-19", "2011-05-21"), 2: ("2011-04-19", "2013-05-01"), 3: ("2011-04-19", "2013-05-26"), 6: ("2011-05-22", "2011-06-14"), }, 'unseen_activations_of_seen_appliances': { 1: ("2011-04-19", None), 2: ("2011-04-19", None), 3: ("2011-04-19", None), 6: ("2011-05-22", None), }, 'unseen_appliances': { 5: ("2011-04-19", None) } } # get the dictionary of activations for each appliance activations = load_nilmtk_activations( appliances=APPLIANCES, filename=NILMTK_FILENAME, sample_period=SAMPLE_PERIOD, windows=WINDOWS ) # ------------ # get pipeline for the fridge example # ------------ num_seq_per_batch = 16 target_appliance = 'fridge' seq_length = 512 train_buildings = [1, 2, 3, 6] unseen_buildings = [5] DATA_FOLD_NAMES = ( 'train', 'unseen_appliances', 'unseen_activations_of_seen_appliances') filtered_windows = select_windows(train_buildings, unseen_buildings) filtered_activations = filter_activations(filtered_windows, activations) synthetic_agg_source = SyntheticAggregateSource( activations=filtered_activations, target_appliance=target_appliance, seq_length=seq_length, sample_period=SAMPLE_PERIOD ) real_agg_source = RealAggregateSource( activations=filtered_activations, target_appliance=target_appliance, seq_length=seq_length, filename=NILMTK_FILENAME, windows=filtered_windows, sample_period=SAMPLE_PERIOD ) # ------------ # needed to rescale the input aggregated data # rescaling is done using the a first batch of num_seq_per_batch sequences sample = real_agg_source.get_batch(num_seq_per_batch=1024).next() sample = sample.before_processing input_std = sample.input.flatten().std() target_std = sample.target.flatten().std() # ------------ pipeline = DataPipeline( [synthetic_agg_source, real_agg_source], num_seq_per_batch=num_seq_per_batch, input_processing=[DivideBy(input_std), IndependentlyCenter()], target_processing=[DivideBy(target_std)] ) # ------------ # create the validation set # ------------ num_test_seq = 101 X_valid = np.empty((num_test_seq*num_seq_per_batch, seq_length)) Y_valid = np.empty((num_test_seq*num_seq_per_batch, 3)) for i in range(num_test_seq): (x_valid,y_valid) = pipeline.train_generator(fold = 'unseen_appliances', source_id = 1).next() X_valid[i*num_seq_per_batch: (i+1)*num_seq_per_batch,:] = x_valid[:,:,0] Y_valid[i*num_seq_per_batch: (i+1)*num_seq_per_batch,:] = y_valid X_valid = np.reshape(X_valid, [X_valid.shape[0],X_valid.shape[1],1]) # needed to rescale the input aggregated data # rescaling is done using the a first batch of num_seq_per_batch sequences sample = real_agg_source.get_batch(num_seq_per_batch=1024).next() sample = sample.before_processing input_std = sample.input.flatten().std() target_std = sample.target.flatten().std() # ------------ def scores(Y_pred, Y_test, activation_threshold = 0.1 ,plot_results= True, print_results = False): a function that computes the classification scores with various metrics return: dictionary with the various scores # post process the data np.putmask(Y_pred[:,0], Y_pred[:,0] <=0, 0) np.putmask(Y_pred[:,1], Y_pred[:,1] >=1, 1) np.putmask(Y_pred[:,0],Y_pred[:,1] < Y_pred[:,0],0) 
np.putmask(Y_pred[:,1],Y_pred[:,1] < Y_pred[:,0],0) np.putmask(Y_pred[:,1],Y_pred[:,2] < activation_threshold,0) np.putmask(Y_pred[:,0],Y_pred[:,2] < activation_threshold,0) # find negative in prediction pred_negatives = (Y_pred[:,0] ==0) &(Y_pred[:,1] ==0) pred_positives = ~pred_negatives obs_negatives = (Y_test[:,0] ==0) &(Y_test[:,1] ==0) obs_positives = ~obs_negatives TP = obs_positives[pred_positives].sum() FN = obs_positives[pred_negatives].sum() TN = obs_negatives[pred_negatives].sum() FP = obs_negatives[pred_positives].sum() recall = TP / float(TP + FN) precision = TP / float(TP+ FP) f1 = 2* precision*recall / (precision + recall) accuracy = (TP + TN)/ float(obs_negatives.sum() +obs_positives.sum() ) if print_results: print('number of Predicted negatives:',pred_negatives.sum() ) print('number of Predicted positives:',pred_positives.sum() ) print('number of Observed negatives:', obs_negatives.sum() ) print('number of Observed positives:', obs_positives.sum() ) print('f1:', f1) print('precision :' ,precision) print('recall : ', recall) print('accuracy:', accuracy) results = { 'accuracy': accuracy, 'f1_score': f1, 'precision': precision, 'recall_score': recall} if plot_results: pd_results = pd.DataFrame.from_dict(results, orient = 'index') pd_results = pd_results.transpose() sns.barplot(data = pd_results) return results def train_generator(self, fold='train', enable_all_appliances=False, source_id=None, reset_iterator=False, validation=False ): class method that yield generator for training the neural network parameters: ---------- fold: 'train' for training or 'unseen_appliance' for testing/validation source_id: if None we use both real and synthetic data with 50:50 ratio if 1 , only use the real data if 0, only use the synthetic data output: --------- generator (X_train,Y_train): a tuple with X_train being the mini-batch for training and Y_train being the mini-batch of labels while 1: batch_iter = self.get_batch(fold, enable_all_appliances, source_id, reset_iterator,validation) X_train = batch_iter.input input_dim = X_train.shape[1] Y_train = self._get_output_neurons(batch_iter) yield (np.reshape(X_train, [self.num_seq_per_batch, input_dim, 1]), Y_train.astype(np.float32)) def _get_output_neurons(self, new_batch): class method that generates the indices needed for the generator Once the network has seen all the sample, the algorithm feed again the mini-batch by reinitialization of the generator (there is no StopIteration error raised) Suffling of the mini-batch is the default strategy implemented batch_size = new_batch.target.shape[0] neural_net_output = np.empty((batch_size, 3)) for b in range(batch_size): seq = new_batch.target[b] # case 1 and 2: if the signal start at 0 if seq[0] > 0: start = 0 stop_array = np.where(seq > 0)[0] # case 2: signal stops after 1 # set stop to the last element if len(stop_array) == 0: stop = seq[-1] # case 1: signal stops before 1 else: stop = stop_array[-1] # calculate avg power avg_power = np.mean(seq[start:stop + 1]) # case 3: signal starts after 0 and before 1 else: start_array = np.where(seq > 0)[0] if len(start_array) == 0: # case 5: there is no signal in the window start = 0 stop = 0 avg_power = 0 else: start = start_array[0] # find stop stop_array = np.where(seq > 0)[0] # case 4: signal stops after 1 # set to the last element if len(stop_array) == 0: stop = seq[-1] else: stop = stop_array[-1] avg_power = np.mean(seq[start:stop + 1]) start = start / float(new_batch.target.shape[1] - 1) stop = stop / float(new_batch.target.shape[1] - 1) if stop 
< start: raise ValueError("start must be before stop in sequence {}".format(b)) neural_net_output[b, :] = np.array([start, stop, avg_power]) return neural_net_output w_ahead = w + mu * v # evaluate dw_ahead v = mu * v - learning_rate * dw_ahead x += v m = beta1*m + (1-beta1)*dw v = beta2*v + (1-beta2)*(dw**2) w += - learning_rate * m / (np.sqrt(v) + eps) # import Keras related libraries from keras.layers import Input, Dense, Flatten, MaxPooling1D, AveragePooling1D, Convolution1D from keras.models import Model import keras.callbacks from keras.callbacks import ModelCheckpoint import time from keras.models import model_from_json import pickle # ------------ exp_number = 13 output_architecture = './tmpdata/convnet_architecture_exp' + str(exp_number) + '.json' best_weights_during_run = './tmpdata/weights_exp' + str(exp_number) + '.h5' final_weights = './tmpdata/weights_exp' + str(exp_number) + '_final.h5' loss_history = './tmpdata/history_exp' + str(exp_number) + '.pickle' # ------------ # ------------ # a class used to record the training and validation loss # at the end of each epoch class LossHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.train_losses = [] self.valid_losses = [] def on_epoch_end(self, epoch, logs = {}): self.train_losses.append(logs.get('loss')) self.valid_losses.append(logs.get('val_loss')) # ------------ # input sequence input_seq = Input(shape = (seq_length, 1)) # first convolutional layer conv1_layer = Convolution1D(nb_filter = 16, filter_length = 3, border_mode='valid', init = 'normal', activation = 'relu') conv1 = conv1_layer(input_seq) # flatten the weights flat = Flatten()(conv1) # first dense layer dense1 = Dense(1024, activation = 'relu')(flat) # second dense layer dense2 = Dense(512, activation = 'relu', init= 'normal')(dense1) # output layer predictions = Dense(3, activation = 'linear')(dense2) # create the model model = Model(input=input_seq, output=predictions) # compile the model -- define the loss and the optimizer model.compile(loss='mean_squared_error', optimizer='Adam') # record the loss history history = LossHistory() # save the weigths when the vlaidation lost decreases only checkpointer = ModelCheckpoint(filepath=best_weights_during_run, save_best_only=True, verbose =1 ) # fit the network using the generator of mini-batches. model.fit_generator(pipeline.train_generator(fold = 'train'), \ samples_per_epoch = 30000, \ nb_epoch = 20, verbose = 1, callbacks=[history, checkpointer], validation_data = (x_valid,y_valid), max_q_size = 50) losses_dic = {'train_loss': history.train_losses, 'valid_loss':history.valid_losses} # save history losses_dic = {'train_loss': history.train_losses, 'valid_loss':history.valid_losses} with open(loss_history, 'wb') as handle: pickle.dump(losses_dic, handle) print('\n saving the architecture of the model \n') json_string = model.to_json() open(output_architecture, 'w').write(json_string) print('\n saving the final weights ... 
\n') model.save_weights(final_weights, overwrite = True) print('done saving the weights') print('\n saving the training and validation losses') print('This was the model trained') print(model.summary()) from keras.models import model_from_json # load experiments number exp_number = 13 # load the model architecture output_architecture = './tmpdata/convnet_architecture_exp' + str(exp_number) + '.json' # load the weights for the lowest validation loss during training best_weights_during_run = './tmpdata/weights_exp' + str(exp_number) + '.h5' # load the final weights at the end of the 20 epochs final_weights = './tmpdata/weights_exp' + str(exp_number) + '_final.h5' model = model_from_json(open(output_architecture).read()) # load intermediate or final weights model.load_weights(best_weights_during_run) model.compile(loss='mean_squared_error', optimizer='sgd') # print the summary of the architecture model.summary() # load the loss summary (training and validation losses) import pickle losses = pickle.load( open('./tmpdata/history_exp' + str(exp_number) + '.pickle' , 'rb')) # load the test set test_set = pickle.load( open('./tmpdata/TestSet.pickle', 'rb')) X_test = test_set['X_test'] Y_test = test_set["Y_test"] # Here we predict the output from the neural network and show the scores import neuralnilm.scores Y_pred = model.predict(X_test) scores(Y_pred, Y_test) from neuralnilm.scores import plot_loss plot_loss(losses) exp_number = 14 output_architecture = './tmpdata/convnet_architecture_exp' + str(exp_number) + '.json' best_weights_during_run = './tmpdata/weights_exp' + str(exp_number) + '.h5' final_weights = './tmpdata/weights_exp' + str(exp_number) + '_final.h5' model = model_from_json(open(output_architecture).read()) # load intermediate or final weights model.load_weights(best_weights_during_run) model.compile(loss='mean_squared_error', optimizer='sgd') model.summary() # load the loss summary import pickle losses = pickle.load( open('./tmpdata/history_exp' + str(exp_number) + '.pickle' , 'rb')) # load the test set test_set = pickle.load( open('./tmpdata/TestSet.pickle', 'rb')) X_test = test_set['X_test'] Y_test = test_set["Y_test"] import neuralnilm.scores Y_pred = model.predict(X_test) scores(Y_pred, Y_test) from neuralnilm.scores import plot_loss plot_loss(losses) from keras.models import model_from_json exp_number = 13 output_architecture = './tmpdata/convnet_architecture_exp' + str(exp_number) + '.json' best_weights_during_run = './tmpdata/weights_exp' + str(exp_number) + '.h5' final_weights = './tmpdata/weights_exp' + str(exp_number) + '_final.h5' model = model_from_json(open(output_architecture).read()) # load intermediate or final weights model.load_weights(best_weights_during_run) model.compile(loss='mean_squared_error', optimizer='sgd') model.summary() # load the loss summary import pickle losses = pickle.load( open('./tmpdata/history_exp' + str(exp_number) + '.pickle' , 'rb')) # load the test set test_set = pickle.load( open('./tmpdata/TestSet_microwave.pickle', 'rb')) X_test_microwave = test_set['X_test'] Y_test_microwave = test_set["Y_test"] from neuralnilm.scores import scores Y_pred_microwave = model.predict(X_test_microwave) scores(Y_pred_microwave, Y_test_microwave) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Data preparation Step2: One contribution to this aggregated data is the cycles of the fridge plotted below. Step3: Of course there are many other appliances that contributes to the aggregated data observed above. The list of appliances listed in this house is shown here Step4: The sum of all these appliances is highly correlated to the aggregated data. Step5: We can visualize this looking at the main meters for one day and the sum of all the appliances for one day. Step6: The correlation is high because the 9 appliances listed above consume a significant proportion of the energy and represent a range of different power 'signatures'. Here is one signature from the fridge and one signature from the microwave. Step7: Obviously the microwave signature is a pic of short duration while the fridge cycle is a bit longer. In this study we try to retrieve from the aggregated data the times an appliance switches on, the times it swithes off and the average power comsuption in each cycle. We call this the rectangle method. Essentially, we want to retrieve from the aggregated data, this type of representation Step8: Data pipeline Step9: Standardisation of the input data (aggregated data) Step11: Output data (start time, end time and average power) Step14: Implementation strategy for real time data augmentation Step15: Network architecture Step16: We found by experimenting that the best optimizer is Adam (http Step17: where dw is the gradient of the lost function with respect to the considered weight and w the considered weight. The hyperparameter for this optimizer are beta1, beta2 and eps. We set beta_1=0.9, beta_2=0.999, epsilon=1e-08 and the learning rate = 1e-3. Step18: One convolutional layer Step19: This is a very good F1 score, much better than with any other method tested before. Here we show the evolution of the training loss and the validation loss. Step20: The training and validation loss during training decreases simulateanously. There is no overfitting. We stop the simulation after 20 epochs but the model was still improving. 20 epochs on GPU took about 1.5 hours. Step21: There are 2 convolutional layers with 16 filters and 2 dense layers and the output layer. In this case the results are fairly similar to the network presented above. Step22: Results for the microwave
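The scores function above computes precision, recall and F1 from TP/FP/TN/FN counts by hand. As a hedged cross-check (not part of the original pipeline), the same metrics follow from scikit-learn once each predicted rectangle is reduced to an on/off label; the rectangles below are made-up examples.

import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score

def rectangles_to_on_off(Y):
    # A sequence counts as "appliance on" if its rectangle has nonzero extent.
    return ~((Y[:, 0] == 0) & (Y[:, 1] == 0))

Y_true = np.array([[0.1, 0.4, 80.0], [0.0, 0.0, 0.0], [0.2, 0.9, 60.0]])
Y_hat = np.array([[0.1, 0.5, 75.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])

y_true, y_hat = rectangles_to_on_off(Y_true), rectangles_to_on_off(Y_hat)
print("precision:", precision_score(y_true, y_hat))
print("recall:", recall_score(y_true, y_hat))
print("f1:", f1_score(y_true, y_hat))
print("accuracy:", accuracy_score(y_true, y_hat))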
13,640
<ASSISTANT_TASK:> Python Code: import numpy as np from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.callbacks import ModelCheckpoint from keras.utils import np_utils from time import gmtime, strftime import os import re import pickle import random import sys # load ascii text from file filename = "data/obama.txt" raw_text = open(filename).read() # get rid of any characters other than letters, numbers, # and a few special characters raw_text = re.sub('[^\nA-Za-z0-9 ,.:;?!-]+', '', raw_text) # convert all text to lowercase raw_text = raw_text.lower() n_chars = len(raw_text) print("length of text:", n_chars) print("text preview:", raw_text[:500]) # extract all unique characters in the text chars = sorted(list(set(raw_text))) n_vocab = len(chars) print("number of unique characters found:", n_vocab) # create mapping of characters to integers and back char_to_int = dict((c, i) for i, c in enumerate(chars)) int_to_char = dict((i, c) for i, c in enumerate(chars)) # test our mapping print('a', "- maps to ->", char_to_int["a"]) print(25, "- maps to ->", int_to_char[25]) # prepare the dataset of input to output pairs encoded as integers seq_length = 100 inputs = [] outputs = [] for i in range(0, n_chars - seq_length, 1): inputs.append(raw_text[i:i + seq_length]) outputs.append(raw_text[i + seq_length]) n_sequences = len(inputs) print("Total sequences: ", n_sequences) indeces = list(range(len(inputs))) random.shuffle(indeces) inputs = [inputs[x] for x in indeces] outputs = [outputs[x] for x in indeces] print(inputs[0], "-->", outputs[0]) # create two empty numpy array with the proper dimensions X = np.zeros((n_sequences, seq_length, n_vocab), dtype=np.bool) y = np.zeros((n_sequences, n_vocab), dtype=np.bool) # iterate over the data and build up the X and y data sets # by setting the appropriate indices to 1 in each one-hot vector for i, example in enumerate(inputs): for t, char in enumerate(example): X[i, t, char_to_int[char]] = 1 y[i, char_to_int[outputs[i]]] = 1 print('X dims -->', X.shape) print('y dims -->', y.shape) # define the LSTM model model = Sequential() model.add(LSTM(128, return_sequences=False, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.50)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def generate(sentence, prediction_length=50, diversity=0.35): print('----- diversity:', diversity) generated = sentence sys.stdout.write(generated) # iterate over number of characters requested for i in range(prediction_length): # build up sequence data from current sentence x = np.zeros((1, X.shape[1], X.shape[2])) for t, char in enumerate(sentence): x[0, t, char_to_int[char]] = 1. 
# use trained model to return probability distribution # for next character based on input sequence preds = model.predict(x, verbose=0)[0] # use sample() function to sample next character # based on probability distribution and desired diversity next_index = sample(preds, diversity) # convert integer to character next_char = int_to_char[next_index] # add new character to generated text generated += next_char # delete the first character from beginning of sentance, # and add new caracter to the end. This will form the # input sequence for the next predicted character. sentence = sentence[1:] + next_char # print results to screen sys.stdout.write(next_char) sys.stdout.flush() print() filepath="-basic_LSTM.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min') callbacks_list = [checkpoint] epochs = 50 prediction_length = 100 for iteration in range(epochs): print('epoch:', iteration + 1, '/', epochs) model.fit(X, y, validation_split=0.2, batch_size=256, epochs=1, callbacks=callbacks_list) # get random starting point for seed start_index = random.randint(0, len(raw_text) - seq_length - 1) # extract seed sequence from raw text seed = raw_text[start_index: start_index + seq_length] print('----- generating with seed:', seed) for diversity in [0.5, 1.2]: generate(seed, prediction_length, diversity) pickle_file = '-basic_data.pickle' try: f = open(pickle_file, 'wb') save = { 'X': X, 'y': y, 'int_to_char': int_to_char, 'char_to_int': char_to_int, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Saved data to', pickle_file) print('Compressed pickle size:', statinfo.st_size) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The first thing we need to do is generate our training data set. In this case we will use a recent article written by Barack Obama for The Economist newspaper. Make sure you have the obama.txt file in the /data folder within the /week-6 folder in your repository. Step2: Next, we use python's set() function to generate a list of all unique characters in the text. This will form our 'vocabulary' of characters, which is similar to the categories found in typical ML classification problems. Step3: Now we need to define the training data for our network. With RNN's, the training data usually takes the shape of a three-dimensional matrix, with the size of each dimension representing Step4: Now let's shuffle both the input and output data so that we can later have Keras split it automatically into a training and test set. To make sure the two lists are shuffled the same way (maintaining correspondance between inputs and outputs), we create a separate shuffled list of indeces, and use these indeces to reorder both lists. Step5: Let's visualize one of these sequences to make sure we are getting what we expect Step6: Next we will prepare the actual numpy datasets which will be used to train our network. We first initialize two empty numpy arrays in the proper formatting Step7: Next, we define our RNN model in Keras. This is very similar to how we defined the CNN model, except now we use the LSTM() function to create an LSTM layer with an internal memory of 128 neurons. LSTM is a special type of RNN layer which solves the unstable gradients issue seen in basic RNN. Along with LSTM layers, Keras also supports basic RNN layers and GRU layers, which are similar to LSTM. You can find full documentation for recurrent layers in Keras' documentation Step8: Next, we define two helper functions Step9: The generate() function will take in Step10: Next, we define a system for Keras to save our model's parameters to a local file after each epoch where it achieves an improvement in the overall loss. This will allow us to reuse the trained model at a later time without having to retrain it from scratch. This is useful for recovering models incase your computer crashes, or you want to stop the training early. Step11: Now we are finally ready to train the model. We want to train the model over 50 epochs, but we also want to output some generated text after each epoch to see how our model is doing. Step12: That looks pretty good! You can see that the RNN has learned alot of the linguistic structure of the original writing, including typical length for words, where to put spaces, and basic punctuation with commas and periods. Many words are still misspelled but seem almost reasonable, and it is pretty amazing that it is able to learn this much in only 50 epochs of training.
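The effect of the diversity/temperature argument used by sample() is easiest to see on a few assumed probabilities; the numbers below are made up for illustration and are not taken from the trained model.

import numpy as np

preds = np.array([0.6, 0.3, 0.1])                 # hypothetical next-character probabilities
for temperature in (0.2, 1.0, 2.0):
    rescaled = np.exp(np.log(preds) / temperature)
    rescaled /= rescaled.sum()
    print(temperature, np.round(rescaled, 3))
# 0.2 -> [0.97  0.03  0.   ]  nearly greedy sampling
# 1.0 -> [0.6   0.3   0.1  ]  distribution unchanged
# 2.0 -> [0.473 0.334 0.193]  flatter, more surprising characters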
13,641
<ASSISTANT_TASK:> Python Code: # Author: Marijn van Vliet <w.m.vanvliet@gmail.com> # Roman Goj <roman.goj@gmail.com> # Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import mne from mne.datasets import sample from mne.time_frequency import csd_morlet from mne.beamformer import make_dics, apply_dics_csd print(__doc__) data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif' fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' subjects_dir = data_path + '/subjects' raw = mne.io.read_raw_fif(raw_fname) raw.info['bads'] = ['MEG 2443'] # 1 bad MEG channel # Set picks picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False, stim=False, exclude='bads') # Read epochs event_id, tmin, tmax = 1, -0.2, 0.5 events = mne.read_events(event_fname) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12)) evoked = epochs.average() # Read forward operator forward = mne.read_forward_solution(fname_fwd) csd = csd_morlet(epochs, tmin=0, tmax=0.5, decim=20, frequencies=np.linspace(6, 10, 4)) # Compute DICS spatial filter and estimate source power. filters = make_dics(epochs.info, forward, csd, reg=0.5) stc, freqs = apply_dics_csd(csd, filters) message = 'DICS source power in the 8-12 Hz frequency band' brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir, time_label=message) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Reading the raw data Step2: Computing the cross-spectral density matrix at 4 evenly spaced frequencies
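One small check worth making on the cell above: the CSD is evaluated at four Morlet frequencies between 6 and 10 Hz, while the plot label speaks of an "8-12 Hz" band, so printing the actual frequencies avoids confusion.

import numpy as np
print(np.linspace(6, 10, 4))   # [ 6.          7.33333333  8.66666667 10.        ]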
13,642
<ASSISTANT_TASK:> Python Code: import os import pandas as pd from google.cloud import bigquery %load_ext google.cloud.bigquery PROJECT = # Replace with your PROJECT BUCKET = PROJECT REGION = "us-east1" os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION %%bigquery --project $PROJECT SELECT url, title, score FROM `bigquery-public-data.hacker_news.stories` WHERE LENGTH(title) > 10 AND score > 10 AND LENGTH(url) > 0 LIMIT 10 %%bigquery --project $PROJECT SELECT ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source, COUNT(title) AS num_articles FROM `bigquery-public-data.hacker_news.stories` WHERE REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$') AND LENGTH(title) > 10 GROUP BY source ORDER BY num_articles DESC LIMIT 100 regex = ".*://(.[^/]+)/" sub_query = SELECT title, ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source FROM `bigquery-public-data.hacker_news.stories` WHERE REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$') AND LENGTH(title) > 10 .format( regex ) query = SELECT LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title, source FROM ({sub_query}) WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch') .format( sub_query=sub_query ) print(query) bq = bigquery.Client(project=PROJECT) title_dataset = bq.query(query).to_dataframe() title_dataset.head() print(f"The full dataset contains {len(title_dataset)} titles") title_dataset.source.value_counts() DATADIR = "./data/" if not os.path.exists(DATADIR): os.makedirs(DATADIR) FULL_DATASET_NAME = "titles_full.csv" FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME) # Let's shuffle the data before writing it to disk. title_dataset = title_dataset.sample(n=len(title_dataset)) title_dataset.to_csv( FULL_DATASET_PATH, header=False, index=False, encoding="utf-8" ) sample_title_dataset = title_dataset.sample(n=1000) sample_title_dataset.source.value_counts() SAMPLE_DATASET_NAME = "titles_sample.csv" SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME) sample_title_dataset.to_csv( SAMPLE_DATASET_PATH, header=False, index=False, encoding="utf-8" ) sample_title_dataset.head() import datetime import os import shutil import pandas as pd import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping, TensorBoard from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.utils import to_categorical from tensorflow_hub import KerasLayer print(tf.__version__) %matplotlib inline MODEL_DIR = "./text_models" DATA_DIR = "./data" ls ./data/ DATASET_NAME = "titles_full.csv" TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME) COLUMNS = ["title", "source"] titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS) titles_df.head() titles_df.source.value_counts() CLASSES = {"github": 0, "nytimes": 1, "techcrunch": 2} N_CLASSES = len(CLASSES) def encode_labels(sources): classes = [CLASSES[source] for source in sources] one_hots = to_categorical(classes, num_classes=N_CLASSES) return one_hots encode_labels(titles_df.source[:4]) N_TRAIN = int(len(titles_df) * 0.95) titles_train, sources_train = ( titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN], ) titles_valid, sources_valid = ( titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:], ) sources_train.value_counts() sources_valid.value_counts() X_train, Y_train = titles_train.values, encode_labels(sources_train) X_valid, 
Y_valid = titles_valid.values, encode_labels(sources_valid) X_train[:3] Y_train[:3] # TODO 1 NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2" nnlm_module = KerasLayer( NNLM, output_shape=[50], input_shape=[], dtype=tf.string, trainable=True ) # TODO 1 nnlm_module(tf.constant(["The dog is happy to see people in the street."])) # TODO 1 SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1" swivel_module = KerasLayer( SWIVEL, output_shape=[20], input_shape=[], dtype=tf.string, trainable=True ) # TODO 1 swivel_module(tf.constant(["The dog is happy to see people in the street."])) def build_model(hub_module, name): model = Sequential( [ hub_module, # TODO 2 Dense(16, activation="relu"), Dense(N_CLASSES, activation="softmax"), ], name=name, ) model.compile( optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] ) return model def train_and_evaluate(train_data, val_data, model, batch_size=5000): X_train, Y_train = train_data tf.random.set_seed(33) model_dir = os.path.join(MODEL_DIR, model.name) if tf.io.gfile.exists(model_dir): tf.io.gfile.rmtree(model_dir) history = model.fit( X_train, Y_train, epochs=100, batch_size=batch_size, validation_data=val_data, callbacks=[EarlyStopping(), TensorBoard(model_dir)], ) return history data = (X_train, Y_train) val_data = (X_valid, Y_valid) nnlm_model = build_model(nnlm_module, "nnlm") nnlm_history = train_and_evaluate(data, val_data, nnlm_model) history = nnlm_history pd.DataFrame(history.history)[["loss", "val_loss"]].plot() pd.DataFrame(history.history)[["accuracy", "val_accuracy"]].plot() swivel_model = build_model(swivel_module, name="swivel") swivel_history = train_and_evaluate(data, val_data, swivel_model) history = swivel_history pd.DataFrame(history.history)[["loss", "val_loss"]].plot() pd.DataFrame(history.history)[["accuracy", "val_accuracy"]].plot() OUTPUT_DIR = "./savedmodels" shutil.rmtree(OUTPUT_DIR, ignore_errors=True) EXPORT_PATH = os.path.join(OUTPUT_DIR, "swivel") os.environ["EXPORT_PATH"] = EXPORT_PATH shutil.rmtree(EXPORT_PATH, ignore_errors=True) tf.saved_model.save(swivel_model, EXPORT_PATH) %%bash # TODO 5 MODEL_NAME=title_model VERSION_NAME=swivel if [[ $(gcloud ai-platform models list --format='value(name)' | grep ^$MODEL_NAME$) ]]; then echo "$MODEL_NAME already exists" else echo "Creating $MODEL_NAME" gcloud ai-platform models create --region=$REGION $MODEL_NAME fi if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep ^$VERSION_NAME$) ]]; then echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... " echo yes | gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME echo "Please run this cell again if you don't see a Creating message ... " sleep 2 fi echo "Creating $MODEL_NAME:$VERSION_NAME" gcloud ai-platform versions create \ --model=$MODEL_NAME $VERSION_NAME \ --framework=tensorflow \ --python-version=3.7 \ --runtime-version=2.1 \ --origin=$EXPORT_PATH \ --staging-bucket=gs://$BUCKET \ --machine-type n1-standard-4 \ --region=$REGION !saved_model_cli show \ --tag_set serve \ --signature_def serving_default \ --dir {EXPORT_PATH} !find {EXPORT_PATH} %%writefile input.json {"keras_layer_1_input": "hello"} !gcloud ai-platform predict \ --model title_model \ --json-instances input.json \ --version swivel \ --region=$REGION <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Replace the variable values in the cell below Step2: Create a Dataset from BigQuery Step3: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning. Step7: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here. Step8: AutoML for text classification requires that Step9: Let's make sure we have roughly the same number of labels for each of our three labels Step10: Finally we will save our data, which is currently in-memory, to disk. Step11: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML). Step12: Let's write the sample datatset to disk. Step13: Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located Step14: Loading the dataset Step15: Let's look again at the number of examples per label to make sure we have a well-balanced dataset Step16: Preparing the labels Step17: Preparing the train/test splits Step18: To be on the safe side, we verify that the train and test splits Step19: Now let's create the features and labels we will feed our models with Step20: NNLM Model Step21: Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence Step22: Swivel Model Step23: Similarly as the previous pre-trained embedding, it outputs a single vector when passed a sentence Step24: Building the models Step25: Let's also wrap the training code into a train_and_evaluate function that Step26: Training NNLM Step27: Training Swivel Step28: Deploying the model Step29: Then we can deploy the model using the gcloud CLI as before Step30: Before we try our deployed model, let's inspect its signature to know what to send to the deployed API Step31: Let's go ahead and hit our model
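The label handling described in the steps above (the CLASSES mapping, one-hot encoding with to_categorical, argmax to decode predictions) can be demonstrated on its own; the prediction vector at the end is a made-up example, not real model output.

import numpy as np
from tensorflow.keras.utils import to_categorical

CLASSES = {"github": 0, "nytimes": 1, "techcrunch": 2}
sources = ["github", "techcrunch", "nytimes"]

# Encode: source name -> integer class -> one-hot row.
one_hots = to_categorical([CLASSES[s] for s in sources], num_classes=len(CLASSES))
print(one_hots)        # [[1. 0. 0.] [0. 0. 1.] [0. 1. 0.]]

# Decode: softmax output -> argmax -> source name.
inv_classes = {v: k for k, v in CLASSES.items()}
pred = np.array([0.1, 0.7, 0.2])                 # hypothetical output for one title
print(inv_classes[int(np.argmax(pred))])         # nytimes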
13,643
<ASSISTANT_TASK:> Python Code: publisher = IoT_mqtt_publisher("localhost", 1883) sensor_1 = IoT_sensor("1", "temperature", "°C", 20, 26, 2) sensor_2 = IoT_sensor("2", "umidade", "%", 50, 60, 3) sensor_3 = IoT_sensor("3", "temperature", "°C", 28, 30, 4) sensor_4 = IoT_sensor("4", "umidade", "%", 40, 55, 5) sensor_1.connect(publisher) sensor_2.connect(publisher) sensor_3.connect(publisher) sensor_4.connect(publisher) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Componente para simulação de um sensor Step2: Conectando os Componentes
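The IoT_mqtt_publisher and IoT_sensor classes used above are not defined in this excerpt. The sketch below is only a guess at their behaviour — a sensor that publishes a random reading inside its range every few seconds — and it assumes a paho-mqtt 1.x client with a broker listening on localhost:1883.

import json
import random
import threading
import paho.mqtt.client as mqtt

class SimulatedSensor:
    # Hypothetical stand-in for IoT_sensor: id, quantity, unit, value range, period in seconds.
    def __init__(self, sensor_id, kind, unit, low, high, interval):
        self.sensor_id, self.kind, self.unit = sensor_id, kind, unit
        self.low, self.high, self.interval = low, high, interval

    def connect(self, client, topic="iot/sensors"):
        def publish_forever():
            reading = {"id": self.sensor_id, "type": self.kind, "unit": self.unit,
                       "value": round(random.uniform(self.low, self.high), 2)}
            client.publish(topic, json.dumps(reading))
            threading.Timer(self.interval, publish_forever).start()
        publish_forever()

client = mqtt.Client()
client.connect("localhost", 1883)
client.loop_start()
SimulatedSensor("1", "temperature", "°C", 20, 26, 2).connect(client)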
13,644
<ASSISTANT_TASK:> Python Code: import warnings import scipy as sp import numpy as np import openpnm as op %config InlineBackend.figure_formats = ['svg'] np.set_printoptions(precision=4) np.random.seed(10) %matplotlib inline from pathlib import Path path = Path('../_fixtures/ICL-Sandstone(Berea)/') project = op.io.Statoil.import_data(path=path, prefix='Berea') pn = project.network pn.name = 'berea' print(pn) op.io.VTK.export_data(network=pn, filename='imported_statoil') print('Number of pores before trimming: ', pn.Np) h = pn.check_network_health() op.topotools.trim(network=pn, pores=np.hstack([h['isolated_pores'],h['disconnected_pores']])) print('Number of pores after trimming: ', pn.Np) water = op.phases.Water(network=pn) water.add_model(propname='throat.hydraulic_conductance', model=op.models.physics.hydraulic_conductance.valvatne_blunt) print(water['throat.hydraulic_conductance']) flow = op.algorithms.StokesFlow(network=pn, phase=water) flow.set_value_BC(pores=pn.pores('inlets'), values=200000) flow.set_value_BC(pores=pn.pores('outlets'), values=100000) flow.run() # Get the average value of the fluid viscosity mu = np.mean(water['pore.viscosity']) # Specify a pressure difference (in Pa) delta_P = 100000 # Using the rate method of the StokesFlow algorithm Q = np.absolute(flow.rate(pores=pn.pores('inlets'))) # Because we know the inlets and outlets are at x=0 and x=X Lx = np.amax(pn['pore.coords'][:, 0]) - np.amin(pn['pore.coords'][:, 0]) A = Lx*Lx # Since the network is cubic Lx = Ly = Lz K = Q*mu*Lx/(delta_P*A) print(K) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The following assumes that the folder containing the 'dat' files is in a directory called 'fixtures' in the same directory as this script. You can also enter a full path to the files. Step2: This import class extracts all the information contained in the 'Statoil' files, such as sizes, locations and connectivity. Note that the io classes return a project object, and the network itself can be accessed using the network attribute. The following printout display which information was contained in the file Step3: At this point, the network can be visualized in Paraview. A suitable '.vtp' file can be created with Step4: The resulting network is shown below Step5: Dealing with Inlet and Outlet Pores Step6: Apply Pore-Scale Models Step7: Recall that boundary pores and throats had no geometrical properties associated with them, so the hydraulic conductances of boundary throats will be undefined (filled with NaNs) Step8: Run StokesFlow Algorithm Step9: The resulting pressure field can be visualized in Paraview, giving the following
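As a sanity check on the Darcy's-law result above, the SI permeability can be converted into millidarcy, the unit usually quoted for Berea sandstone; the K value plugged in below is a placeholder rather than the script's actual output.

K_m2 = 1.0e-12                        # hypothetical permeability in m^2
K_mD = K_m2 / 9.869e-13 * 1000.0      # 1 darcy ~ 9.869e-13 m^2, then darcy -> millidarcy
print(round(K_mD), "mD")              # ~1013 mD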
13,645
<ASSISTANT_TASK:> Python Code: %%sh pip install pandas pip install scikit-learn pip install keras from __future__ import print_function import numpy as np %matplotlib inline import pandas as pd import matplotlib.pyplot as plt from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Dropout, Activation from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from classification_utilities import display_cm, display_adj_cm from sklearn.metrics import confusion_matrix, f1_score from sklearn import preprocessing filename = 'train_test_data.csv' training_data = pd.read_csv(filename) training_data.head(10) # Set 'Well Name' and 'Formation' fields as categories training_data['Well Name'] = training_data['Well Name'].astype('category') training_data['Formation'] = training_data['Formation'].astype('category') # Fill missing values and normalize for 'PE' field training_data['PE'] = training_data['PE'].fillna(value=0) mean_pe = training_data['PE'].mean() std_pe = training_data['PE'].std() training_data['PE'] = (training_data['PE']-mean_pe)/std_pe # Normalize the rest of fields (GR, ILD_log10, DelthaPHI, PHIND,NM_M,RELPOS) correct_facies_labels = training_data['Facies'].values feature_vectors = training_data.drop(['Formation', 'Depth'], axis=1) well_labels = training_data[['Well Name', 'Facies']].values data_vectors = feature_vectors.drop(['Well Name', 'Facies'], axis=1).values scaler = preprocessing.StandardScaler().fit(data_vectors) scaled_features = scaler.transform(data_vectors) data_out = np.hstack([well_labels, scaled_features]) def preprocess(data_out): data = data_out well_data = {} well_names = list(set(data[:, 0])) for name in well_names: well_data[name] = [[], []] for row in data: well_data[row[0]][1].append(row[1]) well_data[row[0]][0].append(list(row[2::])) # Sliding window positive_lag = 5 negative_lag = 5 chunks = [] chunks_test = [] chunk_length = positive_lag+negative_lag+1 chunks_facies = [] for name in well_names: if name not in ['STUART', 'CRAWFORD']: test_well_data = well_data[name] log_values = np.array(test_well_data[0]) log_values_padded = np.lib.pad(log_values, (negative_lag,positive_lag), 'edge')[:, negative_lag:-positive_lag] facies_values = np.array(test_well_data[1]) for i in range(log_values.shape[0]): chunks.append(log_values_padded[i:i+chunk_length, :]) chunks_facies.append(facies_values[i]) else: test_well_data = well_data[name] log_values = np.array(test_well_data[0]) log_values_padded = np.lib.pad(log_values, (negative_lag,positive_lag), 'edge')[:, negative_lag:-positive_lag] facies_values = np.array(test_well_data[1]) for i in range(log_values.shape[0]): chunks_test.append(log_values_padded[i:i+chunk_length, :]) chunks_facies = np.array(chunks_facies, dtype=np.int32)-1 X_ = np.array(chunks) X = np.zeros((len(X_),len(X_[0][0]) * len(X_[0]))) for i in range(len(X_)): X[i,:] = X_[i].flatten() X_test = np.array(chunks_test) X_test_out = np.zeros((len(X_test),len(X_test[0][0]) * len(X_test[0]))) for i in range(len(X_test)): X_test_out[i,:] = X_test[i].flatten() y = np_utils.to_categorical(chunks_facies) return X, y, X_test_out np.random.seed(1337) # For reproducibility X_train, y_train, X_test = preprocess(data_out) # Set parameters input_dim = 77 hidden_dim_1 = 128 hidden_dim_2 = 32 output_dim = 9 batch_size = 32 nb_epoch = 10 def proposed_model(): # Define the model model = Sequential() 
model.add(Dense(128, input_dim=77, init='normal', activation='relu')) model.add(Dense(32, input_dim=128, init='normal', activation='relu')) model.add(Dropout(0.3)) model.add(Dense(9, init='normal', activation='softmax')) # Compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model def accuracy(conf): total_correct = 0. nb_classes = conf.shape[0] for i in np.arange(0,nb_classes): total_correct += conf[i][i] acc = total_correct/sum(sum(conf)) return acc adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]]) facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS','WS', 'D','PS', 'BS'] def accuracy_adjacent(conf, adjacent_facies): nb_classes = conf.shape[0] total_correct = 0. for i in np.arange(0,nb_classes): total_correct += conf[i][i] for j in adjacent_facies[i]: total_correct += conf[i][j] return total_correct / sum(sum(conf)) estimator = KerasClassifier(build_fn=proposed_model, nb_epoch=10, batch_size=50, verbose=0) # Cross Validation kfold = KFold(n_splits=5, shuffle=True) results = cross_val_score(estimator, X_train, y_train, cv=kfold) print(' Cross Validation Results') print( results ) plt.hist(results, bins= 5) plt.title("Model Accuracy") plt.show() print ( "Cross Validation Average Accuracy %.3f" % results.mean() ) print ( "Cross Validation Standard Deviation %.3f" % results.std() ) # Load the model model = proposed_model() #Train model model.fit(X_train, y_train, nb_epoch=10, verbose=2, shuffle = True) # Predict Values on Training set y_predicted = model.predict( X_train , batch_size=32, verbose=0) # Print Report # Format output [0 - 8 ] y_ = np.zeros((len(y_train),1)) for i in range(len(y_train)): y_[i] = np.argmax(y_train[i]) y_predicted_ = np.zeros((len(y_predicted), 1)) for i in range(len(y_predicted)): y_predicted_[i] = np.argmax( y_predicted[i]) # Confusion Matrix conf = confusion_matrix(y_, y_predicted_) # Print Results print ("\nModel Report") print ("-Accuracy: %.6f" % ( accuracy(conf) )) print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) )) print ("\nConfusion Matrix") display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True) y_test = model.predict( X_test , batch_size=32, verbose=0) final_predictions = np.zeros((len(y_test),1)) for i in range(len(y_test)): final_predictions[i] = np.argmax(y_test[i]) + 1 last_submission = pd.read_csv('Prediction.csv') ls = last_submission['Facies'].as_matrix() conf = confusion_matrix(ls, final_predictions) # Print Results print ("\nModel Report") print ("-Accuracy: %.6f" % ( accuracy(conf) )) print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) )) print ("\nConfusion Matrix") display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True) # Store results test_data = pd.read_csv('../validation_data_nofacies.csv') test_data['Facies'] = final_predictions test_data.to_csv('Prediction2.csv') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We load the training and testing data to preprocess it for further analysis. Step2: We fill the missing data values in the PE field with zero and proceed to normalize the data that will be fed into our model. Step3: In order to start training stage, it is required to format the data by considering a sliding window over the depth component in order to classify a given set of features at some specific depth for each well in the training set. Step4: Data Analysis Step5: In order to evaluate our classification model accurary we will use the our following defined metrics, based on the confusion matrix once the classification is performed. The first metric only considers misclassification error and the second one takes into account the fact that facies could be misclassified if they belong to a same group with similar geological characteristics. Step6: Once the set of parameters are fixed, the training stage of our model begins. We perform a Cross Validation routine to evaluate the performance of the model. Step7: Prediction
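The two metrics defined by accuracy() and accuracy_adjacent() above are easiest to see on a toy confusion matrix; the numbers and the three-class adjacency below are invented for illustration, not taken from the well data.

import numpy as np

conf = np.array([[8, 1, 1],
                 [1, 6, 3],
                 [0, 2, 8]])                     # rows = true facies, columns = predicted
adjacent = [[1], [0, 2], [1]]                    # toy adjacency between the three classes

total = conf.sum()
acc = np.trace(conf) / total                                             # 22/30 ~ 0.733
adj_hits = np.trace(conf) + sum(conf[i, j] for i in range(3) for j in adjacent[i])
adj_acc = adj_hits / total                                               # 29/30 ~ 0.967
print(round(acc, 3), round(adj_acc, 3))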
13,646
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-2', 'ocnbgchem') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Geochemical" # "NPZD" # "PFT" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Fixed" # "Variable" # "Mix of both" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.damping') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline" # "Online" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Use that of ocean model" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Atmospheric Chemistry model" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Land Surface model" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other protocol" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea water" # "Free" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrogen (N)" # "Phosphorous (P)" # "Silicium (S)" # "Iron (Fe)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrates (NO3)" # "Amonium (NH4)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dentrification" # "N fixation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "PFT including size based (specify both below)" # "Size based only (specify below)" # "PFT only (specify below)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Diatoms" # "Nfixers" # "Calcifiers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microphytoplankton" # "Nanophytoplankton" # "Picophytoplankton" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "Size based (specify below)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microzooplankton" # "Mesozooplankton" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Labile" # "Semi-labile" # "Refractory" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diagnostic" # "Diagnostic (Martin profile)" # "Diagnostic (Balast)" # "Prognostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "POC" # "PIC (calcite)" # "PIC (aragonite" # "BSi" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "No size spectrum used" # "Full size spectrum" # "Discrete size classes (specify which below)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Function of particule size" # "Function of particule type (balast)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "C13" # "C14)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Prognostic" # "Diagnostic)" # TODO - please enter value(s) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 1.3. Model Type Step7: 1.4. Elemental Stoichiometry Step8: 1.5. Elemental Stoichiometry Details Step9: 1.6. Prognostic Variables Step10: 1.7. Diagnostic Variables Step11: 1.8. Damping Step12: 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport Step13: 2.2. Timestep If Not From Ocean Step14: 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks Step15: 3.2. Timestep If Not From Ocean Step16: 4. Key Properties --&gt; Transport Scheme Step17: 4.2. Scheme Step18: 4.3. Use Different Scheme Step19: 5. Key Properties --&gt; Boundary Forcing Step20: 5.2. River Input Step21: 5.3. Sediments From Boundary Conditions Step22: 5.4. Sediments From Explicit Model Step23: 6. Key Properties --&gt; Gas Exchange Step24: 6.2. CO2 Exchange Type Step25: 6.3. O2 Exchange Present Step26: 6.4. O2 Exchange Type Step27: 6.5. DMS Exchange Present Step28: 6.6. DMS Exchange Type Step29: 6.7. N2 Exchange Present Step30: 6.8. N2 Exchange Type Step31: 6.9. N2O Exchange Present Step32: 6.10. N2O Exchange Type Step33: 6.11. CFC11 Exchange Present Step34: 6.12. CFC11 Exchange Type Step35: 6.13. CFC12 Exchange Present Step36: 6.14. CFC12 Exchange Type Step37: 6.15. SF6 Exchange Present Step38: 6.16. SF6 Exchange Type Step39: 6.17. 13CO2 Exchange Present Step40: 6.18. 13CO2 Exchange Type Step41: 6.19. 14CO2 Exchange Present Step42: 6.20. 14CO2 Exchange Type Step43: 6.21. Other Gases Step44: 7. Key Properties --&gt; Carbon Chemistry Step45: 7.2. PH Scale Step46: 7.3. Constants If Not OMIP Step47: 8. Tracers Step48: 8.2. Sulfur Cycle Present Step49: 8.3. Nutrients Present Step50: 8.4. Nitrous Species If N Step51: 8.5. Nitrous Processes If N Step52: 9. Tracers --&gt; Ecosystem Step53: 9.2. Upper Trophic Levels Treatment Step54: 10. Tracers --&gt; Ecosystem --&gt; Phytoplankton Step55: 10.2. Pft Step56: 10.3. Size Classes Step57: 11. Tracers --&gt; Ecosystem --&gt; Zooplankton Step58: 11.2. Size Classes Step59: 12. Tracers --&gt; Disolved Organic Matter Step60: 12.2. Lability Step61: 13. Tracers --&gt; Particules Step62: 13.2. Types If Prognostic Step63: 13.3. Size If Prognostic Step64: 13.4. Size If Discrete Step65: 13.5. Sinking Speed If Prognostic Step66: 14. Tracers --&gt; Dic Alkalinity Step67: 14.2. Abiotic Carbon Step68: 14.3. Alkalinity
13,647
<ASSISTANT_TASK:> Python Code: # Keep the original SGD sgd_orig = sgd.copy() # Find overlapping genes in park's data loci=[] for i in range(len(sgd)-10): for j in range(1,10): if sgd.ix[i,'max'] >= sgd.ix[i+j,'min_cassette'] and sgd.ix[i,'chromosome'] == sgd.ix[i+j,'chromosome']: loci.append(sgd.iloc[i+j].name) if sgd.ix[i,'max_cassette'] >= sgd.ix[i+j,'min'] and sgd.ix[i,'chromosome'] == sgd.ix[i+j,'chromosome']: loci.append(sgd.iloc[i].name) for i in set(loci): sgd.drop(i, axis=0, inplace=True) sgd.count() # Sanity check of the dataset filtering sgd_orig.count() overlap_index = set(sgd_orig.index) - set(sgd.index) overlap_sgd = sgd_orig[sgd_orig.index.isin(overlap_index)] ''' List all wigs (already joined... Not all dulicates) and convert wig to wiggle objects and run meta_genomics and log ''' Gcn4_ChEC = set([i for i in glob.glob('./data/SG_gcn4/*')]) PolII = set([i for i in glob.glob('./data/meta_genomics/*')]) for i in Gcn4_ChEC: name = i[15:-4] print name vars()[name] = pd.read_csv(i) vars()[name].set_index(vars()[name].columns[0], inplace=True) vars()[name].index.rename('position', inplace=True) for i in PolII: name = i[21:-4] print name vars()[name] = pd.read_csv(i) vars()[name].set_index(vars()[name].columns[0], inplace=True) vars()[name].index.rename('position', inplace=True) # lastly one ChIP data name = 'AE4_ATGTCA' vars()[name] = pd.read_csv('./data/AE_ChIP/AE4_ATGTCA_Sc__meta_genomic.csv') vars()[name].set_index(vars()[name].columns[0], inplace=True) vars()[name].index.rename('position', inplace=True) ChEC_norm = SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.sum(axis=1)/SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.shape[1] ChIP_norm = AE4_ATGTCA.sum(axis=1) / AE4_ATGTCA.shape[1] ChIP_ChEC_gcn4 = pd.concat([ChEC_norm, ChIP_norm], axis=1) ChIP_ChEC_gcn4.columns = ['ChEC_norm', 'ChIP_norm'] # Plot of meta-analysis of ChEC and ChIP ChIP_ChEC_gcn4.iloc[1:].plot(lw=3) plt.plot([0,0],[0,0.3], lw=3, ls="--") plt.ylim(0,0.25) plt.box(on=None) plt.grid() plt.xlabel('position', fontsize=14) plt.ylabel('norm. 
counts', fontsize=14) plt.legend()#loc='center left', bbox_to_anchor=(1, 0.5)) # Plot (scatter) correlation between ChEC and ChIP a = pd.DataFrame(SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.sum(axis=0)) a.columns = ['ChEC'] b = pd.DataFrame(AE4_ATGTCA.sum(axis=0)) b.columns=['ChIP'] c = pd.concat([a, b], axis=1) c.plot.scatter(x='ChEC', y='ChIP', figsize=(6,6)) plt.title('Whole set of genes', fontsize=14); plt.xlabel('ChEC', fontsize=14) plt.ylabel('ChIP', fontsize=14) txt = 'R = ' + str(c.corr().values[1][0].round(4)) + '\nn = ' + str(c.shape[0]) plt.text(850,1050,txt, fontsize=14) mE, mI = c.ChEC.mean(), c.ChIP.mean() sE, sI = c.ChEC.std(), c.ChIP.std() c['zChEC'] = c['ChEC'].apply(lambda x: (x - mE) / sE) c['zChIP'] = (c['ChIP'] - mI) / sI #drop NaNs c.dropna(inplace=True) # subset to work with towards defining the points that show correlation c_box = c[['zChEC','zChIP']] # add 0.01 to all points to avoid dealing with zeroes c_box = c_box.apply(lambda x: x+0.001) # divide them to see which points correlate c_box['zcorr'] = c['zChEC'] / c['zChIP'] # Searching the data points that show best correlation c_fit = c_box[(c_box['zcorr']<1.5) & (c_box['zcorr']>0.5)] # plot a scatter plot with best fitting c_fit.plot.scatter(x='zChEC', y='zChIP', figsize=(6,6)); # add all points with some alpha and green color plt.scatter(x=c_box.zChEC.values, y=c_box.zChIP.values, color='green', alpha=0.2) plt.xlabel('transf ChEC', fontsize=14) plt.ylabel('transf ChIP', fontsize=14) plt.title('Subset of genes with good correlation between ChEC and ChIP', fontsize=14); #plt.box(on=None); m,b = np.polyfit(c_fit.zChEC.values, c_fit.zChIP.values,1); x=[-1,5] plt.plot(x, m*x+b, color='red'); plt.xlim(-1,5); plt.ylim(-1,5); texto = 'R = ' + str(c_fit[['zChEC','zChIP']].corr().values[0][1].round(3)) \ + '\nn = ' + str(c_fit.shape[0]) plt.text(3,0,texto, fontsize=14); ChEC_ChIP_good_R = set(c_fit.index) #just to simplify my life I rename these df a = SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.copy() a = a.loc[:, a.columns.isin(ChEC_ChIP_good_R)] b = AE4_ATGTCA.copy() b = AE4_ATGTCA.loc[:,AE4_ATGTCA.columns.isin(ChEC_ChIP_good_R)] ChEC_norm = a.sum(axis=1)/a.shape[1] ChIP_norm = b.sum(axis=1) / b.shape[1] ChIP_ChEC_gcn4 = pd.concat([ChEC_norm, ChIP_norm], axis=1) ChIP_ChEC_gcn4.columns = ['ChEC_norm', 'ChIP_norm'] # Plot of meta-analysis of ChEC and ChIP ChIP_ChEC_gcn4.iloc[1:].plot(lw=3) plt.plot([0,0],[0,0.3], lw=3, ls="--") plt.ylim(0,0.25) plt.box(on=None) plt.grid() plt.xlabel('position', fontsize=14) plt.ylabel('norm. counts', fontsize=14) plt.legend()#loc='center left', bbox_to_anchor=(1, 0.5))''' # Abrir los wig y ver quantity and quality (peaks or not?) de senal en telomeros #http://www.yeastgenome.org/contig/Chromosome_I/overview #http://www.yeastgenome.org/contig/Chromosome_II/overview #etc. Ver que los telomeros no son tan largos como dice ahi... # Otra forma es verificar que cada senal sea en efecto un pico y no random signal. #ChEC_telomer_test = lib.wiggle('./') # Load the Free MNase wig into wiggle object Free_MN = pd.read_csv('./data/Mediator/FreeMNase_5m__meta_genomic.csv') Free_MN.set_index(Free_MN.columns[0], inplace=True) Free_MN.index.rename('position', inplace=True) # Let's start by observing distribution and characteristics of Free MNase. 
sns.boxplot(data=Free_MN.sum(axis=0)) display(Free_MN.sum(axis=0).describe()) # Here I can spot some genes with weird ammounts of signal in the Free MNase #display(Free_MN.sum(axis=0))#.sort_values()) # So then I can plot signal form Free MNase together with signal from Gcn4 for the same gene a = Free_MN.sum(axis=0) plt.figure() Free_MN['YIR011C'].dropna().plot(label='free MNase') SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic['YIR011C'].dropna().plot(label='plus SM', lw=3) plt.box(on=None) plt.xlabel('position', fontsize=14) plt.ylabel('counts', fontsize=14) plt.plot([0,0],[0,3], ls="--") plt.legend() plt.title('YIR011C') # Normalize gcn4 signal by free MNase gcn4_ChEC_p = SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic / Free_MN gcn4_ChEC_m = s64_G4MNmSM_2_100_ends__meta_genomic / Free_MN # Compute the +SM / -SM gcn4_ChEC_pm = gcn4_ChEC_p / gcn4_ChEC_m gcn4_ChEC_pm.fillna(0, inplace=True) # Compute the "per gene" counts a = gcn4_ChEC_pm.sum(axis=1) / gcn4_ChEC_pm.shape[1] b = gcn4_ChEC_m.sum(axis=1) / gcn4_ChEC_m.shape[1] c = gcn4_ChEC_p.sum(axis=1) / gcn4_ChEC_p.shape[1] # Incorporate the raw signal "per gene" d = SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.sum(axis=1)/SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.shape[1] d.fillna(0, inplace=True) # COncatenate all data towards a plot e = pd.concat([a,b,c, d], axis=1) e.columns = ['plus/no SM', 'no SM', 'plus SM', 'raw signal'] # Finally plot it e.plot(lw=0.5) plt.box(on=None) plt.grid() plt.xlabel('position', fontsize=14) plt.ylabel('norm. counts', fontsize=14) plt.title('ChEC signal normalized by Free MNase') plt.plot([0,0],[0,0.25], lw=2, ls="--") # concatenate the dfs on axis 0 to compare their statistical parameters a = gcn4_ChEC_pm.sum(axis=0) b = gcn4_ChEC_m.sum(axis=0) c = gcn4_ChEC_p.sum(axis=0) d = SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic / s64_G4MNmSM_2_100_ends__meta_genomic d = d.sum(axis=0) e = pd.concat([a,b,c,d], axis=1) e.columns = ['plus/no SM', 'no SM', 'plus SM', 'raw signal (plus/no SM)'] e.fillna(0, inplace=True) '''from scipy.stats import zscore f = e.apply(zscore) ''' # Check distribution of new data compared to original raw data plt.figure() sns.boxplot(data=e) plt.box(on=None) plt.grid() plt.ylim(-1,20) a1, a2 = pd.DataFrame(e[e>=1.1].count()), pd.DataFrame(e[e<=0.9].count()) a3 = pd.DataFrame(e[(e>0.9)&(e<1.1)].count()) a4 = pd.concat([a1,a2,a3], axis=1) a4.columns = ['>1.1', '<0.9', '~1'] display(a4) #plt.title('norm. distributions') plt.figure(figsize=(5,5)) lista2 = ['del_gcn4_SM__meta_genomic','SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic','WT_Gcn4_SM__meta_genomic',\ 'del_gcn4_DMSO__meta_genomic','WT_Gcn4_DMSO__meta_genomic'] for name in lista2: df = vars()[name].iloc[1:]/vars()[name].iloc[1:].shape[1] df.sum(axis=1).plot(label=name, lw=3) plt.plot([0,0],[0,0.8], lw=3, ls="--") plt.ylim(0,0.8) plt.box(on=None) plt.grid() plt.xlabel('position', fontsize=14) plt.ylabel('norm. counts', fontsize=14) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) #plt.savefig('./PolII_ChIP_vs_ChEC-seq') # Here I bring both DMSO and SM to zero so I can compate their slopes a = del_gcn4_DMSO__meta_genomic.iloc[1:].sum(axis=1) / del_gcn4_DMSO__meta_genomic.shape[1] b = del_gcn4_SM__meta_genomic.iloc[1:].sum(axis=1) / del_gcn4_SM__meta_genomic.shape[1] ma, mb = a.min(),b.min() a, b = a-ma, b-mb # plot it plt.figure(figsize=(6,6)) a.plot(label='DMSO', lw=3) b.plot(label='SM', lw=3) plt.plot([0,0],[0,0.8], lw=3, ls="--") plt.ylim(-0.05,0.5) plt.box(on=None) plt.grid() plt.xlabel('position', fontsize=14) plt.ylabel('norm. 
counts', fontsize=14) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # Let's make some space in the ram del a,b # Will work with these files here P2_gcn4_lista = ['WT_Gcn4_SM__meta_genomic', 'del_gcn4_SM__meta_genomic', \ 'del_gcn4_DMSO__meta_genomic', 'WT_Gcn4_DMSO__meta_genomic'] # and will merge them into this df P2_gcn4 = pd.DataFrame([]) ''' First check position-wise what are the std and plot it to see if INDEED, at the minimum position there is notmuch of a change ''' def get_errors(df, vals, stdv): ind = df.index.values v = df[vals].values s = df[stdv].values m = [] for i in range(0, len(v), 200): plt.plot([ind[i],ind[i]],[v[i]-s[i],v[i]+s[i]], lw=3) plt.figure() get_errors(tmp.iloc[1:], 'suma', 'stdv') #WT_Gcn4_SM__meta_genomic.plot() '''for name in P2_gcn4_lista: tmp = vars()[name].copy() tmp['suma'] = tmp.iloc[1:].sum(axis=1)/1000 tmp['stdv'] = tmp.iloc[1:].std(axis=1) ''' plt.legend() ''' P2_gcn4 = pd.concat([P2_gcn4,df], axis=1) # Feature engineering P2_gcn4['WT_SM/DMSO'] = P2_gcn4.WT_Gcn4_SM / P2_gcn4.WT_Gcn4_DMSO P2_gcn4['del_SM/DMSO'] = P2_gcn4.del_gcn4_SM / P2_gcn4.del_gcn4_DMSO P2_gcn4['WT/del'] = P2_gcn4['WT_SM/DMSO'] / P2_gcn4['del_SM/DMSO'] P2_gcn4['log2-WTn/deln'] = np.log2(P2_gcn4['WT_SM/DMSO'] / P2_gcn4['del_SM/DMSO']) mn, sd = P2_gcn4['log2-WTn/deln'].mean(), P2_gcn4['log2-WTn/deln'].std() P2_gcn4['zscore_2log'] = (P2_gcn4['log2-WTn/deln']-mn) / sd ''' # cleaning NaNs #P2_gcn4.fillna(10, inplace=True) #P2_gcn4[['WT_SM/DMSO','del_SM/DMSO','log2-WTn/deln']].hist(bins=100) #plt.figure(figsize=(5,5)) #sns.distplot(P2_gcn4['log2-WTn/deln'], hist=False, rug=True, kde_kws={"linewidth":2}); '''plt.figure(figsize=(5,5)) sns.boxplot(data=P2_gcn4[['WT_SM/DMSO','del_SM/DMSO','WT/del']]) #,'log2-WTn/deln']]) #,'zscore_2log']]) #plt.ylim(-2.5,4) plt.box(on=None) plt.grid() #plt.xlim(0,4000)''' P2_gcn4_positives = P2_gcn4.loc[P2_gcn4['log2-WTn/deln']>0,'log2-WTn/deln'] P2_gcn4_negatives = P2_gcn4.loc[P2_gcn4['log2-WTn/deln']<0,'log2-WTn/deln'] gcn4_ChEC_lista = ['s64_G4MNmSM_2_100_ends__meta_genomic', 'SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic'] ChEC_gcn4 = pd.DataFrame([]) for name in gcn4_ChEC_lista: vars()[name].set_index(vars()[name].columns[0], inplace=True) vars()[name].index.rename('position', inplace=True) vars()[name].fillna(0, inplace=True) df = pd.DataFrame(vars()[name].iloc[1:500].sum(axis=0)) df.columns = [name[:-14]] ChEC_gcn4 = pd.concat([ChEC_gcn4,df], axis=1) ChEC_gcn4.columns = ['DMSO','SM'] ChEC_gcn4['SM/DMSO'] = ChEC_gcn4.DMSO / ChEC_gcn4.SM ChEC_gcn4['log2_SM/DMSO'] = np.log2(ChEC_gcn4['SM/DMSO']) ChEC_gcn4_B = ChEC_gcn4.copy() ChEC_gcn4_B.dropna(inplace=True) ChEC_gcn4.replace(np.inf,10, inplace=True) ChEC_gcn4.replace(-np.inf,-10, inplace=True) # to fill from describe() values different categories ChEC_gcn4['present_in_DMSO'] = ChEC_gcn4.DMSO > 0 ChEC_gcn4['goes_up_with_SM'] = ChEC_gcn4['SM/DMSO'] > 1.1699 ChEC_gcn4['goes_down_with_SM'] = ChEC_gcn4['SM/DMSO'] < 0.686 ChEC_gcn4.describe() import venn # Cuales ChEC estan presentes con DMSO y cuales en SM g0 = set(sgd.index) g1 = set(ChEC_gcn4.loc[ChEC_gcn4['present_in_DMSO'], 'present_in_DMSO'].index) g2 = set(ChEC_gcn4.loc[ChEC_gcn4['goes_up_with_SM'], 'goes_up_with_SM'].index) g3 = set(ChEC_gcn4.loc[ChEC_gcn4['goes_down_with_SM'],'goes_down_with_SM'].index) labels = venn.get_labels([g0, g1, g2, g3], fill=['number']) fig, ax = venn.venn4(labels, names=['All genes', 'gcn4 ChEC signal in DMSO', 'gcn4 ChEC signal goes up in SM',\ 'gcn4 ChEC signal goes down in SM']) fig.show() totals = pd.DataFrame([]) # 
250 es iloc[0]!! for i in [[0,240],[250,350],[350,450],[450,650]]: name = 'P2_del_gcn4' + str([j-250 for j in i]) tmp = pd.DataFrame(del_gcn4_SM__meta_genomic.iloc[i[0]:i[1]].sum(axis=0)) tmp.columns = [name] totals = pd.concat([totals, tmp], axis=1) ChEC_gcn4 = pd.DataFrame(SG_Sc_64_G4MNpSM_2_100_ends__meta_genomic.sum(axis=0)) ChEC_gcn4.columns = ['ChEC_gcn4'] P2_del_gcn4_total = pd.DataFrame(del_gcn4_SM__meta_genomic.sum(axis=0)) P2_del_gcn4_total.columns = ['P2_del_gcn4_total'] P2_gcn4_total = pd.DataFrame(WT_Gcn4_SM__meta_genomic.sum(axis=0)) P2_gcn4_total.columns = ['P2_gcn4_total'] totals = pd.concat([totals,ChEC_gcn4, P2_del_gcn4_total, P2_gcn4_total], axis=1) # Include Rhee's data on tata and taf1 '''Rhee = pd.read_excel('../../Downloads/nature10799-s2/Rhee_SuppData1_revised.xls') Rhee = Rhee[['gene_id','gene_class_1','gene_class_2','mismatch']] Rhee.set_index('gene_id', inplace=True) Rhee['mismatch'] = Rhee.mismatch.str[:1]''' #totals2 = pd.concat([totals, Rhee], axis=1) #print totals2 #Rhee = Rhee[Rhee.index.duplicated(keep=False)==False] # Adicionar Rhee a totals totals2 = pd.concat([totals,Rhee], axis=1, join='inner') plt.figure(figsize=(20,5)); sns.boxplot(data=totals) plt.ylim(0,500); #df['avg'] = df.sum(axis=1)/len(df) #m = df.avg.values #y = df.index #plt.fill_between(y,m, alpha=0.3, label='joya') #plt.xlim(-250,750) #plt.legend() ''' Trying bringing the minima to zero... ''' totals1 = totals - totals.min() plt.figure(); tmp = totals1[['Taf7_YPD_DMSO__meta_genomic', 'Taf7_YPD_3IAA__meta_genomic', 'Spt20_deg_GC_3IAA__meta_genomic',\ 'Spt20_deg_GC_DMSO__meta_genomic','Spt20_deg_YPD_3IAA__meta_genomic', \ '143_Spt20_deg_YPD_DMSO_B_Sc__meta_genomic']] tmp.plot(figsize=(15,15), linewidth=5, fontsize=14) plt.plot([0,0],[0,1.2],ls='--', lw=3, color='k') plt.legend() #loc='center left', bbox_to_anchor=(1.0, 0.5)); plt.xlabel('position (relative to TSS)',fontsize=14) plt.ylabel('norm counts',fontsize=14) plt.savefig('./Spt20_reads_distribution_zeroed.jpg') logs = sgd.chromosome for i in glob.glob('./*log*csv'): name = i[2:-4] vars()[name] = pd.read_csv(i) vars()[name].set_index('locus', inplace=True) vars()[name].columns = [name] logs = pd.concat([logs,vars()[name]], axis=1, join='inner') ''' Include Pugh's data about Taf1 and TATA ''' pugh = pd.read_excel('../../Rhee_SuppData1_revised.xls') pugh = pugh[['gene_id', 'gene_class_1','gene_class_2']] pugh.set_index('gene_id', inplace=True) pugh.dropna(inplace=True) #print set(pugh.gene_class_1) #print set(pugh.gene_class_2) pugh.columns = ['TATA','Taf1'] logs = pd.concat([logs,pugh], axis=1, join='inner') logs.drop('chromosome', axis=1, inplace=True) logs['Taf7'] = np.log2(logs['Taf7_YPD_3IAA__log']/logs['Taf7_YPD_DMSO__log']) logs['Spt20_GC'] = np.log2(logs['Spt20_deg_GC_3IAA__log']/logs['Spt20_deg_GC_DMSO__log']) logs['Spt20_YPD'] = np.log2(logs['Spt20_deg_YPD_3IAA__log']/logs['143_Spt20_deg_YPD_DMSO_B_Sc__log']) logs['Spt3_YPD'] = np.log2(logs['Spt3_deg_YPD_3IAA__log']/logs['Spt3_deg_YPD_DMSO__log']) logs2 = logs[['Taf7', 'Spt20_GC', 'Spt20_YPD', 'Spt3_YPD', 'Taf1', 'TATA']] logs2.to_csv('logs2.csv') logs2.boxplot(figsize=(20,20), showfliers=False, rot=90) logs2.boxplot(by='Taf1', showfliers=False, figsize=(10,10)); logs2.boxplot(by='TATA', showfliers=False, figsize=(10,10)); #ypd_3iaa = Spt20_deg_YPD_3IAA.meta_genomic(sgd_slice=sgd) #gc_3iaa = Spt20_deg_GC_3IAA.meta_genomic(sgd_slice=sgd) #gc_dmso = Spt20_deg_GC_DMSO.meta_genomic(sgd_slice=sgd) #ypd_3iaa['ypd_3iaa'] = ypd_3iaa.iloc[1:].sum(axis=1) / ypd_3iaa.shape[1] 
#gc_3iaa['gc_3iaa'] = gc_3iaa.iloc[1:].sum(axis=1) / gc_3iaa.shape[1] #gc_dmso['gc_dmso'] = gc_dmso.iloc[1:].sum(axis=1) / gc_dmso.shape[1] #gral = pd.concat([ypd_3iaa.ypd_3iaa, gc_3iaa.gc_3iaa, gc_dmso.gc_dmso], axis=1, join='inner') #ypd_3iaa_log = Spt20_deg_YPD_3IAA.wig2log(sgd_slice=sgd) #gc_3iaa_log = Spt20_deg_GC_3IAA.wig2log(sgd_slice=sgd) #gc_dmso_log = Spt20_deg_GC_DMSO.wig2log(sgd_slice=sgd) #ypd_3iaa_log.columns = ['ypd_3iaa_log'] #gc_3iaa_log.columns = ['gc_3iaa_log'] #gc_dmso_log.columns = ['gc_dmso_log'] #gral2 = pd.concat([ypd_3iaa_log, gc_3iaa_log, gc_dmso_log], axis=1, join='inner') #gral2['gc_log2'] = np.log2(gral2.gc_3iaa_log/gral2.gc_dmso_log) gral2.boxplot() plt.ylim(-1,100) gral.plot(lw=4, fontsize=14, figsize=(10,10)) taf7_ypd_dmso.plot(lw=4, fontsize=14, figsize=(10,10)) plt.plot([0,0],[0,1.2], ls='--', lw=3, c='black') plt.ylabel('normalized avg. counts', fontsize=14) plt.ylim(0,1.2) plt.savefig('no_diff_first_100bp_Spt20.jpg') #taf7_meta = vars()['compare/Taf7_YPD_DMSO'].meta_genomic(sgd_slice=sgd) #taf7_ypd_dmso = taf7_meta.iloc[1:].sum(axis=1) / taf7_meta.shape[1] taf7_ypd_dmso.plot() for i in [Spt20_deg_GC_3IAA_metGen, Spt20_deg_YPD_3IAA_metGen, Spt20_deg_GC_DMSO_metGen]: i.set_index(i.columns[0], inplace=True) i['totals'] = i.sum(axis=1) / i.shape[1] #WT_Taf1_metGen.set_index(WT_Taf1_metGen.columns[0], inplace=True) #WT_Taf1_metGen['totals'] = WT_Taf1_metGen.sum(axis=1) #WT_Taf1_metGen['totals'] = WT_Taf1_metGen.totals / WT_Taf1_metGen.shape[1] # Plot them... plt.figure(figsize=(10,10)) ax = plt.subplot(111) ax.plot([0,0],[0,1], ls='--', lw=3, c='black') for i in ['WT_Gcn4_DMSO_metGen', 'WT_Gcn4_SM_metGen', 'Spt20_deg_GC_3IAA_metGen', 'Spt20_deg_YPD_3IAA_metGen', 'Spt20_deg_GC_DMSO_metGen']: vars()[i].totals.plot(label=i, lw=4, fontsize=14) #plt.fill_between(m84_dmso_IN.index.values, m84_dmso_IN.values/3, alpha=0.3, label='Input') #plt.fill_between(m84_dmso_IP.index.values, m84_dmso_IP.values/3, alpha=0.3, label='Input2') ax.legend(loc=2, fontsize=14) #plt.ylim(0.2,1.5) plt.xlim(-200,750) plt.ylabel('normalized avg. 
counts', fontsize=14) plt.xlabel(Spt20_deg_GC_3IAA_metGen.index.name, fontsize=14) # Hide the right and top spines ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.grid(True, which='both') n=0 for i in glob.glob('./wig/*Spt20*wig'): if n>=3: continue name = i[6:-4] vars()[name] = lib.wiggle(i) name2 = name + '_metaGen' vars()[name2] = vars()[name].meta_genomic(sgd) n+=1 vars()['144_Spt20_deg_YPD_3IAA_B_Sc'].wig.head(5) list_A = [i for i in glob.glob('./wig/*_A_*.wig')] list_B = [i for i in glob.glob('./wig/*_B_*.wig')] list_all = [i for i in glob.glob('./wig/*.wig')] diff = set(list_all) - set(list_A) -set(list_B) for i in diff: name = i[6:-4] vars()[name] = lib.wiggle(i) name_log = name + "_log2" vars()[name_log] = vars()[name].wig2log() out_name = name + '.csv' vars()[name_log].to_csv(out_name) file_solit = './wig/143_Spt20_deg_YPD_DMSO_B_Sc.wig' name_solit = file_solit[6:-4] vars()[name_solit] = lib.wiggle(file_solit) name_log = name_solit + "_log2" vars()[name_log] = vars()[name_solit].wig2log() out_name = name_solit + '.csv' vars()[name_log].to_csv(out_name) lista=[] for i in glob.glob('./*csv'): if i=='./lwarfield.csv' or i=='./lwarfield_v3.csv' or i=='./clusters.csv': continue name = i[2:-4] lista.append(name) vars()[name] = pd.read_csv(i) vars()[name].set_index('locus', inplace=True) vars()[name].columns = [name] print name Spt20_deg_YPD = pd.concat([vars()['143_Spt20_deg_YPD_DMSO_B_Sc'],Spt20_deg_YPD_3IAA], axis=1, join='inner') Spt20_deg_YPD.columns = ['DMSO', '3IAA'] Spt20_deg_YPD['Spt20_deg_YPD_log2'] = np.log2(Spt20_deg_YPD['3IAA']/Spt20_deg_YPD['DMSO']) Spt20_deg_GC = pd.concat([Spt20_deg_GC_DMSO, Spt20_deg_GC_3IAA], axis=1, join='inner') Spt20_deg_GC.columns = ['DMSO', '3IAA'] Spt20_deg_GC['Spt20_deg_GC_log2'] = np.log2(Spt20_deg_GC['3IAA']/Spt20_deg_GC['DMSO']) Spt3_deg_YPD = pd.concat([Spt3_deg_YPD_DMSO, Spt3_deg_YPD_3IAA], axis=1, join='inner') Spt3_deg_YPD.columns = ['DMSO', '3IAA'] Spt3_deg_YPD['Spt3_deg_YPD_log2'] = np.log2(Spt3_deg_YPD['3IAA']/Spt3_deg_YPD['DMSO']) WT_Gcn4 = pd.concat([WT_Gcn4_DMSO, WT_Gcn4_SM], axis=1, join='inner') WT_Gcn4.columns = ['DMSO', 'SM'] WT_Gcn4['WT_Gcn4_log2'] = np.log2(WT_Gcn4['SM']/WT_Gcn4['DMSO']) del_Gcn4 = pd.concat([del_gcn4_DMSO, del_gcn4_SM], axis=1, join='inner') del_Gcn4.columns = ['DMSO','SM'] del_Gcn4['del_Gcn4_log2'] = np.log2(del_Gcn4['SM']/del_Gcn4['DMSO']) gral = pd.concat([Spt20_deg_YPD['Spt20_deg_YPD_log2'], Spt20_deg_GC['Spt20_deg_GC_log2'], \ Spt3_deg_YPD['Spt3_deg_YPD_log2'], WT_Gcn4['WT_Gcn4_log2'], del_Gcn4['del_Gcn4_log2']], \ axis=1, join='inner') # There were some -inf that must be taken off to downstream calculations gral.drop(gral[gral[gral.columns[0]]==-np.inf].index[0], axis=0, inplace=True) gral.dropna(inplace=True) pugh = pd.read_excel('../../Rhee_SuppData1_revised.xls') pugh = pugh[['gene_id', 'gene_class_1','gene_class_2']] pugh.set_index('gene_id', inplace=True) pugh.dropna(inplace=True) print set(pugh.gene_class_1) print set(pugh.gene_class_2) pugh.columns = ['TATA','Taf1'] lista = ['Spt20_deg_YPD','Spt20_deg_GC','Spt3_deg_YPD','WT_Gcn4','del_Gcn4'] for i in lista: name = 'pugh_' + i vars()[name] = pd.concat([vars()[i], pugh], join='inner', axis=1) log2_pugh = pd.concat([Spt20_deg_YPD['Spt20_deg_YPD_log2'],Spt20_deg_GC['Spt20_deg_GC_log2'],\ Spt3_deg_YPD['Spt3_deg_YPD_log2'],WT_Gcn4['WT_Gcn4_log2'],\ del_Gcn4['del_Gcn4_log2'], pugh], join='inner', axis=1) log2_pugh.boxplot(by='Taf1', figsize=(10,10)); plt.savefig('boxplots_Taf1.jpg') log2_pugh.boxplot(by='TATA', 
figsize=(10,10)); plt.ylim(-2,1.5) plt.savefig('boxplots_TATA.jpg') for i in gral.columns: name = i + 'z_score' m, s = np.mean(gral[i]), np.std(gral[i]) gral[name] = (gral[i] - m) / s '''wt_mean, taf7_mean = np.mean(wt_log2), np.mean(taf7_log2) wt_std, taf7_std = wt_log2.std(), taf7_log2.std() wt_zscores = (wt_log2-wt_mean) / wt_std taf7_zscores = (taf7_log2-taf7_mean) / taf7_std plt.figure() wt_log2.hist(bins=100) plt.xlim(-6,6); plt.figure() wt_zscores.hist(bins=100) plt.xlim(-6,6);''' gral.hist(bins=50, figsize=(20,20)); from sklearn.cluster import KMeans # Convert DataFrame to matrix mat = gral.ix[:,5:].as_matrix() '''from scipy.spatial.distance import cdist, pdist k_range = range(1,50) k_means_obj = [KMeans(n_clusters=k).fit(mat) for k in k_range] centroids = [X.cluster_centers_ for X in k_means_obj] k_euclid = [cdist(mat, cen, 'euclidean') for cen in centroids] dist = [np.min(ke, axis=1) for ke in k_euclid] sumaQuaDist = [sum(d**2) for d in dist] tss = sum(pdist(mat)**2)/mat.shape[0] bss = tss - sumaQuaDist''' plt.plot(bss) plt.ylabel('% variance') plt.xlabel('# clusters') plt.plot([5,5],[0,20000], lw=3, ls='--') plt.xticks(np.arange(0, 50, 5)); # Using sklearn km = KMeans(n_clusters=5) km.fit(mat) # Get cluster assignment labels labels = km.labels_ # Format results as a DataFrame results = pd.DataFrame([gral.index,labels]).T results.columns = ['locus','group'] results.set_index('locus', inplace=True) cluster = pd.concat([gral.ix[:,5:],results],axis=1,join='inner') cluster.sort_values('group', inplace=True) cluster = cluster*100 cluster = cluster.astype(int) fig, ax = plt.subplots(1,1, figsize=(5,10)) ax.pcolor(cluster, cmap='Greens', label=cluster.columns); ax.set_xticklabels(cluster.columns, rotation='vertical', fontsize=15, ha='left'); # check max and min values print np.max(cluster.max()), np.min(cluster.min()) for i in ['taf7_dmso', 'taf7_3iaa', 'wt_dmso', 'wt_3iaa']: vars()[i].columns = [i] wt_taf7 = pd.concat([taf7_dmso, taf7_3iaa, wt_dmso, wt_3iaa], join='inner', axis=1) #wt_taf7.to_csv('./wt_taf7__dmso_3iaa.csv') wt_taf7.dropna(inplace=True) wt_taf7.corr() log2s = pd.concat([wt_log2, taf7_log2], join='inner', axis=1) log2s.columns = ['wt','taf7'] log2s.boxplot(); pugh = pd.read_excel('../Rhee_SuppData1_revised.xls') pugh = pugh[['gene_id', 'gene_class_1','gene_class_2']] pugh.set_index('gene_id', inplace=True) pugh.dropna(inplace=True) print set(pugh.gene_class_1) print set(pugh.gene_class_2) complete = pd.concat([log2s, pugh], join='inner', axis=1) #complete['taf1'] = pd.Categorical(complete.gene_class_2) #complete['tata'] = pd.Categorical(complete.gene_class_1) #complete.drop(['taf1'], axis=1, inplace=True) complete.columns = ['wt','taf7','tata','taf1'] complete.boxplot(by='taf1'); complete.boxplot(by='tata'); df = pd.concat([log2s, wt_dmso], join='inner', axis=1) df.dropna(inplace=True) #log2s.dropna(inplace=True) #plt.figure(figsize=(5,5)) plt.hexbin(df.wt_dmso, df.taf7, bins=300, cmap='hot_r'); #plt.ylim(-4,2) plt.xlim(-2,500); plt.title('log2 vs counts', fontsize=20); '''plt.figure() plt.hist2d(log2s.taf7,log2s.wt,bins=100) plt.title('log2 for both', fontsize=20);''' <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load wiggle objects of sequenced gcn4 Pol_II and Mediator data Step2: I compare ChIP-seq and ChEC-seq Step3: <font color=green>Interesting...</font> Their meta-analyses match perfectly but the correlation is a disaster... Let's see which are the genes that DO correlate. Step4: So I keep a set of genes that should be responsible for the good match of the plot <font color=green>count=f(position)</font> Step5: It turns out that the lack of correlation comes from beyond the TSSs? Step6: Check if the signal comes from a peak or it's just noisy reads... It looks like the >25% quantile is a fair limit between signal and noise. Step7: Maybe I should divide every ChEC signal by the Free MNase Step8: What is the correlation between gcn4 data and Pol-II ChIP Step9: There are apparently two major differences with del gcn4. The most visible is that ALL signal goes down upon addition of SM, even the minimum value @~-100. The other is that the slope of the increase changes downstream of the TSS upon addition of SM. Following is a plot to make the difference in the slope clear. Step10: Some conclusions from the previous plot Step11: Pol_II data df --> <font color=red>P2_gcn4_positives</font> Step12: Gcn4_ChEC df --> <font color=red>ChEC_gcn4</font> Step13: Categorical casting of genes on presence or absence in SM or even DMSO; Venn diagrams Step14: <font color=blue>I will try to correlate the ... Step15: <font color=red> CHECK THAT NUCLEOSOME IS THERE!!! CHECK SINGLE GENES WHERE THERE IS NO GENE 400 UPSTREAM!!! Step16: Boxplots Step17: Plot tss-100bp distribution for all genes in filtered list Step18: from the manuscript Step19: Will have to run this again!! Step20: from the manuscript Step21: Download Pugh's data and include Taf1 and TATA columns to each df Step22: Merge dfs with Pugh's data. Later, mix all log2s with Pugh's data Step23: <font color=red>Calculate z-scores... For normalizing the distribution? The distribution is already normal...</font> Step24: Combine dfs, export and check the correlation chart Step25: Decide the optimal number of clusters (Elbow) Step26: Distribute data through the k clusters decided above Step27: Plot the results to see how Srinivas clusters fit to the new data Step28: Taf1 data from Step29: <font color=red>What are the Control Pol II ChIP used by Srinivas?</font>
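Editor's sketch (not from the original notebook): Step25 decides the number of clusters with the elbow heuristic, but the corresponding computation only appears as commented-out code in the clustering cells. A minimal version of that heuristic, assuming `mat` is the matrix of z-scored log2 ratios built earlier, and using KMeans' inertia_ (within-cluster sum of squares) instead of the between-cluster sum used in the commented block:

from sklearn.cluster import KMeans

def elbow_curve(mat, k_max=15):
    # within-cluster sum of squares for k = 1 .. k_max; pick k where the curve bends
    wcss = []
    for k in range(1, k_max + 1):
        km = KMeans(n_clusters=k, random_state=0).fit(mat)
        wcss.append(km.inertia_)
    return wcss

# wcss = elbow_curve(mat)
# plt.plot(range(1, len(wcss) + 1), wcss)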
13,648
<ASSISTANT_TASK:> Python Code: import pandas as pd iris = pd.read_csv('../datasets/iris.csv') # Print some info and statistics about the dataset iris.info() iris.Class.unique() iris.describe() # Encode the classes to numeric values class_encodings = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2} iris.Class = iris.Class.map(class_encodings) iris.Class.unique() # Create a scatterplot for sepal length and sepal width import matplotlib.pyplot as plt %matplotlib inline sl = iris.Sepal_length sw = iris.Sepal_width # Create a scatterplot of these two properties using plt.scatter() # Assign different colors to each data point according to the class it belongs to plt.scatter(sl[iris.Class == 0], sw[iris.Class == 0], color='red') plt.scatter(sl[iris.Class == 1], sw[iris.Class == 1], color='green') plt.scatter(sl[iris.Class == 2], sw[iris.Class == 2], color='blue') # Specify labels for the X and Y axis plt.xlabel('Sepal Length') plt.ylabel('Sepal Width') # Show graph plt.show() # Create a scatterplot for petal length and petal width pl = iris.Petal_length pw = iris.Petal_width # Create a scatterplot of these two properties using plt.scatter() # Assign different colors to each data point according to the class it belongs to plt.scatter(pl[iris.Class == 0], pw[iris.Class == 0], color='red') plt.scatter(pl[iris.Class == 1], pw[iris.Class == 1], color='green') plt.scatter(pl[iris.Class == 2], pw[iris.Class == 2], color='blue') # Specify labels for the X and Y axis plt.xlabel('Petal Length') plt.ylabel('Petal Width') # Show graph plt.show() X = iris.drop('Class', axis=1) t = iris.Class.values # Use sklean's train_test_plit() method to split our data into two sets. from sklearn.cross_validation import train_test_split Xtr, Xts, ytr, yts = train_test_split(X, t) # Use the training set to build a LogisticRegression model from sklearn.linear_model import LogisticRegression lr = LogisticRegression().fit(Xtr, ytr) # Fit a logistic regression model # Use the LogisticRegression's score() method to assess the model accuracy lr.score(Xtr, ytr) from sklearn.metrics import confusion_matrix # Use scikit-learn's confusion_matrix to understand which classes were misclassified. # See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html confusion_matrix(ytr, lr.predict(Xtr)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Visualizing data Step2: Classifying species
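Editor's note: the notebook above scores the logistic regression only on its training split. A small follow-up sketch (not part of the original) that evaluates the same fitted model `lr` on the held-out split `Xts, yts` and reads per-class recall off the confusion-matrix diagonal:

import numpy as np
from sklearn.metrics import confusion_matrix

test_accuracy = lr.score(Xts, yts)            # accuracy on the held-out split
cm = confusion_matrix(yts, lr.predict(Xts))   # rows = true classes, columns = predictions
per_class_recall = np.diag(cm) / cm.sum(axis=1)
print(test_accuracy, per_class_recall)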
13,649
<ASSISTANT_TASK:> Python Code: graph = { "A": {"B": (12, 3), "C":(25, 6), }, "B": {"C": (11, 2), }, "C": {"A": (30, 6), "D": (16, 4), }, "D": {"A": (12, 2), }, } # decompose graph into separate cost and time graphs graph_c = {key: {key2: -val2[0] for (key2, val2) in value.items()} for (key, value) in graph.items()} graph_c graph_t = {key: {key2: val2[1] for (key2, val2) in value.items()} for (key, value) in graph.items()} graph_t from itertools import permutations def all_vertices(graph): Return a set of all vertices in a graph. graph -- a directed graph. vertices = set() for v in graph.keys(): vertices.add(v) for u in graph[v].keys(): vertices.add(u) return vertices def is_edge(graph, tail, head): Return True if the edge (tail)->(head) is present in a graph. graph -- a directed graph. tail -- a vertex. head -- a vertex. return (tail in graph) and (head in graph[tail]) V = tuple(all_vertices(graph)) n = len(V) all_paths = [path for path_n in range(1, n + 1) for path in permutations(V, path_n) if all( is_edge(graph, tail, head) for (tail, head) in zip(path, path[1:]) ) ] all_cycles = [(*path, path[0]) for path in all_paths if is_edge(graph, path[-1], path[0])] cycles = [] cycle_sets = [] for cycle in all_cycles: edges = set(x[0]+x[1] for x in zip(cycle, cycle[1:])) if edges not in cycle_sets: cycle_sets.append(edges) cycles.append(cycle) cycles class NoShortestPathError(Exception): pass class NegativeCycleError(NoShortestPathError): def __init__(self, weight, cycle): self.weight = weight self.cycle = cycle def __str__(self): return f"Weight {self.weight}: {self.cycle}" def path_weight(path, graph): Returns the sum of the weights along a path or cycle. return sum(graph[tail][head] for (tail, head) in zip(path, path[1:])) def cost_time_ratio(cycle, graph_c, graph_t): Find cost to time ratio for a cycle. (tramp steamer problem objective) Parameters ---------- cycle : list directed path that ends where it began graph_c : dict graph with cost values graph_t : dict graph with time values Returns ------- Ratio of net cost to net travel time for a cycle. w = sum(graph_c[tail][head] for (tail, head) in zip(cycle, cycle[1:]) ) t = sum(graph_t[tail][head] for (tail, head) in zip(cycle, cycle[1:]) ) return w/t def shortest_cycle_bf(graph, cycles): Find the shortest cycle in cycles using a brute force approach. If a negative cycle exists, raise NegativeCycleError. Otherwise return shortest cycle. Parameters ---------- graph : dictionary A directed, weighted graph. cycles : list of tuples List of cycles contained in graph. Returns ------- Tuple with weight and path of the cycle with the lowest weight. for cycle in cycles: weight = path_weight(cycle, graph) if weight < 0: raise NegativeCycleError(weight, cycle) return min( (path_weight(path, graph), path) for path in cycles) def seq_search(graph, cycles, mu=100): Perform sequential search of cycles in graph using le = ce - mu * te for edge weights. If a Negative weight cycle is found, mu is updated based on this cycle and the process is repeated until no negative cycles are found by the shortest cycle step. Parameters ---------- graph : dictionary A directed, weighted graph. cycles : list of tuples List of cycles contained in graph. mu : float Initial cost to weight ratio. Expected upper bound. Returns ------- Cycle with the minimum cost to time ratio and the number of loops required to find it. 
loops = 0 # for algo comparison while True: loops += 1 # for algo comparison # Compute 'length' values for edges: l_e = c_e - mu * t_e graph_le = graph.copy() for (key, dic) in graph_le.items(): graph_le[key] = {keyy: -val[0] - mu * val[1] for (keyy, val) in dic.items()} try: # find shortest cycle based on le weights cycle_weight, W = shortest_cycle_bf(graph_le, cycles) except NegativeCycleError as error: # found negative cost cycle w.r.t. le) mu = cost_time_ratio(error.cycle, graph_c, graph_t) # print('mu ', mu, 'weight ', error.weight) else: # Found a zero cost cycle w.r.t. le. optimum = W break return optimum, loops opt, loops = seq_search(graph, cycles) print("loops: ",loops) print("Optimal path: ", opt) print("Optimal cost ratio: {:.3f}".format( cost_time_ratio(opt, graph_c, graph_t))) def binary_search(graph, cycles): Perform binary search of cycles in graph using an initial search range of -C to C with C = max(all costs in graph). Search for shortest cycle using le = ce - mu * te for the edge weights. If a negative cycle is found, set upper end of range to the current cost ratio estimate (mu) otherwise set the lower end of the range to mu. Terminate the search if the current cycle weight is less than the precision. Parameters ---------- graph : dict a directed, weighted graph. cycles : list of tuples List of cycles contained in graph.. Returns ------- optimum : tuple Cycle with the minimum cost to time ratio. loops : int Number of loops required to find minima. # define limits for starting range of search C = max([max(val.values()) for key, val in graph_c.items()]) mu_lower = C # lower limit to search for min cycle cost ratio. # Since c_i are all negative, need to reverse the signs mu_upper = -C # upper limit to search for min cycle cost ratio loops = 0 while True: loops += 1 mu = (mu_lower + mu_upper) / 2 #Compute 'length' values for edges: l_e = c_e - mu * t_e graph_le = graph.copy() for (key, dic) in graph_le.items(): graph_le[key] = {keyy: -val[0] - mu * val[1] for (keyy, val) in dic.items()} try: # Solve shortest path problem with lengths le cycle_weight, W = shortest_cycle_bf(graph_le, cycles) # print("c_weight: {:.3g}, range: {:.3g} to {:.3g}, mu: {}". # format(cycle_weight, mu_lower, mu_upper, mu)) except NegativeCycleError as error: # negative cost cycle w.r.t. le': mu_star < mu mu_upper = mu W = error.cycle cycle_weight = error.weight # print("*c_weight: {:.3g}, range: {:.3g} to {:.3g}, mu: {}". # format(error.weight, mu_lower, mu_upper, mu)) else: # mu_star > mu mu_lower = mu finally: # precision of 1/sum(t_e) suffices to solve the problem exactly precision = 1/ sum(graph_t[tail][head] for (tail, head) in zip(W, W[1:]) ) if abs(cycle_weight) < precision: # zero cost cycle, mu_star == mu optimum = W break return optimum, loops opt, loops = binary_search(graph, cycles) print("loops: ",loops) print("Optimal path: ", opt) print("Optimal cost ratio: {:.3f}".format( cost_time_ratio(opt, graph_c, graph_t))) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: I will encode the graph as a python dictionary. Each vertex is a key. The value for this key is another dictionary. This dictionary has keys for each connected vertex and values that are tuples containing profit and time for each connection. For example vertex A is connected to B with a profit of 12 and travel time of 3. To make the rest of the code clearer, I will decompose the graph into a cost graph (negative profits) and a time graph. Step4: To shorten the search, I want to find only the unique cycles in this graph. Step9: I have 4 unique cycles in this graph. 'Unique' means I eliminate cycles that follow the same path but start at a different vertex. For example, A -> C -> A is equivalent to C -> A -> C. Step11: After 3 passes the sequential algorithm finds an optimal cycle with a profit/time ratio of 4.818. Worst case time complexity for sequential search is O(|cycles|) or 4 in this case.
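A quick hand check of the 4.818 figure quoted in Step11, using the raw `graph` dictionary from the first cell: the winning cycle is A -> B -> C -> A, which collects profit 12 + 11 + 30 = 53 over travel time 3 + 2 + 6 = 11.

profit     = graph['A']['B'][0] + graph['B']['C'][0] + graph['C']['A'][0]   # 53
total_time = graph['A']['B'][1] + graph['B']['C'][1] + graph['C']['A'][1]   # 11
print(profit / total_time)   # 53 / 11 = 4.818..., matching both search methods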
13,650
<ASSISTANT_TASK:> Python Code: from sklearn.cluster import KMeans df = load_data() kmeans = KMeans(n_clusters=2) labels = kmeans.fit_predict(df[['mse']]) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
13,651
<ASSISTANT_TASK:> Python Code: import pandas as pd import json classfile_in = 'ap-aas229-test-classifications.csv' classifications = pd.read_csv(classfile_in) len(classifications) classifications subjectsfile_in = 'ap-aas229-test-subjects.csv' subjects = pd.read_csv(subjectsfile_in) workflowsfile_in = 'ap-aas229-test-workflows.csv' workflows = pd.read_csv(workflowsfile_in) subjects workflows classifications['metadata_json'] = [json.loads(q) for q in classifications.metadata] classifications['annotations_json'] = [json.loads(q) for q in classifications.annotations] classifications['subject_data_json'] = [json.loads(q) for q in classifications.subject_data] classifications['annotations_json'][1][0]['value'][1] classifications['annotations_json'][1][0]['value'][0]['x'] classifications['n_markings'] = [ len(q[0]['value']) for q in classifications.annotations_json ] iclass = classifications[classifications.workflow_version == classifications['workflow_version'].max()] len(iclass) # output markings from 31 classifications in iclass to csv clist=[] for index, c in iclass.iterrows(): if c['n_markings'] > 0: for q in c.annotations_json[0]['value']: clist.append({'classification_id':c.classification_id, 'user_name':c.user_name, 'user_id':c.user_id, 'created_at':c.created_at, 'subject_ids':c.subject_ids, 'tool':q['tool'], 'tool_label':q['tool_label'], 'x':q['x'], 'y':q['y'], 'r':q['r'], 'frame':q['frame']}) col_order=['classification_id','user_name','user_id','created_at','subject_ids', 'tool','tool_label','x','y','r','frame'] out=pd.DataFrame(clist)[col_order] out out.to_csv('ap-aas229-test-markings.csv',index_label='mark_id') import matplotlib import matplotlib.pyplot as plt %matplotlib inline # Unique Subjects usub = classifications.drop_duplicates(subset='subject_ids') len(usub) # Make Images # Select Subject Index Number subsel=12 marks = out[out.subject_ids == usub.subject_ids[subsel]] imgnm = list(json.loads(usub.subject_data[subsel]).values())[0]['IMG_COLOR'] imgpath='../ap_sampledata/' imgfile=imgpath+imgnm fig, ax = plt.subplots() im = plt.imread(imgfile) ax.imshow(im) for index, m in marks.iterrows(): if m.tool == 0: ax.add_artist(plt.Circle( (m.x, m.y), m.r, color='b', fill=False)) if m.tool == 1: ax.add_artist(plt.Circle( (m.x, m.y), m.r, color='r', fill=False)) if m.tool > 1: ax.add_artist(plt.Circle( (m.x, m.y), m.r, color='g', fill=False)) plt.axis('off') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.savefig('out_'+imgnm, dpi=120, bbox_inches='tight', pad_inches=0) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Read-in and examine content of "classifications" table. Step2: Read-in and examine content of "subjects" and "workflows" table. Step3: Expanding JSON Fields Step4: Understanding the fields of the annotations data Step5: Creating a new column based on classification data Step6: Helpful (optional) subselection of classification data Step7: Creating A New Output File Step8: Make Overlay Images
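Editor's sketch: the overlay step above pulls the image filename for one subject by hand. The same lookup can be applied to every unique subject; the helper name below is illustrative, while the 'subject_data' column and the 'IMG_COLOR' key follow the cells above.

import json

def subject_image_name(subject_data_str):
    # subject_data maps subject_id -> metadata dict; take the single entry's IMG_COLOR field
    meta = json.loads(subject_data_str)
    return list(meta.values())[0].get('IMG_COLOR')

usub = classifications.drop_duplicates(subset='subject_ids')
image_names = usub.subject_data.apply(subject_image_name)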
13,652
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error import datetime as dt from math import sqrt data = pd.read_csv('./all_stocks_5yr.csv') data.shape data.head() data.dtypes data['Date'] = pd.to_datetime(data['Date']) data.Date.min(), data.Date.max() periods = np.array_split(data.Date.unique(), 5) for yr in range(5): print(periods[yr].min(), periods[yr].max()) data.set_index(['Name', 'Date'], inplace=True) data.head() pd.isnull(data['Close']).any() symbols = list(data.index.levels[0]) symbols[:10] symbols[-10:] len(symbols) plt.gcf().set_size_inches(16, 6) plt.subplot(121) data.loc['FB'].Close.plot() plt.title('Price evolution Facebook over 5 years') plt.subplot(122) data.loc['FB'].loc[periods[4]].Close.plot() plt.title('Price evolution Facebook over the last year') plt.show() import pandas_datareader as pdr sp500_index = pdr.get_data_yahoo('^GSPC') sp500_index.head() sp500_index.index.min(), sp500_index.index.max() sp500_index = sp500_index.loc[data.index.min()[1]:data.index.max()[1],:] sp500_index['Name'] = '^GSPC' sp500_index['Date'] = sp500_index.index sp500_index.set_index(['Name', 'Date'], inplace=True) data = pd.concat([data, sp500_index]) symbols.append('^GSPC') symbols[-10:] sp500_return = data.loc['^GSPC'].Close / data.loc['^GSPC'].Close[0] fb_return = data.loc['FB'].Close / data.loc['FB'].Close[0] sp500_return.plot(label='SP500') fb_return.plot(label='FB') plt.legend() plt.title('5 year return Facebook vs S&P 500 index') plt.show() def return_risk_ratio(ticker, start, end): Return a tuple: (relative return over period, risk over the period, return/risk) # select the data for the ticker and time period data_masked = data.loc[ticker].loc[start:end] x = np.arange(data_masked.Close.size) y = np.array(data_masked.Close) # Fit a degree 1 regression line fit = np.polyfit(x, y, 1) fit_fn = np.poly1d(fit) y_regression = fit_fn(x) # Calculate Root Mean Square Error = beta = risk RMSE = sqrt(mean_squared_error(y_true = y, y_pred = y_regression)) slope = fit_fn.coeffs[0] y0_regression = fit_fn(x[0]) return_percent = slope / y0_regression * x.size * 100 # return in percent RMSE_percent = RMSE / y0_regression * 100 return (return_percent, RMSE_percent , return_percent / RMSE_percent) start_date_iso = dt.date.isoformat(pd.Timestamp(periods[4].min())) end_date_iso = dt.date.isoformat(pd.Timestamp(periods[4].max())) print(start_date_iso, end_date_iso) print(list(return_risk_ratio('FB', start_date_iso, end_date_iso))) # print(return_risk_ratio('^GSPC', start_date, end_date)) # create an empty Dataframe rrr_index = symbols rrr_columns = ['Period', 'Symbol', 'Return', 'Risk', 'ReturnRiskRatio', 'RiskReturnToIndex'] rrr = pd.DataFrame(columns=rrr_columns) # Fill the Dataframe with the return risk ratios n = 0 for i, p in enumerate(periods): start_date_iso = dt.date.isoformat(pd.Timestamp(p.min())) end_date_iso = dt.date.isoformat(pd.Timestamp(p.max())) for s in symbols: #print(i, s, start_date_iso, end_date_iso) if data.loc[s].loc[start_date_iso:end_date_iso].size > 0: # data available for that period ret, risk, ratio = return_risk_ratio(s, start_date_iso, end_date_iso) rrr.loc[n] = [i, s, ret, risk, ratio, np.NaN] n = n + 1 rrr.head() rrr.set_index(['Period', 'Symbol'], inplace=True) rrr.head() rrr.loc[0].loc['FB'] rrr.loc[0].loc['^GSPC'] plt.gcf().set_size_inches(16, 16) subplot = 320 for y in rrr.index.levels[0]: subplot += 1 plt.subplot(subplot) rrry = rrr.loc[y] 
plt.scatter(rrry.Risk, rrry.Return, alpha=.2, label='individual stock') plt.scatter(rrry.loc['^GSPC'].Risk, rrry.loc['^GSPC'].Return, c='r', label='Index') fit = np.polyfit(rrry.Risk, rrry.Return, deg=1) plt.plot(rrry.Risk, fit[0] * rrry.Risk + fit[1], color='red', label='regression') plt.xlabel('Risk') plt.ylabel('Return %') title = "year {} Return vs Risk".format(y) plt.title(title) plt.legend() plt.show() for y in rrr.index.levels[0]: for s in symbols: if s in rrr.loc[y].index: # data available for that period rrr.loc[y].loc[s].RiskReturnToIndex = \ rrr.loc[y].loc[s].ReturnRiskRatio / rrr.loc[y].loc['^GSPC'].ReturnRiskRatio rrr.head() #rrr.nlargest(20, 'RiskReturnToIndex') rrr.nlargest(10, 'RiskReturnToIndex') best_performers = rrr[rrr.RiskReturnToIndex > 1] len(best_performers) plt.gcf().set_size_inches(16, 16) subplot = 320 for y in rrr.index.levels[0]: subplot += 1 ax = plt.subplot(subplot) ax.set_xlim([0, 20]) # fix risk and return scale to for all subplots ax.set_ylim([0, 200]) rrry = rrr.loc[y] best_performersy = best_performers.loc[y] plt.scatter(best_performersy.Risk, best_performersy.Return, alpha=.2, label='high performing stock') plt.scatter(rrry.loc['^GSPC'].Risk, rrry.loc['^GSPC'].Return, c='r', label='Index') plt.xlabel('Risk') plt.ylabel('Return %') title = "year {} Return vs Risk, Return(sp500) = {:5.2f}%".format(y, rrry.loc['^GSPC'].Return) plt.title(title) plt.legend() plt.show() best_stocks = best_performers.index.get_level_values(1) from collections import Counter d = Counter(best_stocks) d = {k: v for k, v in d.items() if v > 2} print(len(d)) d.keys() best_performers.loc[4] #rrr.loc[4].nlargest(10, 'RiskReturnToIndex') bestInYear = list(rrr.loc[4].nlargest(20, 'RiskReturnToIndex').index) bestInYear plt.gcf().set_size_inches(12, 8) rrry = rrr.loc[4] best4 = best_performers.loc[4] plt.scatter(rrry.Risk, rrry.Return, alpha=.2, label='individual stock') plt.scatter(rrry.loc['^GSPC'].Risk, rrry.loc['^GSPC'].Return, c='r', label='Index') fit = np.polyfit(rrry.Risk, rrry.Return, deg=1) plt.plot(rrry.Risk, fit[0] * rrry.Risk + fit[1], color='red', label='regression') plt.scatter(best4.Risk, best4.Return, color='green', label='best of year') plt.xlabel('Risk') plt.ylabel('Return %') title = "last year Return vs Risk".format(y) plt.title(title) plt.legend() plt.show() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Explore the Kaggle dataset all_stocks_5yrs.csv Step2: For each trading day we have the ticker symbol (Name) and the price at opening (Open), closing (Close), highest (High) and lowest (Low) of the day and the number of stocks traded (Volume). Step3: We will only use the Date, Close and Name features for this exercise. Step4: What is the time period over which we have data? Step5: Split the total period into 5 periods of approx 1 year each Step6: Create a multi-index for better retrieval later Step7: Are there any null values? Step8: How many data points do we have? Step9: How many symbols do we have? Step10: Facebook (ticker symbol 'FB') is one of these superstar companies of the past 5 years. Step11: Create and explore the dataset with historical prices for the S&P 500 index, retrieved from Yahoo Finance Step12: What is the date range of the data? Step13: Align the date range with the data from Kaggle Step14: Join the sp500 index data with the Kaggle data Step15: Facebook is one of those stocks that outperformed the index. Step17: Research question Step18: Calculate return / risk ratio for latest year for all tickers, including S&P 500 index (ticker Step19: Check return / risk ratio for Facebook Step20: Calculate return / risk ratios for all S&P 500 symbols and store the results in a Dataframe Step21: Set a multi index for easier data retrieval Step22: Check the data for Facebook and the SP500 index Step23: Visualise the results Step24: Calculate the relative performance for each ticker vs the index for every year Step25: Rank tickers on relative return / risk ratio, show top 20 Step26: Filter tickers, select the ones with better performance than the SP500 index Step27: So only about 20% of the stocks perform better than the SP500 index! Step28: Which stocks outperformed the SP500 index more than twice Step29: Which stocks were the best performers of the last year
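Editor's sketch of the metric defined in `return_risk_ratio` above, run on a made-up price series rather than real ticker data (all numbers are illustrative): the return is the gain along the fitted regression line relative to its starting value, and the risk is the RMSE of that fit relative to the same starting value.

import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error

t = np.arange(252)                                              # ~252 trading days in a year
close = 100 + 0.1 * t + np.random.normal(0, 2, size=t.size)     # synthetic close prices
slope, intercept = np.polyfit(t, close, 1)
fitted = slope * t + intercept
ret_pct  = slope / fitted[0] * t.size * 100                     # % gained along the fit
risk_pct = sqrt(mean_squared_error(close, fitted)) / fitted[0] * 100
print(ret_pct, risk_pct, ret_pct / risk_pct)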
13,653
<ASSISTANT_TASK:> Python Code: %matplotlib inline from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt # setup Lambert Conformal basemap. m = Basemap(width=12000000,height=9000000,projection='lcc', resolution='c',lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.) # draw coastlines. m.drawcoastlines() # draw a boundary around the map, fill the background. # this background will end up being the ocean color, since # the continents will be drawn on top. m.drawmapboundary(fill_color='aqua') # fill continents, set lake color same as ocean color. m.fillcontinents(color='coral',lake_color='aqua') plt.show() from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt # setup Lambert Conformal basemap. # set resolution=None to skip processing of boundary datasets. m = Basemap(width=12000000,height=9000000,projection='lcc', resolution=None,lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.) # draw a land-sea mask for a map background. # lakes=True means plot inland lakes with ocean color. m.drawlsmask(land_color='coral',ocean_color='aqua',lakes=True) plt.show() from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt # setup Lambert Conformal basemap. # set resolution=None to skip processing of boundary datasets. m = Basemap(width=12000000,height=9000000,projection='lcc', resolution=None,lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.) m.bluemarble() plt.show() from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt # setup Lambert Conformal basemap. # set resolution=None to skip processing of boundary datasets. m = Basemap(width=12000000,height=9000000,projection='lcc', resolution=None,lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.) m.shadedrelief() plt.show() from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt # setup Lambert Conformal basemap. # set resolution=None to skip processing of boundary datasets. m = Basemap(width=12000000,height=9000000,projection='lcc', resolution=None,lat_0=40.,lon_0=90.) #resolution=None,lat_1=45,lat_2=90,lat_0=50,lon_0=0.) #resolution=None,lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.) m.etopo() plt.show() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Draw coastlines, filling ocean and land areas. Step2: Draw a land-sea mask as an image. Step3: Draw the NASA ‘Blue Marble’ image. Step4: Draw a shaded relief image. Step5: Draw an etopo relief image.
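Editor's sketch: the image backgrounds and the vector layers shown above can also be combined on one map, for example etopo relief with coastlines drawn on top. This reuses the Lambert Conformal setup from the first example; note that drawcoastlines() needs the boundary datasets, so resolution must not be None.

from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt

m = Basemap(width=12000000, height=9000000, projection='lcc',
            resolution='c', lat_1=45., lat_2=55, lat_0=50, lon_0=-107.)
m.etopo()           # relief image as the background
m.drawcoastlines()  # vector coastlines on top
plt.show()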
13,654
<ASSISTANT_TASK:> Python Code: #instructor key info n1 = 11 * 7 e1 = 37 d1 = 13 #student key info n2 = 13 * 19 e2 = 41 d2 = 137 grade = 5 m = pow(grade, e2, n2) signature = pow(m, d1, n1) print(f'message|signature: {m}|{signature}') if (pow(m, e1, n1) != signature): print("Failed to verify") # Choose big prime number p # Choose two random numbers g and x # Compute y = g^x mod p # public key - (y, g, p); private key - (x, g, p) from math import gcd as bltin_gcd from random import randint import gmpy2 from functools import wraps import math import sympy def is_prime(n, k): if n == 1: return False if k >= n: k = num - 1 for i in range(k): a = randint(1, n - 1) if pow(a, n - 1, n) != 1: return False return True def gen_prime(n, k=1000): found_prime = False while not found_prime: p = randint(2 ** (n - 1), 2 ** n) if is_prime(p, k): return p def gen_safe_prime(n, k=1000): found_safe_prime = False while not found_safe_prime: p = gen_prime(n, k) sp = 2 * p + 1 if (is_prime(sp, k)): return sp def prim_root(p): found_prime_root = False while not found_prime_root: g = randint(0, p - 1) first_exp = 2 second_exp = (p - 1) // 2 if (pow(g, first_exp, p) != 1 and pow(g, second_exp, p) != 1): return g def elg_key_gen(n): p = gen_safe_prime(n) g = prim_root(p) x = randint(2 ** (n - 1), 2 ** n) y = pow(g, x, p) return (y, g, p), (x, g, p) from math import gcd as bltin_gcd from random import randint import gmpy2 def elg_encrypt(msg, pub_key): y, g, p = pub_key k = randint(1, p - 1) a = pow(g, k, p) b = pow(y, k, p) * msg % p return (a, b) def elg_decrypt(emsg, priv_key): a, b = emsg x, g, p = priv_key msg = b * gmpy2.invert(pow(a, x, p), p) % p return msg key_length_bits = 256 msg_orig = 0xdeadface pub_key, priv_key = elg_key_gen(key_length_bits) emsg = elg_encrypt(msg_orig, pub_key) msg = elg_decrypt(emsg, priv_key) if (msg_orig == msg): print("Success") else: print("Something went wrong") from math import gcd as bltin_gcd from random import randint import gmpy2 def find_coprime(p): while True: k = randint(1, p - 1) if bltin_gcd(k, p): return k def elg_sign(msg, priv_key): while True: x, g, p = priv_key k = find_coprime(p) a = pow(g, k, p) try: b = (msg - x * a) * (gmpy2.invert(k, p - 1)) % (p - 1) break except ZeroDivisionError: pass return (a, b) def elg_verify_sign(msg, sign, pub_key): a, b = sign y, g, p = pub_key left_op = pow(y, a, p) * pow(a, b, p) % p right_op = pow(g, msg, p) if (left_op == right_op): return "OK" else: return "ERR" key_length_bits = 256 msg_orig = 0xdeadface pub_key, priv_key = elg_key_gen(key_length_bits) signature = elg_sign(msg_orig, priv_key) sign_verificaton = elg_verify_sign(msg_orig, signature, pub_key) print("Signature verification result:", sign_verificaton) def sha256(data): # Set of helper functions def chunks(data, block_size): return [data[i:i + block_size] for i in range(0, len(data), block_size)] def ror(x, n): return (x >> n) | (x << 32 - n) & 0xffffffff bytes = "" # Initialize hash values, such that # first 32 bits of the fractional # parts of the square roots of the first 8 primes 2..19: h0 = 0x6a09e667 h1 = 0xbb67ae85 h2 = 0x3c6ef372 h3 = 0xa54ff53a h4 = 0x510e527f h5 = 0x9b05688c h6 = 0x1f83d9ab h7 = 0x5be0cd19 # Initialize array of round constants such that # first 32 bits of the fractional parts of the cube # roots of the first 64 primes 2..311: k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 
0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2] # Pre-processing (Padding): # begin with the original message of length L bits # append a single '1' bit for n in range(len(data)): bytes += '{0:08b}'.format(ord(data[n])) bits = bytes + "1" pBits = bits # append K '0' bits, where K is the minimum number >= 0 # such that L + 1 + K + 64 is a multiple of 512 while (len(pBits) + 64) % 512 != 0: pBits += "0" # append L as a 64-bit big-endian integer, making # the total post-processed length a multiple of 512 bits pBits += '{0:064b}'.format(len(bits) - 1) # Process the message in successive 512-bit chunks: for block in chunks(pBits, 512): #print(block) #print(len(block)) # create a 64-entry message schedule array w[0..63] of 32-bit words w = [0 for x in range(64)] # copy chunk into first 16 words w[0..15] of the message schedule array words = chunks(block, 32) for n in range(len(words)): w[n] = int(words[n], 2) # extend the first 16 words into the remaining 48 words # w[16..63] of the message schedule array: for i in range(16, 64): s0 = ror(w[i - 15], 7) ^ ror(w[i - 15], 18) ^ (w[i - 15] >> 3) s1 = ror(w[i - 2], 17) ^ ror(w[i - 2], 19) ^ (w[i - 2] >> 10) w[i] = w[i - 16] + s0 + w[i - 7] + s1 & 0xffffffff # Initialize working variables to current hash value: a, b, c, d, e, f, g, h = h0, h1, h2, h3, h4, h5, h6, h7 # Compression function main loop for i in range(0, 64): S1 = ((ror(e, 6) ^ ror(e, 11) ^ ror(e, 25))) ch = (e & f) ^ ((~e) & g) temp1 = ((h + S1 + ch + k[i] + w[i] )) S0 = (ror(a, 2) ^ ror(a, 13) ^ ror(a, 22)) maj = ((a & b) ^ (a & c) ^ (b & c)) temp2 = (S0 + maj) h, g, f, e, d, c, b, a = g, f, e, d + temp1 & 0xffffffff, c, b, a, temp1 + temp2 & 0xffffffff # Add the compressed chunk to the current hash value: h0 = h0 + a & 0xffffffff h1 = h1 + b & 0xffffffff h2 = h2 + c & 0xffffffff h3 = h3 + d & 0xffffffff h4 = h4 + e & 0xffffffff h5 = h5 + f & 0xffffffff h6 = h6 + g & 0xffffffff h7 = h7 + h & 0xffffffff return '%08x%08x%08x%08x%08x%08x%08x%08x' % (h0, h1, h2, h3, h4, h5, h6, h7) string_small = 'This is a very small string with a few characters.' string_larger = 'This is a larger string that contains more characters.' string_big = 'This is a larger string that contains more characters. This demonstrates that no matter how big the input stream is, the generated hash is the same size (but of course, not the same value). If two files have a different hash, they surely contain different data.' 
string_empty = '' print(sha256(string_small)) print(sha256(string_larger)) print(sha256(string_big)) print(sha256(string_empty)) # 64 hex values, each hex value represents 4 bits print(4 * len(sha256(string_small))) print(4 * len(sha256(string_larger))) print(4 * len(sha256(string_big))) print(4 * len(sha256(string_empty))) def bitwise_distance(str1, str2): bits1 = '' bits2 = '' diff = 0 if (len(str1) == len(str2) == 0): return 0 for n in range(len(str1)): bits1 += '{0:08b}'.format(ord(str1[n])) for n in range(len(str2)): bits2 += '{0:08b}'.format(ord(str2[n])) n = max(len(bits1), len(bits2)) while (len(bits1)) % n != 0 or len(bits1) == 0: bits1 += "0" while (len(bits2)) % n != 0 or len(bits2) == 0: bits2 += "0" for i in range(len(bits1)): if (bits1[i] != bits2[i]): diff += 1 return diff def hamming2(str1, str2): bits1 = '' bits2 = '' for n in range(len(str1)): bits1 += '{0:04b}'.format(int(str1[n], 16)) for n in range(len(str2)): bits2 += '{0:04b}'.format(int(str2[n], 16)) Calculate the Hamming distance between two bit strings return sum(c1 != c2 for c1, c2 in zip(bits1, bits2)) string = [string_small, string_larger, string_big, string_empty] for i, substr1 in enumerate(string): for substr2 in string[i:len(string)]: if (substr1 == substr2): continue print("--------------------------------------------------------------------------------------------") print("String1: %.70s" % substr1) print("String2: %.70s" % substr2) print("Bitwise difference between strings: %5d" % bitwise_distance(substr1, substr2)) print("Bitwise difference between hashes: %5d" % hamming2(sha256(substr1), sha256(substr2))) def str2num(data): res = 0 for n in range(len(data)): res += (2 ** (n * 8)) * ord(data[n]) return res msg = "easy-peasy" msg_num = str2num(msg) hash_val = int(sha256(msg), 16) key_length_bits = 256 pub_key, priv_key = elg_key_gen(key_length_bits) print("Calculate the signature of hashed message") sign_hash = elg_sign(hash_val, priv_key) print(sign_hash) print("Calculate the signature of message itself") sign_msg = elg_sign(msg_num, priv_key) print(sign_msg) import matplotlib import networkx as nx %matplotlib qt5 %matplotlib inline from networkx import balanced_tree, draw_networkx, draw G = nx.Graph() positions = {} coordinates = [ [0, 4], [-2, 3], [2, 3], [-3, 2], [-1, 2], [1, 2], [3, 2], [-3, 1], [-1, 1], [1, 1], [3, 1] ] parents = [0, 0, 0, 1, 1, 2, 2, 3, 4, 5, 6] for index in range(11): G.add_node(index) G.add_edge(index, parents[index]) positions[index] = coordinates[index] nx.draw(G, coordinates, node_size = 1000) labels = { 0: b'0', 1: b'1', 2: b'2', 3: b'3', 4: b'4', 5: b'5', 6: b'6', 7: b'tx1', 8: b'tx2', 9: b'tx3', 10: b'tx4', } nx.draw_networkx_labels(G, positions, labels = labels) import hashlib first_hash = hashlib.sha256(b"hello") # "b" stands for binary representation second_hash = hashlib.sha256() print('First hash represented as a hexadecimal number:', first_hash.hexdigest()) second_hash.update(first_hash.digest()) print('Second hash represented as a hexadecimal number:', second_hash.hexdigest()) import matplotlib.pyplot as plt labels[3] = hashlib.sha256(hashlib.sha256(b"tx1").digest()).hexdigest() # and plot the graph again plt.figure(figsize=(10,10)) nx.draw(G, positions, node_size = 2000) nx.draw_networkx_labels(G, positions, labels = labels, font_size = 8) print(labels[3]) #labels[3] = hashlib.sha256(hashlib.sha256(b"tx1").digest()).hexdigest() # and plot the graph again def do_merkle_tree(parents, labels): marks = [0 for x in range(len(parents))] for node, parent in 
reversed(list(enumerate(parents))): if node not in parents: labels[parents[node]] = hashlib.sha256(labels[node]).hexdigest() marks[parents[node]] = 1 continue if (marks[node] == 0): if (len(labels[node]) == 64): continue index = sorted([i for i, x in enumerate(parents) if (x == node)]) str_to_be_hashed = '' fail = False for children in index: if (children == 0): continue if (len(labels[children]) < 64): fail = True break str_to_be_hashed += labels[children] if (fail == True): continue else: labels[node] = hashlib.sha256(str_to_be_hashed.encode('utf-8')).hexdigest() marks[node] = 1 return labels labels = do_merkle_tree(parents, labels) labels_short = {} for key in labels: labels_short[key] = labels[key][:11] plt.figure(figsize=(7,7)) nx.draw(G, positions, node_size = 3000) nx.draw_networkx_labels(G, positions, labels = labels_short, font_size = 8) hash1 = hashlib.sha256(b'tx2').hexdigest() # hash of tx2 hash0 = labels[3] # hash of tx1 hash2 = labels[2] # hash of the right part of tree hash3 = labels[0] # root hash hash01 = hashlib.sha256((hash0 + hash1).encode('utf-8')).hexdigest() hash012 = hashlib.sha256((hash01 + hash2).encode('utf-8')).hexdigest() if (hash012 == hash3): print("Success") hash0 = hashlib.sha256(b'tx3').hexdigest() # hash of tx3 hash1 = hashlib.sha256(b'tx4').hexdigest() # hash of tx4 hash2 = labels[1] # hash of the left part of tree hash3 = labels[0] # root hash hash01 = hashlib.sha256((hash0 + hash1).encode('utf-8')).hexdigest() hash012 = hashlib.sha256((hash2 + hash01).encode('utf-8')).hexdigest() if (hash012 == hash3): print("Success") labels = { 0: b'0', 1: b'1', 2: b'2', 3: b'3', 4: b'4', 5: b'5', 6: b'6', 7: b'tx12', 8: b'tx2', 9: b'tx3', 10: b'tx4', } labels = do_merkle_tree(parents, labels) labels_short = {} for key in labels: labels_short[key] = labels[key][:11] plt.figure(figsize=(7,7)) nx.draw(G, positions, node_size = 3000) nx.draw_networkx_labels(G, positions, labels = labels_short, font_size = 8) G = nx.Graph() positions = {} coordinates = [ [0, 4], [-2, 3], [0, 3], [+2, 3], [-3, 2], [-1, 2], [0, 2], [+2, 2], [-3, 1], [-1, 1], [0, 1], [+2,1] ] parents = [0, 0, 0, 0, 1, 1, 2, 3, 4, 5, 6, 7] for index in range(12): G.add_node(index) G.add_edge(index, parents[index]) positions[index] = coordinates[index] plt.figure(figsize=(7,7)) nx.draw(G, coordinates, node_size = 3000) labels = { 0: b'0', 1: b'1', 2: b'2', 3: b'3', 4: b'4', 5: b'5', 6: b'6', 7: b'7', 8: b'tx1', 9: b'tx2', 10: b'tx3', 11: b'tx4' } nx.draw_networkx_labels(G, positions, labels = labels) G = nx.Graph() positions = {} coordinates = [ [0, 4], [-3, 3], [0, 3], [+3, 3], [-4, 2], [-3, 2], [-2, 2], [-1, 2], [0, 2], [1, 2], [2, 2], [3, 2], [4, 2], [-4, 1], [-3, 1], [-2, 1], [-1, 1], [0, 1], [1, 1], [2, 1], [3, 1], [4, 1] ] parents = [0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] for index in range(22): G.add_node(index) G.add_edge(index, parents[index]) positions[index] = coordinates[index] plt.figure(figsize=(10,10)) nx.draw(G, coordinates, node_size = 3000) labels = { 0: b'0', 1: b'1', 2: b'2', 3: b'3', 4: b'4', 5: b'5', 6: b'6', 7: b'7', 8: b'8', 9: b'9', 10: b'10', 11: b'11', 12: b'12', 13: b'tx1', 14: b'tx2', 15: b'tx3', 16: b'tx4', 17: b'tx5', 18: b'tx6', 19: b'tx7', 20: b'tx8', 21: b'tx9' } nx.draw_networkx_labels(G, positions, labels = labels) from networkx.drawing.nx_agraph import write_dot, graphviz_layout G = nx.Graph() parents = [0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10] + list(range(11, 20)) for index in range(29): G.add_node(index) 
G.add_edge(index, parents[index]) positions = graphviz_layout(G, prog='dot') plt.figure(figsize=(10,10)) nx.draw(G, positions, node_size = 3000) labels = [str(i).encode('utf-8') for i in range(0,29)] for i in range(20, 29): labels[i] = ('tx' + str(i - 20)).encode('utf-8') labels_dict = {i : item for i, item in enumerate(labels)} nx.draw_networkx_labels(G, positions, labels = labels_dict, font_size = '8') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Problem 2 (10 points) Step2: 2.Implement the encryption and decryption functions. (2 points) Step3: 3.Test your functions on random values and show that your implementation works correctly (1 point) Step4: 4.Implement functions that perform creation and verification of a digital signature (2 points) Step5: 5.Test your functions on random values and show that your algorithm works correctly (1 point) Step6: Problem 3 (15 points) Step7: 2.Calculate hashes of the texts below (1 point) Step8: 3.What is the bit length of each hash? (1 point) Step10: 4.What is the bitwise distance between them? What is the bitwise distance between their hashes? (1 point) Step11: 5.Typically we apply a hash function to the passwords and texts that we want to digitally sign. Implement a digital signature of a hashed string using the El-Gamal digital signature. Compare the digital signature of the plain text and the hashed text. (2 points) Step12: Problem 4 (15 points) Step13: Let us plot the graph basis for the Merkle hash tree Step14: In Bitcoin a double SHA-256 hash scheme is used. Here is an example. Step15: Now we can easily change the vertices' labels to hashes of the corresponding messages and plot the new graph Step16: 1.Construct a Merkle hash tree using the previously constructed graph by finding the corresponding SHA256 hashes on the vertices (2 points). Plot the obtained Merkle hash tree (1 point) Step17: 2.Provide a proof of correctness of leaf tx2 (2 points). Step18: 3.Provide a proof of correctness for the set of leaves (tx3-tx4) (2 points) Step19: 4.Change the value on leaf tx1 and recompute the corresponding hashes. Plot the newly obtained Merkle hash tree (2 points) Step20: 5.Nodes in Merkle hash trees may have arbitrary fanout. In previous items we considered the case of fanout equal to two. But what will change if we set the fanout equal to three? Construct a Merkle hash tree with fanout 3 to sign 9 values. Construct the hash tree with fanout 2 to sign the same set of values. Plot the obtained trees (4 points) Step21: Now let's try to construct a Merkle tree with fanout 3 and 9 values. Step22: In that configuration, to check any txX we need to calculate its hash, concatenate it with the two neighboring hashes, hash that, concatenate with the two neighboring hashes at the next level, and compare with the root hash. So here we need to perform 3 hash calculations and 5 requests (including the root).
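Editor's sketch generalising the two manual proofs in Steps 17-18: given a leaf, the sibling hashes along its path, and the root, verification is one hash per level. The helper name and the (hash, side) encoding are mine, not from the notebook; the hashing convention (single SHA-256 of the leaf bytes, then SHA-256 of concatenated hex digests) matches do_merkle_tree above.

import hashlib

def verify_merkle_proof(leaf, siblings, root):
    # siblings: list of (hex_digest, 'L' or 'R') from the leaf level up to just below the root
    h = hashlib.sha256(leaf).hexdigest()
    for sib, side in siblings:
        pair = sib + h if side == 'L' else h + sib
        h = hashlib.sha256(pair.encode('utf-8')).hexdigest()
    return h == root

# e.g. for leaf tx2 in the fanout-2 tree built above:
# verify_merkle_proof(b'tx2', [(labels[3], 'L'), (labels[2], 'R')], labels[0])   # -> True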
13,655
<ASSISTANT_TASK:> Python Code: import sys import os import PyFBA model_functions_file = "data/citrobacter.assigned_functions" close_genomes_functions_file = "data/close_genomes_functions" org_name = "Citrobacter sedlakii" org_id = "Citrobacter sedlakii" model = PyFBA.model.roles_to_model(model_functions_file, org_id, org_name) lb_media_file = os.path.join(os.environ["PYFBA_MEDIA_DIR"], "ArgonneLB.txt") with open(lb_media_file) as f: for l in f: print(l, end="") # status := optimization status of FBA simplex solver # flux_value := biomass flux value (objective function) # growth := boolean whether the model was able to grow or not status, flux_value, growth = model.run_fba("ArgonneLB.txt") print("Growth:", growth) success = model.gapfill("ArgonneLB.txt", close_genomes_functions_file, use_flux=True, verbose=1) if not success: print("Model was unable to gap-fill!") for n, rid in enumerate(model.gf_reactions, start=1): print("({}) {}: {}".format(n, rid, model.reactions[rid].equation)) model_directory = "save_citrobacter_sedlakii" PyFBA.model.save_model(model, model_directory) for f in os.listdir(model_directory): fp = os.path.join(model_directory, f) print(f, ": ", os.path.getsize(fp), "B", sep="") <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Generate model Step2: The model has been generated and is now ready to use for flux-balance analysis simulations. Running flux-balance analysis will show the model does not contain all required metabolism to grow in the LB media. Step3: Gap-fill model on LB media Step4: We can view the reactions that were gap-filled into the model. Step5: Save model Step6: Model has been stored. Here is a directory listing of the files that were created.
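A hypothetical follow-up cell that reuses only the calls already shown above: after gap-filling, re-running FBA on the same LB medium should now report growth.

# Assumed verification step, repeating the run_fba call demonstrated earlier.
status, flux_value, growth = model.run_fba("ArgonneLB.txt")
print("Growth after gap-filling:", growth)
print("Biomass flux value:", flux_value)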
13,656
<ASSISTANT_TASK:> Python Code: DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' # Use Floyd's cifar-10 dataset if present floyd_cifar10_location = '/input/cifar-10/python.tar.gz' if isfile(floyd_cifar10_location): tar_gz_path = floyd_cifar10_location else: tar_gz_path = 'cifar-10-python.tar.gz' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(tar_gz_path): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', tar_gz_path, pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open(tar_gz_path) as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path) %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper import numpy as np # Explore the dataset batch_id = 1 sample_id = 5 helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id) def normalize(x): Normalize a list of sample image data in the range of 0 to 1 : x: List of image data. The image shape is (32, 32, 3) : return: Numpy array of normalize data # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_normalize(normalize) def one_hot_encode(x): One hot encode a list of sample labels. Return a one-hot encoded vector for each label. : x: List of sample Labels : return: Numpy array of one-hot encoded labels # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_one_hot_encode(one_hot_encode) DON'T MODIFY ANYTHING IN THIS CELL # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode) DON'T MODIFY ANYTHING IN THIS CELL import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb')) import tensorflow as tf def neural_net_image_input(image_shape): Return a Tensor for a batch of image input : image_shape: Shape of the images : return: Tensor for image input. # TODO: Implement Function return None def neural_net_label_input(n_classes): Return a Tensor for a batch of label input : n_classes: Number of classes : return: Tensor for label input. # TODO: Implement Function return None def neural_net_keep_prob_input(): Return a Tensor for keep probability : return: Tensor for keep probability. 
# TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tf.reset_default_graph() tests.test_nn_image_inputs(neural_net_image_input) tests.test_nn_label_inputs(neural_net_label_input) tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input) def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides): Apply convolution then max pooling to x_tensor :param x_tensor: TensorFlow Tensor :param conv_num_outputs: Number of outputs for the convolutional layer :param conv_ksize: kernal size 2-D Tuple for the convolutional layer :param conv_strides: Stride 2-D Tuple for convolution :param pool_ksize: kernal size 2-D Tuple for pool :param pool_strides: Stride 2-D Tuple for pool : return: A tensor that represents convolution and max pooling of x_tensor # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_con_pool(conv2d_maxpool) def flatten(x_tensor): Flatten x_tensor to (Batch Size, Flattened Image Size) : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions. : return: A tensor of size (Batch Size, Flattened Image Size). # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_flatten(flatten) def fully_conn(x_tensor, num_outputs): Apply a fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_fully_conn(fully_conn) def output(x_tensor, num_outputs): Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. # TODO: Implement Function return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_output(output) def conv_net(x, keep_prob): Create a convolutional neural network model : x: Placeholder tensor that holds image data. : keep_prob: Placeholder tensor that hold dropout keep probability. : return: Tensor that represents logits # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers # Play around with different number of outputs, kernel size and stride # Function Definition from Above: # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) # TODO: Apply a Flatten Layer # Function Definition from Above: # flatten(x_tensor) # TODO: Apply 1, 2, or 3 Fully Connected Layers # Play around with different number of outputs # Function Definition from Above: # fully_conn(x_tensor, num_outputs) # TODO: Apply an Output Layer # Set this to the number of classes # Function Definition from Above: # output(x_tensor, num_outputs) # TODO: return output return None DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE ############################## ## Build the Neural Network ## ############################## # Remove previous weights, bias, inputs, etc.. 
tf.reset_default_graph() # Inputs x = neural_net_image_input((32, 32, 3)) y = neural_net_label_input(10) keep_prob = neural_net_keep_prob_input() # Model logits = conv_net(x, keep_prob) # Name logits Tensor, so that is can be loaded from disk after training logits = tf.identity(logits, name='logits') # Loss and Optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.AdamOptimizer().minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') tests.test_conv_net(conv_net) def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch): Optimize the session on a batch of images and labels : session: Current TensorFlow session : optimizer: TensorFlow optimizer function : keep_probability: keep probability : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data # TODO: Implement Function pass DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_train_nn(train_neural_network) def print_stats(session, feature_batch, label_batch, cost, accuracy): Print information about loss and validation accuracy : session: Current TensorFlow session : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data : cost: TensorFlow cost function : accuracy: TensorFlow accuracy function # TODO: Implement Function pass # TODO: Tune Parameters epochs = None batch_size = None keep_probability = None DON'T MODIFY ANYTHING IN THIS CELL print('Checking the Training on a Single Batch...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): batch_i = 1 for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) DON'T MODIFY ANYTHING IN THIS CELL save_model_path = './image_classification' print('Training...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): # Loop over all batches n_batches = 5 for batch_i in range(1, n_batches + 1): for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # Save Model saver = tf.train.Saver() save_path = saver.save(sess, save_model_path) DON'T MODIFY ANYTHING IN THIS CELL %matplotlib inline %config InlineBackend.figure_format = 'retina' import tensorflow as tf import pickle import helper import random # Set batch size if not already set try: if batch_size: pass except NameError: batch_size = 64 save_model_path = './image_classification' n_samples = 4 top_n_predictions = 3 def test_model(): Test the saved model against the test dataset test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb')) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load model loader = tf.train.import_meta_graph(save_model_path + '.meta') loader.restore(sess, save_model_path) # Get Tensors from loaded model 
loaded_x = loaded_graph.get_tensor_by_name('x:0') loaded_y = loaded_graph.get_tensor_by_name('y:0') loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') loaded_logits = loaded_graph.get_tensor_by_name('logits:0') loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0') # Get accuracy in batches for memory limitations test_batch_acc_total = 0 test_batch_count = 0 for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size): test_batch_acc_total += sess.run( loaded_acc, feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0}) test_batch_count += 1 print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count)) # Print Random Samples random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples))) random_test_predictions = sess.run( tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions), feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0}) helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions) test_model() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Image Classification Step2: Explore the Data Step5: Implement the Preprocessing Functions Step8: One-hot Encoding Step10: Randomize the Data Step12: Checkpoint Step17: Build the Network Step20: Convolution and Max Pooling Layer Step23: Flatten Layer Step26: Fully Connected Layer Step29: Output Layer Step32: Create the Convolutional Model Step35: Train the Neural Network Step37: Show the Data Step38: Hyperparameters Step40: Train on a Single CIFAR-10 Batch Step42: Fully Train the Model Step45: Checkpoint
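A hedged sketch of the two preprocessing TODOs listed above (Step5 and Step8); this is one possible implementation, not the project's official solution, and it assumes the input images are 8-bit RGB arrays and the labels are integers in 0..9.

import numpy as np

def normalize_sketch(x):
    # Map 8-bit pixel values from [0, 255] into [0, 1].
    return np.array(x) / 255.0

def one_hot_encode_sketch(labels, n_classes=10):
    # Row i of the identity matrix is the one-hot vector for class i.
    return np.eye(n_classes)[np.array(labels)]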
13,657
<ASSISTANT_TASK:> Python Code: import matplotlib.pyplot as plt %matplotlib inline from sklearn import datasets iris = datasets.load_iris() X_train = iris.data[iris.target != 2, :2] # first two features and y_train = iris.target[iris.target != 2] # first two labels only fig = plt.figure(figsize=(8,8)) mycolors = {"blue": "steelblue", "red": "#a76c6e", "green": "#6a9373"} plt.scatter(X_train[:, 0], X_train[:, 1], s=100, alpha=0.9, c=[mycolors["red"] if yi==1 else mycolors["blue"] for yi in y_train]) plt.xlabel('sepal length', fontsize=16) plt.ylabel('sepal width', fontsize=16); from sklearn.linear_model import LogisticRegression # import from sklearn logreg = LogisticRegression() # initialize classifier logreg.fit(X_train, y_train); # train on training data import numpy as np fig = plt.figure(figsize=(8,8)) plt.scatter(X_train[:, 0], X_train[:, 1], s=100, c=[mycolors["red"] if yi==1 else mycolors["blue"] for yi in y_train]) plt.xlabel('Sepal length') plt.ylabel('Sepal width') x_min, x_max = np.min(X_train[:,0])-0.1, np.max(X_train[:,0])+0.1 y_min, y_max = np.min(X_train[:,1])-0.1, np.max(X_train[:,1])+0.1 plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) x1 = np.linspace(x_min, x_max, 100) w0 = logreg.intercept_ w1 = logreg.coef_[0][0] w2 = logreg.coef_[0][1] x2 = #TODO plt.plot(x1, x2, color="gray"); V = {"angeles": 0, "los": 1, "new": 2, "post": 3, "times": 4, "york": 5} D = ["new york times", "new york post", "los angeles times"] M = np.zeros((len(D),len(V))) for ii, doc in enumerate(D): for term in doc.split(): #TODO print M from sklearn.feature_extraction.text import CountVectorizer # import CountVectorizer vectorizer = CountVectorizer() # initialize the vectorizer X = vectorizer.fit_transform(D) # fit to training data and transform to matrix print vectorizer.vocabulary_ print X.todense() d4 = ["new york new tribune"] x4 = vectorizer.transform(d4) print x4.todense() row_norms = np.array([np.linalg.norm(row) for row in Xtfidf]) X_tfidf_n = np.dot(np.diag(1./row_norms), Xtfidf) print X_tfidf_n from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer() Y = tfidf.fit_transform(D) print Y.todense() import csv def read_and_clean_data(fname, remove_stops=True): with open('data/stopwords.txt', 'rt') as f: stops = [line.rstrip('\n') for line in f] with open(fname,'rt') as tsvin: reader = csv.reader(tsvin, delimiter='\t') labels = []; text = [] for ii, row in enumerate(reader): labels.append(int(row[0])) words = row[1].lower().split() words = [w for w in words if not w in stops] if remove_stops else words text.append(" ".join(words)) return text, labels text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True) text_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True) labels_train[:4] text_train[3] text_train[0] text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True) text_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True) cvec = CountVectorizer() X_bw_train = cvec.fit_transform(text_train) y_train = np.array(labels_train) X_bw_test = cvec.transform(text_test) y_test = np.array(labels_test) from sklearn.metrics import accuracy_score bwLR = LogisticRegression() bwLR.fit(X_bw_train, y_train) pred_bwLR = bwLR.predict(X_bw_test) print "Logistic Regression accuracy with Bag-of-Words: ", accuracy_score(y_test, pred_bwLR) tvec = TfidfVectorizer() X_tf_train = tvec.fit_transform(text_train) X_tf_test = tvec.transform(text_test) tfLR = 
LogisticRegression() tfLR.fit(X_tf_train, y_train) pred_tfLR = tfLR.predict(X_tf_test) print "Logistic Regression accuracy with tf-idf: ", accuracy_score(y_test, pred_tfLR) import matplotlib.pyplot as plt %matplotlib inline from sklearn import datasets iris = datasets.load_iris() X_train = iris.data[iris.target != 2, :2] # first two features and y_train = iris.target[iris.target != 2] # first two labels only fig = plt.figure(figsize=(8,8)) mycolors = {"blue": "steelblue", "red": "#a76c6e", "green": "#6a9373"} plt.scatter(X_train[:, 0], X_train[:, 1], s=100, alpha=0.9, c=[mycolors["red"] if yi==1 else mycolors["blue"] for yi in y_train]) plt.xlabel('sepal length', fontsize=16) plt.ylabel('sepal width', fontsize=16); from sklearn.linear_model import LogisticRegression # import from sklearn logreg = LogisticRegression() # initialize classifier logreg.fit(X_train, y_train); # train on training data import numpy as np fig = plt.figure(figsize=(8,8)) plt.scatter(X_train[:, 0], X_train[:, 1], s=100, c=[mycolors["red"] if yi==1 else mycolors["blue"] for yi in y_train]) plt.xlabel('Sepal length') plt.ylabel('Sepal width') x_min, x_max = np.min(X_train[:,0])-0.1, np.max(X_train[:,0])+0.1 y_min, y_max = np.min(X_train[:,1])-0.1, np.max(X_train[:,1])+0.1 plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) x1 = np.linspace(x_min, x_max, 100) w0 = logreg.intercept_ w1 = logreg.coef_[0][0] w2 = logreg.coef_[0][1] x2 = -(w0/w2) - (w1/w2)*x1 #TODO plt.plot(x1, x2, color="gray"); V = {"angeles": 0, "los": 1, "new": 2, "post": 3, "times": 4, "york": 5} D = ["new york times", "new york post", "los angeles times"] M = np.zeros((len(D),len(V))) for ii, doc in enumerate(D): for term in doc.split(): M[ii, V[term]] += 1 print M from sklearn.feature_extraction.text import CountVectorizer # import CountVectorizer vectorizer = CountVectorizer() # initialize the vectorizer X = vectorizer.fit_transform(D) # fit to training data and transform to matrix print vectorizer.vocabulary_ print X.todense() d4 = ["new york new tribune"] x4 = vectorizer.transform(d4) print x4.todense() idf = np.array([np.log(3), np.log(3), np.log(3./2), np.log(3), np.log(3./2), np.log(3./2)]) Xtfidf = np.dot(X.todense(), np.diag(idf)) print Xtfidf row_norms = np.array([np.linalg.norm(row) for row in Xtfidf]) X_tfidf_n = np.dot(np.diag(1./row_norms), Xtfidf) print X_tfidf_n from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer() Y = tfidf.fit_transform(D) print Y.todense() import csv def read_and_clean_data(fname, remove_stops=True): with open('data/stopwords.txt', 'r') as f: stops = [line.rstrip('\n') for line in f] with open(fname,'rb') as tsvin: reader = csv.reader(tsvin, delimiter='\t') labels = []; text = [] for ii, row in enumerate(reader): labels.append(int(row[0])) words = row[1].lower().split() words = [w for w in words if not w in stops] if remove_stops else words text.append(" ".join(words)) return text, labels text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=False) text_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=False) labels_train[:4] text_train[0] text_train[3] text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True) text_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True) cvec = CountVectorizer() X_bw_train = cvec.fit_transform(text_train) y_train = np.array(labels_train) X_bw_test = cvec.transform(text_test) y_test = np.array(labels_test) X_bw_train.shape from 
sklearn.metrics import accuracy_score bwLR = LogisticRegression() bwLR.fit(X_bw_train, y_train) pred_bwLR = bwLR.predict(X_bw_test) print "Logistic Regression accuracy with Bag-of-Words: ", accuracy_score(y_test, pred_bwLR) tvec = TfidfVectorizer() X_tf_train = tvec.fit_transform(text_train) X_tf_test = tvec.transform(text_test) tfLR = LogisticRegression() tfLR.fit(X_tf_train, y_train) pred_tfLR = tfLR.predict(X_tf_test) print "Logistic Regression accuracy with tf-idf: ", accuracy_score(y_test, pred_tfLR) from IPython.core.display import HTML HTML( <style> .MathJax nobr>span.math>span{border-left-width:0 !important}; </style> ) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We'll train a logistic regression model of the form Step2: Q Step3: Problem 2 Step4: Let's also store the documents in a list as follows Step5: To be consistent with sklearn conventions, we'll encode the documents as row-vectors stored in a matrix. In this case, each row of the matrix corresponds to a document, and each column corresponds to a term in the vocabulary. For our example this gives us a matrix $M$ of shape $3 \times 6$. The $(d,t)$-entry in $M$ is then the number of times the term $t$ appears in document $d$ Step6: Hopefully your code returns the matrix Step7: The $\texttt{fit_transform}$ method actually does two things. It fits the model to the training data by building a vocabulary. It then transforms the text in $D$ into matrix form. Step8: Note that this is the same vocabulary and indexing that we definfed ourselves. Hopefully that means we'll get the same term-frequency matrix. We can print $X$ and check Step9: Yep, they're the same! Notice that we had to convert $X$ to a dense matrix for printing. This is because CountVectorizer actually returns a sparse matrix. This is a very good thing since most vectors in a text model will be extremely sparse, since most documents will only contain a handful of words from the vocabulary. Step10: We've already fit the CountVectorizer to the training set, so all we need to do is transform the test set documents into a term-frequency vector using the same conventions. Since we've already fit the model, we do the transformation with the $\texttt{transform}$ method Step11: Let's print it and see what it looks like Step12: Notice that the query document included the word $\texttt{new}$ twice, which corresponds to the entry in the $(0,2)$-position. Step13: Let's see what we get when we use sklearn. Sklearn has a vectorizer called TfidfVectorizer which is similar to CountVectorizer, but it computes tf-idf scores. Step14: Note that these are not quite the same, becuase sklearn's implementation of tf-idf uses the add-one smoothing in the denominator for idf. Step15: The current parameters are set to not remove stop words from the text so that it's a bit easier to explore. Step16: The first review is labeled $1$ and has the following text Step17: The fourth review is labeled $0$ and has the following text Step18: Hopefully it's obvious that label 1 corresponds to positive reviews and label 0 to negative reviews! Step19: Q Step20: OK, so we got an accuracy of around 81% using Bag-of-Words. Now lets do the same tests but this time with tf-idf features. Step21: WOOHOO! With tf-idf features we got around 85% accuracy, which is a 4% improvement. (If you're scoffing at this, wait until you get some more experience working with real-world data. 4% improvement is pretty awesome). Step22: We'll train a logistic regression model of the form Step23: Q Step24: Problem 2 Step25: Let's also store the documents in a list as follows Step26: To be consistent with sklearn conventions, we'll encode the documents as row-vectors stored in a matrix. In this case, each row of the matrix corresponds to a document, and each column corresponds to a term in the vocabulary. For our example this gives us a matrix $M$ of shape $3 \times 6$. 
The $(d,t)$-entry in $M$ is then the number of times the term $t$ appears in document $d$ Step27: Hopefully your code returns the matrix Step28: The $\texttt{fit_transform}$ method actually does two things. It fits the model to the training data by building a vocabulary. It then transforms the text in $D$ into matrix form. Step29: Note that this is the same vocabulary and indexing that we definfed ourselves. Hopefully that means we'll get the same term-frequency matrix. We can print $X$ and check Step30: Yep, they're the same! Notice that we had to convert $X$ to a dense matrix for printing. This is because CountVectorizer actually returns a sparse matrix. This is a very good thing since most vectors in a text model will be extremely sparse, since most documents will only contain a handful of words from the vocabulary. Step31: We've already fit the CountVectorizer to the training set, so all we need to do is transform the test set documents into a term-frequency vector using the same conventions. Since we've already fit the model, we do the transformation with the $\texttt{transform}$ method Step32: Let's print it and see what it looks like Step33: Notice that the query document included the word $\texttt{new}$ twice, which corresponds to the entry in the $(0,2)$-position. Step34: Hopefully you got something like the following Step35: Let's see what we get when we use sklearn. Sklearn has a vectorizer called TfidfVectorizer which is similar to CountVectorizer, but it computes tf-idf scores. Step36: Note that these are not quite the same, becuase sklearn's implementation of tf-idf uses the add-one smoothing in the denominator for idf. Step37: The current parameters are set to not remove stop words from the text so that it's a bit easier to explore. Step38: The first review is labeled $1$ and has the following text Step39: The fourth review is labeled $0$ and has the following text Step40: Hopefully it's obvious that label 1 corresponds to positive reviews and label 0 to negative reviews! Step41: Q Step42: A Step43: OK, so we got an accuracy of around 81% using Bag-of-Words. Now lets do the same tests but this time with tf-idf features. Step45: WOOHOO! With tf-idf features we got around 85% accuracy, which is a 4% improvement. (If you're scoffing at this, wait until you get some more experience working with real-world data. 4% improvement is pretty awesome).
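A hedged worked example of the tf-idf computation that the steps above describe, written against the same three-document toy corpus; the variable names are illustrative and this is not one of the notebook's own cells.

import numpy as np

counts = np.array([[0, 0, 1, 0, 1, 1],   # "new york times"
                   [0, 0, 1, 1, 0, 1],   # "new york post"
                   [1, 1, 0, 0, 1, 0]])  # "los angeles times"
df = (counts > 0).sum(axis=0)                  # document frequency of each term
idf = np.log(counts.shape[0] / df)             # idf = log(N / df), no smoothing
tfidf = counts * idf                           # term frequency times idf
tfidf = tfidf / np.linalg.norm(tfidf, axis=1, keepdims=True)  # L2-normalize each row
print(np.round(tfidf, 3))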
13,658
<ASSISTANT_TASK:> Python Code: #This notebook also uses the `(some) LaTeX environments for Jupyter` #https://github.com/ProfFan/latex_envs wich is part of the #jupyter_contrib_nbextensions package from myhdl import * from myhdlpeek import Peeker import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sympy import * init_printing() import itertools from IPython.display import clear_output #https://github.com/jrjohansson/version_information %load_ext version_information %version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, itertools, IPython #helper functions to read in the .v and .vhd generated files into python def VerilogTextReader(loc, printresult=True): with open(f'{loc}.v', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***Verilog modual from {loc}.v***\n\n', VerilogText) return VerilogText def VHDLTextReader(loc, printresult=True): with open(f'{loc}.vhd', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***VHDL modual from {loc}.vhd***\n\n', VerilogText) return VerilogText def ConstraintXDCTextReader(loc, printresult=True): with open(f'{loc}.xdc', 'r') as xdcText: ConstraintText=xdcText.read() if printresult: print(f'***Constraint file from {loc}.xdc***\n\n', ConstraintText) return ConstraintText @block def ClockDivider(Divisor, clkOut, clk,rst): Simple Clock Divider based on the Digilint Clock Divider https://learn.digilentinc.com/Documents/262 Input: Divisor(32 bit): the clock frequncy divide by value clk(bool): The input clock rst(bool): clockDivider Reset Ouput: clkOut(bool): the divided clock ouput count(32bit): the value of the internal counter count_i=Signal(modbv(0)[32:]) @always(clk.posedge, rst.posedge) def counter(): if rst: count_i.next=0 elif count_i==(Divisor-1): count_i.next=0 else: count_i.next=count_i+1 clkOut_i=Signal(bool(0)) @always(clk.posedge, rst.posedge) def clockTick(): if rst: clkOut_i.next=0 elif count_i==(Divisor-1): clkOut_i.next=not clkOut_i else: clkOut_i.next=clkOut_i @always_comb def OuputBuffer(): clkOut.next=clkOut_i return instances() RefClkFreq=125e6 TargetClkFreq=40 DivsionFactor=int(RefClkFreq/TargetClkFreq) DivsionFactor Peeker.clear() clk=Signal(bool(0)); Peeker(clk, 'clk') Divisor=Signal(intbv(DivsionFactor)[32:]); Peeker(Divisor, 'Divisor') clkOut=Signal(bool(0)); Peeker(clkOut, 'clkOut') rst=Signal(bool(0)); Peeker(rst, 'rst') DUT=ClockDivider(Divisor, clkOut, clk,rst) DUT.convert() VerilogTextReader('ClockDivider'); ConstraintXDCTextReader('ClockAXI'); <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: IP ClockDivider
13,659
<ASSISTANT_TASK:> Python Code: from vis_int import * import vis_int print(dir(vis_int)) def biseccion(funcion, a, b, tol_x = 1e-6, factor_ty = 1e2): f0 = funcion(a) f1 = funcion(b) if abs(f0) < tol_x: # Se verifica que los extremos sean raices return a elif abs(f1) < tol_x: return b else: # Si los extremos no son raices, se bisecta. c = (a + b) / 2.0 f2 = funcion(c) while abs(f2) >= tol_x and abs(c - b) >= tol_x: if f2 * f0 < 0 : b = c f1 = f2 else: a = c f0 = f2 c = (a + b) / 2.0 f2 = funcion(c) if abs(f2) < tol_x * factor_ty: # Se verifica que efectivamente sea raiz return c else: # En caso de ser asintota vertical con cambio de signo return None def incremental(funcion, a, b, delta_x = 1e-4, tol_x = 1e-6): c0 = a f0 = funcion(c0) c1 = c0 + delta_x c = None while c == None and c1 <=b: # Si no se ha hallado raíz y se esta en el intervalo, avance f1 = funcion(c1) while f0*f1 > 0 and c1 <= b: c0 = c1 f0 = f1 c1 = c1 + delta_x f1 = funcion(c1) if c1 > b: # Final del intervalo, equivalente f0*f1 > 0 return None else: # Sub-intervalo con cambio de signo c = biseccion(funcion, c0, c1, tol_x) # Se invoca bisección para mejorar aproximación if c == None: # Si el candidato era discontinuidad, incremental avanza c0 = c1 f0 = f1 c1 = c1 + delta_x return c def raiz_n(funcion, a, b, N, delta_x = 1e-4, tol_x = 1e-6): c0 = a cont_raiz = 0 while c0 < b and cont_raiz < N: c = incremental(funcion, c0, b, delta_x, tol_x) if c == None: # Si incremental termina en 'None', no hay más raíces return None cont_raiz = cont_raiz + 1 c0 = c + delta_x if cont_raiz == N: return c else: return None def trascendental(E, V_0, a): k2 = sqrt(V_0 - E) return sqrt(E) - k2*tan(k2*a/2) def int_raiz_trasc(V_0:(.1,20.,.1), a:(.1,15.,.1), N:(1, 6, 1), n:(1, 100, 1)): f = lambda E: trascendental(E, V_0, a) try: r = raiz_n(f, 0, V_0, N) E, tr = discretizar(f, 0, V_0, n) graficar_funcion(E, tr) graficar_punto_texto(r, 0, 'Autovalor ' + str(N)) display(Latex('\(E_' + str(N) + '=' + str(r) + '\)')) plt.grid(True) plt.show() display(HTML('<div class="alert alert-warning">'+\ '<strong>Advertencia</strong> Alrededor de las discontinuidades'+\ ' el gráfico no es representado fielmente. 
</div>')) except ValueError: display(HTML('<div class="alert alert-danger">'+\ '<strong>Error</strong> Se evaluo la función en una discontinuidad.'+\ '</div>')) interact(int_raiz_trasc) def estacionario(K, L, h): x = -L/2 while x < L/2 and K(x) <= 0: x = x + h if x >= L/2: return L/2 elif x == -L/2: return -L/2 else: return x - h def numerov(K_ex, L, E, N, n): h = L / n K = lambda x: K_ex(E, x) p_est = estacionario(K, L, h) x = -L/2 phi0 = 0.0 x = x + h phi1 = 1e-10 x = x + h while x <= p_est : term0 = 1 + h**2 * K(x - h) / 12 term1 = 2 - 5 * h**2 * K( x) / 6 term2 = 1 + h**2 * K(x + h) / 12 aux = phi1 phi1 = (term1 * phi1 - term0 * phi0) / term2 phi0 = aux x = x + h phi_i_1 = phi1 phi_i_0 = phi0 x = L/2 phi0 = 0.0 x = x - h phi1 = 1e-10 * (-1)**(N%2 + 1) x = x - h while x > p_est : term0 = 1 + h**2 * K(x + h) / 12 term1 = 2 - 5 * h**2 * K(x) / 6 term2 = 1 + h**2 * K(x - h) / 12 aux = phi1 phi1 = (term1 * phi1 - term0 * phi0) / term2 phi0 = aux x = x - h phi_d_1 = phi_i_1 phi_d_0 = phi0 * phi_i_1 / phi1 return (2*phi_d_1 - (phi_i_0+phi_d_0)) / (phi_d_0 - phi_i_0) def Phi(K_ex, L, E, N, n): h = L / n K = lambda x: K_ex(E, x) p_est = estacionario(K, L, h) x = -L/2 x_g = [x] phi0 = 0.0 phi_g = [phi0] x = x + h phi1 = 1e-10 x_g.append(x) phi_g.append(phi1) x = x + h while x <= p_est: term0 = 1 + h**2 * K(x - h) / 12 term1 = 2 - 5 * h**2 * K(x) / 6 term2 = 1 + h**2 * K(x + h) / 12 aux = phi1 phi1 = (term1 * phi1 - term0 * phi0) / term2 x_g.append(x) phi_g.append(phi1) phi0 = aux x = x + h x = L/2 phi0 = 0.0 x_gd = [x] phi_gd = [phi0] x = x - h phi1 = 1e-10 * (-1)**(N%2 + 1) x_gd.insert(0, x) phi_gd.insert(0, phi1) x = x - h while x > p_est: term0 = 1 + h**2 * K(x + h) / 12 term1 = 2 - 5 * h**2 * K(x) / 6 term2 = 1 + h**2 * K(x - h) / 12 aux = phi1 phi1 = (term1 * phi1 - term0 * phi0) / term2 x_gd.insert(0, x) phi_gd.insert(0, phi1) phi0 = aux x = x - h n_d = len(phi_gd) phi_gd = [phi_gd[i] * phi_g[-1] / phi1 for i in range(n_d)] x_g.extend(x_gd) phi_g.extend(phi_gd) return x_g, phi_g def K_Schr(V_0, a): return lambda e, x: e - potencial(V_0, a, x) def disparo(V_0, a, L, n, N, E): x, phi = Phi(K_Schr(V_0, a), L, E, N, n) V = [potencial(V_0, a, i) for i in x] graficar_potencial(x, V) graficar_autofuncion(x, phi, V_0) graficar_autovalor(L, E) plt.show() def presion_disparo(boton): disparo(V_0, a.value, L, n.value, N, E.value) interact(disparo, V_0=(0., 20., .5), a=(.5, 10., .1), L=(10., 50., 5.), n=(100, 500, 50), N=fixed(1), E=(.0, 5., .01)) def E_N(K, E_max, L, N, n, delta_e = 1e-4, tol_e = 1e-6): Numerov = lambda e: numerov(K, L, e, N, n) return raiz_n(Numerov, tol_e, E_max, N, delta_e, tol_e) def Solve_Schr(Vx, E_max, L, N, n): x_vec, V_vec = discretizar(Vx, -L/2, L/2, n) V_min = min(V_vec) K = lambda e, x : e - Vx(x) + V_min E = E_N(K, E_max - V_min, L, N, n) if E != None: x_vec, phi = Phi(K, L, E, N, n) E = E + V_min display(Latex('\(E_{' + str(N) + '} = ' + str(E) + '\)')) V_vec = [Vx(i) for i in x_vec] graficar_potencial(x_vec, V_vec) V_max = max(V_vec) V_ref = max(abs(V_min), V_max) graficar_autofuncion(x_vec, phi, V_ref) graficar_autovalor(L, E) plt.show() return E, x_vec, phi else: display(HTML('<div class="alert alert-danger">'+\ '<strong>Error</strong> Se evaluo la función en una discontinuidad.'+\ '</div>')) def fun_contenedor_base(): E_max = FloatSlider(value=10., min = 1., max=20., step=1., description= '$E_{max}$') L = FloatSlider(value = 30., min = 10., max = 100., step= 1., description='L') N = IntSlider(value=1, min=1, max= 6, step=1, description='N') n = IntSlider(value= 300, 
min= 100, max= 500, step=20, description='n') return Box(children=[E_max, L, N, n]) Contenedor_base = fun_contenedor_base() display(Contenedor_base) def agregar_control(base, control): controles = list(base.children) controles.append(control) base.children = tuple(controles) control_prueba = fun_contenedor_base() agregar_control(control_prueba, Text(description='Casilla de texto para prueba')) display(control_prueba) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Técnicas Numéricas Step2: Se observa en la implementación del método de bisección, que se considera una revisión extra a los códigos tradicionales, con el fin de validad si el candidato a raíz realmente lo es. Esto se requiere ya que es posible que la función asociada a la discretización de la energía posea discontinuidades alrededor de las cuales presente cambio de signo. Step3: A continuación se ilustra el uso de la técnica con la función trascendental del problema del pozo finito simetrico con paridad par, que en la forma adimensional corresponde a Step4: Ecuaciones diferenciales con problemas de frontera Step5: Para la ecuación de Schrödinger, $K(x) = E - V(x)$. Step6: Para ilustrar el método del disparo, se presenta el siguiente control. La idea es ajustar para una configuración de potencial $V_0$, ancho $a$, longitud total $L$ y numero de elementos de discretización $n$, la energía $E$ adecuada para observar continuidad en la función de onda y su derivada en todo el intervalo. Dada la implementación del método se verifica dicha continuidad en el limite de la primera pared. Más adelante se define de manera general como seleccionar el punto de comparación. Step7: La anterior ilustración tambien permite observar los efectos del potencial sobre un paquete de onda cuando la energía es menor o mayor que el potencial. Se puede observar como para $E>V_0$, se obtiene una función de onda oscilante en todo el intervalo, equivalente a una particula libre. Step8: El siguiente bloque define la base de los controles, fun_contenedor_base, para el notebook conceptual, Estados ligados, donde los parametros de máxima energía de búsqueda, longitud de interes, número de estado y particiones son comunes.
13,660
<ASSISTANT_TASK:> Python Code: %matplotlib inline import numpy as np import scipy as sp import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from six.moves import range # Setup Pandas pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) # Setup Seaborn sns.set_style("whitegrid") sns.set_context("poster") critics = pd.read_csv('./critics.csv') #let's drop rows with missing quotes critics = critics[~critics.quote.isnull()] critics.head() n_reviews = len(critics) n_movies = critics.rtid.unique().size n_critics = critics.critic.unique().size print("Number of reviews: {:d}".format(n_reviews)) print("Number of critics: {:d}".format(n_critics)) print("Number of movies: {:d}".format(n_movies)) df = critics.copy() df['fresh'] = df.fresh == 'fresh' grp = df.groupby('critic') counts = grp.critic.count() # number of reviews by each critic means = grp.fresh.mean() # average freshness for each critic means[counts > 100].hist(bins=10, edgecolor='w', lw=1) plt.xlabel("Average Rating per critic") plt.ylabel("Number of Critics") plt.yticks([0, 2, 4, 6, 8, 10]); from sklearn.feature_extraction.text import CountVectorizer text = ['Hop on pop', 'Hop off pop', 'Hop Hop hop'] print("Original text is\n{}".format('\n'.join(text))) vectorizer = CountVectorizer(min_df=0) # call `fit` to build the vocabulary vectorizer.fit(text) # call `transform` to convert text to a bag of words x = vectorizer.transform(text) # CountVectorizer uses a sparse array to save memory, but it's easier in this assignment to # convert back to a "normal" numpy array x = x.toarray() print("") print("Transformed text vector is \n{}".format(x)) # `get_feature_names` tracks which word is associated with each column of the transformed x print("") print("Words for each feature:") print(vectorizer.get_feature_names()) # Notice that the bag of words treatment doesn't preserve information about the *order* of words, # just their frequency def make_xy(critics, vectorizer=None): #Your code here if vectorizer is None: vectorizer = CountVectorizer() X = vectorizer.fit_transform(critics.quote) X = X.tocsc() # some versions of sklearn return COO format y = (critics.fresh == 'fresh').values.astype(np.int) return X, y X, y = make_xy(critics) from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) clf = MultinomialNB() clf.fit(X_train, y_train) print(accuracy_score(y_train, clf.predict(X_train))) print(accuracy_score(y_test, clf.predict(X_test))) from sklearn.model_selection import KFold def cv_score(clf, X, y, scorefunc): result = 0. nfold = 5 for train, test in KFold(nfold).split(X): # split data into train/test groups, 5 times clf.fit(X[train], y[train]) # fit the classifier, passed is as clf. 
result += scorefunc(clf, X[test], y[test]) # evaluate score function on held-out data return result / nfold # average def log_likelihood(clf, x, y): prob = clf.predict_log_proba(x) rotten = y == 0 fresh = ~rotten return prob[rotten, 0].sum() + prob[fresh, 1].sum() from sklearn.model_selection import train_test_split _, itest = train_test_split(range(critics.shape[0]), train_size=0.7) mask = np.zeros(critics.shape[0], dtype=np.bool) mask[itest] = True from sklearn.naive_bayes import MultinomialNB #the grid of parameters to search over alphas = [0.001, 0.01, .1, 1, 5, 10, 50] best_min_df = 0.01 # YOUR TURN: put your value of min_df here. #Find the best value for alpha and min_df, and the best classifier best_alpha = None maxscore=-np.inf for alpha in alphas: vectorizer = CountVectorizer(min_df=best_min_df) Xthis, ythis = make_xy(critics, vectorizer) Xtrainthis = Xthis[mask] ytrainthis = ythis[mask] score = cv_score(MultinomialNB(alpha), Xtrainthis, ytrainthis, log_likelihood) if (score > maxscore): maxscore = score best_alpha = alpha print("alpha: {}".format(best_alpha)) vectorizer = CountVectorizer(min_df=best_min_df) X, y = make_xy(critics, vectorizer) xtrain=X[mask] ytrain=y[mask] xtest=X[~mask] ytest=y[~mask] clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain) #your turn. Print the accuracy on the test and training dataset training_accuracy = clf.score(xtrain, ytrain) test_accuracy = clf.score(xtest, ytest) print("Accuracy on training data: {:2f}".format(training_accuracy)) print("Accuracy on test data: {:2f}".format(test_accuracy)) from sklearn.metrics import confusion_matrix print(confusion_matrix(ytest, clf.predict(xtest))) words = np.array(vectorizer.get_feature_names()) x = np.eye(xtest.shape[1]) probs = clf.predict_log_proba(x)[:, 0] ind = np.argsort(probs) good_words = words[ind[:10]] bad_words = words[ind[-10:]] good_prob = probs[ind[:10]] bad_prob = probs[ind[-10:]] print("Good words\t P(fresh | word)") for w, p in zip(good_words, good_prob): print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p))) print("Bad words\t P(fresh | word)") for w, p in zip(bad_words, bad_prob): print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p))) x, y = make_xy(critics, vectorizer) prob = clf.predict_proba(x)[:, 0] predict = clf.predict(x) bad_rotten = np.argsort(prob[y == 0])[:5] bad_fresh = np.argsort(prob[y == 1])[-5:] print("Mis-predicted Rotten quotes") print('---------------------------') for row in bad_rotten: print(critics[y == 0].quote.iloc[row]) print("") print("Mis-predicted Fresh quotes") print('--------------------------') for row in bad_fresh: print(critics[y == 1].quote.iloc[row]) print("") r = vectorizer.transform(['This movie is not remarkable, touching, or superb in any way']) clf.predict(r) # http://scikit-learn.org/dev/modules/feature_extraction.html#text-feature-extraction # http://scikit-learn.org/dev/modules/classes.html#text-feature-extraction-ref from sklearn.feature_extraction.text import TfidfVectorizer tfidfvectorizer = TfidfVectorizer(min_df=1, stop_words='english') Xtfidf=tfidfvectorizer.fit_transform(critics.quote) X_train, X_test, y_train, y_test = train_test_split(Xtfidf, y, test_size=0.25, random_state=42) clf = MultinomialNB() clf.fit(X_train, y_train) print(accuracy_score(y_train, clf.predict(X_train))) print(accuracy_score(y_test, clf.predict(X_test))) X, y = make_xy(critics, CountVectorizer(ngram_range=(1,2))) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) clf = MultinomialNB(alpha=1.0) clf.fit(X_train, 
y_train) print(accuracy_score(y_train, clf.predict(X_train))) print(accuracy_score(y_test, clf.predict(X_test))) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Table of Contents Step2: Explore Step3: <div class="span5 alert alert-info"> Step4: Naive Bayes Step5: Picking Hyperparameters for Naive Bayes and Text Maintenance Step6: We use the log-likelihood as the score here in scorefunc. The higher the log-likelihood, the better. Indeed, what we do in cv_score above is to implement the cross-validation part of GridSearchCV. Step7: We'll cross-validate over the regularization parameter $\alpha$. Step8: <div class="span5 alert alert-info"> Step9: <div class="span5 alert alert-info"> Step10: Interpretation Step11: <div class="span5 alert alert-info"> Step12: <div class="span5 alert alert-info"> Step13: Aside Step14: <div class="span5 alert alert-info">
13,661
<ASSISTANT_TASK:> Python Code: #!pip install -I "phoebe>=2.3,<2.4" import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger() b = phoebe.default_binary() b.add_dataset('mesh', compute_times=[0.75], dataset='mesh01') b['requiv@primary@component'] = 1.8 b.run_compute(irrad_method='none', distortion_method='roche', model='rochemodel') b.run_compute(irrad_method='none', distortion_method='rotstar', model='rotstarmodel') afig, mplfig = b.plot(model='rochemodel',show=True) afig, mplfig = b.plot(model='rotstarmodel',show=True) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: As always, let's do imports and initialize a logger and a new bundle. Step2: Adding Datasets Step3: Running Compute Step4: Now we'll compute synthetics at the times provided using the default options Step5: Plotting
13,662
<ASSISTANT_TASK:> Python Code: fig, ax = plt.subplots(figsize=(8,6)) ax.scatter(data['X1'], data['X2'], s=50, c=data['y'], cmap='Reds') ax.set_title('Raw data') ax.set_xlabel('X1') ax.set_ylabel('X2') svc1 = sklearn.svm.LinearSVC(C=1, loss='hinge') svc1.fit(data[['X1', 'X2']], data['y']) svc1.score(data[['X1', 'X2']], data['y']) data['SVM1 Confidence'] = svc1.decision_function(data[['X1', 'X2']]) fig, ax = plt.subplots(figsize=(8,6)) ax.scatter(data['X1'], data['X2'], s=50, c=data['SVM1 Confidence'], cmap='RdBu') ax.set_title('SVM (C=1) Decision Confidence') svc100 = sklearn.svm.LinearSVC(C=100, loss='hinge') svc100.fit(data[['X1', 'X2']], data['y']) svc100.score(data[['X1', 'X2']], data['y']) data['SVM100 Confidence'] = svc100.decision_function(data[['X1', 'X2']]) fig, ax = plt.subplots(figsize=(8,6)) ax.scatter(data['X1'], data['X2'], s=50, c=data['SVM100 Confidence'], cmap='RdBu') ax.set_title('SVM (C=100) Decision Confidence') data.head() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: try $C=1$ Step2: try $C=100$
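A hedged sketch (an assumed helper, not part of the original exercise) of how the linear decision boundary w0*x1 + w1*x2 + b = 0 learned by each LinearSVC above could be drawn on top of the scatter plots, which makes the effect of C=1 versus C=100 on the outlier visible.

import numpy as np
import matplotlib.pyplot as plt

def plot_boundary(svc, frame, label):
    w = svc.coef_[0]
    b = svc.intercept_[0]
    x1 = np.linspace(frame['X1'].min(), frame['X1'].max(), 100)
    x2 = -(w[0] * x1 + b) / w[1]    # solve w0*x1 + w1*x2 + b = 0 for x2
    plt.plot(x1, x2, label=label)

plot_boundary(svc1, data, 'C=1')
plot_boundary(svc100, data, 'C=100')
plt.legend()
plt.show()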
13,663
<ASSISTANT_TASK:> Python Code: from collections import OrderedDict # For recording the model specification import pandas as pd # For file input/output import numpy as np # For vectorized math operations import pylogit as pl # For MNL model estimation and # conversion from wide to long format # Note that the .dat files used by python biogeme are tab delimited text files wide_swiss_metro = pd.read_table("../data/swissmetro.dat", sep="\t") # Select obervations whose choice is known (i.e. CHOICE != 0) # **AND** whose PURPOSE is either 1 (commute) or 3 (business) include_criteria = (wide_swiss_metro.PURPOSE.isin([1, 3]) & (wide_swiss_metro.CHOICE != 0)) # Note that the .copy() ensures that any later changes are made # to a copy of the data and not to the original data wide_swiss_metro = wide_swiss_metro.loc[include_criteria].copy() # Look at the first 5 rows of the data wide_swiss_metro.head().T # Look at the columns of the swiss metro dataset wide_swiss_metro.columns # Create the list of individual specific variables ind_variables = wide_swiss_metro.columns.tolist()[:15] # Specify the variables that vary across individuals and some or all alternatives # The keys are the column names that will be used in the long format dataframe. # The values are dictionaries whose key-value pairs are the alternative id and # the column name of the corresponding column that encodes that variable for # the given alternative. Examples below. alt_varying_variables = {u'travel_time': dict([(1, 'TRAIN_TT'), (2, 'SM_TT'), (3, 'CAR_TT')]), u'travel_cost': dict([(1, 'TRAIN_CO'), (2, 'SM_CO'), (3, 'CAR_CO')]), u'headway': dict([(1, 'TRAIN_HE'), (2, 'SM_HE')]), u'seat_configuration': dict([(2, "SM_SEATS")])} # Specify the availability variables # Note that the keys of the dictionary are the alternative id's. # The values are the columns denoting the availability for the # given mode in the dataset. availability_variables = {1: 'TRAIN_AV', 2: 'SM_AV', 3: 'CAR_AV'} ########## # Determine the columns for: alternative ids, the observation ids and the choice ########## # The 'custom_alt_id' is the name of a column to be created in the long-format data # It will identify the alternative associated with each row. custom_alt_id = "mode_id" # Create a custom id column that ignores the fact that this is a # panel/repeated-observations dataset. Note the +1 ensures the id's start at one. obs_id_column = "custom_id" wide_swiss_metro[obs_id_column] = np.arange(wide_swiss_metro.shape[0], dtype=int) + 1 # Create a variable recording the choice column choice_column = "CHOICE" # Perform the conversion to long-format long_swiss_metro = pl.convert_wide_to_long(wide_swiss_metro, ind_variables, alt_varying_variables, availability_variables, obs_id_column, choice_column, new_alt_id_name=custom_alt_id) # Look at the resulting long-format dataframe long_swiss_metro.head(10).T ########## # Create scaled variables so the estimated coefficients are of similar magnitudes ########## # Scale the travel time column by 60 to convert raw units (minutes) to hours long_swiss_metro["travel_time_hrs"] = long_swiss_metro["travel_time"] / 60.0 # Scale the headway column by 60 to convert raw units (minutes) to hours long_swiss_metro["headway_hrs"] = long_swiss_metro["headway"] / 60.0 # Figure out who doesn't incur a marginal cost for the ticket # This can be because he/she owns an annial season pass (GA == 1) # or because his/her employer pays for the ticket (WHO == 2). 
# Note that all the other complexity in figuring out ticket costs # have been accounted for except the GA pass (the annual season # ticket). Make sure this dummy variable is only equal to 1 for # the rows with the Train or Swissmetro long_swiss_metro["free_ticket"] = (((long_swiss_metro["GA"] == 1) | (long_swiss_metro["WHO"] == 2)) & long_swiss_metro[custom_alt_id].isin([1,2])).astype(int) # Scale the travel cost by 100 so estimated coefficients are of similar magnitude # and acccount for ownership of a season pass long_swiss_metro["travel_cost_hundreth"] = (long_swiss_metro["travel_cost"] * (long_swiss_metro["free_ticket"] == 0) / 100.0) ########## # Create various dummy variables to describe the choice context of a given # invidual for each choice task. ########## # Create a dummy variable for whether a person has a single piece of luggage long_swiss_metro["single_luggage_piece"] = (long_swiss_metro["LUGGAGE"] == 1).astype(int) # Create a dummy variable for whether a person has multiple pieces of luggage long_swiss_metro["multiple_luggage_pieces"] = (long_swiss_metro["LUGGAGE"] == 3).astype(int) # Create a dummy variable indicating that a person is NOT first class long_swiss_metro["regular_class"] = 1 - long_swiss_metro["FIRST"] # Create a dummy variable indicating that the survey was taken aboard a train # Note that such passengers are a-priori imagined to be somewhat partial to train modes long_swiss_metro["train_survey"] = 1 - long_swiss_metro["SURVEY"] # NOTE: - Specification and variable names must be ordered dictionaries. # - Keys should be variables within the long format dataframe. # The sole exception to this is the "intercept" key. # - For the specification dictionary, the values should be lists # of integers or or lists of lists of integers. Within a list, # or within the inner-most list, the integers should be the # alternative ID's of the alternative whose utility specification # the explanatory variable is entering. Lists of lists denote # alternatives that will share a common coefficient for the variable # in question. 
basic_specification = OrderedDict() basic_names = OrderedDict() basic_specification["intercept"] = [1, 2] basic_names["intercept"] = ['ASC Train', 'ASC Swissmetro'] basic_specification["travel_time_hrs"] = [[1, 2,], 3] basic_names["travel_time_hrs"] = ['Travel Time, units:hrs (Train and Swissmetro)', 'Travel Time, units:hrs (Car)'] basic_specification["travel_cost_hundreth"] = [1, 2, 3] basic_names["travel_cost_hundreth"] = ['Travel Cost * (Annual Pass == 0), units: 0.01 CHF (Train)', 'Travel Cost * (Annual Pass == 0), units: 0.01 CHF (Swissmetro)', 'Travel Cost, units: 0.01 CHF (Car)'] basic_specification["headway_hrs"] = [1, 2] basic_names["headway_hrs"] = ["Headway, units:hrs, (Train)", "Headway, units:hrs, (Swissmetro)"] basic_specification["seat_configuration"] = [2] basic_names["seat_configuration"] = ['Airline Seat Configuration, base=No (Swissmetro)'] basic_specification["train_survey"] = [[1, 2]] basic_names["train_survey"] = ["Surveyed on a Train, base=No, (Train and Swissmetro)"] basic_specification["regular_class"] = [1] basic_names["regular_class"] = ["First Class == False, (Swissmetro)"] basic_specification["single_luggage_piece"] = [3] basic_names["single_luggage_piece"] = ["Number of Luggage Pieces == 1, (Car)"] basic_specification["multiple_luggage_pieces"] = [3] basic_names["multiple_luggage_pieces"] = ["Number of Luggage Pieces > 1, (Car)"] # Estimate the multinomial logit model (MNL) swissmetro_mnl = pl.create_choice_model(data=long_swiss_metro, alt_id_col=custom_alt_id, obs_id_col=obs_id_column, choice_col=choice_column, specification=basic_specification, model_type="MNL", names=basic_names) # Specify the initial values and method for the optimization. swissmetro_mnl.fit_mle(np.zeros(14)) # Look at the estimation results swissmetro_mnl.get_statsmodels_summary() # Look at other all results at the same time swissmetro_mnl.print_summaries() # Look at the general and goodness of fit statistics swissmetro_mnl.fit_summary # Look at the parameter estimation results, and round the results for easy viewing np.round(swissmetro_mnl.summary, 3) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load and filter the raw Swiss Metro data Step2: Convert the Swissmetro data to "Long Format" Step3: Perform desired variable creations and transformations Step4: Create the model specification Step5: Estimate the conditional logit model Step6: View results without using statsmodels summary table
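A hedged numerical illustration (not a cell from the original notebook) of what the estimated MNL does with fitted utilities: for one observation, the choice probabilities are a softmax over the systematic utilities of the available alternatives. The utility values below are made up.

import numpy as np

V = np.array([-0.8, -0.3, -1.1])        # illustrative utilities: train, Swissmetro, car
P = np.exp(V) / np.exp(V).sum()         # multinomial logit choice probabilities
print(P.round(3), P.sum())              # probabilities sum to 1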
13,664
<ASSISTANT_TASK:> Python Code: from IPython.display import YouTubeVideo YouTubeVideo(id='sdF0uJo2KdU', width="100%") import networkx as nx from datetime import datetime import matplotlib.pyplot as plt import numpy as np import warnings from nams import load_data as cf warnings.filterwarnings('ignore') G = cf.load_seventh_grader_network() type(G) H = nx.Graph() type(H) list(G.nodes())[0:5] len(G.nodes()) list(G.nodes(data=True))[0:5] G.nodes[1] from nams.solutions.intro import node_metadata #### REPLACE THE NEXT LINE WITH YOUR ANSWER mf_counts = node_metadata(G) from typing import Dict def test_answer(mf_counts: Dict): assert mf_counts['female'] == 17 assert mf_counts['male'] == 12 test_answer(mf_counts) list(G.edges())[0:5] len(G.edges()) list(G.edges(data=True))[0:5] G.edges[15, 10] from nams.solutions.intro import edge_metadata #### REPLACE THE NEXT LINE WITH YOUR ANSWER maxcount = edge_metadata(G) def test_maxcount(maxcount): assert maxcount == 3 test_maxcount(maxcount) from nams.solutions.intro import adding_students #### REPLACE THE NEXT LINE WITH YOUR ANSWER G = adding_students(G) def test_graph_integrity(G): assert 30 in G.nodes() assert 31 in G.nodes() assert G.nodes[30]['gender'] == 'male' assert G.nodes[31]['gender'] == 'female' assert G.has_edge(30, 31) assert G.has_edge(30, 7) assert G.has_edge(31, 7) assert G.edges[30, 7]['count'] == 3 assert G.edges[7, 30]['count'] == 3 assert G.edges[31, 7]['count'] == 3 assert G.edges[7, 31]['count'] == 3 assert G.edges[30, 31]['count'] == 3 assert G.edges[31, 30]['count'] == 3 print('All tests passed.') test_graph_integrity(G) from nams.solutions.intro import unrequitted_friendships_v1 #### REPLACE THE NEXT LINE WITH YOUR ANSWER unrequitted_friendships = unrequitted_friendships_v1(G) assert len(unrequitted_friendships) == 124 from nams.solutions.intro import unrequitted_friendships_v2 # unrequitted_friendships_v2?? from nams.solutions.intro import unrequitted_friendships_v3 # unrequitted_friendships_v3?? import nams.solutions.intro as solutions import inspect print(inspect.getsource(solutions)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: In this chapter, we will introduce you to the NetworkX API. Step2: Understanding a graph's basic statistics Step3: Because the graph is a DiGraph, Step4: Querying node information Step5: G.nodes() returns a "view" on the nodes. Step6: If our nodes have metadata attached to them, Step7: G.nodes(data=True) returns a NodeDataView, Step8: Now, because a NodeDataView is dictionary-like, Step9: Test your implementation by checking it against the test_answer function below. Step10: With this dictionary-like syntax, Step11: Similar to the NodeView, G.edges() returns an EdgeView that is also iterable. Step12: Likewise, we can also query for all of the edge's metadata Step13: Additionally, it is possible for us to select out individual edges, as long as they exist in the graph Step14: This yields the metadata dictionary for that edge. Step15: Likewise, you can test your answer using the test function below Step16: Manipulating the graph Step17: You can verify that the graph has been correctly created Step18: Coding Patterns Step19: In a previous session at ODSC East 2018, a few other class participants provided the following solutions, Step20: This one by @end0 is a unique one involving sets. Step21: Solution Answers
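The set-based solution mentioned in the steps above is imported but never shown; below is a minimal sketch of one way such an approach could look, run on a small toy DiGraph rather than the seventh-grader network (the toy edges are made up).

import networkx as nx

# Toy directed "who likes whom" graph; an unrequited friendship is an edge (u, v)
# whose reverse edge (v, u) is absent.
G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 1), (1, 3), (3, 4), (4, 3), (5, 1)])

edges = set(G.edges())
unrequited = {(u, v) for (u, v) in edges if (v, u) not in edges}
print(unrequited)   # {(1, 3), (5, 1)}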
13,665
<ASSISTANT_TASK:> Python Code: import numpy as np import holoviews as hv hv.notebook_extension() np.random.seed(10) def sine_curve(phase, freq, amp, power, samples=102): xvals = [0.1* i for i in range(samples)] return [(x, amp*np.sin(phase+freq*x)**power) for x in xvals] phases = [0, np.pi/2, np.pi, 3*np.pi/2] powers = [1,2,3] amplitudes = [0.5,0.75, 1.0] frequencies = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] gridspace = hv.GridSpace(kdims=['Amplitude', 'Power'], group='Parameters', label='Sines') for power in powers: for amplitude in amplitudes: holomap = hv.HoloMap(kdims=['Frequency']) for frequency in frequencies: sines = {phase : hv.Curve(sine_curve(phase, frequency, amplitude, power)) for phase in phases} ndoverlay = hv.NdOverlay(sines , kdims=['Phase']).relabel(group='Phases', label='Sines', depth=1) overlay = ndoverlay * hv.Points([(i,0) for i in range(0,10)], group='Markers', label='Dots') holomap[frequency] = overlay gridspace[amplitude, power] = holomap penguins = hv.RGB.load_image('../assets/penguins.png').relabel(group="Family", label="Penguin") layout = gridspace + penguins layout print(repr(layout)) print(repr(layout)) layout.Parameters.Sines layout.Parameters.Sines[0.5, 1] layout.Parameters.Sines[0.5, 1][1.0] (layout.Parameters.Sines[0.5, 1][1].Phases.Sines + layout.Parameters.Sines[0.5, 1][1].Markers.Dots) l=layout.Parameters.Sines[0.5, 1][1].Phases.Sines[0.0] l repr(l) type(layout.Parameters.Sines[0.5, 1][1].Phases.Sines[0.0].data) layout.Parameters.Sines[0.5, 1][1].Phases.Sines[0.0][5.2] layout.Parameters.Sines[0.5, 1][1].Phases.Sines[0.0][5.23], layout.Parameters.Sines[0.5, 1][1].Phases.Sines[0.0][5.27] o1 = layout.Parameters.Sines.select(Amplitude=0.5, Power=1.0).select(Frequency=1.0) o2 = layout.Parameters.Sines.select(Amplitude=0.5, Power=1.0, Frequency=1.0) o1 + o2 layout.Parameters.Sines.select(Amplitude=0.5,Power=1.0, Frequency=1.0).Phases.Sines.select(Phase=0.0) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: This code produces what looks like a relatively simple animation of two side-by-side figures, but is actually a deeply nested data structure Step2: The structure of this object can be seen in the repr() Step3: Nesting hierarchy <a id='NestingHierarchy'></a> Step4: In the examples below, we will unpack this data structure using attribute access (explained in the Introductory tutorial) as well as indexing and slicing (explained in the Sampling Data tutorial). Step5: HoloMap Level Step6: As shown in the schematic above, a HoloMap contains many elements with associated keys. In this example, these keys are indexed with a dimension Frequency, which is why the Frequency varies when you play the animation here. Step7: NdOverlay Level Step8: Curve Level Step9: Data Level Step10: Actually, HoloViews will let you go even further down, accessing data inside the Numpy array using the continuous (floating-point) coordinate systems declared in HoloViews. E.g. here we can ask for a single datapoint, such as the value at x=5.2 Step11: Indexing into 1D Elements like Curve and higher-dimensional but regularly gridded Elements like Image, Surface, and HeatMap will return the nearest defined value (i.e., the results "snap" to the nearest data item) Step12: For other Element types, such as Points, snapping is not supported and thus indexing down into the .data array will be less useful, because it will only succeed for a perfect floating-point match on the key dimensions. In those cases, you can still use all of the access methods provided by the numpy array itself, via .data, e.g. .data[52], but note that such native operations force you to use the native indexing scheme of the array, i.e. integer access starting at zero, not the more convenient and semantically meaningful continuous coordinate systems we provide through HoloViews. Step13: The second form demonstrates HoloViews' deep indexing feature, which allows indexes to cross nested container boundaries. The above is as far as we can index before reaching a heterogeneous type (the Overlay), where we need to use attribute access. Here is the more explicit method of indexing down to a curve, using .select to specify dimensions by name instead of bracket-based indexing by position
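As a small self-contained illustration of the nearest-sample indexing described in the steps above, the sketch below builds a single Curve outside any container and indexes it with scalar x values; it assumes a HoloViews version where scalar indexing snaps to the nearest sample, as the tutorial text states.

import numpy as np
import holoviews as hv
hv.extension('matplotlib')

xs = [0.1 * i for i in range(100)]
curve = hv.Curve([(x, np.sin(x)) for x in xs])
# Scalar indexing in the continuous x coordinate snaps to the nearest sample,
# so both of these return approximately sin(5.2):
print(curve[5.2], curve[5.23])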
13,666
<ASSISTANT_TASK:> Python Code: # Import the packages/libraries you typically use import clr import System import numpy as np import matplotlib.pyplot as plt #This forces plots inline in the Spyder/Python Command Console %matplotlib inline #In the line below, make sure the path matches your installation! LTCOM64Path="C:\\Program Files\\Optical Research Associates\\" LTCOM64Path=LTCOM64Path + "LightTools 8.4.0\\Utilities.NET\\LTCOM64.dll" clr.AddReference(LTCOM64Path) from LTCOM64 import LTAPIx lt0=LTAPIx() #If PID capabilities (for multiple LightTools sessions) needed, use the PID for the session you want #lt0.LTPID=12040 lt0.UpdateLTPointer #If no PID is specified, connect to the first running session lt0.Message("Hello from jupyter Notebook - 2!") #Set the focus to the 3D Window, pass a fixed command string to create a sphere with radius 5 lt0.Cmd('\V3D ctrsphere xyz 0,0,0 xyz 0,0,5') cmdstr="ctrsphere " + lt0.Coord3(0,0,0) + lt0.Coord3(0,0,5) print(cmdstr) #so that we can see it lt0.Cmd(cmdstr) #Set the radius to 10 key="Solid[@Last].Primitive[1]" lt0.DbSet(key,"Radius",10) r=lt0.DbGet(key,"Radius") print("Radius of the sphere is: " + str(r)) from IPython.display import Image Image(filename = PATH + 'BooleanAndMove.PNG',width=500,height=100) cmdstr="Cylinder " +lt0.Coord3(0,0,0) + " 3 15" #radius =3, length = 15 lt0.Cmd(cmdstr) #Get the names of the objects. We have 2 objects #Notice that we are using the "index" of each solid object names=[] for i in [1,2]: key="Solid[" + str(i) + "]" print("Current data key is: " + key) #so that we can see it names.append(lt0.DbGet(key, "Name")) print(names[i-1]) #Select two objects lt0.Cmd("Select " + lt0.Str(names[0]) + " More " + lt0.Str(names[1])) lt0.Cmd("Subtract") #Resulting object has te name of the first selected object for boolean lt0.Cmd("Select " + lt0.Str(names[0])) lt0.Cmd("Move " + lt0.Coord3(0,10,10)) #Get the spectral power distribution from a receiver (1D grids) key="receiver[1].spectral_distribution[1]" cellcount=int(lt0.DbGet(key,"Count")) print("Number of rows: " + str(cellcount)) w=np.zeros((cellcount)) p=np.zeros((cellcount)) for i in range(1,cellcount+1,1): w[i-1],stat=lt0.DbGet(key,"Wavelength_At",0,i,1) #data returned is a tuple! p[i-1],stat=lt0.DbGet(key,"Power_At",0,i,1) plt.plot(w,p,'-r') #Get the mesh data one cell at a time (this is a 2D grid) # Note that a faster method for mesh data is described below key="receiver[1].Mesh[1]" xdim=int(lt0.DbGet(key,"X_Dimension")) #Columns ydim=int(lt0.DbGet(key,"Y_Dimension")) #Rows cv=np.zeros((ydim,xdim)) for i in range(1,xdim+1,1): for j in range(1,ydim+1): cv[j-1,i-1],stat=lt0.DbGet(key,"CellValue",0,i,j) #Get the mesh bounds MinX=lt0.DbGet(key,"Min_X_Bound") MaxX=lt0.DbGet(key,"Max_X_Bound") MinY=lt0.DbGet(key,"Min_Y_Bound") MaxY=lt0.DbGet(key,"Max_Y_Bound") #Create a data grid for plotting, and plot the data xvec=np.linspace(MinX,MaxX,xdim+1) yvec=np.linspace(MinY,MaxY,ydim+1) X,Y=np.meshgrid(xvec,yvec) plt.pcolormesh(X,Y,cv,cmap='jet') plt.xlabel("X") plt.ylabel("Y") plt.axis("equal") #See below for a simpler/faster method to access mesh data def GetLTMeshParams(MeshKey,CellValueType): Get the data from a receiver mesh. 
Parameters ---------- MeshKey : String data access string for the receiver mesh CellValueType : data type to retrieve Returns ------- X_Dimension Number of bins in X dimension Y_Dimension Number of bins in Y dimension Min_X_Bound Minimum X bound for the mesh Max_X_Bound Maximum X bound for the mesh Min_Y_Bound Minimum Y bound for the mesh Max_Y_Bound Maximum Y bound for the mesh Mesh_Data_Array An array of data, based on the cell value type requested Examples -------- meshkey="receiver[1].Mesh[1]" xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue") XDim=int(lt0.DbGet(MeshKey,"X_Dimension")) YDim=int(lt0.DbGet(MeshKey,"Y_Dimension")) MinX=lt0.DbGet(MeshKey,"Min_X_Bound") MaxX=lt0.DbGet(MeshKey,"Max_X_Bound") MinY=lt0.DbGet(MeshKey,"Min_Y_Bound") MaxY=lt0.DbGet(MeshKey,"Max_Y_Bound") # We need a double array to retrieve data dblArray=System.Array.CreateInstance(System.Double,XDim,YDim) [Stat,mData]=lt0.GetMeshData(MeshKey,dblArray,CellValueType) MeshData=np.ones((XDim,YDim)) print(XDim,YDim) for i in range(0,XDim): for j in range(0,YDim): MeshData[i,j]=mData[i,j] #print(mData[i,j]) MeshData=np.rot90(MeshData) #Notice how we return multiple data items return XDim,YDim,MinX,MaxX,MinY,MaxY,MeshData import matplotlib meshkey="receiver[1].Mesh[1]" xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue") cellx=np.linspace(minx,maxx,xdim+1) celly=np.linspace(miny,maxy,ydim+1) X,Y=np.meshgrid(cellx,celly) #Raster chart in LOG scale plt.pcolormesh(X,Y,np.flipud(md),cmap="jet",norm=matplotlib.colors.LogNorm()) plt.colorbar() plt.axis("equal") plt.xlabel("X") plt.ylabel("Y") from LTCOM64 import JSNET2 js=JSNET2() #If PID capabilities (for multiple LightTools sessions) needed, use the PID for the session you want #js.LTPID=12040 js.UpdateLTPointer js.MakeSphere(5,"mySphere") js.MoveVector("mySphere",0,10,10) # js.MoveVector("mys*",0,10,10) will move all objects whose name starts with 'mys' #First, let's create a simple function to add a new optical property #This will create a new property, and return the name def AddNewProperty(propname): lt0.Cmd("\O" + lt0.Str("PROPERTY_MANAGER[1]")) lt0.Cmd("AddNew=") lt0.Cmd("\Q") lt0.DbSet("Property[@Last]", "Name", propname) return 0 op="myMirror" AddNewProperty(op) key="PROPERTY[" + op + "]" lt0.DbSet(key,"Simple Type","Mirror") mirrorname="myMirror" js.MakeTube(0.25,10,10,"R",mirrorname) key="SOLID[@Last].SURFACE[LeftSurface].ZONE[1]" lt0.DbSet(key,"PropertyName",op) #Set the orientation, Alpha=45 key="Solid[@Last]" lt0.DbSet(key,"Alpha",-45) #Add a NSRay lt0.Cmd("NSRayAim xyz 0,10,0 xyz 0,0,0") #Add a dummy plane lt0.Cmd("DummyPlane xyz 0,0,-20 xyz 0,0,-40") key="Solid[1]" segkey="NS_RAY[@Last].NS_SEGMENT[segment_2]" numpts=11 datax=np.zeros((numpts,numpts)) datay=np.zeros((numpts,numpts)) alpha=np.linspace(-55,-35,11) beta=np.linspace(-20,20,numpts) for i in range(0,numpts,1): lt0.DbSet(key,"Alpha",float(alpha[i])) for j in range(0,11,1): lt0.DbSet(key,"Beta",float(beta[j])) datax[i,j]=lt0.DbGet(segkey,"Local_Surface_X") datay[i,j]=lt0.DbGet(segkey,"Local_Surface_Y") plt.scatter(datax,datay) plt.xlabel('X') plt.ylabel('Y') from scipy.optimize import minimize import numpy as np import matplotlib.pyplot as plt import clr #Initiate the connection with LightTools clr.AddReference("C:\\Program Files\\Optical Research Associates\\LightTools 8.4.0\\Utilities.NET\\LTCOM64.dll") from LTCOM64 import LTAPIx lt0=LTAPIx() lt0.UpdateLTPointer def EvalMF(): lt0.Cmd("\O" + lt0.Str("OPT_MERITFUNCTIONS[1]")) lt0.Cmd("EvaluateAll=") lt0.Cmd("\Q") 
return 0 def setVarVals(v): v=np.asarray(v) vlist=lt0.DbList('Lens_Manager[1]','Opt_DBVariable') vcount=lt0.ListSize(vlist) lt0.SetOption('DbUpdate',0) for i in range(1,vcount+1): vkey=lt0.ListAtPos(vlist,i) lt0.DbSet(vkey,'CurrentValue',float(v[i-1])) print('Variable Value: ' + str(v[i-1])) lt0.SetOption('DbUpdate',1) lt0.ListDelete(vlist) def ApplyVarsReturnMF(vardata): myd=np.asarray(vardata) setVarVals(myd) EvalMF() mfv=lt0.DbGet('OPT_MERITFUNCTIONS[1]','CurrentValue') print("MF Value: " + str(mfv)) print('****') return mfv # Here's a sample list of optimization algorithms we can try # Some of these algorithms require 'jac', which is the Jacobian (gradiant), and it's not shown here # The Nelder-Mead is the best option to try first, given its simplicity optengines=['Nelder-Mead','BFGS','powell','Newton-CG','SLSQP','TNC'] vlist=lt0.DbList('Lens_Manager[1]','Opt_DBVariable') vcount=int(lt0.ListSize(vlist)) lt0.ListDelete(vlist) v0=np.zeros((vcount)) for i in range(1,vcount+1): v0[i-1]=lt0.DbGet('OPT_DBVARIABLE[' +str(i) +']','CurrentValue') # Note that 'maxiter' should be small (e.g. 5) for other algorithms, except 'Nelder-Mead' res=minimize(ApplyVarsReturnMF,v0,method=optengines[0],options={'disp': True,'maxiter':50}) res=minimize(ApplyVarsReturnMF,v0,method=optengines[2],options={'disp': True,'maxiter':5}) #Import the module and update the LT pointer import LTData as ltd ltd.lt0=lt0 #update the pointer #Now you can get/set the data items like this R = ltd.GetLTDbItem('Solid[1].Primitive[1].radius') print('Radius is: ' + str(R)) ltd.SetLTDbItem('solid[1].primitive[1].radius',15) illum=ltd.GetLTGridItem('receiver[1].mesh[1].CellValue_UI',45,45) #Accessing a 2D grid print('Value is: ' + str(illum)) wave=ltd.GetLTGridItem('RECEIVER[1].SPECTRAL_DISTRIBUTION[1].Wavelength_At',5) #Accessing a 1D grid print('Wavelength is: ' + str(wave)) #Make sure there's a valid spectral region with at least 1 row for the following code! stat=ltd.SetLTGridItem('spectral_region[1].WavelengthAt',600,1) #Setting data in a 1D grid #First, import standard libraries we need for arrays/plotting import matplotlib.pyplot as plt # general plotting import numpy as np #additional support for arrays, etc. #Plot a mesh ltd.PlotRaster('receiver[1].mesh[1]','cellvalue',colormap='jet', xlabel='X-Value',ylabel='Y-Value',title='Mesh Data',plotsize=(5,5),plottype='2D') #Plot the spectral distribution numrows,spd=ltd.PlotSpectralDistribution('receiver[1].spectral_distribution[1]',returndata=True) plt.plot(spd[:,0],spd[:,1]) #Plot true color data. Note the index=2 for the CIE mesh r,g,b=ltd.PlotTrueColorRster('receiver[1].mesh[2]',plotsize=(5,5),returndata=True) #We need to save the screenshot as an image file in the work directory #LTUtilities module handles the work directory and file IO import LTUtilities as ltu ltu.lt0=lt0 ltd.ltu=ltu #check the workdir wd=ltu.checkpyWorkDir() print(ltu.workdirstr) # this is where image files are saved #Get a screenshot of the 3D View viewname='3d' im,imname=ltd.GetViewImage(viewname) plt.imshow(im) #Get a screenshot of an open chart view #Usually, V3D is the first view. 
The '3' below indicates the second chart view currently open viewname='3' im,imname=ltd.GetViewImage(viewname) plt.imshow(im) #Let's get a screenshot of the full system viewname='1' im,imname=ltd.GetViewImage(viewname) plt.imshow(im) #Ray path data key='receiver[1]' #First, let's hide all ray paths lt0.Cmd('\O"RECEIVER[1].FORWARD_SIM_FUNCTION[1]" HideAll= \Q') #Now get the ray path data, and show only the matchine paths va,pa,ra,st=ltd.GetRayPathData(key,usevisibleonly=False) # Two subplots, different size from matplotlib import gridspec fig = plt.figure(figsize=(6, 6)) gs = gridspec.GridSpec(2,1, height_ratios=[1,3]) ax1 = plt.subplot(gs[0]) ax1.plot(pa,'o') ax1.set_xlabel('Path Index') ax1.set_ylabel('Power') ax1.grid(True) s2='cylin' #this is the string we're searching for for i in range(0,len(st)): #print(st[i]) s1=st[i].lower() if s2 in s1: #print(str(i) + ';' + st[i]) ltd.SetLTGridItem(key + '.forward_sim_function[1].RayPathVisibleAt','yes',(i+1)) #Finally, let's get another screenshot to show the results viewname='1' im,imname=ltd.GetViewImage(viewname) ax2 = plt.subplot(gs[1]) ax2.imshow(im) ax2.axis('off') plt.tight_layout() #receiver ray data des=['raydatax','raydatay','raydataz'] reckey='receiver[1]' simtype='Forward_Sim_Function[1]' #Note here that we specify the following function to # use passfilters flag N,M,raydata=ltd.GetLTReceiverRays(reckey,des,usepassfilters=True) plt.plot(raydata[:,0],raydata[:,1],'o') plt.xlabel('Ray Data Local X') plt.ylabel('Ray Data Local Y') plt.axis('equal') #Assume default data, x, y, z, l, m, n, p simdata='forward_sim_function[1]' reckey1='receiver[1]' #receiver on the lens surface reckey2='receiver[2]' #receiver on the dummy plane n,rayfname=ltd.MakeRayFileUsingRayOrdinal(reckey1,DataAccessKey_Ordinal=reckey2) #Extra ray data, OPL reckey='receiver[1]' #Notice that the second argument is an Enum (integer) for the filter type N,exdata=ltd.GetLTReceiverRays_Extra(reckey,ltd.ExtraRayData.Optical_Path_Length.value) plt.hist(exdata,bins=21,color='green') plt.xlabel('OPL') plt.ylabel('Frequency') import win32com.client import numpy as np import matplotlib.pyplot as plt #DbGet() and Mesh data example lt = win32com.client.Dispatch("LightTools.LTAPI4") XD=int(lt.DbGet(MeshKey,"X_Dimension")) YD=int(lt.DbGet(MeshKey,"Y_Dimension")) k=np.ones((XD,YD)) #The CellFilter may not work for all options in COM mode [stat,myd,f]=lt.GetMeshData("receiver[1].Mesh[1]",list(k),"CellValue") g=np.asarray(myd) g=np.rot90(g) x = np.linspace(-3, 3, XD) y = np.linspace(-3, 3, XD) X,Y = np.meshgrid(x, y) plt.pcolor(X,Y,g) plt.pcolormesh(X,Y,g,cmap="gray") plt.xlabel("X") plt.ylabel("Y") #JumpStart library js = win32com.client.Dispatch("LTCOM64.JSML") js.MakeSphere(lt,5,"mySphere") js.MoveVector(lt,"mySphere",0,10,10) # js.MoveVector(lt,"mys*",0,10,10) will move all objects whose name starts with 'mys' <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Sending a message to LightTools Step2: Sending commands to LightTools Step3: Send a command with Coord3() function Step4: Setting and getting data Step5: Select, Copy, Boolean, Move Step6: Access data in grids (1D and 2D) Step7: Here's an example of getting mesh data from a receiver Step9: Writing and calling functions Step10: Here's how we call the above function with arguments Step11: Accessing JumpStart Functions Step12: After creating the handle, you can use all the available functions Step13: Creating a simple model for a parameter study Step14: Add the mirror, set the optical property and orientation Step15: Add the dummy and NSRay Step16: Now we are ready to change mirror and get the ray data Step17: How to use optimization algorithms from 'scipy' Step18: Our objective function, called by the minimize, should use the parameters sent from the minimize function Step19: Another function to apply variable values Step20: Now we can create the objective function Step21: Finally, we call the minimize function with arguments Step22: Simple optimization example Step23: Sample Library Project ("LTPython") Step24: Several examples of getting and plotting receiver mesh and spectral data Step25: Examples of capturing screenshots Step26: Access to ray path data Step27: Now we can get the ray path strings, and turn on only the paths that involve the cylinder object Step28: Get receiver ray data that match the selected ray paths Step29: Receiver rays based on Ray Ordinal Number Step30: Import the resulting ray source using the local coordinate system on the lens surface Step31: Running Spyder
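Since LightTools itself is only available inside that environment, here is a standalone sketch of just the final plotting step — rendering a receiver-style mesh from a NumPy array with a log color scale — using synthetic data in place of values pulled through the API.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

# Synthetic stand-in for a receiver mesh already pulled into a NumPy array
xdim, ydim = 64, 64
xedges = np.linspace(-3, 3, xdim + 1)
yedges = np.linspace(-3, 3, ydim + 1)
xc = 0.5 * (xedges[:-1] + xedges[1:])    # cell centers
yc = 0.5 * (yedges[:-1] + yedges[1:])
Xc, Yc = np.meshgrid(xc, yc)
mesh = np.exp(-(Xc**2 + Yc**2)) + 1e-4   # fake illuminance, strictly positive for LogNorm

X, Y = np.meshgrid(xedges, yedges)
plt.pcolormesh(X, Y, mesh, cmap='jet', norm=LogNorm())
plt.colorbar(label='illuminance (arbitrary units)')
plt.axis('equal')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()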
13,667
<ASSISTANT_TASK:> Python Code: try: %matplotlib inline except: !pip3 install matplotlib import matplotlib.pyplot as plt try: import numpy as np except: !pip3 install numpy chess_board = np.zeros([8, 8], dtype=int) chess_board[0::2, 1::2] = 1 chess_board[1::2, 0::2] = 1 plt.matshow(chess_board, cmap=plt.cm.gray) resolution = 100 x = np.arange(0, 3*np.pi, np.pi/resolution) si = np.sin(x) co = np.cos(x) plt.plot(x, si, c = 'r') plt.plot(x, co, c = 'g') plt.legend(['$\sin(x)$', '$\cos(x)$']) plt.xlabel('radians') plt.title('sine($x$) vs. cosine($x$)') plt.xticks(x*resolution, ['0', '$\pi$', '$2\pi$'], rotation='horizontal') plt.xlim(0,3*np.pi) plt.show() x = np.array([[(x+y)/25 for x in range(256)] for y in range(256)]) si = np.sin(x) plt.imshow(si, cmap='hot', interpolation='nearest') plt.show() # https://github.com/AeroPython/Taller-Aeropython-PyConEs16 def funcion(x,y): return np.cos(x) + np.sin(y) x_1d = np.linspace(0, 5, 100) y_1d = np.linspace(-2, 4, 100) X, Y = np.meshgrid(x_1d, y_1d) Z = funcion(X,Y) plt.contourf(X, Y, Z, np.linspace(-2, 2, 100),cmap=plt.cm.Spectral) plt.colorbar() cs = plt.contour(X, Y, Z, np.linspace(-2, 2, 9), colors='k') plt.clabel(cs) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Importing matplotlib and NumPy (installing them if needed) Step2: Drawing data structures (matrices) Step3: Drawing 2D curves Step4: Drawing functions of two variables (filled contour plots)
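An equally valid way to build the same checkerboard matrix is np.indices instead of slice assignment; this is shown here only as a variation on the first cell above.

import numpy as np
import matplotlib.pyplot as plt

# The sum of row and column indices is odd exactly on the "white" squares,
# so taking it modulo 2 reproduces the chess-board pattern.
board = np.indices((8, 8)).sum(axis=0) % 2
plt.matshow(board, cmap=plt.cm.gray)
plt.show()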
13,668
<ASSISTANT_TASK:> Python Code: import numpy as np import matplotlib.pyplot as plt import gdsfactory as gf import gdsfactory.simulation.sipann as gs def pltAttr(x, y, title=None, legend="upper right", save=None): if legend is not None: plt.legend(loc=legend) plt.xlabel(x) plt.ylabel(y) if title is not None: plt.title(title) if save is not None: plt.savefig(save) s = gs.straight(width=0.5) hr = gs.straight(wg_width=0.45, length_x=20.0) width = np.linspace(300, 500, 100) wavelength = 1550 hr.update(width=width) t = hr.predict(wavelength) title = "Straight $\lambda=1550$ 20um long" plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(width, np.abs(t) ** 2, label="t") pltAttr("width (nm)", "|S|", title) plt.subplot(122) plt.plot(width, -np.unwrap(np.angle(t)), label="t") pltAttr("width (nm)", "Phase (rad)", title) # Lets look at the layout of a coupler_ring gf.components.coupler_ring() hr = gs.coupler_ring() r = np.linspace(5000, 50000, 100) wavelength = 1550 hr.update(radius=r) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(r / 1e3, np.abs(k) ** 2, label="k") plt.plot(r / 1e3, np.abs(t) ** 2, label="t") pltAttr("Radius (um)", "Magnitude Squared", "HalfRing $\lambda=1550$") plt.subplot(122) plt.plot(r / 1e3, np.unwrap(np.angle(k)), label="k") plt.plot(r / 1e3, -np.unwrap(np.angle(t)), label="t") pltAttr("Radius (um)", "Phase (rad)", "HalfRing $\lambda=1550$") hr = gs.coupler_ring(width=0.45, length_x=20.0) gap = np.linspace(200, 500, 100) wavelength = 1550 hr.update(gap=gap) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) title = "Half ring coupler $\lambda=1550$ length=20um 450nm waveguides" plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(gap, np.abs(k) ** 2 * 100, label="k") plt.plot(gap, np.abs(t) ** 2 * 100, label="t") pltAttr("gap (nm)", "Coupling (%)", title) plt.subplot(122) plt.plot(gap, np.unwrap(np.angle(k)), label="k") plt.plot(gap, -np.unwrap(np.angle(t)), label="t") pltAttr("gap (nm)", "Phase (rad)", title) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(gap, np.abs(k) ** 2 * 100, label="k") pltAttr("gap (nm)", "Coupling (%)", "HalfRing $\lambda=1550$ 20um straight") gap = 0.236 length = 20.0 width = 0.5 dx = 5.0 dy = 5.0 coupler_layout = gf.components.coupler( gap=gap, length=length, width=width, dx=dx, dy=dy ) coupler_layout.plot() # lets see the default parameters for the circuit model gs.coupler? # lets see the different parameters for the layout gf.components.coupler? 
c = gs.coupler(gap=gap, length=length, width=width, dx=dx, dy=dy) wavelength = np.linspace(1500, 1600, 500) k = c.predict((1, 4), wavelength) t = c.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(wavelength, np.abs(k) ** 2, label="k") plt.plot(wavelength, np.abs(t) ** 2, label="t") plt.xlabel("Wavelength (nm)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() hr = gs.coupler() length = np.linspace(1, 70, 100) * 1e3 wavelength = 1550 hr.update(length=length) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(length / 1e3, np.abs(k) ** 2, label="k") plt.plot(length / 1e3, np.abs(t) ** 2, label="t") plt.xlabel("length (um)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() plt.subplot(122) plt.plot(length / 1e3, np.unwrap(np.angle(k)), label="k") plt.plot(length / 1e3, -np.unwrap(np.angle(t)), label="t") plt.xlabel("length (um)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() hr = gs.coupler(length=10, gap=0.25, width=0.450) length = np.linspace(1, 45, 100) * 1e3 wavelength = 1550 hr.update(length=length) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(length / 1e3, np.abs(k) ** 2, label="k") plt.plot(length / 1e3, np.abs(t) ** 2, label="t") plt.xlabel("Wavelength (nm)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() plt.subplot(122) plt.plot(length / 1e3, np.unwrap(np.angle(k)), label="k") plt.plot(length / 1e3, -np.unwrap(np.angle(t)), label="t") plt.xlabel("length (um)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() hr = gs.coupler(length=10, gap=0.13, width=0.5) length = np.linspace(1, 45, 100) * 1e3 wavelength = 1550 hr.update(length=length) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(length / 1e3, np.abs(k) ** 2, label="k") plt.plot(length / 1e3, np.abs(t) ** 2, label="t") plt.xlabel("Wavelength (nm)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() plt.subplot(122) plt.plot(length / 1e3, np.unwrap(np.angle(k)), label="k") plt.plot(length / 1e3, -np.unwrap(np.angle(t)), label="t") plt.xlabel("length (um)") plt.ylabel("Magnitude Squared") plt.title("Crossover at $\lambda \approx 1550nm$") plt.legend() c50 = gs.coupler(length=18, gap=0.25, width=0.45) wavelength = np.linspace(1500, 1600, 500) k = c50.predict((1, 4), wavelength) t = c50.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(wavelength, np.abs(k) ** 2, label="k") plt.plot(wavelength, np.abs(t) ** 2, label="t") pltAttr("Wavelength (nm)", "Magnitude Squared", "Crossover at $\lambda \approx 1550nm$") import numpy as np import matplotlib.pyplot as plt hr = gs.coupler_ring(length_x=2e3, width=0.45) gap = np.linspace(0.5, 3, 40) * 1e3 wavelength = 1550 hr.update(gap=gap) k = hr.predict((1, 4), wavelength) t = hr.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(gap / 1e3, np.abs(k) ** 2, label="k") plt.plot(gap / 1e3, np.abs(t) ** 2, label="t") plt.xlabel("coupler gap (nm)") plt.ylabel("Magnitude Squared") plt.title("2 mm coupling $\lambda=1550$") c = gs.coupler_ring(length_x=20, wg_width=0.45, gap=0.45) wavelength = 
np.linspace(1500, 1600, 500) k = c.predict((1, 4), wavelength) t = c.predict((1, 3), wavelength) plt.figure(figsize=(15, 5)) plt.subplot(121) plt.plot(wavelength, np.abs(k) ** 2, label="k") plt.plot(wavelength, np.abs(t) ** 2, label="t") plt.ylabel("Magnitude Squared") plt.plot(wavelength, np.abs(k) ** 2 * 100, label="k") plt.ylabel("Coupling (%)") plt.xlabel("wavelength (nm)") plt.title("20um long 450nm wide 450nm gap straight waveguides") <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Coupler ring Step2: Coupler Step3: Reproducing numbers from thesis page 88
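The oscillation between the k (cross) and t (through) curves in the sweeps above is the standard coupled-mode behaviour of a lossless directional coupler. Below is a plain-NumPy sketch of that idealized textbook model — not the SiPANN neural-network model used above — with a made-up coupling coefficient.

import numpy as np
import matplotlib.pyplot as plt

# Idealized lossless directional coupler: power oscillates between the two
# waveguides as the coupling length grows (coupled-mode theory).
kappa = 0.15                           # placeholder coupling coefficient, rad/um
length = np.linspace(0, 60, 500)       # coupling length, um

cross = np.sin(kappa * length) ** 2    # power coupled into the other waveguide
through = np.cos(kappa * length) ** 2  # power staying in the input waveguide

plt.plot(length, cross, label='cross (k)')
plt.plot(length, through, label='through (t)')
plt.xlabel('coupling length (um)')
plt.ylabel('normalized power')
plt.legend()
plt.show()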
13,669
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import layers tf.__version__ dataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip' csv_file = 'datasets/petfinder-mini/petfinder-mini.csv' tf.keras.utils.get_file('petfinder_mini.zip', dataset_url, extract=True, cache_dir='.') dataframe = pd.read_csv(csv_file) dataframe.head() # In the original dataset, `'AdoptionSpeed'` of `4` indicates # a pet was not adopted. dataframe['target'] = np.where(dataframe['AdoptionSpeed']==4, 0, 1) # Drop unused features. dataframe = dataframe.drop(columns=['AdoptionSpeed', 'Description']) train, val, test = np.split(dataframe.sample(frac=1), [int(0.8*len(dataframe)), int(0.9*len(dataframe))]) print(len(train), 'training examples') print(len(val), 'validation examples') print(len(test), 'test examples') def df_to_dataset(dataframe, shuffle=True, batch_size=32): df = dataframe.copy() labels = df.pop('target') df = {key: value[:,tf.newaxis] for key, value in dataframe.items()} ds = tf.data.Dataset.from_tensor_slices((dict(df), labels)) if shuffle: ds = ds.shuffle(buffer_size=len(dataframe)) ds = ds.batch(batch_size) ds = ds.prefetch(batch_size) return ds batch_size = 5 train_ds = df_to_dataset(train, batch_size=batch_size) [(train_features, label_batch)] = train_ds.take(1) print('Every feature:', list(train_features.keys())) print('A batch of ages:', train_features['Age']) print('A batch of targets:', label_batch ) def get_normalization_layer(name, dataset): # Create a Normalization layer for the feature. normalizer = layers.Normalization(axis=None) # Prepare a Dataset that only yields the feature. feature_ds = dataset.map(lambda x, y: x[name]) # Learn the statistics of the data. normalizer.adapt(feature_ds) return normalizer photo_count_col = train_features['PhotoAmt'] layer = get_normalization_layer('PhotoAmt', train_ds) layer(photo_count_col) def get_category_encoding_layer(name, dataset, dtype, max_tokens=None): # Create a layer that turns strings into integer indices. if dtype == 'string': index = layers.StringLookup(max_tokens=max_tokens) # Otherwise, create a layer that turns integer values into integer indices. else: index = layers.IntegerLookup(max_tokens=max_tokens) # Prepare a `tf.data.Dataset` that only yields the feature. feature_ds = dataset.map(lambda x, y: x[name]) # Learn the set of possible values and assign them a fixed integer index. index.adapt(feature_ds) # Encode the integer indices. encoder = layers.CategoryEncoding(num_tokens=index.vocabulary_size()) # Apply multi-hot encoding to the indices. The lambda function captures the # layer, so you can use them, or include them in the Keras Functional model later. 
return lambda feature: encoder(index(feature)) test_type_col = train_features['Type'] test_type_layer = get_category_encoding_layer(name='Type', dataset=train_ds, dtype='string') test_type_layer(test_type_col) test_age_col = train_features['Age'] test_age_layer = get_category_encoding_layer(name='Age', dataset=train_ds, dtype='int64', max_tokens=5) test_age_layer(test_age_col) batch_size = 256 train_ds = df_to_dataset(train, batch_size=batch_size) val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size) test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size) all_inputs = [] encoded_features = [] # Numerical features. for header in ['PhotoAmt', 'Fee']: numeric_col = tf.keras.Input(shape=(1,), name=header) normalization_layer = get_normalization_layer(header, train_ds) encoded_numeric_col = normalization_layer(numeric_col) all_inputs.append(numeric_col) encoded_features.append(encoded_numeric_col) age_col = tf.keras.Input(shape=(1,), name='Age', dtype='int64') encoding_layer = get_category_encoding_layer(name='Age', dataset=train_ds, dtype='int64', max_tokens=5) encoded_age_col = encoding_layer(age_col) all_inputs.append(age_col) encoded_features.append(encoded_age_col) categorical_cols = ['Type', 'Color1', 'Color2', 'Gender', 'MaturitySize', 'FurLength', 'Vaccinated', 'Sterilized', 'Health', 'Breed1'] for header in categorical_cols: categorical_col = tf.keras.Input(shape=(1,), name=header, dtype='string') encoding_layer = get_category_encoding_layer(name=header, dataset=train_ds, dtype='string', max_tokens=5) encoded_categorical_col = encoding_layer(categorical_col) all_inputs.append(categorical_col) encoded_features.append(encoded_categorical_col) all_features = tf.keras.layers.concatenate(encoded_features) x = tf.keras.layers.Dense(32, activation="relu")(all_features) x = tf.keras.layers.Dropout(0.5)(x) output = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(all_inputs, output) model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=["accuracy"]) # Use `rankdir='LR'` to make the graph horizontal. tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR") model.fit(train_ds, epochs=10, validation_data=val_ds) loss, accuracy = model.evaluate(test_ds) print("Accuracy", accuracy) model.save('my_pet_classifier') reloaded_model = tf.keras.models.load_model('my_pet_classifier') sample = { 'Type': 'Cat', 'Age': 3, 'Breed1': 'Tabby', 'Gender': 'Male', 'Color1': 'Black', 'Color2': 'White', 'MaturitySize': 'Small', 'FurLength': 'Short', 'Vaccinated': 'No', 'Sterilized': 'No', 'Health': 'Healthy', 'Fee': 100, 'PhotoAmt': 2, } input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()} predictions = reloaded_model.predict(input_dict) prob = tf.nn.sigmoid(predictions[0]) print( "This particular pet had a %.1f percent probability " "of getting adopted." % (100 * prob) ) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Keras 前処理レイヤーを使って構造化データを分類する Step2: データセットを読み込み、pandas DataFrame に読み込む Step3: DataFrame の最初の 5 行をチェックして、データセットを確認します。 Step4: ターゲット変数を作成する Step5: DataFrame をトレーニング、検証、およびテストセットに分割する Step6: tf.data を使用して入力パイプラインを作成する Step7: 次に、新しく作成された関数 (df_to_dataset) を使用して、入力パイプラインヘルパー関数がトレーニングデータで呼び出すことによって返すデータの形式を確認し、小さなバッチサイズを使用して出力を読み取り可能に保ちます。 Step8: 出力が示すように、トレーニングセットは、行のカラム値にマップされる (DataFrame からの) カラム名のディクショナリを返します。 Step9: 次に、アップロードされたペットの写真特徴量の合計で新しい関数を呼び出して、'PhotoAmt' を正規化して新しい関数をテストします。 Step10: 注意 Step11: ペットの 'Type' 特徴量で呼び出して get_category_encoding_layer 関数をテストし、マルチホットエンコードされたテンソルに変換します。 Step12: ペットの 'Age' 特徴量でこのプロセスを繰り返します。 Step13: 選択した特徴量を前処理して、モデルをトレーニングする Step14: 数値の特徴量 (ペットの写真の数と引き取り料金) を正規化し、encoded_features と呼ばれる入力の 1 つのリストに追加します。 Step15: データセット (ペットの年齢) からの整数カテゴリカル値を整数インデックスに変換し、マルチホットエンコーディングを実行して、結果の特徴量入力を encoded_features に追加します。 Step16: 文字列のカテゴリカル値に対して同じ手順を繰り返します。 Step17: モデルを作成、コンパイル、およびトレーニングする Step18: Keras Model.compile を使用してモデルを構成します。 Step19: 接続性グラフを視覚化しましょう。 Step20: 次に、モデルをトレーニングし、テストします。 Step21: 推論を実行する Step22: Keras model.predict メソッドを呼び出すだけで、新しいサンプルの予測を得ることができます。後は、次を実行するだけです。
13,670
<ASSISTANT_TASK:> Python Code: from __future__ import print_function, division import numpy import scipy.stats import matplotlib.pyplot as pyplot from ipywidgets import interact, interactive, fixed import ipywidgets as widgets # seed the random number generator so we all get the same results numpy.random.seed(18) # some nicer colors from http://colorbrewer2.org/ COLOR1 = '#7fc97f' COLOR2 = '#beaed4' COLOR3 = '#fdc086' COLOR4 = '#ffff99' COLOR5 = '#386cb0' %matplotlib inline weight = scipy.stats.lognorm(0.23, 0, 70.8) weight.mean(), weight.std() xs = numpy.linspace(20, 160, 100) ys = weight.pdf(xs) pyplot.plot(xs, ys, linewidth=4, color=COLOR1) pyplot.xlabel('weight (kg)') pyplot.ylabel('PDF') None def make_sample(n=100): sample = weight.rvs(n) return sample sample = make_sample(n=100) sample.mean(), sample.std() def sample_stat(sample): return sample.mean() def compute_sampling_distribution(n=100, iters=1000): stats = [sample_stat(make_sample(n)) for i in range(iters)] return numpy.array(stats) sample_means = compute_sampling_distribution(n=100, iters=1000) pyplot.hist(sample_means, color=COLOR5) pyplot.xlabel('sample mean (n=100)') pyplot.ylabel('count') None sample_means.mean() std_err = sample_means.std() std_err conf_int = numpy.percentile(sample_means, [5, 95]) conf_int def plot_sampling_distribution(n, xlim=None): Plot the sampling distribution. n: sample size xlim: [xmin, xmax] range for the x axis sample_stats = compute_sampling_distribution(n, iters=1000) se = numpy.std(sample_stats) ci = numpy.percentile(sample_stats, [5, 95]) pyplot.hist(sample_stats, color=COLOR2) pyplot.xlabel('sample statistic') pyplot.xlim(xlim) text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci)) text(0.03, 0.85, 'SE %0.2f' % se) pyplot.show() def text(x, y, s): Plot a string at a given location in axis coordinates. x: coordinate y: coordinate s: string ax = pyplot.gca() pyplot.text(x, y, s, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes) plot_sampling_distribution(100) def sample_stat(sample): return sample.mean() slider = widgets.IntSlider(min=10, max=1000, value=100) interact(plot_sampling_distribution, n=slider, xlim=fixed([55, 95])) None def sample_stat(sample): # TODO: replace the following line with another sample statistic return sample.mean() slider = widgets.IntSlider(min=10, max=1000, value=100) interact(plot_sampling_distribution, n=slider, xlim=fixed([0, 100])) None class Resampler(object): Represents a framework for computing sampling distributions. def __init__(self, sample, xlim=None): Stores the actual sample. self.sample = sample self.n = len(sample) self.xlim = xlim def resample(self): Generates a new sample by choosing from the original sample with replacement. new_sample = numpy.random.choice(self.sample, self.n, replace=True) return new_sample def sample_stat(self, sample): Computes a sample statistic using the original sample or a simulated sample. return sample.mean() def compute_sampling_distribution(self, iters=1000): Simulates many experiments and collects the resulting sample statistics. stats = [self.sample_stat(self.resample()) for i in range(iters)] return numpy.array(stats) def plot_sampling_distribution(self): Plots the sampling distribution. 
sample_stats = self.compute_sampling_distribution() se = sample_stats.std() ci = numpy.percentile(sample_stats, [5, 95]) pyplot.hist(sample_stats, color=COLOR2) pyplot.xlabel('sample statistic') pyplot.xlim(self.xlim) text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci)) text(0.03, 0.85, 'SE %0.2f' % se) pyplot.show() def interact_func(n, xlim): sample = weight.rvs(n) resampler = Resampler(sample, xlim=xlim) resampler.plot_sampling_distribution() interact_func(n=100, xlim=[50, 100]) slider = widgets.IntSlider(min=10, max=1000, value=100) interact(interact_func, n=slider, xlim=fixed([50, 100])) None # Solution goes here class StdResampler(Resampler): Computes the sampling distribution of the standard deviation. def sample_stat(self, sample): Computes a sample statistic using the original sample or a simulated sample. return sample.std() def interact_func2(n, xlim): sample = weight.rvs(n) resampler = StdResampler(sample, xlim=xlim) resampler.plot_sampling_distribution() interact_func2(n=100, xlim=[0, 100]) slider = widgets.IntSlider(min=10, max=1000, value=100) interact(interact_func2, n=slider, xlim=fixed([0, 100])) None female_weight = scipy.stats.lognorm(0.23, 0, 70.8) female_weight.mean(), female_weight.std() male_weight = scipy.stats.lognorm(0.20, 0, 87.3) male_weight.mean(), male_weight.std() female_sample = female_weight.rvs(100) male_sample = male_weight.rvs(100) male_sample.mean() - female_sample.mean() def CohenEffectSize(group1, group2): Compute Cohen's d. group1: Series or NumPy array group2: Series or NumPy array returns: float diff = group1.mean() - group2.mean() n1, n2 = len(group1), len(group2) var1 = group1.var() var2 = group2.var() pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2) d = diff / numpy.sqrt(pooled_var) return d CohenEffectSize(male_sample, female_sample) class CohenResampler(Resampler): def __init__(self, group1, group2, xlim=None): self.group1 = group1 self.group2 = group2 self.xlim = xlim def resample(self): n, m = len(self.group1), len(self.group2) group1 = numpy.random.choice(self.group1, n, replace=True) group2 = numpy.random.choice(self.group2, m, replace=True) return group1, group2 def sample_stat(self, groups): group1, group2 = groups return CohenEffectSize(group1, group2) resampler = CohenResampler(male_sample, female_sample) resampler.plot_sampling_distribution() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Part One Step2: Here's what that distribution looks like Step3: make_sample draws a random sample from this distribution. The result is a NumPy array. Step4: Here's an example with n=100. The mean and std of the sample are close to the mean and std of the population, but not exact. Step5: We want to estimate the average weight in the population, so the "sample statistic" we'll use is the mean Step6: One iteration of "the experiment" is to collect a sample of 100 women and compute their average weight. Step7: The next line runs the simulation 1000 times and puts the results in Step8: Let's look at the distribution of the sample means. This distribution shows how much the results vary from one experiment to the next. Step9: The mean of the sample means is close to the actual population mean, which is nice, but not actually the important part. Step10: The standard deviation of the sample means quantifies the variability from one experiment to the next, and reflects the precision of the estimate. Step11: We can also use the distribution of sample means to compute a "90% confidence interval", which contains 90% of the experimental results Step14: Now we'd like to see what happens as we vary the sample size, n. The following function takes n, runs 1000 simulated experiments, and summarizes the results. Step15: Here's a test run with n=100 Step16: Now we can use interact to run plot_sampling_distribution with different values of n. Note Step17: Other sample statistics Step24: STOP HERE Step25: The following function instantiates a Resampler and runs it. Step26: Here's a test run with n=100 Step27: Now we can use interact_func in an interaction Step30: Exercise 2 Step31: Test your code using the cell below Step32: When your StdResampler is working, you should be able to interact with it Step33: STOP HERE Step34: And here's the men's distribution Step35: I'll simulate a sample of 100 men and 100 women Step36: The difference in means should be about 17 kg, but will vary from one random sample to the next Step38: Here's the function that computes Cohen's effect size again Step39: The difference in weight between men and women is about 1 standard deviation Step40: Now we can write a version of the Resampler that computes the sampling distribution of $d$. Step41: Now we can instantiate a CohenResampler and plot the sampling distribution.
13,671
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-hh', 'seaice') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 2. Key Properties --&gt; Variables Step7: 3. Key Properties --&gt; Seawater Properties Step8: 3.2. Ocean Freezing Point Value Step9: 4. Key Properties --&gt; Resolution Step10: 4.2. Canonical Horizontal Resolution Step11: 4.3. Number Of Horizontal Gridpoints Step12: 5. Key Properties --&gt; Tuning Applied Step13: 5.2. Target Step14: 5.3. Simulations Step15: 5.4. Metrics Used Step16: 5.5. Variables Step17: 6. Key Properties --&gt; Key Parameter Values Step18: 6.2. Additional Parameters Step19: 7. Key Properties --&gt; Assumptions Step20: 7.2. On Diagnostic Variables Step21: 7.3. Missing Processes Step22: 8. Key Properties --&gt; Conservation Step23: 8.2. Properties Step24: 8.3. Budget Step25: 8.4. Was Flux Correction Used Step26: 8.5. Corrected Conserved Prognostic Variables Step27: 9. Grid --&gt; Discretisation --&gt; Horizontal Step28: 9.2. Grid Type Step29: 9.3. Scheme Step30: 9.4. Thermodynamics Time Step Step31: 9.5. Dynamics Time Step Step32: 9.6. Additional Details Step33: 10. Grid --&gt; Discretisation --&gt; Vertical Step34: 10.2. Number Of Layers Step35: 10.3. Additional Details Step36: 11. Grid --&gt; Seaice Categories Step37: 11.2. Number Of Categories Step38: 11.3. Category Limits Step39: 11.4. Ice Thickness Distribution Scheme Step40: 11.5. Other Step41: 12. Grid --&gt; Snow On Seaice Step42: 12.2. Number Of Snow Levels Step43: 12.3. Snow Fraction Step44: 12.4. Additional Details Step45: 13. Dynamics Step46: 13.2. Transport In Thickness Space Step47: 13.3. Ice Strength Formulation Step48: 13.4. Redistribution Step49: 13.5. Rheology Step50: 14. Thermodynamics --&gt; Energy Step51: 14.2. Thermal Conductivity Step52: 14.3. Heat Diffusion Step53: 14.4. Basal Heat Flux Step54: 14.5. Fixed Salinity Value Step55: 14.6. Heat Content Of Precipitation Step56: 14.7. Precipitation Effects On Salinity Step57: 15. Thermodynamics --&gt; Mass Step58: 15.2. Ice Vertical Growth And Melt Step59: 15.3. Ice Lateral Melting Step60: 15.4. Ice Surface Sublimation Step61: 15.5. Frazil Ice Step62: 16. Thermodynamics --&gt; Salt Step63: 16.2. Sea Ice Salinity Thermal Impacts Step64: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Step65: 17.2. Constant Salinity Value Step66: 17.3. Additional Details Step67: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Step68: 18.2. Constant Salinity Value Step69: 18.3. Additional Details Step70: 19. Thermodynamics --&gt; Ice Thickness Distribution Step71: 20. Thermodynamics --&gt; Ice Floe Size Distribution Step72: 20.2. Additional Details Step73: 21. Thermodynamics --&gt; Melt Ponds Step74: 21.2. Formulation Step75: 21.3. Impacts Step76: 22. Thermodynamics --&gt; Snow Processes Step77: 22.2. Snow Aging Scheme Step78: 22.3. Has Snow Ice Formation Step79: 22.4. Snow Ice Formation Scheme Step80: 22.5. Redistribution Step81: 22.6. Heat Diffusion Step82: 23. Radiative Processes Step83: 23.2. Ice Radiation Transmission
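For readers unfamiliar with the ES-DOC pattern used in the row above, the sketch below shows how one of the TODO blocks is typically completed: call DOC.set_id with the property id, then DOC.set_value with one of the listed valid choices. The chosen values here ("Multi-layers", 4 layers) are placeholders rather than a real model's answers, and the snippet assumes the DOC object initialised earlier in that notebook.

# Illustrative only: completing one TODO block from the template above.
# "Multi-layers" is one of the listed valid choices; the value 4 is a placeholder.
# Assumes the DOC notebook object created earlier in the document.
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
DOC.set_value("Multi-layers")

DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
DOC.set_value(4)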
13,672
<ASSISTANT_TASK:> Python Code: # Imports all libraries required import os import cv2 import csv import time import pickle import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.image as mpimg from PIL import Image from pylab import rcParams from skimage import transform from sklearn.utils import shuffle from sklearn import preprocessing from sklearn.metrics import confusion_matrix from tensorflow.contrib.layers import flatten from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import train_test_split %matplotlib inline # Imports all libraries required import os import cv2 import csv import time import pickle import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.image as mpimg from PIL import Image from pylab import rcParams from skimage import transform from sklearn.utils import shuffle from sklearn import preprocessing from sklearn.metrics import confusion_matrix from tensorflow.contrib.layers import flatten from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import train_test_split %matplotlib inline # Load pickled data training_file = 'train.p' validating_file = 'valid.p' testing_file = 'test.p' with open(training_file, mode='rb') as f:train = pickle.load(f) with open(validating_file, mode='rb') as f:valid = pickle.load(f) with open(testing_file, mode='rb') as f:test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] ### Replace each question mark with the appropriate value. # TODO: Number of training examples n_train = len(X_train) n_valid = len(X_valid) # TODO: Number of testing examples. n_test = len(X_test) # TODO: What's the shape of an traffic sign image? image_shape = X_train[0].shape # TODO: How many unique classes/labels there are in the dataset. 
n_classes = len(set(y_train)) print("Number of training examples =", n_train) print("Number of validating examples =", n_valid) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) ### Data exploration visualization fig = plt.figure(figsize=(15, 5)) image_seq = np.random.randint(1,len(X_train),10) # Load image labels from csv label_csv = csv.reader(open('signnames.csv', 'r')) label_names = [] for row in label_csv: label_names.append(row[1]) label_names.pop(0) for ind,val in enumerate(image_seq): img = fig.add_subplot(2,5,ind+1) plt.imshow(X_train[val-1]) #Add corresponding label img.set_xlabel("{0} ({1})".format(y_train[val-1], label_names[y_train[val-1]])) #Remove the axis ticks img.set_xticks([]) img.set_yticks([]) plt.show() # A= unique B = counts A, B = np.unique(y_train, return_counts=True) fig = plt.figure(figsize=(15,10)) plt.bar(A, B, color='green') label = [label for label in label_names] plt.xticks(np.arange(0.5,n_classes+0.5), label, rotation=45,ha='right') plt.ylabel('Frequency') plt.title('Training Data Distribution') plt.show() def preprocess(X): # Normalize to range 0-1 X = (X - X.mean())/(np.max(X) - np.min(X)) # grayscale conversion X = 0.114 * X[...,0] + 0.587 * X[...,1] + 0.299 * X[...,2] # BGR->Gray return X X_train = preprocess(X_train) X_valid = preprocess(X_valid) X_test = preprocess(X_test) layer_depth = { 'layer_1': 12, 'layer_2': 32, 'fully_connected_1': 512, 'fully_connected_2': 256, 'fully_connected_3': 128, 'out': n_classes, } weights = { 'layer_1': tf.Variable(tf.truncated_normal( [5, 5, 1, layer_depth['layer_1']], mean=0, stddev=0.1)), 'layer_2': tf.Variable(tf.truncated_normal( [5, 5, layer_depth['layer_1'], layer_depth['layer_2']], mean=0, stddev=0.1)), 'fully_connected_1': tf.Variable(tf.truncated_normal( [5*5*layer_depth['layer_2'], layer_depth['fully_connected_1']])), 'fully_connected_2': tf.Variable(tf.truncated_normal( [layer_depth['fully_connected_1'], layer_depth['fully_connected_2']], mean=0, stddev=0.1)), 'fully_connected_3': tf.Variable(tf.truncated_normal( [layer_depth['fully_connected_2'], layer_depth['fully_connected_3']], mean=0, stddev=0.1)), 'out': tf.Variable(tf.truncated_normal( [layer_depth['fully_connected_3'], layer_depth['out']], mean=0, stddev=0.1)) } biases = { 'layer_1': tf.Variable(tf.zeros(layer_depth['layer_1'])), 'layer_2': tf.Variable(tf.zeros(layer_depth['layer_2'])), 'fully_connected_1': tf.Variable(tf.zeros(layer_depth['fully_connected_1'])), 'fully_connected_2': tf.Variable(tf.zeros(layer_depth['fully_connected_2'])), 'fully_connected_3': tf.Variable(tf.zeros(layer_depth['fully_connected_3'])), 'out': tf.Variable(tf.zeros(layer_depth['out'])) } # Define 2 more functions def conv2d(x, W, b, strides=1): x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding = 'VALID') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) def maxpool2d(x, k=2): return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='VALID') # Define Architecture keep_prob = tf.placeholder(tf.float32) def LeNet(x): x = tf.expand_dims(x, -1) conv1 = conv2d(x, weights['layer_1'], biases['layer_1']) conv1 = tf.nn.relu(conv1) conv1 = maxpool2d(conv1) #________________________________________________________________________________________ conv2 = conv2d(conv1, weights['layer_2'], biases['layer_2']) conv2 = tf.nn.relu(conv2) conv2 = maxpool2d(conv2) #________________________________________________________________________________________ fc0 = flatten(conv2) fc1 = 
tf.add(tf.matmul(fc0, weights['fully_connected_1']), biases['fully_connected_1']) fc1 = tf.nn.relu(fc1) fc1 = tf.nn.dropout(fc1, keep_prob=keep_prob) #________________________________________________________________________________________ fc2 = tf.add(tf.matmul(fc1, weights['fully_connected_2']), biases['fully_connected_2']) fc2 = tf.nn.relu(fc2) fc2 = tf.nn.dropout(fc2, keep_prob=keep_prob) #________________________________________________________________________________________ fc3 = tf.add(tf.matmul(fc2, weights['fully_connected_3']), biases['fully_connected_3']) fc3 = tf.nn.relu(fc3) fc3 = tf.nn.dropout(fc3, keep_prob=keep_prob) logits = tf.add(tf.matmul(fc3, weights['out']), biases['out']) return logits saver = tf.train.Saver() # Add placeholder for input and data labels x = tf.placeholder(tf.float32, (None, 32, 32)) y = tf.placeholder(tf.int32, (None)) one_hot_y = tf.one_hot(y, n_classes) learning_rate = 0.0005 logits = LeNet(x) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) # training Without regulariztaion training_operation = optimizer.minimize(loss_operation) # Evaluation correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_tunning = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, batch_size): end = offset + batch_size batch_x, batch_y = X_data[offset:end], y_data[offset:end] accuracy = sess.run(accuracy_tunning, feed_dict={x: batch_x, y: batch_y, keep_prob: 1}) total_accuracy += (accuracy * len(batch_x)) accuracy = total_accuracy / num_examples return accuracy epochs = 100 batch_size = 64 # Run Training and save model total_time = time.time() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print('Number of training samples: {}'.format(num_examples)) print('Training in progress......\n\n') for i in range(epochs): start_time = time.time() X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, batch_size): end = offset + batch_size batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob:0.6}) validation_accuracy = evaluate(X_valid, y_valid) validation_percent = validation_accuracy*100 print("\nEPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}%".format(validation_percent)) end_time = time.time() - start_time print("Time taken for the last epoch: %.3f seconds" %end_time) test_accuracy = evaluate(X_test, y_test) test_percent = test_accuracy*100 print("\n\n\nAccuracy compared to test set = {:.3f}%".format(test_percent)) final_time = time.time() - total_time print("Total Training: %.3f seconds" %final_time) saver.save(sess, '.\model') print('Model successfully Saved to current directory!') # roload and test the Model with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_test, y_test) test_percent = test_accuracy*100 print("Test Accuracy = {:.3f}%".format(test_percent)) # load up new test images df = pd.read_csv('signnames.csv') import glob images_resized = [] images = [] for j in glob.glob('./extra_German_sign/*.jpg'): image = plt.imread(j) image_resized = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA) 
images_resized.append(image_resized) image_preprocessed = preprocess(image_resized) images.append(image_preprocessed[np.newaxis,...]) images = np.vstack(images) with tf.Session() as sess: new_saver = tf.train.import_meta_graph('model.meta') new_saver.restore(sess, tf.train.latest_checkpoint('./')) out = sess.run(tf.argmax(logits, 1), feed_dict={x: images, keep_prob: 1}) # Plot Images with prediction new_label_list = [np.argmax(row) for row in out] plt.figure(figsize=(12,12)) for i in range(0,images.shape[0]): with sns.axes_style("white"): plt.subplot(4, 4, i+1) plt.imshow(np.squeeze(images_resized[i]), cmap='gray') plt.tick_params(axis='both', which='both', bottom='on', top='on', labelbottom='off', right='off', left='off', labelleft='off') plt.xlabel(df.loc[out[i]].SignName) plt.tight_layout() k = 5 with tf.Session() as sess: new_saver = tf.train.import_meta_graph('model.meta') new_saver.restore(sess, tf.train.latest_checkpoint('./')) out_prob = sess.run(tf.nn.top_k(tf.nn.softmax(logits), k=k), feed_dict={x: images, keep_prob: 1}) plt.rcParams['figure.figsize'] = (15, 30) image_indices = (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14) #image_indices = np.arange(0, len(images_resized)) for i, im in enumerate(image_indices): with sns.axes_style("white"): plt.subplot(len(image_indices), 2, (2*i)+1) plt.imshow(np.squeeze(images_resized[im]), cmap='gray') plt.axis('on') plt.xlabel(df.loc[out[i]].SignName) plt.subplot(len(image_indices) ,2, (2*i)+2) plt.barh(np.arange(k), out_prob.values[im]) plt.yticks(np.arange(k)+0.3, df.loc[out_prob.indices[im]].SignName) plt.tight_layout() ''' plot_loss_accuracy(batches, loss_batch, train_acc_batch, valid_acc_batch) ''' ''' if not offset % 50: # Calculate Training and Validation accuracy training_accuracy = sess.run(accuracy_tunning, feed_dict={x: X_train, y: y_train, keep_prob: 0.8 }) validation_accuracy = sess.run(accuracy_tunning, feed_dict={x: X_valid, y: y_valid, keep_prob: 1}) # Log batches previous_batch = batches[-1] if batches else 0 batches.append(50 + previous_batch) loss_batch.append(c) train_acc_batch.append(training_accuracy) valid_acc_batch.append(validation_accuracy) ''' <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: some unexpected errors are present Step2: executing the same codes again removes the errors, not sure why!! Step3: Step 1 Step4: Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include Step5: Create a histogram that depicts the overall dataset distribution for the training set. Step6: Step 2 Step7: Question 1 Step8: Question 3 Step9: Question 4 Step10: Question 6 Step11: Question 8
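As a quick illustration of the preprocessing step used in the row above (centre on the batch mean, rescale by the value range, then collapse the colour channels with a weighted average), here is a self-contained sketch on a dummy batch; the batch contents and shapes are made up for demonstration.

# Self-contained sketch of the preprocessing used in the row above.
# The dummy batch is random data; only the shapes and the transform matter here.
import numpy as np

def preprocess_sketch(X):
    X = (X - X.mean()) / (np.max(X) - np.min(X))   # centre on the mean, rescale by the value range
    return 0.114 * X[..., 0] + 0.587 * X[..., 1] + 0.299 * X[..., 2]   # weighted channel mix

batch = np.random.randint(0, 256, size=(4, 32, 32, 3)).astype(np.float32)
print(preprocess_sketch(batch).shape)   # (4, 32, 32): colour channel collapsed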
13,673
<ASSISTANT_TASK:> Python Code: # in select mode, shift j/k (to select multiple cells at once) # split cell with ctrl shift - # merge with shift M first = 1 second = 2 third = 3 import numpy as np np.random.choice() mylist = !ls [x.split('_')[-1] for x in mylist] %%bash pwd for i in *.ipynb do echo $i | awk -F . '{print $1}' done echo echo "break" echo for i in *.ipynb do echo $i | awk -F - '{print $2}' done def silly_function(xval): Takes a value and returns the absolute value. xval_sq = xval ** 2.0 1 + 4 xval_abs = np.sqrt(xval_sq) return xval_abs silly_function? silly_function?? silly_function() import numpy as np # np.linspace?? # np.linspace? ex_dict = {} # Indent/dedent/comment for _ in range(5): ex_dict["one"] = 1 ex_dict["two"] = 2 ex_dict["three"] = 3 ex_dict["four"] = 4 ex_dict ex_dict["one_better_name"] = 1. ex_dict["two_better_name"] = 2. ex_dict["three_better_name"] = 3. ex_dict["four_better_name"] = 4. <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Different heading levels Step2: SQL Step4: Tab; shift-tab; shift-tab-tab; shift-tab-tab-tab-tab; and more! Step5: Multicursor magic
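The %%bash cell in the row above splits notebook file names with awk; a pure-Python equivalent of the same field splitting is sketched below (the file names are invented examples).

# Hypothetical file names, used only to mirror the awk -F . and awk -F - splits above.
names = ['01-intro.ipynb', '02-numpy-basics.ipynb']
print([n.split('.')[0] for n in names])   # like awk -F . '{print $1}': the stem before the extension
print([n.split('-')[1] for n in names])   # like awk -F - '{print $2}': the second dash-separated field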
13,674
<ASSISTANT_TASK:> Python Code: class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable def __init__(self, fname, lname): self.fname = fname self.lname = lname self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) emp1 = Employee('Sri', 'Paladugu') emp2 = Employee('Dhruv', 'Paladugu') print( emp1.get_fullname() ) print( Employee.emp_count ) # Trobule ensues when you treat class variables as instance attribute. # What the interpreter does in this case is, it creates an instance attribute with the same name and assigns to it. # The class variable still remains intact with old value. emp1.company = 'Verily' print(emp1.company) print(emp1.get_company()) print(emp2.company) print(emp2.email) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname): self.fname = fname self.lname = lname self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount emp1 = Employee('Sri', 'Paladugu') emp2 = Employee('Dhruv', 'Paladugu') Employee.set_raise_amt(1.05) print(Employee.raise_amount) print(emp1.raise_amount) print(emp2.raise_amount) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount @classmethod def from_string(cls, emp_str): fname, lname, salary = emp_str.split("-") return cls(fname, lname, salary) new_emp = Employee.from_string("Pradeep-Koganti-10000") print(new_emp.email) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount @classmethod def from_string(cls, emp_str): fname, lname, salary = emp_str.split("-") return cls(fname, lname, salary) @staticmethod def is_workday(day): if day.weekday() == 5 or day.weekday() == 6: return False else: return True import datetime my_date = datetime.date(2016, 7, 10) print(Employee.is_workday(my_date)) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' 
+ self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): pass dev1 = Developer('Sri', 'Paladugu', 1000) print(dev1.get_fullname()) print(help(Developer)) # This command prints the Method resolution order. # Indicating the order in which the interpreter is going to look for methods. class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 dev1 = Developer('Sri', 'Paladugu', 1000) dev1.apply_raise() print(dev1.salary) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 def __init__(self, fname, lname, salary, prog_lang): super().__init__(fname, lname, salary) # or you can also use the following syntax # Employee.__init__(self, fname, lname, salary) self.prog_lang = prog_lang dev1 = Developer('Sri', 'Paladugu', 1000, 'Python') print(dev1.get_fullname()) print(dev1.prog_lang) class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' 
+ self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 def __init__(self, fname, lname, salary, prog_lang): super().__init__(fname, lname, salary) # or you can also use the following syntax # Employee.__init__(self, fname, lname, salary) self.prog_lang = prog_lang class Manager(Employee): def __init__(self, fname, lname, salary, employees = None): # Use None as default not empty list [] super().__init__(fname, lname, salary) if employees is None: self.employees = [] else: self.employees = employees def add_employee(self, emp): if emp not in self.employees: self.employees.append(emp) def remove_employee(self, emp): if emp in self.employees: self.employees.remove(emp) def print_emps(self): for emp in self.employees: print('--->', emp.get_fullname()) dev_1 = Developer('Sri', 'Paladugu', 1000, 'Python') dev_2 = Developer('Dhruv', 'Paladugu', 2000, 'Java') mgr_1 = Manager('Sue', 'Smith', 9000, [dev_1]) print(mgr_1.email) print(mgr_1.print_emps()) mgr_1.add_employee(dev_2) print(mgr_1.print_emps()) print('Is dev_1 an instance of Developer: ', isinstance(dev_1, Developer)) print('Is dev_1 an instance of Employee: ', isinstance(dev_1, Employee)) print('Is Developer an Subclass of Developer: ', issubclass(Developer, Developer)) print('Is Developer an Subclass of Employee: ', issubclass(Developer, Employee)) class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' def __repr__(self): # For other developers return "Employee('{}','{}','{}')".format(self.fname, self.lname, self.salary) def __str__(self): # For end user return '{} - {}'.format(self.get_fullname(), self.email) def get_fullname(self): return '{} {}'.format(self.fname, self.lname) emp1 = Employee('Sri', 'Paladugu', 5000) print(emp1) print(repr(emp1)) # if you do: 1 + 2 internally the interpreter calls the dunder method __add__ print(int.__add__(1,2)) # Similarly # if you do: [2,3] + [4,5] internally the interpreter calls the dunder method __add__ print(list.__add__([2,3],[4,5])) print('Paladugu'.__len__()) # This is same as len('Paladugu') class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' 
+ self.lname + '@' + self.company + '.com' def __repr__(self): # For other developers return "Employee('{}','{}','{}')".format(self.fname, self.lname, self.salary) def __str__(self): # For end user return '{} - {}'.format(self.get_fullname(), self.email) def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def __add__(self, other): return self.salary + other.salary def __len__(self): return len(self.get_fullname()) emp1 = Employee('Sri', 'Paladugu', 5000) emp2 = Employee('Dhruv', 'Paladugu', 5000) print(emp1 + emp2) print(len(emp1)) class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary @property def email(self): return '{}.{}@{}.com'.format(self.fname, self.lname, self.company) @property def fullname(self): return '{} {}'.format(self.fname, self.lname) @fullname.setter def fullname(self, name): first, last = name.split(' ') self.fname = first self.lname = last @fullname.deleter def fullname(self): print('Delete Name!') self.fname = None self.lname = None emp1 = Employee('Sri', 'Paladugu', 5000) print(emp1.email) print(emp1.fullname) emp1.fullname = 'Ramki Paladugu' print(emp1.email) del emp1.fullname print(emp1.email) from abc import ABCMeta, abstractmethod class Base(metaclass=ABCMeta): @abstractmethod def foo(self): pass @abstractmethod def bar(self): pass class Concrete(Base): def foo(self): pass # We forget to declare bar() c = Concrete() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Class Methods Step2: Class Methods can be used to create alternate constructors Step3: Static Methods Step4: Inheritance - Creating subclasses Step5: Now what if you want Developer's raise_amount to be 10%? Step6: Now what if we want the Developer class to have an extra attribute like prog_lang? Step7: Gotcha - Mutable default arguments Step8: Magic or Dunder Methods Step9: __add__ Step10: Property Decorators Step11: Abstract Base Classes in Python
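A minimal standalone sketch of the @classmethod alternate-constructor pattern covered in the row above; note that the int() cast is an addition for illustration, since the original from_string keeps the salary as a string.

# Minimal sketch of the alternate-constructor pattern from the row above.
# The int() cast is added for illustration; the original leaves salary as '10000'.
class Employee:
    def __init__(self, fname, lname, salary):
        self.fname = fname
        self.lname = lname
        self.salary = salary

    @classmethod
    def from_string(cls, emp_str):
        fname, lname, salary = emp_str.split('-')
        return cls(fname, lname, int(salary))

emp = Employee.from_string('Pradeep-Koganti-10000')
print(emp.fname, emp.salary + 500)   # Pradeep 10500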
13,675
<ASSISTANT_TASK:> Python Code: # As usual, a bit of setup import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.cnn import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient from cs231n.layers import * from cs231n.fast_layers import * from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): returns relative error return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.iteritems(): print '%s: ' % k, v.shape x_shape = (2, 3, 4, 4) w_shape = (3, 3, 4, 4) x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape) w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape) b = np.linspace(-0.1, 0.2, num=3) conv_param = {'stride': 2, 'pad': 1} out, _ = conv_forward_naive(x, w, b, conv_param) correct_out = np.array([[[[[-0.08759809, -0.10987781], [-0.18387192, -0.2109216 ]], [[ 0.21027089, 0.21661097], [ 0.22847626, 0.23004637]], [[ 0.50813986, 0.54309974], [ 0.64082444, 0.67101435]]], [[[-0.98053589, -1.03143541], [-1.19128892, -1.24695841]], [[ 0.69108355, 0.66880383], [ 0.59480972, 0.56776003]], [[ 2.36270298, 2.36904306], [ 2.38090835, 2.38247847]]]]]) # Compare your output to ours; difference should be around 1e-8 print 'Testing conv_forward_naive' print 'difference: ', rel_error(out, correct_out) #print out[0][0] from scipy.misc import imread, imresize kitten, puppy = imread('kitten.jpg'), imread('puppy.jpg') # kitten is wide, and puppy is already square d = kitten.shape[1] - kitten.shape[0] kitten_cropped = kitten[:, d/2:-d/2, :] img_size = 200 # Make this smaller if it runs too slow x = np.zeros((2, 3, img_size, img_size)) x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1)) x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1)) # Set up a convolutional weights holding 2 filters, each 3x3 w = np.zeros((2, 3, 3, 3)) # The first filter converts the image to grayscale. # Set up the red, green, and blue channels of the filter. w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]] w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]] w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]] # Second filter detects horizontal edges in the blue channel. w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]] # Vector of biases. We don't need any bias for the grayscale # filter, but for the edge detection filter we want to add 128 # to each output so that nothing is negative. b = np.array([0, 128]) # Compute the result of convolving each input in x with each filter in w, # offsetting by b, and storing the results in out. 
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1}) def imshow_noax(img, normalize=True): Tiny helper to show images as uint8 and remove axis labels if normalize: img_max, img_min = np.max(img), np.min(img) img = 255.0 * (img - img_min) / (img_max - img_min) plt.imshow(img.astype('uint8')) plt.gca().axis('off') # Show the original images and the results of the conv operation plt.subplot(2, 3, 1) imshow_noax(puppy, normalize=False) plt.title('Original image') plt.subplot(2, 3, 2) imshow_noax(out[0, 0]) plt.title('Grayscale') plt.subplot(2, 3, 3) imshow_noax(out[0, 1]) plt.title('Edges') plt.subplot(2, 3, 4) imshow_noax(kitten_cropped, normalize=False) plt.subplot(2, 3, 5) imshow_noax(out[1, 0]) plt.subplot(2, 3, 6) imshow_noax(out[1, 1]) plt.gcf().set_size_inches(3.8,4) plt.show() #plt.savefig('convshow.png') x = np.random.randn(4, 3, 5, 5) w = np.random.randn(2, 3, 3, 3) b = np.random.randn(2,) dout = np.random.randn(4, 2, 5, 5) conv_param = {'stride': 1, 'pad': 1} dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout) out, cache = conv_forward_naive(x, w, b, conv_param) dx, dw, db = conv_backward_naive(dout, cache) # Your errors should be around 1e-9' print 'Testing conv_backward_naive function' print 'dx error: ', rel_error(dx, dx_num) print 'dw error: ', rel_error(dw, dw_num) print 'db error: ', rel_error(db, db_num) x_shape = (2, 3, 4, 4) x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape) pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2} out, _ = max_pool_forward_naive(x, pool_param) correct_out = np.array([[[[-0.26315789, -0.24842105], [-0.20421053, -0.18947368]], [[-0.14526316, -0.13052632], [-0.08631579, -0.07157895]], [[-0.02736842, -0.01263158], [ 0.03157895, 0.04631579]]], [[[ 0.09052632, 0.10526316], [ 0.14947368, 0.16421053]], [[ 0.20842105, 0.22315789], [ 0.26736842, 0.28210526]], [[ 0.32631579, 0.34105263], [ 0.38526316, 0.4 ]]]]) # Compare your output with ours. Difference should be around 1e-8. 
print 'Testing max_pool_forward_naive function:' print 'difference: ', rel_error(out, correct_out) x = np.random.randn(3, 2, 8, 8) dout = np.random.randn(3, 2, 4, 4) pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout) out, cache = max_pool_forward_naive(x, pool_param) dx = max_pool_backward_naive(dout, cache) # Your error should be around 1e-12 print 'Testing max_pool_backward_naive function:' print 'dx error: ', rel_error(dx, dx_num) from cs231n.fast_layers import conv_forward_fast, conv_backward_fast from time import time x = np.random.randn(100, 3, 31, 31) w = np.random.randn(25, 3, 3, 3) b = np.random.randn(25,) dout = np.random.randn(100, 25, 16, 16) conv_param = {'stride': 2, 'pad': 1} t0 = time() out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param) t1 = time() out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param) t2 = time() print 'Testing conv_forward_fast:' print 'Naive: %fs' % (t1 - t0) print 'Fast: %fs' % (t2 - t1) print 'Speedup: %fx' % ((t1 - t0) / (t2 - t1)) print 'Difference: ', rel_error(out_naive, out_fast) t0 = time() dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive) t1 = time() dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast) t2 = time() print '\nTesting conv_backward_fast:' print 'Naive: %fs' % (t1 - t0) print 'Fast: %fs' % (t2 - t1) print 'Speedup: %fx' % ((t1 - t0) / (t2 - t1)) print 'dx difference: ', rel_error(dx_naive, dx_fast) print 'dw difference: ', rel_error(dw_naive, dw_fast) print 'db difference: ', rel_error(db_naive, db_fast) from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast x = np.random.randn(100, 3, 32, 32) dout = np.random.randn(100, 3, 16, 16) pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} t0 = time() out_naive, cache_naive = max_pool_forward_naive(x, pool_param) t1 = time() out_fast, cache_fast = max_pool_forward_fast(x, pool_param) t2 = time() print 'Testing pool_forward_fast:' print 'Naive: %fs' % (t1 - t0) print 'fast: %fs' % (t2 - t1) print 'speedup: %fx' % ((t1 - t0) / (t2 - t1)) print 'difference: ', rel_error(out_naive, out_fast) t0 = time() dx_naive = max_pool_backward_naive(dout, cache_naive) t1 = time() dx_fast = max_pool_backward_fast(dout, cache_fast) t2 = time() print '\nTesting pool_backward_fast:' print 'Naive: %fs' % (t1 - t0) print 'fast: %fs' % (t2 - t1) print 'speedup: %fx' % ((t1 - t0) / (t2 - t1)) print 'dx difference: ', rel_error(dx_naive, dx_fast) from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward x = np.random.randn(2, 3, 16, 16) w = np.random.randn(3, 3, 3, 3) b = np.random.randn(3,) dout = np.random.randn(2, 3, 8, 8) conv_param = {'stride': 1, 'pad': 1} pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param) dx, dw, db = conv_relu_pool_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout) print 'Testing conv_relu_pool' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) from cs231n.layer_utils import conv_relu_forward, 
conv_relu_backward x = np.random.randn(2, 3, 8, 8) w = np.random.randn(3, 3, 3, 3) b = np.random.randn(3,) dout = np.random.randn(2, 3, 8, 8) conv_param = {'stride': 1, 'pad': 1} out, cache = conv_relu_forward(x, w, b, conv_param) dx, dw, db = conv_relu_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout) print 'Testing conv_relu:' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) model = ThreeLayerConvNet() N = 50 X = np.random.randn(N, 3, 32, 32) y = np.random.randint(10, size=N) loss, grads = model.loss(X, y) print 'Initial loss (no regularization): ', loss model.reg = 0.5 loss, grads = model.loss(X, y) print 'Initial loss (with regularization): ', loss num_inputs = 2 input_dim = (3, 16, 16) reg = 0.0 num_classes = 10 X = np.random.randn(num_inputs, *input_dim) y = np.random.randint(num_classes, size=num_inputs) model = ThreeLayerConvNet(num_filters=3, filter_size=3, input_dim=input_dim, hidden_dim=7, dtype=np.float64) loss, grads = model.loss(X, y) for param_name in sorted(grads): f = lambda _: model.loss(X, y)[0] param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6) e = rel_error(param_grad_num, grads[param_name]) print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])) num_train = 100 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } model = ThreeLayerConvNet(weight_scale=1e-2) solver = Solver(model, small_data, num_epochs=10, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 1e-4, }, verbose=True, print_every=1) solver.train() plt.subplot(2, 1, 1) plt.plot(solver.loss_history, 'o') plt.xlabel('iteration') plt.ylabel('loss') plt.subplot(2, 1, 2) plt.plot(solver.train_acc_history, '-o') plt.plot(solver.val_acc_history, '-o') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('accuracy') plt.show() model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001) solver = Solver(model, data, num_epochs=1, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 1e-4, }, verbose=True, print_every=20) solver.train() from cs231n.vis_utils import visualize_grid grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1)) plt.imshow(grid.astype('uint8')) plt.axis('off') plt.gcf().set_size_inches(5, 5) plt.show() # Check the training-time forward pass by checking means and variances # of features both before and after spatial batch normalization N, C, H, W = 2, 3, 4, 5 x = 4 * np.random.randn(N, C, H, W) + 10 print 'Before spatial batch normalization:' print ' Shape: ', x.shape print ' Means: ', x.mean(axis=(0, 2, 3)) print ' Stds: ', x.std(axis=(0, 2, 3)) # Means should be close to zero and stds close to one gamma, beta = np.ones(C), np.zeros(C) bn_param = {'mode': 'train'} out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param) print 'After spatial batch normalization:' print ' Shape: ', out.shape print ' Means: ', out.mean(axis=(0, 2, 3)) print ' Stds: ', out.std(axis=(0, 2, 3)) # Means should be close to beta and stds close to gamma gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8]) out, _ = 
spatial_batchnorm_forward(x, gamma, beta, bn_param) print 'After spatial batch normalization (nontrivial gamma, beta):' print ' Shape: ', out.shape print ' Means: ', out.mean(axis=(0, 2, 3)) print ' Stds: ', out.std(axis=(0, 2, 3)) # Check the test-time forward pass by running the training-time # forward pass many times to warm up the running averages, and then # checking the means and variances of activations after a test-time # forward pass. N, C, H, W = 10, 4, 11, 12 bn_param = {'mode': 'train'} gamma = np.ones(C) beta = np.zeros(C) for t in xrange(50): x = 2.3 * np.random.randn(N, C, H, W) + 13 spatial_batchnorm_forward(x, gamma, beta, bn_param) bn_param['mode'] = 'test' x = 2.3 * np.random.randn(N, C, H, W) + 13 a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param) # Means should be close to zero and stds close to one, but will be # noisier than training-time forward passes. print 'After spatial batch normalization (test-time):' print ' means: ', a_norm.mean(axis=(0, 2, 3)) print ' stds: ', a_norm.std(axis=(0, 2, 3)) N, C, H, W = 2, 3, 4, 5 x = 5 * np.random.randn(N, C, H, W) + 12 gamma = np.random.randn(C) beta = np.random.randn(C) dout = np.random.randn(N, C, H, W) bn_param = {'mode': 'train'} fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] dx_num = eval_numerical_gradient_array(fx, x, dout) da_num = eval_numerical_gradient_array(fg, gamma, dout) db_num = eval_numerical_gradient_array(fb, beta, dout) _, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param) dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache) print 'dx error: ', rel_error(dx_num, dx) print 'dgamma error: ', rel_error(da_num, dgamma) print 'dbeta error: ', rel_error(db_num, dbeta) # Train a really good model on CIFAR-10 model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001) solver = Solver(model, data, num_epochs=5, batch_size=100, update_rule='adam', optim_config={ 'learning_rate': 1e-4, }, verbose=True, print_every=50) solver.train() # show first layer filters for visualisation from cs231n.vis_utils import visualize_grid grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1)) plt.imshow(grid.astype('uint8')) plt.axis('off') plt.gcf().set_size_inches(5, 5) plt.show() # print graphs for better understanding the learning process plt.subplot(2, 1, 1) plt.plot(solver.loss_history, 'o') plt.xlabel('iteration') plt.ylabel('loss') plt.subplot(2, 1, 2) plt.plot(solver.train_acc_history, '-o') plt.plot(solver.val_acc_history, '-o') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('accuracy') plt.show() # Maybe Later :) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Convolutional Networks Step2: Convolution Step4: Aside Step5: Convolution Step6: Max pooling Step7: Max pooling Step8: Fast layers Step9: Convolutional "sandwich" layers Step10: Three-layer ConvNet Step11: Gradient check Step12: Overfit small data Step13: Plotting the loss, training accuracy, and validation accuracy should show clear overfitting Step14: As we can see above - training acuraccy become much and much better while vaildation accuracy grows slowly or not at all. If we would continue to more epochs we will see val accuracy decreases Step15: Visualize Filters Step16: Spatial Batch Normalization Step17: Spatial batch normalization Step18: Experiment! Step19: Extra Credit Description
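The notebook in the row above exercises conv_forward_naive from the assignment's layers.py without showing its body. The sketch below is a rough, self-contained illustration of the loop structure such a naive forward pass typically has; it is not the assignment's actual implementation, and the test shapes are arbitrary.

# Rough sketch of a naive convolution forward pass (NOT the assignment's layers.py code).
import numpy as np

def conv_forward_naive_sketch(x, w, b, conv_param):
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    H_out = 1 + (H + 2 * pad - HH) // stride
    W_out = 1 + (W + 2 * pad - WW) // stride
    out = np.zeros((N, F, H_out, W_out))
    for n in range(N):                      # every image
        for f in range(F):                  # every filter
            for i in range(H_out):          # every output row
                for j in range(W_out):      # every output column
                    window = x_pad[n, :, i * stride:i * stride + HH, j * stride:j * stride + WW]
                    out[n, f, i, j] = np.sum(window * w[f]) + b[f]
    return out

x = np.random.randn(2, 3, 8, 8)             # arbitrary test shapes
w = np.random.randn(4, 3, 3, 3)
b = np.random.randn(4)
print(conv_forward_naive_sketch(x, w, b, {'stride': 1, 'pad': 1}).shape)   # (2, 4, 8, 8)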
13,676
<ASSISTANT_TASK:> Python Code: from time import time start_nb = time() # Initialize logging. import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s') sentence_obama = 'Obama speaks to the media in Illinois' sentence_president = 'The president greets the press in Chicago' sentence_obama = sentence_obama.lower().split() sentence_president = sentence_president.lower().split() # Import and download stopwords from NLTK. from nltk.corpus import stopwords from nltk import download download('stopwords') # Download stopwords list. # Remove stopwords. stop_words = stopwords.words('english') sentence_obama = [w for w in sentence_obama if w not in stop_words] sentence_president = [w for w in sentence_president if w not in stop_words] start = time() import os from gensim.models import Word2Vec if not os.path.exists('/data/w2v_googlenews/GoogleNews-vectors-negative300.bin.gz'): raise ValueError("SKIP: You need to download the google news model") model = Word2Vec.load_word2vec_format('/data/w2v_googlenews/GoogleNews-vectors-negative300.bin.gz', binary=True) print('Cell took %.2f seconds to run.' % (time() - start)) distance = model.wmdistance(sentence_obama, sentence_president) print 'distance = %.4f' % distance sentence_orange = 'Oranges are my favorite fruit' sentence_orange = sentence_orange.lower().split() sentence_orange = [w for w in sentence_orange if w not in stop_words] distance = model.wmdistance(sentence_obama, sentence_orange) print 'distance = %.4f' % distance # Normalizing word2vec vectors. start = time() model.init_sims(replace=True) # Normalizes the vectors in the word2vec class. distance = model.wmdistance(sentence_obama, sentence_president) # Compute WMD as normal. print 'Cell took %.2f seconds to run.' %(time() - start) # Pre-processing a document. from nltk import word_tokenize download('punkt') # Download data for tokenizer. def preprocess(doc): doc = doc.lower() # Lower the text. doc = word_tokenize(doc) # Split into words. doc = [w for w in doc if not w in stop_words] # Remove stopwords. doc = [w for w in doc if w.isalpha()] # Remove numbers and punctuation. return doc start = time() import json # Business IDs of the restaurants. ids = ['4bEjOyTaDG24SY5TxsaUNQ', '2e2e7WgqU1BnpxmQL5jbfw', 'zt1TpTuJ6y9n551sw9TaEg', 'Xhg93cMdemu5pAMkDoEdtQ', 'sIyHTizqAiGu12XMLX3N3g', 'YNQgak-ZLtYJQxlDwN-qIg'] w2v_corpus = [] # Documents to train word2vec on (all 6 restaurants). wmd_corpus = [] # Documents to run queries against (only one restaurant). documents = [] # wmd_corpus, with no pre-processing (so we can see the original documents). with open('/data/yelp_academic_dataset_review.json') as data_file: for line in data_file: json_line = json.loads(line) if json_line['business_id'] not in ids: # Not one of the 6 restaurants. continue # Pre-process document. text = json_line['text'] # Extract text from JSON object. text = preprocess(text) # Add to corpus for training Word2Vec. w2v_corpus.append(text) if json_line['business_id'] == ids[0]: # Add to corpus for similarity queries. wmd_corpus.append(text) documents.append(json_line['text']) print 'Cell took %.2f seconds to run.' %(time() - start) from matplotlib import pyplot as plt %matplotlib inline # Document lengths. lens = [len(doc) for doc in wmd_corpus] # Plot. plt.rc('figure', figsize=(8,6)) plt.rc('font', size=14) plt.rc('lines', linewidth=2) plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a', '#984ea3','#ff7f00','#ffff33')) # Histogram. plt.hist(lens, bins=20) plt.hold(True) # Average length. 
avg_len = sum(lens) / float(len(lens)) plt.axvline(avg_len, color='#e41a1c') plt.hold(False) plt.title('Histogram of document lengths.') plt.xlabel('Length') plt.text(100, 800, 'mean = %.2f' % avg_len) plt.show() # Train Word2Vec on all the restaurants. model = Word2Vec(w2v_corpus, workers=3, size=100) # Initialize WmdSimilarity. from gensim.similarities import WmdSimilarity num_best = 10 instance = WmdSimilarity(wmd_corpus, model, num_best=10) start = time() sent = 'Very good, you should seat outdoor.' query = preprocess(sent) sims = instance[query] # A query is simply a "look-up" in the similarity class. print 'Cell took %.2f seconds to run.' %(time() - start) # Print the query and the retrieved documents, together with their similarities. print 'Query:' print sent for i in range(num_best): print print 'sim = %.4f' % sims[i][1] print documents[sims[i][0]] start = time() sent = 'I felt that the prices were extremely reasonable for the Strip' query = preprocess(sent) sims = instance[query] # A query is simply a "look-up" in the similarity class. print 'Query:' print sent for i in range(num_best): print print 'sim = %.4f' % sims[i][1] print documents[sims[i][0]] print '\nCell took %.2f seconds to run.' %(time() - start) print 'Notebook took %.2f seconds to run.' %(time() - start_nb) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: These sentences have very similar content, and as such the WMD should be low. Before we compute the WMD, we want to remove stopwords ("the", "to", etc.), as these do not contribute a lot to the information in the sentences. Step2: Now, as mentioned earlier, we will be using some downloaded pre-trained embeddings. We load these into a Gensim Word2Vec model class. Note that the embeddings we have chosen here require a lot of memory. Step3: So let's compute WMD using the wmdistance method. Step4: Let's try the same thing with two completely unrelated sentences. Notice that the distance is larger. Step5: Normalizing word2vec vectors Step6: Part 2 Step7: Below is a plot with a histogram of document lengths and includes the average document length as well. Note that these are the pre-processed documents, meaning stopwords are removed, punctuation is removed, etc. Document lengths have a high impact on the running time of WMD, so when comparing running times with this experiment, the number of documents in query corpus (about 4000) and the length of the documents (about 62 words on average) should be taken into account. Step8: Now we want to initialize the similarity class with a corpus and a word2vec model (which provides the embeddings and the wmdistance method itself). Step9: The num_best parameter decides how many results the queries return. Now let's try making a query. The output is a list of indeces and similarities of documents in the corpus, sorted by similarity. Step10: The query and the most similar documents, together with the similarities, are printed below. We see that the retrieved documents are discussing the same thing as the query, although using different words. The query talks about getting a seat "outdoor", while the results talk about sitting "outside", and one of them says the restaurant has a "nice view". Step11: Let's try a different query, also taken directly from one of the reviews in the corpus. Step12: This time around, the results are more straight forward; the retrieved documents basically contain the same words as the query.
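Since the cells in the row above depend on the multi-gigabyte GoogleNews vectors, the self-contained variant below trains a throwaway Word2Vec model on a toy corpus just to exercise the same wmdistance call. The corpus and hyperparameters are invented, the resulting distances are meaningless, and the snippet assumes the same gensim 3.x-era API as the original (size=..., model.wmdistance) plus an installed pyemd.

# Toy, self-contained stand-in for the GoogleNews-based wmdistance call above.
# Corpus and hyperparameters are made up; the distances are not meaningful.
# Assumes gensim 3.x-era API (size=, model.wmdistance) and pyemd installed.
from gensim.models import Word2Vec

toy_corpus = [
    'obama speaks to the media in illinois'.split(),
    'the president greets the press in chicago'.split(),
    'oranges are my favorite fruit'.split(),
] * 20                                       # repeat so word2vec has data to fit
toy_model = Word2Vec(toy_corpus, size=20, min_count=1, seed=1)
print(toy_model.wmdistance('obama speaks media'.split(),
                           'president greets press'.split()))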
13,677
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf import tensorflow.experimental.numpy as tnp import numpy as np import os import time path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt') # Read, then decode for py2 compat. text = open(path_to_file, 'rb').read().decode(encoding='utf-8') # length of text is the number of characters in it print ('Length of text: {} characters'.format(len(text))) # Take a look at the first 250 characters in text print(text[:250]) # The unique characters in the file vocab = sorted(set(text)) print ('{} unique characters'.format(len(vocab))) # Creating a mapping from unique characters to indices char2idx = {u:i for i, u in enumerate(vocab)} idx2char = np.array(vocab) text_as_int = np.array([char2idx[c] for c in text]) # The maximum length sentence we want for a single input in characters seq_length = 100 examples_per_epoch = len(text)//(seq_length+1) # Create training examples / targets char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int) for i in char_dataset.take(5): print(idx2char[i.numpy()]) sequences = char_dataset.batch(seq_length+1, drop_remainder=True) for item in sequences.take(5): print(repr(''.join(idx2char[item.numpy()]))) def split_input_target(chunk): input_text = chunk[:-1] target_text = chunk[1:] return input_text, target_text dataset = sequences.map(split_input_target) for input_example, target_example in dataset.take(1): print ('Input data: ', repr(''.join(idx2char[input_example.numpy()]))) print ('Target data:', repr(''.join(idx2char[target_example.numpy()]))) for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])): print("Step {:4d}".format(i)) print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx]))) print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx]))) # Batch size BATCH_SIZE = 64 # Buffer size to shuffle the dataset # (TF data is designed to work with possibly infinite sequences, # so it doesn't attempt to shuffle the entire sequence in memory. Instead, # it maintains a buffer in which it shuffles elements). BUFFER_SIZE = 10000 dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True) dataset # Length of the vocabulary in chars vocab_size = len(vocab) # The embedding dimension embedding_dim = 256 # Number of RNN units rnn_units = 1024 class Embedding: def __init__(self, vocab_size, embedding_dim): self._vocab_size = vocab_size self._embedding_dim = embedding_dim self._built = False def __call__(self, inputs): if not self._built: self.build(inputs) return tnp.take(self.weights, inputs, axis=0) def build(self, inputs): del inputs self.weights = tf.Variable(tnp.random.randn( self._vocab_size, self._embedding_dim).astype(np.float32)) self._built = True class GRUCell: Builds a traditional GRU cell with dense internal transformations. 
Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 def __init__(self, n_units, forget_bias=0.0): self._n_units = n_units self._forget_bias = forget_bias self._built = False def __call__(self, inputs): if not self._built: self.build(inputs) x, gru_state = inputs # Dense layer on the concatenation of x and h. y = tnp.dot(tnp.concatenate([x, gru_state], axis=-1), self.w1) + self.b1 # Update and reset gates. u, r = tnp.split(tf.sigmoid(y), 2, axis=-1) # Candidate. c = tnp.dot(tnp.concatenate([x, r * gru_state], axis=-1), self.w2) + self.b2 new_gru_state = u * gru_state + (1 - u) * tnp.tanh(c) return new_gru_state def build(self, inputs): # State last dimension must be n_units. assert inputs[1].shape[-1] == self._n_units # The dense layer input is the input and half of the GRU state. dense_shape = inputs[0].shape[-1] + self._n_units self.w1 = tf.Variable(tnp.random.uniform( -0.01, 0.01, (dense_shape, 2 * self._n_units)).astype(tnp.float32)) self.b1 = tf.Variable((tnp.random.randn(2 * self._n_units) * 1e-6 + self._forget_bias ).astype(tnp.float32)) self.w2 = tf.Variable(tnp.random.uniform( -0.01, 0.01, (dense_shape, self._n_units)).astype(tnp.float32)) self.b2 = tf.Variable((tnp.random.randn(self._n_units) * 1e-6).astype(tnp.float32)) self._built = True @property def weights(self): return (self.w1, self.b1, self.w2, self.b2) class GRU: def __init__(self, n_units, forget_bias=0.0, stateful=False): self._cell = GRUCell(n_units, forget_bias) self._stateful = stateful self._built = False def __call__(self, inputs): if not self._built: self.build(inputs) if self._stateful: state = self.state.read_value() else: state = self._init_state(inputs.shape[0]) inputs = tnp.transpose(inputs, (1, 0, 2)) output = tf.scan( lambda gru_state, x: self._cell((x, gru_state)), inputs, state) if self._stateful: self.state.assign(output[-1, ...]) return tnp.transpose(output, [1, 0, 2]) def _init_state(self, batch_size): return tnp.zeros([batch_size, self._cell._n_units], tnp.float32) def reset_state(self): if not self._stateful: return self.state.assign(tf.zeros_like(self.state)) def create_state(self, batch_size): self.state = tf.Variable(self._init_state(batch_size)) def build(self, inputs): s = inputs.shape[0:1] + inputs.shape[2:] shapes = (s, s[:-1] + (self._cell._n_units,)) self._cell.build([tf.TensorSpec(x, tf.float32) for x in shapes]) if self._stateful: self.create_state(inputs.shape[0]) else: self.state = () self._built = True @property def weights(self): return self._cell.weights class Dense: def __init__(self, n_units, activation=None): self._n_units = n_units self._activation = activation self._built = False def __call__(self, inputs): if not self._built: self.build(inputs) y = tnp.dot(inputs, self.w) +self.b if self._activation != None: y = self._activation(y) return y def build(self, inputs): shape_w = (inputs.shape[-1], self._n_units) lim = tnp.sqrt(6.0 / (shape_w[0] + shape_w[1])) self.w = tf.Variable(tnp.random.uniform(-lim, lim, shape_w).astype(tnp.float32)) self.b = tf.Variable((tnp.random.randn(self._n_units) * 1e-6).astype(tnp.float32)) self._built = True @property def weights(self): return (self.w, self.b) class Model: def __init__(self, vocab_size, embedding_dim, rnn_units, forget_bias=0.0, stateful=False, activation=None): self._embedding = Embedding(vocab_size, embedding_dim) self._gru = GRU(rnn_units, forget_bias=forget_bias, stateful=stateful) self._dense = Dense(vocab_size, activation=activation) self._layers = [self._embedding, self._gru, self._dense] self._built = False def 
__call__(self, inputs): if not self._built: self.build(inputs) xs = inputs for layer in self._layers: xs = layer(xs) return xs def build(self, inputs): self._embedding.build(inputs) self._gru.build(tf.TensorSpec(inputs.shape + (self._embedding._embedding_dim,), tf.float32)) self._dense.build(tf.TensorSpec(inputs.shape + (self._gru._cell._n_units,), tf.float32)) self._built = True @property def weights(self): return [layer.weights for layer in self._layers] @property def state(self): return self._gru.state def create_state(self, *args): self._gru.create_state(*args) def reset_state(self, *args): self._gru.reset_state(*args) model = Model( vocab_size = vocab_size, embedding_dim=embedding_dim, rnn_units=rnn_units, stateful=True) for input_example_batch, target_example_batch in dataset.take(1): input_example_batch = tnp.asarray(input_example_batch) example_batch_predictions = model(input_example_batch) print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)") example_batch_predictions[0] sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1) sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy() sampled_indices print("Input: \n", repr("".join(idx2char[input_example_batch[0]]))) print() print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices ]))) def one_hot(labels, n): return (labels[..., np.newaxis] == tnp.arange(n)).astype(np.float32) def loss_fn(labels, predictions): predictions = tf.nn.log_softmax(predictions) return -tnp.sum(predictions * one_hot(tnp.asarray(labels), predictions.shape[-1]), axis=-1) example_batch_loss = loss_fn(target_example_batch, example_batch_predictions) print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)") print("scalar_loss: ", tnp.mean(example_batch_loss)) class Adam: def __init__(self, learning_rate=0.001, b1=0.9, b2=0.999, eps=1e-7): self._lr = learning_rate self._b1 = b1 self._b2 = b2 self._eps = eps self._built = False def build(self, weights): self._m = tf.nest.map_structure(lambda x: tf.Variable(tnp.zeros_like(x)), weights) self._v = tf.nest.map_structure(lambda x: tf.Variable(tnp.zeros_like(x)), weights) self._step = tf.Variable(tnp.asarray(0, np.int64)) self._built = True def _update(self, weights_var, grads, m_var, v_var): b1 = self._b1 b2 = self._b2 eps = self._eps step = tnp.asarray(self._step, np.float32) lr = self._lr weights = tnp.asarray(weights_var) m = tnp.asarray(m_var) v = tnp.asarray(v_var) m = (1 - b1) * grads + b1 * m # First moment estimate. v = (1 - b2) * (grads ** 2) + b2 * v # Second moment estimate. mhat = m / (1 - b1 ** (step + 1)) # Bias correction. 
vhat = v / (1 - b2 ** (step + 1)) weights_var.assign_sub((lr * mhat / (tnp.sqrt(vhat) + eps)).astype(weights.dtype)) m_var.assign(m) v_var.assign(v) def apply_gradients(self, weights, grads): if not self._built: self.build(weights) tf.nest.map_structure(lambda *args: self._update(*args), weights, grads, self._m, self._v) self._step.assign_add(1) @property def state(self): return (self._step, self._m, self._v) optimizer = Adam() @tf.function def train_step(inp, target): with tf.GradientTape() as tape: # tape.watch(tf.nest.flatten(weights)) predictions = model(inp) loss = tnp.mean(loss_fn(target, predictions)) weights = model.weights grads = tape.gradient(loss, weights) optimizer.apply_gradients(weights, grads) return loss # Training step EPOCHS = 10 model.create_state(BATCH_SIZE) for epoch in range(EPOCHS): start = time.time() # initializing the hidden state at the start of every epoch model.reset_state() for (batch_n, (inp, target)) in enumerate(dataset): loss = train_step(inp, target) if batch_n % 100 == 0: template = 'Epoch {} Batch {} Loss {}' print(template.format(epoch+1, batch_n, loss)) print ('Epoch {} Loss {}'.format(epoch+1, loss)) print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) def generate_text(model, start_string): # Evaluation step (generating text using the learned model) # Number of characters to generate num_generate = 1000 # Converting our start string to numbers (vectorizing) input_eval = [char2idx[s] for s in start_string] input_eval = tf.expand_dims(input_eval, 0) # Empty string to store our results text_generated = [] # Low temperatures results in more predictable text. # Higher temperatures results in more surprising text. # Experiment to find the best setting. temperature = 1.0 # Here batch size == 1 model.create_state(1) for i in range(num_generate): predictions = model(input_eval) # remove the batch dimension predictions = tf.squeeze(predictions, 0) # using a categorical distribution to predict the character returned by the model predictions = predictions / temperature predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy() # We pass the predicted character as the next input to the model # along with the previous hidden state input_eval = tf.expand_dims([predicted_id], 0) text_generated.append(idx2char[predicted_id]) return (start_string + ''.join(text_generated)) print(generate_text(model, start_string=u"ROMEO: ")) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Text generation with an RNN Step2: Download the Shakespeare dataset Step3: Read the data Step4: Process the text Step5: The prediction task Step6: Each index of these vectors are processed as one time step. For the input at time step 0, the model receives the index for "F" and tries to predict the index for "i" as the next character. At the next timestep, it does the same thing but the RNN considers the previous step context in addition to the current input character. Step7: Create training batches Step9: Build The Model Step10: For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-likelihood of the next character. Step11: In the above example the sequence length of the input is 100 but the model can be run on inputs of any length Step12: This gives us, at each timestep, a prediction of the next character index Step13: Decode these to see the text predicted by this untrained model Step14: Train the model Step15: Optimizer Step16: Training loop Step17: Generate text
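The generation loop described in the final step boils down to temperature-scaled categorical sampling from the network's logits; the NumPy-only sketch below shows just that piece in isolation (the logits vector is made up purely for illustration).

import numpy as np

def sample_with_temperature(logits, temperature=1.0, rng=None):
    # Scale the logits, apply a numerically stable softmax, then draw one category.
    # Lower temperature -> more predictable picks; higher -> more surprising ones.
    if rng is None:
        rng = np.random.default_rng()
    scaled = np.asarray(logits, dtype=float) / temperature
    scaled -= scaled.max()                       # shift before exp for stability
    probs = np.exp(scaled) / np.exp(scaled).sum()
    return rng.choice(len(probs), p=probs)

toy_logits = [2.0, 1.0, 0.1, -1.0]               # illustrative values only
print([sample_with_temperature(toy_logits, t) for t in (0.5, 1.0, 2.0)])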
13,678
<ASSISTANT_TASK:> Python Code: import itertools import pprint import re from IPython.display import HTML, display import ga4gh.client import prettytable import requests print(ga4gh.__version__) gc = ga4gh.client.HttpClient("http://localhost:8000") region_constraints = dict(referenceName="1", start=0, end=int(1e10)) variant_set_id = 'YnJjYTE6T1I0Rg' variant_annotation_sets = list(gc.searchVariantAnnotationSets(variant_set_id)) variant_annotation_set = variant_annotation_sets[0] print("Using first variant annotation set (of {n} total) for variant set {vs_id}\nvas_id={vas.id}".format( n=len(variant_annotation_sets), vs_id=variant_set_id, vas=variant_annotation_set)) # poor-man's SO name-to-id map # so_name_id_map will look like this: # {u'natural_variant_site': u'SO:0001147', # u'polypeptide_zinc_ion_contact_site': u'SO:0001103', # u'methylated_adenine': u'SO:0000161', # ... id_name_re = re.compile("id: (?P<id>SO:\d+)\nname: (?P<name>\S+)") url = "https://raw.githubusercontent.com/The-Sequence-Ontology/SO-Ontologies/master/so-xp-simple.obo" so_name_id_map = { m.group('name'): m.group('id') for m in (id_name_re.search(s) for s in requests.get(url).text.split("\n\n")) if m is not None } def mk_effect_id_filter(so_ids=[]): return list of OntologyTerm effect filters for the given list of so ids >>> print(_mk_effect_id_filter("SO:123 SO:456".split())) [{'id': 'SO:123'}, {'id': 'SO:456'}] return [{"id": id} for id in so_ids] def remap_with_so_ids(so_name_map): For a map of label => [so names], return a map of label => [so ids] def _map1(n): try: return so_name_id_map[n] except KeyError: print("SO term name '{n}' is not (currently) valid".format(n=n)) return {label: filter(None, (_map1(n) for n in names)) for label, names in so_name_map.items()} snpeff_so_name_map = { "high": [ "chromosome_large_deletion", "chromosome_large_inversion", "chromosome_large_duplication", "gene_rearrangement", "gene_deleted", "gene_fusion", "gene_fusion_reverese", "transcript_deleted", "exon_deleted", "exon_deleted_partial", "exon_duplication", "exon_duplication_partial", "exon_inversion", "exon_inversion_partial", "frame_shift", "stop_gained", "stop_lost", "start_lost", "splice_site_acceptor", "splice_site_donor", "rare_amino_acid", "protein_protein_interaction_locus", "protein_structural_interaction_locus", ], "moderate": [ "non_synonymous_coding", "codon_insertion", "codon_change_plus_codon_insertion", "codon_deletion", "codon_change_plus_codon_deletion", "utr_5_deleted", "utr_3_deleted", "splice_site_branch_u12", "splice_site_region", "splice_site_branch", "non_synonymous_stop", "non_synonymous_start", "synonymous_coding", "synonymous_start", "synonymous_stop", "codon_change", ], "low": [ "gene_inversion", "gene_duplication", "transcript_duplication", "transcript_inversion", "utr_5_prime", "utr_3_prime", "start_gained", "upstream", "downstream", "motif", "motif_deleted", "regulation", "micro_rna", ], "modifiers": [ "custom", "next_prot", "intron_conserved", "intron", "intragenic", "intergenic_conserved", "intergenic", "cds", "exon", "transcript", "gene", "sequence", "chromosome_elongation", "chromosome", "genome", "none", ] } snpeff_so_id_map = remap_with_so_ids(snpeff_so_name_map) snpeff_so_id_map region_so_name_map = { "locus": [ "gene_fusion", "upstream_gene_variant", ], "cds": [ "missense_variant", "start_lost", "stop_gained", "stop_lost", "synonymous_variant", ], # note that utr, upstream, and downstream sets overlap intentionally "utr": [ "3_prime_UTR_variant", "5_prime_UTR_variant", ], "upstream": [ 
"5_prime_UTR_variant", "upstream_gene_variant", ], "downstream": [ "3_prime_UTR_variant", "downstream_gene_variant", ], } region_so_id_map = remap_with_so_ids(region_so_name_map) so_maps = {"snpeff": snpeff_so_id_map, "region": region_so_id_map} pprint.pprint(so_maps) field_names = "n_vars name:label n_so_ids so_ids".split() pt = prettytable.PrettyTable(field_names=field_names) for name, so_map in so_maps.items(): for label, so_ids in so_map.items(): vs = [] # Searching with an empty filter means no filtering # This should be changed: searching should be by inclusion, not lack of exclusion. if len(so_ids)>0: efilter = mk_effect_id_filter(so_ids) vs = list(gc.searchVariantAnnotations(variant_annotation_set.id, effects=efilter, **region_constraints)) pt.add_row([ len(vs), name + ":" + label, len(so_ids), " ".join(so_ids) ]) display(HTML(pt.get_html_string())) # invert the SO map (name: {SO: label}) def invert_so_map(so_map): for a so_map of {label: [so_id]}, return the inverse {so_id: [labels]}. so_id:label is many:many lmap = sorted((so, label) for label, so_ids in so_map.items() for so in so_ids) return {k: list(sl[1] for sl in sli) for k, sli in itertools.groupby(lmap, key=lambda e: e[0])} def unique_labels_for_so_ids(so_labels_map, so_ids): given a map of {so: [labels]} and a list of so_ids, return a list of unique labels uniq_labels = set(itertools.chain.from_iterable(so_labels_map.get(so_id, []) for so_id in so_ids)) return list(uniq_labels) def build_variant_record(v): so_ids = list(set(eff.id for te in v.transcriptEffects for eff in te.effects)) impacts = unique_labels_for_so_ids(so_labels_maps["snpeff"], so_ids) regions = unique_labels_for_so_ids(so_labels_maps["region"], so_ids) return dict( g = v.transcriptEffects[0].hgvsAnnotation.genomic, t = v.transcriptEffects[0].hgvsAnnotation.transcript, p = v.transcriptEffects[0].hgvsAnnotation.protein, so_ids = " ".join(so_ids), impacts = " ".join(impacts), regions = " ".join(regions) ) so_labels_maps = {name: invert_so_map(so_map) for name, so_map in so_maps.items()} pprint.pprint(so_labels_maps) variants = list(gc.searchVariantAnnotations( variant_annotation_set.id, effects = mk_effect_id_filter("SO:0001587 SO:0001819".split()), **region_constraints)) field_names = "g t p so_ids impacts regions".split() pt = prettytable.PrettyTable(field_names=field_names) for v in variants: vrec = build_variant_record(v) pt.add_row([vrec[k] for k in field_names]) display(HTML(pt.get_html_string())) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Search VariantAnnotations using SO term sets Step3: SnpEff map Step4: Region name map Step5: Meta maps Step6: Search for variants by each SO map Step9: Label variants with SO maps
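The label-to-SO-id maps above get inverted before variants are labelled; a standalone sketch of that inversion, with illustrative SO ids, looks like this (the relation is many-to-many, so each id keeps a list of labels).

from collections import defaultdict

def invert_so_map(so_map):
    # {label: [so_id, ...]} -> {so_id: [label, ...]}
    inverse = defaultdict(list)
    for label, so_ids in so_map.items():
        for so_id in so_ids:
            inverse[so_id].append(label)
    return dict(inverse)

toy_map = {"cds": ["SO:0001583", "SO:0001587"], "upstream": ["SO:0001631"]}
print(invert_so_map(toy_map))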
13,679
<ASSISTANT_TASK:> Python Code: #put matplotlib plots in the notebook, then import the package %matplotlib inline import kappa amber = kappa.Amber() print(kappa.lattices) cnt = kappa.build(amber, "cnt") kappa.plot.bonds(cnt) graphene = kappa.build(amber, "graphene", radius=2) kappa.plot.bonds(graphene, indices=True) # print(graphene.posList) print(graphene.bondList) print(graphene.angleList) print(graphene.atomtypes) cnt.hydrogenate() pdb_txt = kappa.md.generate.pdb(cnt) kappa.md.save_file(pdb_txt,'.','cnt.pdb') pdb_txt = kappa.md.generate.pdb(graphene) kappa.md.save_file(pdb_txt,'.','graphene.pdb') gro_txt = kappa.md.generate.gro(cnt) kappa.md.save_file(gro_txt,'.','cnt.gro') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: As it stands users must define a forcefield before building molecules. We will establish an Amber forcefield. We can turn interactions in the forcefield on and off, but we won't concern ourselves with that here. Step2: To build molecules, users call the build function. To see the kinds of molecules available for generation, print kappa.lattices. Step3: We'll build our bread and butter molecule first, an armchair carbon nanotube. Every molecule generated through kappa.build requires a forcefield argument then a lattice string found from kappa.lattices. Step4: We can plot our molecules too. This plot routine will show the bonds. Step5: Attributes of the molecules include, but are not limited to Step6: Molecule.atomtypes comes from its forcefield. Step7: pdb_txt holds a list with each element a line string for the file. Let's write this to the current directory. Step8: Let's give that graphene a shot too. Step9: We can also export .gro files, which contain topology + box size info (recommended).
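A compact way to chain the build-hydrogenate-export sequence shown above into one reusable helper; this sketch reuses only the kappa calls demonstrated in the notebook (kappa.Amber, kappa.build, hydrogenate, kappa.md.generate.pdb, kappa.md.save_file), and any call beyond those exact ones would be an assumption about the package.

import kappa

def build_and_export_cnt(directory=".", filename="cnt.pdb"):
    # Build an armchair CNT with the Amber forcefield, cap open valences with
    # hydrogens, and write a PDB file to the given directory.
    amber = kappa.Amber()
    cnt = kappa.build(amber, "cnt")
    cnt.hydrogenate()
    pdb_txt = kappa.md.generate.pdb(cnt)
    kappa.md.save_file(pdb_txt, directory, filename)
    return cnt

cnt = build_and_export_cnt()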
13,680
<ASSISTANT_TASK:> Python Code: hw_data_directory = '/Users/farr/Documents/Research/KepHackWeek/data' occur_dir = '/Users/farr/Google Drive/Kepler ExoPop Hack 2015/end2end_occ_calc' eff_dir = '/Volumes/KepHacWkWMF/Kepler_HW2015/Dp4_DetectionCountours/v0' rbins = array([1.5**(i-1) for i in range(9)]) pbins = array([10*2**i for i in range(6)]) print rbins, pbins def integrate_efficiency(ps, rs, eff, pmin, pmax, rmin, rmax): sel = (ps >= pmin) & (ps < pmax) p_integral = sint.simps(eff[sel, :], ps[sel], axis=0) p_integral = p_integral / (np.max(ps[sel])-np.min(ps[sel])) sel = (rs >= rmin) & (rs < rmax) return sint.simps(p_integral[sel], rs[sel]) / (np.max(rs[sel]) - np.min(rs[sel])) def rate_and_uncertainty(npl, nexpected): Rbest = npl / nexpected dr = sqrt(npl + 1) / nexpected Rmin = max(0.0, Rbest - 10*dr) Rmax = Rbest + 10*dr Rs = linspace(Rmin, Rmax, 10000) logpost = (npl-0.5)*log(Rs) - nexpected*Rs post = exp(logpost) post = post / trapz(post, Rs) cumpost = sint.cumtrapz(post, Rs) imin = argmin(abs(cumpost - 0.16)) imed = argmin(abs(cumpost - 0.5)) imax = argmin(abs(cumpost - 0.84)) return Rs[imed], Rs[imin], Rs[imax] planets = aa.read(op.join(hw_data_directory, 'dr24_koi.csv')) planets gdata = genfromtxt(op.join(occur_dir, 'hack_week_g_stars.csv'), delimiter=',', names=True) with gzip.open('g-integrated-eff.dat.gz.temp', 'w') as out: out.write('# kepid ') out.write(' '.join(['(%g-%g, %g-%g)'%(plow, phigh, rlow, rhigh) \ for (plow, phigh) in zip(pbins[:-1], pbins[1:]) \ for (rlow, rhigh) in zip(rbins[:-1], rbins[1:])])) out.write('\n') for kid in gdata['kepid']: try: kid = int(round(kid)) eff = af.open(op.join(eff_dir, 'contourV0%09d_tot.fits.gz'%kid)) ps = exp(linspace(log(eff[0].header['MINPER']), log(eff[0].header['MAXPER']), eff[0].header['NPER'])) rs = exp(linspace(log(eff[0].header['MINRP']), log(eff[0].header['MAXRP']), eff[0].header['NRP'])) int_effs = [] for plow, phigh in zip(pbins[:-1], pbins[1:]): for rlow, rhigh in zip(rbins[:-1], rbins[1:]): int_effs.append(integrate_efficiency(ps, rs, eff[0].data.T, plow, phigh, rlow, rhigh)) int_effs = array(int_effs) out.write('%09d '%kid) out.write(' '.join(['%g'%ie for ie in int_effs])) out.write('\n') eff.close() except: print 'Could not process KID ', kid, ' exception: ', sys.exc_info()[0] os.rename('g-integrated-eff.dat.gz.temp', 'g-integrated-eff.dat.gz') g_det_eff = loadtxt('g-integrated-eff.dat.gz') g_det_ids = g_det_eff[:,0] g_det_eff = g_det_eff[:,1:].reshape((-1, pbins.shape[0]-1, rbins.shape[0]-1)) pu.plot_histogram_posterior(g_det_eff[:,2,2]) xlabel(r'$\alpha$') ylabel(r'$p(\alpha)$') gkids_set = set(g_det_ids) g_pl_counts = zeros((pbins.shape[0]-1, rbins.shape[0]-1), dtype=np.int) noutside = 0 for pl in planets: if pl['kepid'] in gkids_set and pl['koi_pdisposition'] == 'CANDIDATE' and pl['koi_max_mult_ev'] >= 15: pind = bisect.bisect(pbins, pl['koi_period']) - 1 rind = bisect.bisect(rbins, pl['koi_prad']) - 1 if (pind < 0) or (rind < 0) or (pind >= g_pl_counts.shape[0]) or (rind >= g_pl_counts.shape[1]): noutside += 1 else: g_pl_counts[pind, rind] += 1 print noutside, ' planets landed outside any bins!' 
ggammas = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) ggammamins = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) ggammamaxs = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) for i in range(pbins.shape[0]-1): for j in range(rbins.shape[0]-1): rmed, rmin, rmax = rate_and_uncertainty(g_pl_counts[i,j], np.sum(g_det_eff[:,i,j])) ggammas[i,j] = rmed ggammamins[i,j] = rmin ggammamaxs[i,j] = rmax PS, RS = meshgrid(pbins, rbins, indexing='ij') pcolormesh(PS, RS, ggammas, norm=LogNorm()) xscale('log') yscale('log') colorbar() axis(xmin=min(pbins), xmax=max(pbins), ymin=min(rbins), ymax=max(rbins)) xlabel(r'$P$ (d)') ylabel(r'$R$ ($R_\oplus$)') savefig('eta_g.pdf') with gzip.open('pbins.dat.gz', 'w') as out: savetxt(out, pbins.reshape((1,-1))) with gzip.open('rbins.dat.gz', 'w') as out: savetxt(out, rbins.reshape((1, -1))) with gzip.open('eta_g.dat.gz', 'w') as out: savetxt(out, ggammas) with gzip.open('sigma_p_g.dat.gz', 'w') as out: savetxt(out, ggammamaxs-ggammas) with gzip.open('sigma_n_g.dat.gz', 'w') as out: savetxt(out, ggammas-ggammamins) mdata = genfromtxt(op.join(occur_dir, 'hack_week_m_dwarfs.csv'), delimiter=',', names=True) with gzip.open('m-integrated-eff.dat.gz.temp', 'w') as out: out.write('# kepid ') out.write(' '.join(['(%g-%g, %g-%g)'%(plow, phigh, rlow, rhigh) \ for (plow, phigh) in zip(pbins[:-1], pbins[1:]) \ for (rlow, rhigh) in zip(rbins[:-1], rbins[1:])])) out.write('\n') for kid in mdata['kepid']: try: kid = int(round(kid)) eff = af.open(op.join(eff_dir, 'contourV0%09d_tot.fits.gz'%kid)) ps = exp(linspace(log(eff[0].header['MINPER']), log(eff[0].header['MAXPER']), eff[0].header['NPER'])) rs = exp(linspace(log(eff[0].header['MINRP']), log(eff[0].header['MAXRP']), eff[0].header['NRP'])) int_effs = [] for plow, phigh in zip(pbins[:-1], pbins[1:]): for rlow, rhigh in zip(rbins[:-1], rbins[1:]): int_effs.append(integrate_efficiency(ps, rs, eff[0].data.T, plow, phigh, rlow, rhigh)) int_effs = array(int_effs) out.write('%09d '%kid) out.write(' '.join(['%g'%ie for ie in int_effs])) out.write('\n') eff.close() except: print 'Could not process KID ', kid, ' exception: ', sys.exc_info()[0] os.rename('m-integrated-eff.dat.gz.temp', 'm-integrated-eff.dat.gz') m_det_eff = loadtxt('m-integrated-eff.dat.gz') m_det_ids = m_det_eff[:,0] m_det_eff = m_det_eff[:,1:].reshape((-1, pbins.shape[0]-1, rbins.shape[0]-1)) pu.plot_histogram_posterior(m_det_eff[:,1,2]) mkids_set = set(m_det_ids) m_pl_counts = zeros((pbins.shape[0]-1, rbins.shape[0]-1), dtype=np.int) noutside = 0 for pl in planets: if pl['kepid'] in mkids_set and pl['koi_pdisposition'] == 'CANDIDATE' and pl['koi_max_mult_ev'] >= 15: pind = bisect.bisect(pbins, pl['koi_period']) - 1 rind = bisect.bisect(rbins, pl['koi_prad']) - 1 if (pind < 0) or (rind < 0) or (pind >= m_pl_counts.shape[0]) or (rind >= m_pl_counts.shape[1]): noutside += 1 else: m_pl_counts[pind, rind] += 1 print noutside, ' planets landed outside any bins!' 
mgammas = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) mgammamins = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) mgammamaxs = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) for i in range(pbins.shape[0]-1): for j in range(rbins.shape[0]-1): rmed, rmin, rmax = rate_and_uncertainty(m_pl_counts[i,j], np.sum(m_det_eff[:,i,j])) mgammas[i,j] = rmed mgammamins[i,j] = rmin mgammamaxs[i,j] = rmax PS, RS = meshgrid(pbins, rbins, indexing='ij') pcolormesh(PS, RS, mgammas, norm=LogNorm()) xscale('log') yscale('log') colorbar() axis(xmin=min(pbins), xmax=max(pbins), ymin=min(rbins), ymax=max(rbins)) xlabel(r'$P$ (d)') ylabel(r'$R$ ($R_\oplus$)') savefig('eta_m.pdf') print 'In the (1,2) bin R = ', mgammas[1,2], ' + ', (mgammamaxs[1,2]-mgammas[1,2]), ' - ', (mgammas[1,2] - mgammamins[1,2]) with gzip.open('pbins.dat.gz', 'w') as out: savetxt(out, pbins.reshape((1,-1))) with gzip.open('rbins.dat.gz', 'w') as out: savetxt(out, rbins.reshape((1, -1))) with gzip.open('eta_m.dat.gz', 'w') as out: savetxt(out, mgammas) with gzip.open('sigma_p_m.dat.gz', 'w') as out: savetxt(out, mgammamaxs-mgammas) with gzip.open('sigma_n_m.dat.gz', 'w') as out: savetxt(out, mgammas-mgammamins) kdata = genfromtxt(op.join(occur_dir, 'hack_week_k_dwarfs.csv'), delimiter=',', names=True) with gzip.open('k-integrated-eff.dat.gz.temp', 'w') as out: out.write('# kepid ') out.write(' '.join(['(%g-%g, %g-%g)'%(plow, phigh, rlow, rhigh) \ for (plow, phigh) in zip(pbins[:-1], pbins[1:]) \ for (rlow, rhigh) in zip(rbins[:-1], rbins[1:])])) out.write('\n') for kid in kdata['kepid']: try: kid = int(round(kid)) eff = af.open(op.join(eff_dir, 'contourV0%09d_tot.fits.gz'%kid)) ps = exp(linspace(log(eff[0].header['MINPER']), log(eff[0].header['MAXPER']), eff[0].header['NPER'])) rs = exp(linspace(log(eff[0].header['MINRP']), log(eff[0].header['MAXRP']), eff[0].header['NRP'])) int_effs = [] for plow, phigh in zip(pbins[:-1], pbins[1:]): for rlow, rhigh in zip(rbins[:-1], rbins[1:]): int_effs.append(integrate_efficiency(ps, rs, eff[0].data.T, plow, phigh, rlow, rhigh)) int_effs = array(int_effs) out.write('%09d '%kid) out.write(' '.join(['%g'%ie for ie in int_effs])) out.write('\n') eff.close() except: print 'Could not process KID ', kid, ' exception: ', sys.exc_info()[0] os.rename('k-integrated-eff.dat.gz.temp', 'k-integrated-eff.dat.gz') k_det_eff = loadtxt('k-integrated-eff.dat.gz') k_det_ids = k_det_eff[:,0] k_det_eff = k_det_eff[:,1:].reshape((-1, pbins.shape[0]-1, rbins.shape[0]-1)) kkids_set = set(k_det_ids) k_pl_counts = zeros((pbins.shape[0]-1, rbins.shape[0]-1), dtype=np.int) noutside = 0 for pl in planets: if pl['kepid'] in kkids_set and pl['koi_pdisposition'] == 'CANDIDATE' and pl['koi_max_mult_ev'] >= 15: pind = bisect.bisect(pbins, pl['koi_period']) - 1 rind = bisect.bisect(rbins, pl['koi_prad']) - 1 if (pind < 0) or (rind < 0) or (pind >= k_pl_counts.shape[0]) or (rind >= k_pl_counts.shape[1]): noutside += 1 else: k_pl_counts[pind, rind] += 1 print noutside, ' planets landed outside any bins!' 
kgammas = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) kgammamins = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) kgammamaxs = zeros((pbins.shape[0]-1, rbins.shape[0]-1)) for i in range(pbins.shape[0]-1): for j in range(rbins.shape[0]-1): rmed, rmin, rmax = rate_and_uncertainty(k_pl_counts[i,j], np.sum(k_det_eff[:,i,j])) kgammas[i,j] = rmed kgammamins[i,j] = rmin kgammamaxs[i,j] = rmax PS, RS = meshgrid(pbins, rbins, indexing='ij') pcolormesh(PS, RS, kgammas, norm=LogNorm()) xscale('log') yscale('log') colorbar() axis(xmin=min(pbins), xmax=max(pbins), ymin=min(rbins), ymax=max(rbins)) xlabel(r'$P$ (d)') ylabel(r'$R$ ($R_\oplus$)') savefig('eta_k.pdf') print 'In K-star bin (40-80, 1.5-2.25)', kgammas[2, 2], ' + ', (kgammamaxs[2,2]-kgammas[2,2]), ' - ', (kgammas[2,2]-kgammamins[2,2]) print 'In K-star bin (20-40, 1.5-2.25)', kgammas[1, 2], ' + ', (kgammamaxs[1,2]-kgammas[1,2]), ' - ', (kgammas[1,2]-kgammamins[1,2]) with gzip.open('pbins.dat.gz', 'w') as out: savetxt(out, pbins.reshape((1,-1))) with gzip.open('rbins.dat.gz', 'w') as out: savetxt(out, rbins.reshape((1, -1))) with gzip.open('eta_k.dat.gz', 'w') as out: savetxt(out, kgammas) with gzip.open('sigma_p_k.dat.gz', 'w') as out: savetxt(out, kgammamaxs-kgammas) with gzip.open('sigma_n_k.dat.gz', 'w') as out: savetxt(out, kgammas-kgammamins) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Planet Database Step2: G Dwarfs Step3: We want to know the average detection efficiency across the bin. (Actually, what we want to know is the fraction of the planets appearing in the bin that would be detected, but since we are assuming that the planet distribution function is flat, this reduces to the average.) Step4: Now load the data file we just generated Step5: The designated bin is the (2,2) bin (1.5 to 2.25 in $R$, 40-80 in $P$). Here is a histogram of the average detection efficiency in the designated bin Step6: The posterior on the rate is proportional to Step7: M-Dwarfs Step8: We are interested in the (1, 2) bin for M-dwarfs. What does the detection efficiency look like there? Step9: And Finally for K-Dwarfs
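The rate posterior used above, proportional to R^(n - 1/2) exp(-N_exp R), can be summarised by a small stand-alone helper; the ±10σ grid width follows the notebook's choice, while the example counts passed at the end are illustrative.

import numpy as np

def rate_credible_interval(n_planets, n_expected, ngrid=10000):
    # Evaluate the posterior on a grid and return the 16th/50th/84th percentiles.
    r_best = n_planets / n_expected
    dr = np.sqrt(n_planets + 1) / n_expected
    rs = np.linspace(max(r_best - 10 * dr, 1e-12), r_best + 10 * dr, ngrid)
    log_post = (n_planets - 0.5) * np.log(rs) - n_expected * rs
    post = np.exp(log_post - log_post.max())      # shift to avoid overflow
    cdf = np.cumsum(post)
    cdf /= cdf[-1]
    return tuple(np.interp(q, cdf, rs) for q in (0.16, 0.5, 0.84))

print(rate_credible_interval(n_planets=12, n_expected=50.0))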
13,681
<ASSISTANT_TASK:> Python Code: import pandas as pd import os !cat talks.tsv talks = pd.read_csv("talks.tsv", sep="\t", header=0) talks html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): if type(text) is str: return "".join(html_escape_table.get(c,c) for c in text) else: return "False" loc_dict = {} for row, item in talks.iterrows(): md_filename = str(item.date) + "-" + item.url_slug + ".md" html_filename = str(item.date) + "-" + item.url_slug year = item.date[:4] md = "---\ntitle: \"" + item.title + '"\n' md += "collection: talks" + "\n" if len(str(item.type)) > 3: md += 'type: "' + item.type + '"\n' else: md += 'type: "Talk"\n' md += "permalink: /talks/" + html_filename + "\n" if len(str(item.venue)) > 3: md += 'venue: "' + item.venue + '"\n' if len(str(item.location)) > 3: md += "date: " + str(item.date) + "\n" if len(str(item.location)) > 3: md += 'location: "' + str(item.location) + '"\n' md += "---\n" if len(str(item.talk_url)) > 3: md += "\n[More information here](" + item.talk_url + ")\n" if len(str(item.description)) > 3: md += "\n" + html_escape(item.description) + "\n" md_filename = os.path.basename(md_filename) #print(md) with open("../_talks/" + md_filename, 'w') as f: f.write(md) !ls ../_talks !cat ../_talks/2013-03-01-tutorial-1.md <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Data format Step2: Import TSV Step3: Escape special characters Step4: Creating the markdown files Step5: These files are in the talks directory, one directory below where we're working from.
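The front-matter assembly in the loop above can be sketched independently of pandas; the dictionary below stands in for one TSV row, and the field names mirror the columns used in the notebook.

def talk_front_matter(item):
    # Assemble a Jekyll-style front-matter block plus trailing newline for one talk.
    lines = [
        "---",
        'title: "{}"'.format(item["title"]),
        "collection: talks",
        'type: "{}"'.format(item.get("type", "Talk")),
        "permalink: /talks/{}-{}".format(item["date"], item["url_slug"]),
        "date: {}".format(item["date"]),
        "---",
        "",
    ]
    return "\n".join(lines)

example = {"title": "An example talk", "date": "2013-03-01", "url_slug": "tutorial-1"}
print(talk_front_matter(example))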
13,682
<ASSISTANT_TASK:> Python Code: %pylab notebook fe = 60 # [Hz] p = 2 n_nl = 3580 # [r/min] n_fl = 3440 # [r/min] n_sync = 120*fe / p print('n_sync = {:.0f} r/min'.format(n_sync)) s_nl = (n_sync - n_nl) / n_sync print(''' s_nl = {:.2f} % ============='''.format(s_nl*100)) f_rnl = s_nl * fe print(''' f_rnl = {:.2f} Hz ==============='''.format(f_rnl)) s_fl = (n_sync - n_fl) / n_sync print(''' s_fl = {:.2f} % ============='''.format(s_fl*100)) f_rfl = s_fl * fe print(''' f_rfl = {:.2f} Hz ==============='''.format(f_rfl)) SR = (n_nl - n_fl) / n_fl print(''' SR = {:.2f} % ==========='''.format(SR*100)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Description Step2: SOLUTION Step3: The slip and electrical frequency at no-load conditions is Step4: $$f_\text{r,nl} = sf_e$$ Step5: The slip and electrical frequency at full load conditions is Step6: $$f_\text{r,fl} = sf_e$$ Step7: The speed regulation is
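The same relations (synchronous speed, slip, rotor electrical frequency, speed regulation) wrapped into one reusable function; the default arguments are the numbers from the problem statement above.

def induction_motor_summary(f_e=60.0, poles=2, n_nl=3580.0, n_fl=3440.0):
    # n_sync = 120 f_e / p, s = (n_sync - n) / n_sync, f_r = s f_e,
    # SR = (n_nl - n_fl) / n_fl
    n_sync = 120.0 * f_e / poles
    s_nl = (n_sync - n_nl) / n_sync
    s_fl = (n_sync - n_fl) / n_sync
    return {
        "n_sync [r/min]": n_sync,
        "s_nl": s_nl, "f_r_nl [Hz]": s_nl * f_e,
        "s_fl": s_fl, "f_r_fl [Hz]": s_fl * f_e,
        "SR": (n_nl - n_fl) / n_fl,
    }

print(induction_motor_summary())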
13,683
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. !pip install tf-nightly except Exception: pass import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds tfds.disable_progress_bar() embedding_layer = layers.Embedding(1000, 5) result = embedding_layer(tf.constant([1,2,3])) result.numpy() result = embedding_layer(tf.constant([[0,1,2],[3,4,5]])) result.shape (train_data, test_data), info = tfds.load( 'imdb_reviews/subwords8k', split = (tfds.Split.TRAIN, tfds.Split.TEST), with_info=True, as_supervised=True) encoder = info.features['text'].encoder encoder.subwords[:20] train_batches = train_data.shuffle(1000).padded_batch(10) test_batches = test_data.shuffle(1000).padded_batch(10) train_batch, train_labels = next(iter(train_batches)) train_batch.numpy() embedding_dim=16 model = keras.Sequential([ layers.Embedding(encoder.vocab_size, embedding_dim), layers.GlobalAveragePooling1D(), layers.Dense(16, activation='relu'), layers.Dense(1, activation='sigmoid') ]) model.summary() model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) history = model.fit( train_batches, epochs=10, validation_data=test_batches, validation_steps=20) import matplotlib.pyplot as plt history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(12,9)) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.figure(figsize=(12,9)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.ylim((0.5,1)) plt.show() e = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # formato: (vocab_size, embedding_dim) import io encoder = info.features['text'].encoder out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for num, word in enumerate(encoder.subwords): vec = weights[num+1] # pule o 0, está preenchido. out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in vec]) + "\n") out_v.close() out_m.close() try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Word Embeddings Step2: Using the Embedding layer Step3: When you create an embedding layer, the weights for the embedding are initialized randomly (like any other layer). During training, they are gradually adjusted via backpropagation. Once trained, the learned word embeddings roughly encode similarities between words (as learned for the specific problem your model is trained on). Step4: For text or sequence problems, the Embedding layer takes a 2D tensor of integers, of shape (samples, sequence_length), where each entry is a sequence of integers. It can embed sequences of variable lengths. You can feed the embedding layer above batches with shapes (32, 10) (a batch of 32 sequences of length 10) or (64, 15) (a batch of 64 sequences of length 15). Step5: When given a batch of sequences as input, an embedding layer returns a 3D floating-point tensor, of shape (samples, sequence_length, embedding_dimension). To convert from this variable-length sequence to a fixed representation, there is a variety of standard approaches. You can use an RNN, Attention, or pooling layer before passing it to a Dense layer. This tutorial uses pooling because it is the simplest. The [Text classification with an RNN] (text_classification_rnn.ipynb) tutorial is a good next step. Step6: Get the encoder (tfds.features.text.SubwordTextEncoder) and take a quick look at the vocabulary. Step7: Movie reviews can have different lengths. We will use the padded_batch method to standardize the lengths of the reviews. Step8: As imported, the review text is integer-encoded (each integer represents a specific word or word part in the vocabulary). Step9: Create a simple model Step10: Compile and train the model Step11: With this approach our model reaches a validation accuracy of about 88% (note that the model is overfitting; training accuracy is significantly higher). Step12: Retrieve the learned embeddings Step13: We will now write the weights to disk. To use the [Embedding Projector] (http Step14: If you are running this tutorial in [Colaboratory] (https
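Pooling over the sequence axis, as mentioned in Step5, is just an average over axis 1 of the embedded batch; a quick check (assuming TensorFlow 2 with eager execution, and a made-up batch) makes that explicit.

import numpy as np
import tensorflow as tf

# A made-up "embedded" batch: 4 sequences of length 10 with embedding_dim 16.
x = np.random.rand(4, 10, 16).astype("float32")

pooled = tf.keras.layers.GlobalAveragePooling1D()(x)
manual = x.mean(axis=1)                        # average over the sequence axis
print(np.allclose(pooled.numpy(), manual))     # expected: True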
13,684
<ASSISTANT_TASK:> Python Code: import tweepy consumer_key = '' consumer_secret = '' access_token = '' access_token_secret = '' autorizar = tweepy.OAuthHandler(consumer_key, consumer_secret) autorizar.set_access_token(access_token, access_token_secret) api = tweepy.API(autorizar) print(api) tweets = api.search(q='Python', lang='pt') # Teste o parâmetro count=150 for i, tweet in enumerate(tweets, start=1): print("%d ---- %s" % (i, tweet.text)) tweets_salvos = [] ultimo_id = -1 qtde_tweets = 1000 while len(tweets_salvos) < qtde_tweets: contador = qtde_tweets - len(tweets_salvos) try: novos_tweets = api.search(q='Python', count=contador, max_id=str(ultimo_id - 1)) # Teste com o parâmetro lang='pt' if not novos_tweets: print("Nenhum tweet para recuperar") break tweets_salvos.extend(novos_tweets) ultimo_id = novos_tweets[-1].id except tweepy.TweepError as e: print("Erro:", (e)) break for i, tweet in enumerate(tweets_salvos, start=1): print("{} ---- {}".format(i, tweet.text)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Searching Step2: Retrieve 1000 tweets
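tweepy's Cursor helper is a common alternative to the manual max_id loop above for paging through results; this is a sketch only, real API credentials are required, and the search method name differs between releases (api.search in older tweepy, api.search_tweets in tweepy 4+).

import tweepy

consumer_key = consumer_secret = access_token = access_token_secret = ""  # fill in real credentials

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

saved = []
# Cursor handles max_id bookkeeping internally and stops after 1000 items.
for tweet in tweepy.Cursor(api.search, q="Python", lang="pt").items(1000):
    saved.append(tweet)
print(len(saved), "tweets retrieved")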
13,685
<ASSISTANT_TASK:> Python Code: import statsmodels statsmodels.__version__ import pandas as pd import numpy as np import statsmodels.api as sm import statsmodels.formula.api as smf from statsmodels.sandbox.regression.predstd import wls_prediction_std import matplotlib.pyplot as plt df = pd.read_csv('data/SDSS_QSO.dat', delim_whitespace=True) df.head() df.describe() # Keep only rows with positive magnitudes, and 18 < i < 22 df = df[(df.u_mag > 10) & (df.i_mag > 18) & (df.i_mag < 22) ] # Set a limit on u magnitude errors df.loc[df.sig_u_mag<0.02,'sig_u_mag'] = 0.02 df.sig_u_mag.min() mod_ols = smf.ols('u_mag ~ i_mag', data=df) res_ols = mod_ols.fit() print(res_ols.summary()) print('Parameters: ', res_ols.params) print('R2: ', res_ols.rsquared) print('AIC', res_ols.aic) mod_wls = smf.wls('u_mag ~ i_mag', data=df, weights=df.sig_u_mag**-2) res_wls = mod_wls.fit() print(res_wls.summary()) i_range = np.arange(18,22,0.2) p_ols = res_ols.predict({"i_mag": i_range}) p_ols %matplotlib inline prstd, iv_l, iv_u = wls_prediction_std(res_wls,sm.add_constant(i_range), weights=1/(0.02*0.02)) fig, ax = plt.subplots(figsize=(8,6)) ax.scatter(df.i_mag, df.u_mag, marker='.', c='k', s=0.5, label="data", alpha=0.5) ax.plot(i_range, res_wls.predict({"i_mag":i_range}), 'b-', label="Weighted LS") ax.plot(i_range, res_ols.predict({"i_mag":i_range}), 'g--.', label="Ordinary LS") ax.plot(i_range, iv_u, 'r--', label='95% confidence') ax.plot(i_range, iv_l, 'r--') ax.legend(loc='best') plt.xlabel('SDSS i (mag)') plt.ylabel('SDSS u (mag)') plt.xlim(17.9, 21.6) from astroML.cosmology import Cosmology from astroML.datasets import generate_mu_z import numpy as np #------------------------------------------------------------ # Generate data z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0) cosmo = Cosmology() z = np.linspace(0.01, 2, 1000) mu_true = np.asarray(map(cosmo.mu, z)) sndata = pd.DataFrame({"z": z_sample, "mu": mu_sample, "mu_err": dmu}) fig = plt.figure(figsize=(5, 5)) plt.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1) plt.xlabel(r'$z$') plt.ylabel(r'$\mu$') snmod_wls = smf.wls("mu ~ z", data=sndata, weights=dmu**-2) snres_wls = snmod_wls.fit() print(snres_wls.summary()) z_range = np.arange(0,1.8,0.1) fig = plt.figure(figsize=(5, 5)) plt.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1) plt.plot(z_sample, snres_wls.fittedvalues, 'b-', label="Weighted LS") plt.xlabel(r'$z$') plt.ylabel(r'$\mu$') snmod_wls_2nd = smf.wls("mu ~ z + np.power(z,2)", data=sndata, weights=dmu**-2) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The file SDSS_QSO.dat is from Feigelson's Astrostatistics school, September 2014 at Caltech Step2: Weighted least-squares Step3: Exercise Step4: Put the data into a Pandas dataframe Step5: Fit a straight line Step6: Overplot the straight line fit Step7: In the cells below, fit 2nd and 4th degree polynomials and overplot on the data
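One possible shape for the 4th-degree fit requested in the last step, written with toy stand-in data so the snippet runs on its own; the notebook itself would reuse its sndata frame and dmu errors instead of these made-up values.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Toy stand-in data, not the astroML supernova sample.
rng = np.random.default_rng(0)
z = np.sort(rng.uniform(0.05, 1.8, 100))
dmu = np.full_like(z, 0.3)
mu = 43.0 + 5.0 * np.log10(z) + rng.normal(0.0, 0.3, z.size)
sndata = pd.DataFrame({"z": z, "mu": mu, "mu_err": dmu})

# Weighted least squares with polynomial terms expressed in the formula, as above.
formula_4th = "mu ~ z + np.power(z, 2) + np.power(z, 3) + np.power(z, 4)"
snres_4th = smf.wls(formula_4th, data=sndata, weights=dmu**-2).fit()

z_range = np.arange(0.05, 1.8, 0.05)
mu_pred = snres_4th.predict({"z": z_range})
print(snres_4th.params)
print(mu_pred[:3])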
13,686
<ASSISTANT_TASK:> Python Code: # Load data X = np.concatenate((np.ones((pima.shape[0],1)),pima[:,0:8]), axis=1) Y = pima[:,8] Xs = (X - np.mean(X, axis=0))/np.concatenate((np.ones(1),np.std(X[:,1:], axis=0))) n, p = X.shape M = np.identity(p) ### HMC version def logistic(x): return 1/(1+np.exp(-x)) def U(theta, Y, X): return - (Y.T @ X @ theta - np.sum(np.log(1+np.exp(X @ theta))) - 0.5 * phi * np.sum(theta**2)) def gradU(theta, Y, X, nbatch): '''A function that returns the stochastic gradient. Adapted from Eq. 5. Inputs are: theta, the parameters Y, the response X, the covariates nbatch, the number of samples to take from the full data ''' n = X.shape[0] Y_pred = logistic(X @ theta) epsilon = (Y[:,np.newaxis] - Y_pred[:,np.newaxis]) grad = X.T @ epsilon - phi * theta[:, np.newaxis] return -grad/n def stogradU(theta, Y, X, nbatch): '''A function that returns the stochastic gradient. Adapted from Eq. 5. Inputs are: theta, the parameters Y, the response X, the covariates nbatch, the number of samples to take from the full data ''' n, p = X.shape # Sample minibatch batch_id = np.random.choice(np.arange(n),nbatch,replace=False) Y_pred = logistic(X[batch_id,:] @ theta[:,np.newaxis]) epsilon = (Y[batch_id,np.newaxis] - Y_pred) grad = n/nbatch * X[batch_id,:].T @ epsilon - phi * theta[:, np.newaxis] #return -grad/n return -grad def sghmc(Y, X, gradU, M, eps, m, theta, C, V): n, p = X.shape # Precompute Minv = np.linalg.inv(M) B = 0.5 * V * eps D = 2*(C-B)*eps # Randomly sample momentum r = np.random.multivariate_normal(np.zeros(p),M)[:,np.newaxis] # Hamiltonian dynamics #r = r - (eps/2)*gradU(theta, Y, X, nbatch) for i in range(m): theta = theta + (eps*Minv@r).ravel() r = r - eps*stogradU(theta, Y, X, nbatch) - eps*C @ Minv @ r \ + np.random.multivariate_normal(np.zeros(p),D)[:,np.newaxis] #theta = theta + (eps*Minv@r).ravel() #r = r - (eps/2)*gradU(theta, Y, X, nbatch) return theta def my_gd(Y, X, gradU, M, eps, m, theta, C, V): # gradient descent n = X.shape[0] p = X.shape[1] for i in range(m): theta = theta - eps*gradU(theta, Y, X, nbatch).ravel() return theta from sklearn.linear_model import LogisticRegression # Unscaled mod_logis = LogisticRegression(fit_intercept=False, C=1e50) mod_logis.fit(X,Y) beta_true_unscale = mod_logis.coef_.ravel() beta_true_unscale # Scaled mod_logis = LogisticRegression(fit_intercept=False, C=1e50) mod_logis.fit(Xs,Y) beta_true_scale = mod_logis.coef_.ravel() beta_true_scale X.shape, Y.shape U(np.ones(p)*.1,Y,X) gradU(np.ones(p)*.1, Y, X, 1)*n stogradU(np.ones(p)*.1, Y, X, 768)*n # HMC - Unscaled nsample = 100 m = 20 eps = .0001 #theta = np.zeros(p) theta = beta_true_unscale.copy() phi = 0.01 nbatch = 500 C = 0 * np.identity(p) V = 0 * np.identity(p) np.random.seed(2) samples = np.zeros((nsample, p)) u = np.zeros(nsample) for i in range(nsample): theta = sghmc(Y, X, stogradU, M, eps, m, theta, C, V) samples[i] = theta u[i] = U(theta, Y, X) np.mean(samples, axis=0) - beta_true_unscale plt.plot((samples - beta_true_unscale)[:,3]) plt.show() plt.plot(u) plt.show() # SGHMC - Scaled nsample = 10000 m = 20 eps = .001 theta = np.zeros(p) #theta = beta_true_scale.copy() phi = 0.1 nbatch = 768 C = 0 * np.identity(p) V = 0 * np.identity(p) np.random.seed(2) samples = np.zeros((nsample, p)) u = np.zeros(nsample) for i in range(nsample): theta = sghmc(Y, Xs, stogradU, M, eps, m, theta, C, V) samples[i] = theta u[i] = U(theta, Y, Xs) np.mean(samples, axis=0) - beta_true_scale plt.plot((samples - beta_true_scale)[:,1]) plt.show() plt.plot(u) plt.show() # HMC - Scaled (no 
intercept) nsample = 1000 m = 20 eps = .002 theta = np.zeros(p-1) #theta = beta_true_scale.copy()[1:] phi = 5 nbatch = 500 C = 1 * np.identity(p-1) V = 0 * np.identity(p-1) np.random.seed(2) samples = np.zeros((nsample, p-1)) u = np.zeros(nsample) for i in range(nsample): theta = sghmc(Y, Xs[:,1:], stogradU, np.identity(p-1), eps, m, theta, C, V) samples[i] = theta u[i] = U(theta, Y, Xs[:,1:]) np.mean(samples, axis=0) - beta_true_scale[1:] plt.plot((samples - beta_true_scale[1:])[:,0]) plt.show() plt.plot(u) plt.show() # Gradient descent - Unscaled np.random.seed(2) #res = my_gd(Y, X, gradU, M, .0001, 10000, np.zeros(p), C, V) # Starting at zero #res = my_gd(Y, X, gradU, M, .0001, 10000, beta_true_unscale.copy(), C, V) # Starting at true values res = my_gd(Y, X, gradU, M, .0001, 10000, beta_true_unscale.copy(), C, V) # Starting at true values res - beta_true_unscale # Gradient descent - Scaled np.random.seed(2) res = my_gd(Y, Xs, gradU, M, .1, 20000, np.zeros(p), C, V) res - beta_true_scale # Cliburn's gradient descent code def gd(X, y, beta, alpha, niter): Gradient descent algorihtm. n, p = X.shape Xt = X.T for i in range(niter): y_pred = logistic(X @ beta) epsilon = y - y_pred grad = Xt @ epsilon / n beta += alpha * grad return beta # Unscaled #res = gd(X, Y.ravel(), np.zeros(p), alpha=.1, niter=2) # Starting at zero res = gd(X, Y.ravel(), beta_true_unscale.copy(), alpha=.0001, niter=10000) # Starting at true coefficients res - beta_true_unscale # Scaled res = gd(Xs, Y.ravel(), np.zeros(p), alpha=.1, niter=20000) res - beta_true_scale <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Correct coefficients Step2: Our code - SGHMC Step3: Our code - Gradient descent Step5: Cliburn's code
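Before trusting either the SGHMC sampler or the gradient-descent runs, it is worth checking that the analytic gradient of U matches finite differences; a self-contained check on synthetic logistic-regression data might look like this (phi, the data sizes, and the true coefficients are arbitrary illustrative choices).

import numpy as np

def U(theta, Y, X, phi=0.1):
    # Negative log-posterior for logistic regression with a Gaussian prior,
    # matching the form used in the notebook above.
    return -(Y @ X @ theta - np.sum(np.log1p(np.exp(X @ theta))) - 0.5 * phi * theta @ theta)

def gradU(theta, Y, X, phi=0.1):
    p = 1.0 / (1.0 + np.exp(-(X @ theta)))
    return -(X.T @ (Y - p) - phi * theta)

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
theta_true = np.array([1.0, -2.0, 0.5])
Y = (rng.uniform(size=50) < 1.0 / (1.0 + np.exp(-(X @ theta_true)))).astype(float)

theta = rng.normal(size=3)
eps = 1e-6
fd = np.array([(U(theta + eps * np.eye(3)[i], Y, X) - U(theta - eps * np.eye(3)[i], Y, X)) / (2 * eps)
               for i in range(3)])
print(np.allclose(fd, gradU(theta, Y, X), atol=1e-4))   # expected: True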
13,687
<ASSISTANT_TASK:> Python Code: %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix tf.__version__ from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets("data/MNIST/", one_hot=True) print("Size of:") print("- Training-set:\t\t{}".format(len(data.train.labels))) print("- Test-set:\t\t{}".format(len(data.test.labels))) print("- Validation-set:\t{}".format(len(data.validation.labels))) data.train.images.shape data.train.images[0].shape img1=data.train.images[0].reshape(28,28) img1.shape plt.imshow(img1, interpolation='nearest', cmap=plt.cm.Blues) #img1 data.train.images[0,1] data.train.images[0,:].argmax() # get array index with max value data.test.labels[0:5, :] data.test.cls = np.array([label.argmax() for label in data.test.labels]) data.test.cls[0:5] # We know that MNIST images are 28 pixels in each dimension. img_size = 28 # Images are stored in one-dimensional arrays of this length. img_size_flat = img_size * img_size # Tuple with height and width of images used to reshape arrays. img_shape = (img_size, img_size) # Number of classes, one class for each of 10 digits. num_classes = 10 learning_rate = 0.5 # step size of Gradient Descent Optimizer def plot_images(images, cls_true, cls_pred=None): assert len(images) == len(cls_true) == 9 # Create figure with 3x3 sub-plots. fig, axes = plt.subplots(3, 3) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(img_shape), cmap='binary') # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true[i]) else: xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Get the first images from the test-set. images = data.test.images[0:9] # Get the true classes for those images. cls_true = data.test.cls[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true) x = tf.placeholder(tf.float32, [None, img_size_flat]) y_true = tf.placeholder(tf.float32, [None, num_classes]) y_true_cls = tf.placeholder(tf.int64, [None]) weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) biases = tf.Variable(tf.zeros([num_classes])) logits = tf.matmul(x, weights) + biases y_pred = tf.nn.softmax(logits) y_pred_cls = tf.argmax(y_pred, dimension=1) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_true) cost = tf.reduce_mean(cross_entropy) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(y_pred_cls, y_true_cls) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) session = tf.Session() session.run(tf.global_variables_initializer()) batch_size = 100 def optimize(num_iterations): for i in range(num_iterations): # Get a batch of training examples. # x_batch now holds a batch of images and # y_true_batch are the true labels for those images. x_batch, y_true_batch = data.train.next_batch(batch_size) # Put the batch into a dict with the proper names # for placeholder variables in the TensorFlow graph. # Note that the placeholder for y_true_cls is not set # because it is not used during training. feed_dict_train = {x: x_batch, y_true: y_true_batch} # Run the optimizer using this batch of training data. 
# TensorFlow assigns the variables in feed_dict_train # to the placeholder variables and then runs the optimizer. session.run(optimizer, feed_dict=feed_dict_train) feed_dict_test = {x: data.test.images, y_true: data.test.labels, y_true_cls: data.test.cls} def print_accuracy(): # Use TensorFlow to compute the accuracy. acc = session.run(accuracy, feed_dict=feed_dict_test) # Print the accuracy. print("Accuracy on test-set: {0:.1%}".format(acc)) def print_confusion_matrix(): # Get the true classifications for the test-set. cls_true = data.test.cls # Get the predicted classifications for the test-set. cls_pred = session.run(y_pred_cls, feed_dict=feed_dict_test) # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred) # Print the confusion matrix as text. print(cm) # Plot the 2D confusion matrix as an image. plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) # Make various adjustments to the plot. plt.tight_layout() plt.colorbar() tick_marks = np.arange(num_classes) plt.xticks(tick_marks, range(num_classes)) plt.yticks(tick_marks, range(num_classes)) plt.xlabel('Predicted') plt.ylabel('True') def plot_example_errors(): # Use TensorFlow to get a list of boolean values # whether each test-image has been correctly classified, # and a list for the predicted class of each image. correct, cls_pred = session.run([correct_prediction, y_pred_cls], feed_dict=feed_dict_test) # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. images = data.test.images[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = data.test.cls[incorrect] # Plot the first 9 images. plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9]) def plot_example_correct9(): # Use TensorFlow to get a list of boolean values # whether each test-image has been correctly classified, # and a list for the predicted class of each image. prediction, cls_pred = session.run([correct_prediction, y_pred_cls], feed_dict=feed_dict_test) # get the boolean array. correct = (prediction == True) # Get the images from the test-set that have been # correctly classified. images = data.test.images[correct] num_imgs = images.shape[0] i_start = np.random.choice(num_imgs-10, 1)[0] # Get the predicted classes for those images. cls_pred = cls_pred[correct] # Get the true classes for those images. cls_true = data.test.cls[correct] # Plot the first 9 images. plot_images(images=images[i_start:i_start+9], cls_true=cls_true[i_start:i_start+9], cls_pred=cls_pred[i_start:i_start+9]) def plot_sample9(): # Use TensorFlow to get a list of boolean values # whether each test-image has been correctly classified, # and a list for the predicted class of each image. prediction, cls_pred = session.run([correct_prediction, y_pred_cls], feed_dict=feed_dict_test) num_imgs = data.test.images.shape[0] i_start = np.random.choice(num_imgs-10, 1)[0] # Plot the first 9 images. plot_images(images=data.test.images[i_start:i_start+9], cls_true=data.test.cls[i_start:i_start+9], cls_pred=cls_pred[i_start:i_start+9]) def plot_weights(): # Get the values for the weights from the TensorFlow variable. w = session.run(weights) # Get the lowest and highest values for the weights. # This is used to correct the colour intensity across # the images so they can be compared with each other. 
w_min = np.min(w) w_max = np.max(w) # Create figure with 3x4 sub-plots, # where the last 2 sub-plots are unused. fig, axes = plt.subplots(3, 4) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Only use the weights for the first 10 sub-plots. if i<10: # Get the weights for the i'th digit and reshape it. # Note that w.shape == (img_size_flat, 10) image = w[:, i].reshape(img_shape) # Set the label for the sub-plot. ax.set_xlabel("Weights: {0}".format(i)) # Plot the image. ax.imshow(image, vmin=w_min, vmax=w_max, cmap='seismic') # Remove ticks from each sub-plot. ax.set_xticks([]) ax.set_yticks([]) print_accuracy() plot_example_errors() optimize(num_iterations=1) print_accuracy() plot_sample9() plot_example_correct9() # np.random.choice(100-9, 1)[0] plot_example_errors() plot_weights() # We have already performed 1 iteration. optimize(num_iterations=9) print_accuracy() plot_sample9() plot_example_errors() plot_weights() # We have already performed 10 iterations. optimize(num_iterations=990) print_accuracy() plot_sample9() plot_example_errors() plot_weights() print_confusion_matrix() # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. session.close() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: This was developed using Python 3.5.2 (Anaconda) and TensorFlow version Step2: 2017-06-04 Step3: The MNIST data-set has now been loaded and consists of 70.000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial. Step4: One-Hot Encoding Step5: We also need the classes as single numbers for various comparisons and performance measures, so we convert the One-Hot encoded vectors to a single number by taking the index of the highest element. Note that the word 'class' is a keyword used in Python so we need to use the name 'cls' instead. Step6: We can now see the class for the first five images in the test-set. Compare these to the One-Hot encoded vectors above. For example, the class for the first image is 7, which corresponds to a One-Hot encoded vector where all elements are zero except for the element with index 7. Step7: Data dimensions Step8: Hyperparameters Step9: Helper-function for plotting images Step10: Plot a few images to see if data is correct Step11: TensorFlow Graph Step12: Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable x. The shape of this placeholder variable is [None, num_classes] which means it may hold an arbitrary number of labels and each label is a vector of length num_classes which is 10 in this case. Step13: Finally we have the placeholder variable for the true class of each image in the placeholder variable x. These are integers and the dimensionality of this placeholder variable is set to [None] which means the placeholder variable is a one-dimensional vector of arbitrary length. Step14: Variables to be optimized Step15: The second variable that must be optimized is called biases and is defined as a 1-dimensional tensor (or vector) of length num_classes. Step16: Model Step17: Now logits is a matrix with num_images rows and num_classes columns, where the element of the $i$'th row and $j$'th column is an estimate of how likely the $i$'th input image is to be of the $j$'th class. Step18: The predicted class can be calculated from the y_pred matrix by taking the index of the largest element in each row. Step19: Cost-function to be optimized Step20: We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications. Step21: Optimization method Step22: Performance measures Step23: This calculates the classification accuracy by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then calculating the average of these numbers. Step24: TensorFlow Run Step25: Initialize variables Step26: Helper-function to perform optimization iterations Step27: Function for performing a number of optimization iterations so as to gradually improve the weights and biases of the model. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. 
Step28: Helper-functions to show performance Step29: Function for printing the classification accuracy on the test-set. Step30: Function for printing and plotting the confusion matrix using scikit-learn. Step31: Function for plotting examples of images from the test-set that have been mis-classified. Step32: Helper-function to plot the model weights Step33: Performance before any optimization Step34: Performance after 1 optimization iteration Step35: The weights can also be plotted as shown below. Positive weights are red and negative weights are blue. These weights can be intuitively understood as image-filters. Step36: Performance after 10 optimization iterations Step37: Performance after 1000 optimization iterations Step38: The model has now been trained for 1000 optimization iterations, with each iteration using 100 images from the training-set. Because of the great variety of the images, the weights have now become difficult to interpret and we may doubt whether the model truly understands how digits are composed from lines, or whether the model has just memorized many different variations of pixels. Step39: We can also print and plot the so-called confusion matrix which lets us see more details about the mis-classifications. For example, it shows that images actually depicting a 5 have sometimes been mis-classified as all other possible digits, but mostly either 3, 6 or 8. Step40: We are now done using TensorFlow, so we close the session to release its resources.
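As a quick illustration of the classification accuracy described above (type-casting the boolean correctness vector to floats and averaging), here is a minimal NumPy sketch; the class labels are made up and only the casting-then-averaging idea carries over:

import numpy as np

# Hypothetical predicted and true classes for five test images (values invented).
y_pred_cls = np.array([7, 2, 1, 0, 4])
y_true_cls = np.array([7, 2, 1, 0, 9])

# Boolean vector of correct predictions, cast to floats and averaged.
correct_prediction = (y_pred_cls == y_true_cls)
accuracy = np.mean(correct_prediction.astype(float))
print(accuracy)  # 0.8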
13,688
<ASSISTANT_TASK:> Python Code: import os import numpy as np import matplotlib.pyplot as plt import mne sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_raw.fif') raw = mne.io.read_raw_fif(sample_data_raw_file) print(raw) raw.crop(tmax=60) n_time_samps = raw.n_times time_secs = raw.times ch_names = raw.ch_names n_chan = len(ch_names) # note: there is no raw.n_channels attribute print('the (cropped) sample data object has {} time samples and {} channels.' ''.format(n_time_samps, n_chan)) print('The last time sample is at {} seconds.'.format(time_secs[-1])) print('The first few channel names are {}.'.format(', '.join(ch_names[:3]))) print() # insert a blank line in the output # some examples of raw.info: print('bad channels:', raw.info['bads']) # chs marked "bad" during acquisition print(raw.info['sfreq'], 'Hz') # sampling frequency print(raw.info['description'], '\n') # miscellaneous acquisition info print(raw.info) print(raw.time_as_index(20)) print(raw.time_as_index([20, 30, 40]), '\n') print(np.diff(raw.time_as_index([1, 2, 3]))) eeg_and_eog = raw.copy().pick_types(meg=False, eeg=True, eog=True) print(len(raw.ch_names), '→', len(eeg_and_eog.ch_names)) raw_temp = raw.copy() print('Number of channels in raw_temp:') print(len(raw_temp.ch_names), end=' → drop two → ') raw_temp.drop_channels(['EEG 037', 'EEG 059']) print(len(raw_temp.ch_names), end=' → pick three → ') raw_temp.pick_channels(['MEG 1811', 'EEG 017', 'EOG 061']) print(len(raw_temp.ch_names)) channel_names = ['EOG 061', 'EEG 003', 'EEG 002', 'EEG 001'] eog_and_frontal_eeg = raw.copy().reorder_channels(channel_names) print(eog_and_frontal_eeg.ch_names) raw.rename_channels({'EOG 061': 'blink detector'}) print(raw.ch_names[-3:]) channel_renaming_dict = {name: name.replace(' ', '_') for name in raw.ch_names} raw.rename_channels(channel_renaming_dict) print(raw.ch_names[-3:]) raw.set_channel_types({'EEG_001': 'eog'}) print(raw.copy().pick_types(meg=False, eog=True).ch_names) raw_selection = raw.copy().crop(tmin=10, tmax=12.5) print(raw_selection) print(raw_selection.times.min(), raw_selection.times.max()) raw_selection.crop(tmin=1) print(raw_selection.times.min(), raw_selection.times.max()) raw_selection1 = raw.copy().crop(tmin=30, tmax=30.1) # 0.1 seconds raw_selection2 = raw.copy().crop(tmin=40, tmax=41.1) # 1.1 seconds raw_selection3 = raw.copy().crop(tmin=50, tmax=51.3) # 1.3 seconds raw_selection1.append([raw_selection2, raw_selection3]) # 2.5 seconds total print(raw_selection1.times.min(), raw_selection1.times.max()) sampling_freq = raw.info['sfreq'] start_stop_seconds = np.array([11, 13]) start_sample, stop_sample = (start_stop_seconds * sampling_freq).astype(int) channel_index = 0 raw_selection = raw[channel_index, start_sample:stop_sample] print(raw_selection) x = raw_selection[1] y = raw_selection[0].T plt.plot(x, y) channel_names = ['MEG_0712', 'MEG_1022'] two_meg_chans = raw[channel_names, start_sample:stop_sample] y_offset = np.array([5e-11, 0]) # just enough to separate the channel traces x = two_meg_chans[1] y = two_meg_chans[0].T + y_offset lines = plt.plot(x, y) plt.legend(lines, channel_names) eeg_channel_indices = mne.pick_types(raw.info, meg=False, eeg=True) eeg_data, times = raw[eeg_channel_indices] print(eeg_data.shape) data = raw.get_data() print(data.shape) data, times = raw.get_data(return_times=True) print(data.shape) print(times.shape) first_channel_data = raw.get_data(picks=0) eeg_and_eog_data = 
raw.get_data(picks=['eeg', 'eog']) two_meg_chans_data = raw.get_data(picks=['MEG_0712', 'MEG_1022'], start=1000, stop=2000) print(first_channel_data.shape) print(eeg_and_eog_data.shape) print(two_meg_chans_data.shape) data = raw.get_data() np.save(file='my_data.npy', arr=data) sampling_freq = raw.info['sfreq'] start_end_secs = np.array([10, 13]) start_sample, stop_sample = (start_end_secs * sampling_freq).astype(int) df = raw.to_data_frame(picks=['eeg'], start=start_sample, stop=stop_sample) # then save using df.to_csv(...), df.to_hdf(...), etc print(df.head()) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Loading continuous data Step2: As you can see above, Step3: By default, the Step4: Querying the Raw object Step5: Note: Most of the fields of ``raw.info`` reflect metadata recorded at Step6: Modifying Raw objects Step7: Similar to the Step8: If you want the channels in a specific order (e.g., for plotting), Step9: Changing channel name and type Step10: This next example replaces spaces in the channel names with underscores, Step11: If for some reason the channel types in your Step12: Selection in the time domain Step13: Step14: Remember that sample times don't always align exactly with requested tmin Step15: Warning: Be careful when concatenating Step16: You can see that it contains 2 arrays. This combination of data and times Step17: Extracting channels by name Step18: Extracting channels by type Step19: Some of the parameters of Step20: If you want the array of times, Step21: The Step22: Summary of ways to extract data from Raw objects Step23: It is also possible to export the data to a
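The time-to-sample conversion used throughout the selection steps above is just a multiplication by the sampling frequency followed by truncation to integers; a minimal sketch with an assumed sampling rate (the real value comes from raw.info['sfreq'] in the code above):

import numpy as np

sampling_freq = 600.0  # assumed sampling rate in Hz; the sample data stores it in raw.info['sfreq']
start_stop_seconds = np.array([11.0, 13.0])
start_sample, stop_sample = (start_stop_seconds * sampling_freq).astype(int)
print(start_sample, stop_sample)  # 6600 7800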
13,689
<ASSISTANT_TASK:> Python Code: from pygoose import * from gensim.models.wrappers.fasttext import FastText kg.gpu.cuda_disable_gpus() from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences project = kg.Project.discover() MAX_VOCAB_SIZE = 125000 MAX_SEQUENCE_LENGTH = 30 tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_train.pickle') tokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_test.pickle') embedding_model = FastText.load_word2vec_format(project.aux_dir + 'fasttext_vocab.vec') EMBEDDING_DIM = len(embedding_model['apple']) texts_q1_train = [' '.join(pair[0]) for pair in tokens_train] texts_q2_train = [' '.join(pair[1]) for pair in tokens_train] texts_q1_test = [' '.join(pair[0]) for pair in tokens_test] texts_q2_test = [' '.join(pair[1]) for pair in tokens_test] unique_question_texts = list(set(texts_q1_train + texts_q2_train + texts_q1_test + texts_q2_test)) tokenizer = Tokenizer( num_words=MAX_VOCAB_SIZE, split=' ', lower=True, char_level=False, ) tokenizer.fit_on_texts(unique_question_texts) sequences_q1_train = tokenizer.texts_to_sequences(texts_q1_train) sequences_q2_train = tokenizer.texts_to_sequences(texts_q2_train) sequences_q1_test = tokenizer.texts_to_sequences(texts_q1_test) sequences_q2_test = tokenizer.texts_to_sequences(texts_q2_test) num_words = min(MAX_VOCAB_SIZE, len(tokenizer.word_index)) embedding_matrix = np.zeros((num_words + 1, EMBEDDING_DIM)) for word, index in progressbar(tokenizer.word_index.items()): if word in embedding_model.vocab: embedding_matrix[index] = embedding_model[word] kg.io.save(embedding_matrix, project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle') sequences_q1_padded_train = pad_sequences(sequences_q1_train, maxlen=MAX_SEQUENCE_LENGTH) sequences_q2_padded_train = pad_sequences(sequences_q2_train, maxlen=MAX_SEQUENCE_LENGTH) sequences_q1_padded_test = pad_sequences(sequences_q1_test, maxlen=MAX_SEQUENCE_LENGTH) sequences_q2_padded_test = pad_sequences(sequences_q2_test, maxlen=MAX_SEQUENCE_LENGTH) kg.io.save(sequences_q1_padded_train, project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle') kg.io.save(sequences_q2_padded_train, project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle') kg.io.save(sequences_q1_padded_test, project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle') kg.io.save(sequences_q2_padded_test, project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Hide all GPUs from TensorFlow to not automatically occupy any GPU RAM. Step2: Config Step3: The maximum allowed size of the embedding matrix and the maximum length our sequences will be padded/trimmed to. Step4: Load data Step5: Word embedding database queried from the trained FastText model. Step6: Build features Step7: Create question sequences Step8: Create embedding lookup matrix Step9: Allocate an embedding matrix. Include the NULL word. Step10: Fill the matrix using the vectors for individual words. Step11: Save features Step12: Padded word index sequences.
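The embedding-matrix step above boils down to copying one vector per known word into the row of a zero-initialised array addressed by the tokenizer's word index; a minimal sketch with a toy vocabulary and random vectors standing in for the FastText model:

import numpy as np

EMBEDDING_DIM = 4
word_index = {'apple': 1, 'banana': 2, 'cherry': 3}  # toy tokenizer word index (1-based)
toy_vectors = {w: np.random.rand(EMBEDDING_DIM) for w in ('apple', 'cherry')}  # toy "model"

embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, index in word_index.items():
    if word in toy_vectors:          # out-of-vocabulary words keep an all-zero row
        embedding_matrix[index] = toy_vectors[word]
print(embedding_matrix.shape)        # (4, 4)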
13,690
<ASSISTANT_TASK:> Python Code: # Importar bibliotecas import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score pd.options.mode.chained_assignment = None # default='warn' # Abrir banco de dados dados = pd.read_csv('~/caRtola/data/desafio_valorizacao/valorizacao_cartola_2018.csv') # Listar nome das variáveis str(list(dados)) # Selecionar variáveis para análise dados = dados[['slug', 'rodada', 'posicao', 'status', 'variacao_preco', 'pontos', 'preco', 'media_pontos']] # Explorar dados de apenas um jogador paqueta = dados[dados.slug == 'lucas-paqueta'] paqueta.head(n=15) # Criar coluna variacao_preco_lag e pontos_lag paqueta['variacao_preco_lag'] = paqueta['variacao_preco'].shift(1) paqueta['pontos_lag'] = paqueta['pontos'].shift(1) paqueta['media_lag'] = paqueta['media_pontos'].shift(-1) paqueta[['slug', 'rodada', 'status', 'pontos_lag', 'variacao_preco_lag', 'preco', 'media_pontos']].head(n=15) # Transformar dados para plotar resultados paqueta_plot = pd.melt(paqueta, id_vars=['slug','rodada'], value_vars=['variacao_preco_lag', 'pontos_lag', 'preco']) # Plotar gráfico com variacao_preco_lag, pontos_lag e preco plt.figure(figsize=(16, 6)) g = sns.lineplot(x='rodada', y='value', hue='variable', data=paqueta_plot) plt.figure(figsize=(16, 6)) g = sns.scatterplot(x='pontos_lag', y='variacao_preco_lag', hue='status', data=paqueta) paqueta[['pontos_lag','variacao_preco_lag','preco','media_pontos']].corr() # Set predictors and dependent variable paqueta_complete = paqueta[(~paqueta.status.isin(['Nulo', 'Suspenso'])) & (paqueta.rodada > 5)] paqueta_complete = paqueta_complete.dropna() predictors = paqueta_complete[['pontos_lag','preco','media_lag']] outcome = paqueta_complete['variacao_preco_lag'] regr = linear_model.LinearRegression() regr.fit(predictors, outcome) paqueta_complete['predictions'] = regr.predict(paqueta_complete[['pontos_lag', 'preco', 'media_lag']]) print('Intercept: \n', regr.intercept_) print('Coefficients: \n', regr.coef_) print("Mean squared error: %.2f" % mean_squared_error(paqueta_complete['variacao_preco_lag'], paqueta_complete['predictions'])) print('Variance score: %.2f' % r2_score(paqueta_complete['variacao_preco_lag'], paqueta_complete['predictions'])) # Plotar variação do preço por valor previsto do modelo linear. plt.figure(figsize=(8, 8)) g = sns.regplot(x='predictions',y='variacao_preco_lag', data=paqueta_complete) # Plotar linhas com rodadas para avaliar se estamos errando alguma rodada específica for line in range(0, paqueta_complete.shape[0]): g.text(paqueta_complete.iloc[line]['predictions'], paqueta_complete.iloc[line]['variacao_preco_lag']-0.25, paqueta_complete.iloc[line]['rodada'], horizontalalignment='right', size='medium', color='black', weight='semibold') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: A few observations about the structure of the data. In row '21136', Paquetá is listed as doubtful and scored 0 points. In the row below ('21137'), he is suspended, yet he scored. Step2: As we can see in the table above, the new attributes we just created are now aligned with the athlete's status and will help us in the modeling stage. Before modeling, let's explore our data a bit more. Step3: In this plot, we can see that the athlete's price was reasonably stable over time. Looking at the behavior of the blue and orange lines, we notice that when one line has a negative slope the other seems to follow. This leads us to the obvious conclusion that the athlete's score is directly linked to his price variation. Step4: Well, apparently there is a relationship between the points and the price variation. Let's look at the correlation matrix. Step5: We get some useful information out of the correlation matrix. First, the score is positively correlated with the price variation, while the athlete's price is negatively correlated. These two variables can already help us build a model. Step6: Good news! We are predicting the player's results very well. The value is approximate, but not bad at all! The formula for a player's price change in a given round is
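The lag columns built above rely on pandas' shift; a minimal sketch on a toy frame (column names invented) showing how shift(1) pulls the previous row's value and shift(-1) the next row's:

import pandas as pd

toy = pd.DataFrame({'round': [1, 2, 3], 'points': [5.0, 2.0, 8.0]})
toy['points_lag'] = toy['points'].shift(1)    # value from the previous round (NaN for the first row)
toy['points_lead'] = toy['points'].shift(-1)  # value from the next round (NaN for the last row)
print(toy)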
13,691
<ASSISTANT_TASK:> Python Code: import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10, 10) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Only run this cell once in the active kernel or the files in later cells will not be found # Make sure that caffe is on the python path: caffe_root = '../' # this file is expected to be in {caffe_root}/examples import os os.chdir(caffe_root) import sys sys.path.insert(0, 'python') import caffe #Commenting out caffe device setting to allow CPU only #caffe.set_device(0) #caffe.set_mode_gpu() from google.protobuf import text_format from caffe.proto import caffe_pb2 # load PASCAL VOC labels labelmap_file = 'data/VOC0712/labelmap_voc.prototxt' file = open(labelmap_file, 'r') labelmap = caffe_pb2.LabelMap() text_format.Merge(str(file.read()), labelmap) def get_labelname(labelmap, labels): num_labels = len(labelmap.item) labelnames = [] if type(labels) is not list: labels = [labels] for label in labels: found = False for i in xrange(0, num_labels): if label == labelmap.item[i].label: found = True labelnames.append(labelmap.item[i].display_name) break assert found == True return labelnames model_def = 'models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt' #model_weights = 'models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_60000.caffemodel' model_weights = 'models/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300_iter_120000.caffemodel' net = caffe.Net(model_def, # defines the structure of the model caffe.TEST, # use test mode (e.g., don't perform dropout) weights=model_weights) # contains the trained weights # input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2, 0, 1)) transformer.set_mean('data', np.array([104,117,123])) # mean pixel transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1] transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB example = 'examples/images/fish-bike.jpg' #example = 'examples/images/cat.jpg' #Try your images if you mapped a volume at /images into the Docker container #example = '/images/filename.jpg' # set net to batch size of 1 image_resize = 300 net.blobs['data'].reshape(1,3,image_resize,image_resize) image = caffe.io.load_image(example) plt.imshow(image) transformed_image = transformer.preprocess('data', image) net.blobs['data'].data[...] = transformed_image # Forward pass. detections = net.forward()['detection_out'] # Parse the outputs. det_label = detections[0,0,:,1] det_conf = detections[0,0,:,2] det_xmin = detections[0,0,:,3] det_ymin = detections[0,0,:,4] det_xmax = detections[0,0,:,5] det_ymax = detections[0,0,:,6] # Get detections with confidence higher than 0.6. 
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6] top_conf = det_conf[top_indices] top_label_indices = det_label[top_indices].tolist() top_labels = get_labelname(labelmap, top_label_indices) top_xmin = det_xmin[top_indices] top_ymin = det_ymin[top_indices] top_xmax = det_xmax[top_indices] top_ymax = det_ymax[top_indices] colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist() plt.imshow(image) currentAxis = plt.gca() for i in xrange(top_conf.shape[0]): xmin = int(round(top_xmin[i] * image.shape[1])) ymin = int(round(top_ymin[i] * image.shape[0])) xmax = int(round(top_xmax[i] * image.shape[1])) ymax = int(round(top_ymax[i] * image.shape[0])) score = top_conf[i] label = int(top_label_indices[i]) label_name = top_labels[i] display_txt = '%s: %.2f'%(label_name, score) coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1 color = colors[label] currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2)) currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5}) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load LabelMap. Step2: Load the net in the test phase for inference, and configure input preprocessing. Step3: 2. SSD detection Step4: Run the net and examine the top_k results Step5: Plot the boxes
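Plotting the detection boxes described above requires scaling the network's normalized coordinates back to pixel coordinates; a minimal sketch with invented values:

image_shape = (480, 640, 3)  # (height, width, channels); values made up
xmin_n, ymin_n, xmax_n, ymax_n = 0.25, 0.40, 0.75, 0.90  # normalized detection box

xmin = int(round(xmin_n * image_shape[1]))  # x scales with the image width
ymin = int(round(ymin_n * image_shape[0]))  # y scales with the image height
xmax = int(round(xmax_n * image_shape[1]))
ymax = int(round(ymax_n * image_shape[0]))
print(xmin, ymin, xmax, ymax)  # 160 192 480 432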
13,692
<ASSISTANT_TASK:> Python Code: #%load_ext rpy2.ipython #%R install.packages("nycflights13", repos='http://cran.us.r-project.org') #%R library(nycflights13) #%R write.csv(flights, "flights.csv") # Downloading and unzipg a file, without R method : # source= http://stackoverflow.com/a/34863053/3140336 import io from zipfile import ZipFile import requests def get_zip(file_url): url = requests.get(file_url) zipfile = ZipFile(io.BytesIO(url.content)) zip_names = zipfile.namelist() if len(zip_names) == 1: file_name = zip_names.pop() extracted_file = zipfile.open(file_name) return extracted_file url=r'https://github.com/winpython/winpython_afterdoc/raw/master/examples/nycflights13_datas/flights.zip' with io.open("flights.csv", 'wb') as f: f.write(get_zip(url).read()) # Some prep work to get the data from R and into pandas %matplotlib inline import matplotlib.pyplot as plt #%load_ext rpy2.ipython import pandas as pd import seaborn as sns pd.set_option("display.max_rows", 5) flights = pd.read_csv("flights.csv", index_col=0) # dim(flights) <--- The R code flights.shape # <--- The python code # head(flights) flights.head() # filter(flights, month == 1, day == 1) flights.query("month == 1 & day == 1") # flights[flights$month == 1 & flights$day == 1, ] flights[(flights.month == 1) & (flights.day == 1)] # slice(flights, 1:10) flights.iloc[:9] # arrange(flights, year, month, day) flights.sort_values(['year', 'month', 'day']) # arrange(flights, desc(arr_delay)) flights.sort_values('arr_delay', ascending=False) # select(flights, year, month, day) flights[['year', 'month', 'day']] # select(flights, year:day) flights.loc[:, 'year':'day'] # select(flights, -(year:day)) # No direct equivalent here. I would typically use # flights.drop(cols_to_drop, axis=1) # or fligths[flights.columns.difference(pd.Index(cols_to_drop))] # point to dplyr! 
# select(flights, tail_num = tailnum) flights.rename(columns={'tailnum': 'tail_num'})['tail_num'] # rename(flights, tail_num = tailnum) flights.rename(columns={'tailnum': 'tail_num'}) # distinct(select(flights, tailnum)) flights.tailnum.unique() # distinct(select(flights, origin, dest)) flights[['origin', 'dest']].drop_duplicates() # mutate(flights, # gain = arr_delay - dep_delay, # speed = distance / air_time * 60) flights.assign(gain=flights.arr_delay - flights.dep_delay, speed=flights.distance / flights.air_time * 60) # mutate(flights, # gain = arr_delay - dep_delay, # gain_per_hour = gain / (air_time / 60) # ) (flights.assign(gain=flights.arr_delay - flights.dep_delay) .assign(gain_per_hour = lambda df: df.gain / (df.air_time / 60))) # transmute(flights, # gain = arr_delay - dep_delay, # gain_per_hour = gain / (air_time / 60) # ) (flights.assign(gain=flights.arr_delay - flights.dep_delay) .assign(gain_per_hour = lambda df: df.gain / (df.air_time / 60)) [['gain', 'gain_per_hour']]) # summarise(flights, # delay = mean(dep_delay, na.rm = TRUE)) flights.dep_delay.mean() # sample_n(flights, 10) flights.sample(n=10) # sample_frac(flights, 0.01) flights.sample(frac=.01) # planes <- group_by(flights, tailnum) # delay <- summarise(planes, # count = n(), # dist = mean(distance, na.rm = TRUE), # delay = mean(arr_delay, na.rm = TRUE)) # delay <- filter(delay, count > 20, dist < 2000) planes = flights.groupby("tailnum") delay = (planes.agg({"year": "count", "distance": "mean", "arr_delay": "mean"}) .rename(columns={"distance": "dist", "arr_delay": "delay", "year": "count"}) .query("count > 20 & dist < 2000")) delay fig, ax = plt.subplots(figsize=(12, 6)) sns.regplot("dist", "delay", data=delay, lowess=True, ax=ax, scatter_kws={'color': 'k', 'alpha': .5, 's': delay['count'] / 10}, ci=90, line_kws={'linewidth': 3}); import statsmodels.api as sm smooth = sm.nonparametric.lowess(delay.delay, delay.dist, frac=1/8) ax = delay.plot(kind='scatter', x='dist', y = 'delay', figsize=(12, 6), color='k', alpha=.5, s=delay['count'] / 10) ax.plot(smooth[:, 0], smooth[:, 1], linewidth=3); std = smooth[:, 1].std() ax.fill_between(smooth[:, 0], smooth[:, 1] - std, smooth[:, 1] + std, alpha=.25); # destinations <- group_by(flights, dest) # summarise(destinations, # planes = n_distinct(tailnum), # flights = n() # ) destinations = flights.groupby('dest') destinations.agg({ 'tailnum': lambda x: len(x.unique()), 'year': 'count' }).rename(columns={'tailnum': 'planes', 'year': 'flights'}) destinations = flights.groupby('dest') r = destinations.agg({'tailnum': {'planes': lambda x: len(x.unique())}, 'year': {'flights': 'count'}}) r # daily <- group_by(flights, year, month, day) # (per_day <- summarise(daily, flights = n())) daily = flights.groupby(['year', 'month', 'day']) per_day = daily['distance'].count() per_day # (per_month <- summarise(per_day, flights = sum(flights))) per_month = per_day.groupby(level=['year', 'month']).sum() per_month # (per_year <- summarise(per_month, flights = sum(flights))) per_year = per_month.sum() per_year # flights %>% # group_by(year, month, day) %>% # select(arr_delay, dep_delay) %>% # summarise( # arr = mean(arr_delay, na.rm = TRUE), # dep = mean(dep_delay, na.rm = TRUE) # ) %>% # filter(arr > 30 | dep > 30) ( flights.groupby(['year', 'month', 'day']) [['arr_delay', 'dep_delay']] .mean() .query('arr_delay > 30 | dep_delay > 30') ) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: using an internet download to get flight.qcsv Step2: Data Step3: Single table verbs Step4: We see the first big language difference between R and python. Step5: Arrange rows with arrange(), sort() Step6: It's worth mentioning the other common sorting method for pandas DataFrames, sort_index. Pandas puts much more emphasis on indicies, (or row labels) than R. Step7: But like Hadley mentions, not that useful since it only returns the one column. dplyr and pandas compare well here. Step8: Pandas is more verbose, but the the argument to columns can be any mapping. So it's often used with a function to perform a common task, say df.rename(columns=lambda x Step9: FYI this returns a numpy array instead of a Series. Step10: OK, so dplyr wins there from a consistency point of view. unique is only defined on Series, not DataFrames. Step11: The first example is pretty much identical (aside from the names, mutate vs. assign). Step12: Summarise values with summarise() Step13: This is only roughly equivalent. Step14: Grouped operations Step15: For me, dplyr's n() looked is a bit starge at first, but it's already growing on me. Step16: Or using statsmodels directly for more control over the lowess, with an extremely lazy Step17: There's a little know feature to groupby.agg Step18: The result is a MultiIndex in the columns which can be a bit awkard to work with (you can drop a level with r.columns.droplevel()). Also the syntax going into the .agg may not be the clearest. Step19: I'm not sure how dplyr is handling the other columns, like year, in the last example. With pandas, it's clear that we're grouping by them since they're included in the groupby. For the last example, we didn't group by anything, so they aren't included in the result.
13,693
<ASSISTANT_TASK:> Python Code: import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG ! pip3 install -U google-cloud-storage $USER_FLAG ! pip3 install $USER kfp --upgrade import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID REGION = "us-central1" # @param {type: "string"} from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ! gsutil mb -l $REGION $BUCKET_NAME ! gsutil ls -al $BUCKET_NAME SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your GCP project id from gcloud shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].strip() print("Service Account:", SERVICE_ACCOUNT) ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME import google.cloud.aiplatform as aip PIPELINE_ROOT = "{}/pipeline_root/control".format(BUCKET_NAME) import json from kfp import dsl from kfp.v2 import compiler from kfp.v2.dsl import component aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME) @component def args_generator_op() -> str: import json return json.dumps( [{"cats": "1", "dogs": "2"}, {"cats": "10", "dogs": "20"}], sort_keys=True, ) @component def print_op(msg: str): print(msg) @component def flip_coin_op() -> str: Flip a coin and output heads or tails randomly. 
import random result = "heads" if random.randint(0, 1) == 0 else "tails" return result @dsl.pipeline( name="control-{}".format(TIMESTAMP), pipeline_root=PIPELINE_ROOT, ) def pipeline( json_string: str = json.dumps( [ { "snakes": "anaconda", "lizards": "anole", "bunnies": [{"cottontail": "bugs"}, {"cottontail": "thumper"}], }, { "snakes": "cobra", "lizards": "gecko", "bunnies": [{"cottontail": "roger"}], }, { "snakes": "boa", "lizards": "iguana", "bunnies": [ {"cottontail": "fluffy"}, {"fuzzy_lop": "petunia", "cottontail": "peter"}, ], }, ], sort_keys=True, ) ): flip1 = flip_coin_op() with dsl.Condition( flip1.output != "no-such-result", name="alwaystrue" ): # always true args_generator = args_generator_op() with dsl.ParallelFor(args_generator.output) as item: print_op(json_string) with dsl.Condition(flip1.output == "heads", name="heads"): print_op(item.cats) with dsl.Condition(flip1.output == "tails", name="tails"): print_op(item.dogs) with dsl.ParallelFor(json_string) as item: with dsl.Condition(item.snakes == "boa", name="snakes"): print_op(item.snakes) print_op(item.lizards) print_op(item.bunnies) # it is possible to access sub-items with dsl.ParallelFor(json_string) as item: with dsl.ParallelFor(item.bunnies) as item_bunnies: print_op(item_bunnies.cottontail) from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="control_pipeline.json".replace(" ", "_") ) DISPLAY_NAME = "control_" + TIMESTAMP job = aip.PipelineJob( display_name=DISPLAY_NAME, template_path="control_pipeline.json".replace(" ", "_"), pipeline_root=PIPELINE_ROOT, ) job.run() ! rm control_pipeline.json delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True try: if delete_model and "DISPLAY_NAME" in globals(): models = aip.Model.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) model = models[0] aip.Model.delete(model) print("Deleted model:", model) except Exception as e: print(e) try: if delete_endpoint and "DISPLAY_NAME" in globals(): endpoints = aip.Endpoint.list( filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time" ) endpoint = endpoints[0] endpoint.undeploy_all() aip.Endpoint.delete(endpoint.resource_name) print("Deleted endpoint:", endpoint) except Exception as e: print(e) if delete_dataset and "DISPLAY_NAME" in globals(): if "none" == "tabular": try: datasets = aip.TabularDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TabularDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "none" == "image": try: datasets = aip.ImageDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.ImageDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "none" == "text": try: datasets = aip.TextDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TextDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "none" == "video": try: datasets = aip.VideoDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.VideoDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) try: if delete_pipeline and "DISPLAY_NAME" in globals(): 
pipelines = aip.PipelineJob.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) pipeline = pipelines[0] aip.PipelineJob.delete(pipeline.resource_name) print("Deleted pipeline:", pipeline) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Install the latest GA version of google-cloud-storage library as well. Step2: Install the latest GA version of KFP SDK library as well. Step3: Restart the kernel Step4: Check the versions of the packages you installed. The KFP SDK version should be >=1.6. Step5: Before you begin Step6: Region Step7: Timestamp Step8: Authenticate your Google Cloud account Step9: Create a Cloud Storage bucket Step10: Only if your bucket doesn't already exist Step11: Finally, validate access to your Cloud Storage bucket by examining its contents Step12: Service Account Step13: Set service account access for Vertex AI Pipelines Step14: Set up variables Step15: Vertex AI Pipelines constants Step16: Additional imports. Step17: Initialize Vertex AI SDK for Python Step19: Define pipeline components Step20: Define a pipeline that uses control structures Step21: Compile the pipeline Step22: Run the pipeline Step23: Click on the generated link to see your run in the Cloud Console.
13,694
<ASSISTANT_TASK:> Python Code: data_in_shape = (3, 6) rnn = SimpleRNN(4, activation='tanh') layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3400 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (8, 5) rnn = SimpleRNN(5, activation='sigmoid') layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3500 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3600 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.2'] 
= { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3700 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.3'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True, go_backwards=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3800 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.4'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=False, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3800 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result 
= model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.5'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True, go_backwards=False, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3810 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.6'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=True, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3820 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.7'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', use_bias=False, return_sequences=True, go_backwards=True, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, 
outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3830 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.8'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } print(json.dumps(DATA)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: [recurrent.SimpleRNN.1] units=5, activation='sigmoid' Step2: [recurrent.SimpleRNN.2] units=4, activation='tanh', return_sequences=True Step3: [recurrent.SimpleRNN.3] units=4, activation='tanh', return_sequences=False, go_backwards=True Step4: [recurrent.SimpleRNN.4] units=4, activation='tanh', return_sequences=True, go_backwards=True Step5: [recurrent.SimpleRNN.5] units=4, activation='tanh', return_sequences=False, go_backwards=False, stateful=True Step6: [recurrent.SimpleRNN.6] units=4, activation='tanh', return_sequences=True, go_backwards=False, stateful=True Step7: [recurrent.SimpleRNN.7] units=4, activation='tanh', return_sequences=False, go_backwards=True, stateful=True Step8: [recurrent.SimpleRNN.8] units=4, activation='tanh', use_bias=False, return_sequences=True, go_backwards=True, stateful=True Step9: export for Keras.js tests
13,695
<ASSISTANT_TASK:> Python Code: e = Element("This is fancy text") print(e._name, e._id) print(e.get_name()) e.render() e = Element("Hello {{kwargs['you']}}, my name is `{{this.get_name()}}`.") e.render(you='World') child = Element('This is the child.') parent = Element('This is the parent.').add_child(child) parent = Element('This is the parent.') child = Element('This is the child.').add_to(parent) print(parent.render(), child.render()) parent = Element("<parent>{% for child in this._children.values() %}{{child.render()}}{% endfor %}</parent>") Element('<child1/>').add_to(parent) Element('<child2/>').add_to(parent) parent.render() parent = Element("<parent>{% for child in this._children.values() %}{{child.render()}}{% endfor %}</parent>") Element('<child1/>').add_to(parent, name='child_1') parent._children Element('<child1_overwritten/>').add_to(parent, name='child_1') parent.render() f = Figure() print(f.render()) f.header.add_child(Element("<style>body {background-color: #00ffff}</style>")) f.html.add_child(Element("<h1>Hello world</h1>")) print(f.render()) f.save('foo.html') print(open('foo.html').read()) f f.width = 300 f.height = 200 f Figure(figsize=(5,5)) macro = MacroElement() macro._template = Template( '{% macro header(this, kwargs) %}' 'This is header of {{this.get_name()}}' '{% endmacro %}' '{% macro html(this, kwargs) %}' 'This is html of {{this.get_name()}}' '{% endmacro %}' '{% macro script(this, kwargs) %}' 'This is script of {{this.get_name()}}' '{% endmacro %}' ) print(Figure().add_child(macro).render()) js_link = JavascriptLink('https://example.com/javascript.js') js_link.render() css_link = CssLink('https://example.com/style.css') css_link.render() html = Html('Hello world') html.render() Html('<b>Hello world</b>').render() Html('<b>Hello world</b>', script=True).render() iframe = IFrame('Hello World') iframe.render() f = Figure(height=180) f.html.add_child(Element("Before the frame")) f.html.add_child(IFrame('In the frame', height='100px')) f.html.add_child(Element("After the frame")) f div = Div() div.html.add_child(Element('Hello world')) print(Figure().add_child(div).render()) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Each element has an attribute _name and a unique _id. You also have a method get_name to get a unique string representation of the element. Step2: You can render an Element using the method render Step3: In the template, you can use keyword this for accessing the object itself ; and the keyword kwargs for accessing any keyword argument provided in the render method Step4: Well, this is not really cool for now. What makes elements useful lies in the fact that you can create trees out of them. To do so, you can either use the method add_child or the method add_to. Step5: Now in the example above, embedding the one in the other does not change anything. Step6: But you can use the tree structure in the template. Step7: As you can see, the child of an element are referenced in the _children attibute in the form of an OrderedDict. You can choose the key of each child in specifying a name in the add_child (or add_to) method Step8: That way, it's possible to overwrite a child in specifying the same name Step9: I hope you start to find it useful. Step10: You can for example create a beatiful cyan "hello-world" webpage in doing Step11: You can simply save the content of the Figure to a file, thanks to the save method Step12: If you want to visualize it in the notebook, you can let Figure._repr_html_ method do it's job in typing Step13: If this rendering is too large for you, you can force it's width and height Step14: Note that you can also define a Figure's size in a matplotlib way Step15: MacroElement Step16: Link Step17: Html Step18: It's designed to render the text as you gave it, so it won't work directly it you want to embed HTML code inside the div. Step19: For this, you have to set script=True and it will work Step20: IFrame Step21: As you can see, it will embed the full content of the iframe in a base64 string so that the ouput looks like Step22: Div
13,696
<ASSISTANT_TASK:> Python Code: %matplotlib inline import numpy as np import scipy as sp import json import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt import pandas as pd from pyquery import PyQuery as pq import requests pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) import seaborn as sns sns.set_style("whitegrid") sns.set_context("poster") import random import json import time import csv import datetime import statsmodels.api as sm import pickle from matplotlib import rc import itertools # rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # rc('text', usetex=True) # Default plotting from matplotlib import rcParams dark_colors = ["#99D699", "#B2B2B2", (0.8509803921568627, 0.37254901960784315, 0.00784313725490196), (0.4588235294117647, 0.4392156862745098, 0.7019607843137254), (0.9058823529411765, 0.1607843137254902, 0.5411764705882353), (0.4, 0.6509803921568628, 0.11764705882352941), (0.9019607843137255, 0.6705882352941176, 0.00784313725490196), (0.6509803921568628, 0.4627450980392157, 0.11372549019607843), (0.4, 0.4, 0.4)] rcParams['figure.figsize'] = (12, 9) rcParams['figure.dpi'] = 150 rcParams['axes.color_cycle'] = dark_colors rcParams['lines.linewidth'] = 2 rcParams['axes.facecolor'] = "white" rcParams['axes.titlesize'] = 20 rcParams['axes.labelsize'] = 17.5 rcParams['xtick.labelsize'] = 15 rcParams['ytick.labelsize'] = 15 rcParams['legend.fontsize'] = 17.5 rcParams['patch.edgecolor'] = 'none' rcParams['grid.color']="gray" rcParams['grid.linestyle']="-" rcParams['grid.linewidth'] = 0.3 rcParams['grid.alpha']=1 rcParams['text.color'] = "444444" rcParams['axes.labelcolor'] = "444444" rcParams['ytick.color'] = "444444" rcParams['xtick.color'] = "444444" # Create an initial data frame by loading the earthquake # catalog for year 1980 eq_df = pd.DataFrame() eq_df = pd.read_csv('http://wichita.ogs.ou.edu/eq/catalog/1980/1980.csv') # Add the additional years until 2015 included for year in range(1981,2016): url = 'http://wichita.ogs.ou.edu/eq/catalog/' + str(year) + '/' + str(year) + '.csv' temp_df = pd.read_csv(url) eq_df = eq_df.append(temp_df,ignore_index = True) # the dataframe is chronologically ordered # add a column with year as a float # e.g, 1 Jan 2015 is 2015.0 and 31 Dec 2015 is 2015.99 year_float = [] for date in eq_df.origintime.values: new_date = time.strptime(date[0:10], "%Y-%m-%d") to_add =int(float(new_date.tm_yday)/36.6*100) year_float.append(float(str(new_date.tm_year) + '.' 
+ str(to_add))) eq_df['year_float'] = year_float # Drop the columns we don't want to use because it takes memory # we do this process in place to avoid copying into an other DataFrame # drop the error in time and location for now # drop various other estimations of magnitude # drop information on body waves and surface waves eq_df.drop(['err_lon','err_lat','err_depth','err_origintime','mw','mw_src','mblg_usgs','mb','mblg_ogs', 'ml_ogs', 'ms','mfa','max_mmi','reafile','reamtime','pdlid', 'mw_ogs'], axis=1, inplace=True) # Save data frame for future use eq_df.to_csv('./tempdata/earthquakes_catalog.csv',sep = '|') eq_df = eq_df[eq_df.prefmag >= 3] # Load the data frame # eq_df = pd.DataFrame.from_csv('./tempdata/earthquakes_catalog.csv',sep = '|') eq_df['year'] = [int(year) for year in eq_df.year_float] # Show the first 5 earthquakes in the catalog and their features eq_df.head() # Read data #welldf = pd.read_csv('tempdata/wells_reportyear.csv') # Clean up data #welldf = welldf[np.isfinite(welldf['YEAR'])] # Discard extreme coordinates #welldf = welldf[(welldf.LONGITUDE >= -102.918) & (welldf.LONGITUDE <= -94.466)] #welldf = welldf[(welldf.LATITUDE >= 33.811) & (welldf.LATITUDE <= 36.99869)] # Data set contains futuristic years, we discard those observations #welldf = welldf[welldf.YEAR <= 2015] # Save data frame for future use # welldf.to_csv('tempdata/wells_data.csv') # Load data for future use welldf = pd.read_csv('tempdata/wells_data.csv') # Show first 5 wells print welldf.shape welldf.head() fig, ax = plt.subplots() plt.bar(np.arange(2006,2013),welldf.groupby('year').count().volume.values, color = "#99D699") ax.set_xticks(np.arange(2006,2013)+.5) ax.set_xticklabels(np.arange(2006,2013)) ax.set_xlabel('Year') ax.set_ylabel('Number of Wells') ax.grid(False) fig, ax = plt.subplots() plt.bar(np.arange(2006,2013),welldf.groupby('year').volume.sum().values, color = "#99D699") ax.set_xticks(np.arange(2006,2013)+.5) ax.set_xticklabels(np.arange(2006,2013)) ax.set_xlabel('Year') ax.set_ylabel('Water disposed in gallons') ax.grid(False) #REQUIRES INSTALLATION conda install basemap #received assistance from https://peak5390.wordpress.com/2012/12/08/matplotlib-basemap-tutorial-plotting-points-on-a-simple-map/ from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import numpy as np def plot_earthquakes(df): map = Basemap(llcrnrlon=int(min(df.longitude))-1, llcrnrlat=int(min(df.latitude))-1, urcrnrlon=int(max(df.longitude))+1, urcrnrlat=int(max(df.latitude))+1) map.plot(df.longitude.values, df.latitude.values, 'ro', markersize=2) map.drawmapboundary(fill_color='white') map.drawcoastlines() map.drawstates() map.drawcounties() plt.plot() plt.show() b42010 = eq_df[(eq_df.year_float<2010)] plot_earthquakes(b42010) #plot number of earthquakes by year mask = eq_df['prefmag'] >=3 plt.hist(eq_df[mask]['year_float'].values, bins=2016-1980+1) plt.xlabel('Year') plt.ylabel('Number of Earthquakes') plt.grid(False) # Plot inspired from Ellsworth [Science, 2013] mask = eq_df['prefmag'] >=3 eq_count, base = np.histogram(eq_df[mask]['year_float'], bins = eq_df[mask].shape[0]) # plot the cumulative number of earthquakes of magnitude > 3 plt.figure(figsize = (9,7)) plt.plot(base[:-1], np.cumsum(eq_count), lw=3,c='green', label='Cumulative number of Earthquakes of magnitude > 3' ) mask_time = (eq_df['year_float'] < 2009) & (eq_df['prefmag'] >=3) eq_2010, base_2010 = np.histogram(eq_df[mask_time]['year_float'], bins = eq_df[mask_time].shape[0]) # fit the earthquake count before 2009 with a 1st order 
polynomial # this is the background seismicity rate fit = np.polyfit(base_2010[:-1],np.cumsum(eq_2010),1) fit_fn = np.poly1d(fit) plt.plot(base[:-1],fit_fn(base[:-1]),'k--',label = 'Background seismicity until 2009') # now fill in between plt.fill_between(base[:-1], np.cumsum(eq_count), fit_fn(base[:-1]), color='grey', alpha = 0.3); plt.xlim([1980,2016]); plt.ylabel('Cumulative number of Earthquakes') plt.xlabel('year') plt.legend(loc =2); plt.grid(False) # Function is taken and modified from: # http://eqrm.googlecode.com/svn-history/r143/trunk/preprocessing/recurrence_from_catalog.py def calc_recurrence(df,min_plot, min_mag = None, max_mag = None, interval = 0.1): This function reads an earthquake catalogue file and calculates the Gutenberg-Richter recurrence parameters using both least squares and maximum likelihood (Aki 1965) approaches. Results are plotted for both straightline fits and bounded Gutenberg-Richter curves. Also plotted is the curve that would result assuming a b-value of 1. Funtion arguments: df: data frame containing earthquake catalog with columns prefmag for magnitude and year_float for year min_mag: minimum magnitude for which data will be used - i.e. catalogue completeness max_mag: maximum magnitude used in bounded G-R curve. If not specified, defined as the maximum magnitude in the catlogue + 0.1 magnitude units. interval: Width of magnitude bins for generating cumulative histogram of earthquake recurrence. Default avlue 0.1 magnitude units. magnitudes = df.prefmag.values years = df.year_float.values ########################################################################### # Read data ########################################################################### # If maximum magnitude is not specified default value to maximum in catalogue if max_mag is not None: pass else: max_mag = magnitudes.max() + 0.1 if min_mag is not None: pass else: min_mag = magnitudes.min() num_eq = len(magnitudes) print 'Minimum magnitude:', min_mag print 'Total number of earthquakes:', num_eq num_years = years.max()-years.min() annual_num_eq = num_eq/num_years print 'Annual number of earthquakes greater than Mw', min_mag,':', \ annual_num_eq print 'Maximum catalog magnitude:', magnitudes.max() print 'Mmax = ', max_mag max_mag_bin = magnitudes.max() + 0.15 # Magnitude bins bins = np.arange(min_mag, max_mag_bin, interval) # Magnitude bins for plotting - we will re-arrange bins later plot_bins = np.arange(min_mag, max_mag, interval) ########################################################################### # Generate distribution ########################################################################### # Generate histogram hist = np.histogram(magnitudes,bins=bins) # Reverse array order hist = hist[0][::-1] bins = bins[::-1] # Calculate cumulative sum cum_hist = hist.cumsum() # Ensure bins have the same length has the cumulative histogram. # Remove the upper bound for the highest interval. bins = bins[1:] # Get annual rate cum_annual_rate = cum_hist/num_years new_cum_annual_rate = [] for i in cum_annual_rate: new_cum_annual_rate.append(i+1e-20) # Take logarithm log_cum_sum = np.log10(new_cum_annual_rate) ########################################################################### # Fit a and b parameters using a varity of methods ########################################################################### # Fit a least squares curve b,a = np.polyfit(bins, log_cum_sum, 1) print 'Least Squares: b value', -1. 
* b, 'a value', a alpha = np.log(10) * a beta = -1.0 * np.log(10) * b # Maximum Likelihood Estimator fitting # b value b_mle = np.log10(np.exp(1)) / (np.mean(magnitudes) - min_mag) beta_mle = np.log(10) * b_mle print 'Maximum Likelihood: b value', b_mle ########################################################################### # Generate data to plot results ########################################################################### # Generate data to plot least squares linear curve # Calculate y-intercept for least squares solution yintercept = log_cum_sum[-1] - b * min_mag ls_fit = b * plot_bins + yintercept log_ls_fit = [] for value in ls_fit: log_ls_fit.append(np.power(10,value)) # Generate data to plot bounded Gutenberg-Richter for LS solution numer = np.exp(-1. * beta * (plot_bins - min_mag)) - \ np.exp(-1. *beta * (max_mag - min_mag)) denom = 1. - np.exp(-1. * beta * (max_mag - min_mag)) ls_bounded = annual_num_eq * (numer / denom) # Generate data to plot maximum likelihood linear curve mle_fit = -1.0 * b_mle * plot_bins + 1.0 * b_mle * min_mag + np.log10(annual_num_eq) log_mle_fit = [] for value in mle_fit: log_mle_fit.append(np.power(10,value)) # Generate data to plot bounded Gutenberg-Richter for MLE solution numer = np.exp(-1. * beta_mle * (plot_bins - min_mag)) - \ np.exp(-1. *beta_mle * (max_mag - min_mag)) denom = 1. - np.exp(-1. * beta_mle * (max_mag - min_mag)) mle_bounded = annual_num_eq * (numer / denom) # Compare b-value of 1 fit_data = -1.0 * plot_bins + min_mag + np.log10(annual_num_eq) log_fit_data = [] for value in fit_data: log_fit_data.append(np.power(10,value)) ########################################################################### # Plot the results ########################################################################### # Plotting fig = plt.figure(1) ax = fig.add_subplot(111) plt.scatter(bins, new_cum_annual_rate, label = 'Catalogue') ax.plot(plot_bins, log_ls_fit, c = 'r', label = 'Least Squares') ax.plot(plot_bins, ls_bounded, c = 'r', linestyle ='--', label = 'Least Squares Bounded') ax.plot(plot_bins, log_mle_fit, c = 'g', label = 'Maximum Likelihood') ax.plot(plot_bins, mle_bounded, c = 'g', linestyle ='--', label = 'Maximum Likelihood Bounded') ax.plot(plot_bins, log_fit_data, c = 'b', label = 'b = 1') #ax.plot(bins, ls_fit2, c = 'k') ax.set_yscale('log') ax.legend(loc = 3) ax.set_ylim([min_plot, max(new_cum_annual_rate) * 10.]) ax.set_xlim([min_mag - 0.5, max_mag + 0.5]) ax.set_ylabel('Annual probability') ax.set_xlabel('Magnitude') ax.grid(False) # Plot for all dataset calc_recurrence(eq_df,1e-7) import datetime #function to create list of interarrival times in hours from df def get_hours_between(df): dates=[] origintimes = df.origintime.values for date in origintimes: year, month, day = date.split('-') day, hour = day.split(' ') hour, minute, second = hour.split(':') if len(second.split('.'))==2: second, microsecond = second.split('.') elif len(second.split('.'))==1: microsecond=0 dates.append(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second), int(microsecond))) dates=sorted(dates) deltas=[] for i in range(1,len(dates)): delta = dates[i] - dates[i-1] delta = delta.total_seconds()/3600 deltas.append(delta) deltas = np.array(deltas) return deltas deltas = get_hours_between(eq_df[eq_df.prefmag>=3]) #http://statsmodels.sourceforge.net/devel/generated/statsmodels.graphics.gofplots.ProbPlot.html #fit and plot exponential to data def fit_expo(deltas, plot=True, bins=100, xlab1='Hours', ylab1='Probability Density', 
xlab2='Theoretical Quantiles', ylab2='Sample Quantiles', xmax=100, ymax=0.02, k=1., force_lambd=None, summary_stats=True): #If we are inherting a previous model for comparison, use previous lambda if pd.notnull(force_lambd): #Calculate Summary Statistics lambd=force_lambd mean = np.mean(deltas) #sample mean #If we are fitting the model on this dataset separately, calculate lambda from data elif pd.isnull(force_lambd): #Use Maxmimum Likelihood Estimator to get exponential fit expo_fit = sp.stats.expon.fit(deltas, floc=0) #Calculate Summary Statistics lambd = expo_fit[1] #fitted mean #mean = lambd #Calculate more summary statistics k=float(k) n = float(len(deltas)) ssr = np.sum((deltas-lambd)**2) aic = n*np.log(ssr/n) + 2*k mse = np.mean((deltas-lambd)**2) L1norm = np.mean(np.absolute(deltas-lambd)) actual_95_int = np.percentile(deltas1, 2.5), np.percentile(deltas1, 97.5) pred_95_int = sp.stats.expon.interval(0.95, scale=lambd) if plot==True: #Overlay fitted exponential over histogram f, axes = plt.subplots(nrows=int(1), ncols=int(2), squeeze=False) f.set_figheight(8) f.set_figwidth(18) plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5) axes[0][0].hist(deltas, bins=bins, normed=True, zorder=0, color='grey', label='Hours') x = np.arange(0,xmax,0.01) y = sp.stats.expon.pdf(x, scale=lambd) axes[0][0].grid(False) axes[0][0].plot(x,y, color='green', zorder=10, label='Exponential PDF') axes[0][0].set_xlabel(xlab1) axes[0][0].set_ylabel(ylab1) axes[0][0].set_xlim(0,xmax) axes[0][0].set_ylim(0,ymax) axes[0][0].legend() axes[0][0].set_title('Hours Between Quakes') #QQ Plot for fit to exponential ppstats = sm.ProbPlot(deltas, sp.stats.expon, loc=0, scale=lambd) #Calculate one more summary statistic t_quants = ppstats.theoretical_quantiles #x s_quants = ppstats.sample_quantiles #y y_fit = t_quants qq_ssr = np.sum((s_quants-y_fit)**2) qq_sst = np.sum((s_quants-np.mean(s_quants))**2) qq_r2 = 1-(qq_ssr/qq_sst) #Another plot #QQ's axes[0][1].scatter(t_quants,s_quants, marker='o', color='green') #45 degree reference line x = np.arange(0,20000, 1.) 
y = x axes[0][1].plot(x,y, color='gray') axes[0][1].grid(False) axes[0][1].set_xlabel(xlab2) axes[0][1].set_ylabel(ylab2) axes[0][1].set_xlim(0,20000) axes[0][1].set_ylim(0,20000) axes[0][1].tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off') axes[0][1].set_title('QQ Plot') #Print Summary Statistics if summary_stats==True: print "Summary Statistics for Distributional Fit" print "lambda = " + str(lambd) if pd.notnull(force_lambd): print "sample mean = " + str(mean) print "sample size = " + str(n) print "AIC = " + str(aic) print "QQ R^2 = " + str(qq_r2) print "Actual 95% Confidence Interval = " + str(actual_95_int) print "Predicted 95% Confidence Interval = " + str(pred_95_int) print "Summary Statistics for Prediction" print "MSE = " + str(mse) print "L1 Norm = " + str(L1norm) return(lambd, n, aic, k, mse, qq_r2, t_quants, s_quants, L1norm, actual_95_int, pred_95_int) deltas1 = get_hours_between(eq_df[eq_df.prefmag>=3]) summary1 = fit_expo(deltas1, plot=True, bins=500, xmax=500, ymax=0.024, k=1., force_lambd=None, summary_stats=True) deltas1 #get the 5th quartile int((float(len(deltas1))/100)*5) #get the 9th quartile int((float(len(deltas1))/100)*95) # Separate the data by time period pre2010 = eq_df[eq_df.year_float <= 2010] post2010 = eq_df[eq_df.year_float > 2010] eq_df_priorByYear = pre2010.groupby('year').count().reset_index().prefmag eq_df_postByYear = post2010.groupby('year').count().reset_index().prefmag # Plot samples to see distribution fig, ax = plt.subplots() data = [np.log(eq_df_postByYear), np.log(eq_df_priorByYear)] plt.boxplot(data,vert=0, ) plt.grid(False) ax.set_aspect(1.5) ax.set_xlim([-.5, 7]) ax.set_xlabel('Log Number of earthquakes per year in tens') ax.set_yticks([1,2]) ax.set_yticklabels(['After 2010','Before 2010']) # Perform t-test (Welch’s t-test) # Null hypothesis: number of earthquakes per year are the same in both time periods # Alternative hypothesis: number of earthquakes per year are NOT the same between time periods t_test = sp.stats.ttest_ind(np.log(eq_df_priorByYear), np.log(eq_df_postByYear), equal_var=False) t_test # Prepare data alleq_df_ByYear = eq_df.groupby('year').count().reset_index().prefmag n = len(eq_df_postByYear) N = len(eq_df.groupby('year').count().reset_index().prefmag) # Get observed difference obsdiff = np.log(eq_df_postByYear.mean())-np.log(eq_df_priorByYear.mean()) # Start list vector for storing difference of means mediff = [] # Run permutations for i in range(100000): post = np.random.choice(alleq_df_ByYear.values, size = n, replace = True) prior = np.random.choice(alleq_df_ByYear.values, size = N-n, replace = True) mediff.append(np.log(post.mean())-np.log(prior.mean())) # Plot results plt.hist(mediff, bins = 50) plt.axvline(obsdiff, color='b', linestyle='dashed', linewidth=2, label = 'Observed difference') plt.xlabel('Difference of earthquakes per year between prior and post 2010') plt.ylabel('Frequency') plt.legend() plt.grid(False) # Get p.value count = 0 for diff in mediff: if diff > obsdiff: count += 1 print count/100000. 
calc_recurrence(pre2010,1e-10) calc_recurrence(post2010,1e-5) deltas2 = get_hours_between(eq_df[(eq_df.prefmag>=3) & (eq_df.year_float<2010)]) summary2 = fit_expo(deltas2, plot=True, bins=100, xmax=5000, ymax=0.00085, k=1., force_lambd=summary1[0], summary_stats=True, xlab1='Hours', ylab1='Probability Density', xlab2='Theoretical Quantiles') summary3 = fit_expo(deltas2, plot=True, bins=30, xmax=10000, ymax=0.0004, k=2., force_lambd=None, summary_stats=True) deltas3 = get_hours_between(eq_df[(eq_df.prefmag>=3) & (eq_df.year_float<2010)]) summary3 = fit_expo(deltas2, plot=True, bins=100, xmax=5000, ymax=0.00085, k=1., force_lambd=summary1[0], summary_stats=True, xlab1='Hours', ylab1='Probability Density', xlab2='Theoretical Quantiles') # ------------------------------------------ # THE REGRESSION SECTION HAS SOME CODES THAT TAKE MORE THAN 40-45 MINUTES TO RUN, # EVEN WITH THE MULTITHREADING TECHNIQUE USED # YOU CAN RUN THE WHOLE SECTION BY RUNNING THE FOLLWING COMMAND: # run grid_regression_sklearn.py # however this won't plot the results from the clustering algorithm # (see details later) # ------------------------------------------ # ------------------------------------------ # LOAD DATAFRAMES # ------------------------------------------ # Load the earthquakes dataframe eq_df = pd.DataFrame.from_csv('./tempdata/earthquakes_catalog_treated.csv',sep = '|') # filter to keep only magnitude >= 3 eq_df = eq_df[eq_df.prefmag >= 3.0] # for ease add column year eq_df['year'] = map(lambda x: int(x), eq_df['year_float']) # Load the wells dataframe. welldf = pd.DataFrame.from_csv('tempdata/wells_data.csv') # define the number of threads for the computation # please optimize according to your number of cores num_threads = 4 # initialize the dictionary in which we store the results of all the regressions results_dic= {} import pickle import itertools as it # library for multithreading import threading # library for print messages from each thread import logging logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s', ) # for time split import datetime # Load package for linear model from sklearn import linear_model from sklearn.cross_validation import train_test_split from sklearn.grid_search import GridSearchCV from sklearn import preprocessing # import library for calculating the distance # if this line does not work then install the library # using: pip install geopy from geopy.distance import great_circle def get_hours_between(df): dates=[] origintimes = df.origintime.values for date in origintimes: year, month, day = date.split('-') day, hour = day.split(' ') hour, minute, second = hour.split(':') if len(second.split('.'))==2: second, microsecond = second.split('.') elif len(second.split('.'))==1: microsecond=0 dates.append(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second), int(microsecond))) dates=sorted(dates) intertimes =[] for i in range(1,len(dates)): delta = dates[i] - dates[i-1] delta = delta.total_seconds()/3600 intertimes .append(delta) return intertimes * 60 def mask_region(df, region): mask_region = (df['latitude'] < region[0][1]) \ & (df['latitude'] >= region[0][0]) \ & (df['longitude'] < region[1][1]) \ & (df['longitude'] >= region[1][0]) return mask_region def partition_state(interval): # ------------------------------------------ # PARTITION THE MAP INTO CELLS = CREATE THE GRID # ------------------------------------------ # Make ranges # Since all earthquakes are in Oklahoma we partition roughly # using 
the upper bound of the state limit xregions1 = np.arange(33.5, 37.0, interval) xregions2 = np.arange(33.5 + interval, 37.0 + interval, interval) xregions = zip(xregions1, xregions2) yregions1 = np.arange(-103.0,-94. , interval) yregions2 = np.arange(-103.0 + interval ,-94.0 + interval, interval) yregions = zip(yregions1, yregions2) # Create a dictionary with keys = (slice in long, slice in latitude) # value = number of the grid cell regions = it.product(xregions,yregions) locdict = dict(zip(regions, range(len(xregions)*len(yregions)))) return locdict def do_regression(X,Y,reg,locdict,lock,cv, standardization): # -------------------- # SPLIT INTO TRAIN AND TEST # -------------------- # Split in train - test X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) # -------------------- # STANDARDIZATION OF THE DATA -- SCALING # -------------------- if standardization == 'scaler': scaler = preprocessing.StandardScaler().fit(X_train) X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) y_train = scaler.fit_transform(y_train) y_test = scaler.transform(y_test) elif standardization == 'MinMaxScaler': min_max_scaler = preprocessing.MinMaxScaler() X_train = min_max_scaler.fit_transform(X_train) X_test = min_max_scaler.transform(X_test) y_train = min_max_scaler.fit_transform(y_train) y_test = min_max_scaler.transform(y_test) else: pass # -------------------- # OPTIMIZE CLASSIFIER WITH RIDGE REGRESSION # AND ORDINARY LEAST SQUARE REGRESSION # -------------------- # # Using Ordinary Least Square Regression # clf = linear_model.LinearRegression() # clf.fit(X_train, y_train) # logging.debug('For {} cells the score is {}'.format(len(locdict.keys()),clf.score(X_test, y_test))) # # Using Ridge Regression and cross-validation # # doing the selection manually # # uncomment this part to check it matches the next paragraph # clf = linear_model.Ridge() # parameters = {'alpha': [0.1, 0.5]} # gs = GridSearchCV(clf, param_grid=parameters, cv=5) # gs.fit(X_train, y_train) # best = gs.best_estimator_ # best.fit(X_train, y_train) # logging.debug('For {} cells the score of manual Ridge is {}'.format(len(locdict.keys()),best.score(X_test, y_test))) # Using Ridge Regression with built-in cross validation # of the alpha parameters # note that alpha = 0 corresponds to the Ordinary Least Square Regression clf = linear_model.RidgeCV(alphas=[0.0, 0.1, 1, 10.0, 100.0, 1e3,1e4 ,1e5], cv = cv) clf.fit(X_train, y_train) return clf, X_test, y_test def split_in_batch(intervals): # split randomely the letters in batch for the various threads all_batch = [] size_batch = len(intervals) / num_threads for i in range(num_threads-1): batch_per_threads = random.sample(intervals,size_batch) all_batch.append(batch_per_threads) # new set intervals = list(set(intervals) - set(batch_per_threads)) # now get the rest all_batch.append(intervals) print('look at all_batch {}'.format(all_batch)) return all_batch def print_best_score_append_dictionary(reg_type): best_score_prior = [[c[1],c[2]] for c in best_grid_prior] best_score_prior = np.array(best_score_prior) best_index_prior = np.where(best_score_prior[:,0] == max(best_score_prior[:,0]))[0][0] print('Best classifier <2010 is for alpha ={}'.format(best_grid_prior[best_index_prior][0].alpha_)) print('Coefs <2010 are ={}'.format(best_grid_prior[best_index_prior][0].coef_)) print('R^2 <2010 = {}'.format(best_grid_prior[best_index_prior][1])) prior_dic = {'coefs':best_grid_prior[best_index_prior][0].coef_ , 'r2': 
best_grid_prior[best_index_prior][1]} if reg_type == 'grid': prior_dic['interval'] = best_grid_prior[best_index_prior][2] print('Best interval <2010 is {}'.format(best_grid_prior[best_index_prior][2])) elif reg_type == 'cluster': prior_dic['eps'] = best_grid_prior[best_index_prior][2] print('Best eps <2010 is {}'.format(best_grid_prior[best_index_prior][2])) best_score_post = [[c[1],c[2]] for c in best_grid_post] best_score_post = np.array(best_score_post) best_index_post = np.where(best_score_post[:,0] == max(best_score_post[:,0]))[0][0] print('Best classifier >= 2010 is for alpha ={}'.format(best_grid_post[best_index_post][0].alpha_)) print('Coefs >= 2010 are ={}'.format(best_grid_post[best_index_post][0].coef_)) print('R^2 >= 2010 = {}'.format(best_grid_post[best_index_post][1])) post_dic = {'coefs':best_grid_post[best_index_post][0].coef_ , 'r2': best_grid_post[best_index_post][1]} if reg_type == 'grid': post_dic['interval'] = best_grid_post[best_index_post][2] print('Best interval >= 2010 is {}'.format(best_grid_post[best_index_post][2])) elif reg_type == 'cluster': post_dic['eps'] = best_grid_post[best_index_post][2] print('Best eps >= 2010 is {}'.format(best_grid_post[best_index_post][2])) return prior_dic, post_dic def do_grid_regression(eq_df, welldf, intervals, lock ,cv = 5, standardization = None): global best_grid_prior global best_grid_post for interval in intervals: # Get dictionary for the partitioned state locdict = partition_state(interval) # Filter by time eq_df_prior = eq_df[eq_df.year < 2010] welldf_prior = welldf[welldf.year < 2010] eq_df_post = eq_df[eq_df.year >= 2010] welldf_post = welldf[welldf.year >= 2010] X_prior = [] X_post = [] Y_prior = [] Y_post = [] ### Start grid size loop here for region in locdict.keys(): # generate dataframe for regression with data < 2010 # add the number of quakes per region Y_prior.append(eq_df_prior[mask_region(eq_df_prior,region)].count().values[0]) # add the number of wells per region # add the total volume injected per region # add them with into X_prior as [nb wells, volume] X_prior.append([welldf_prior[mask_region(welldf_prior,region)].count().values[0] , welldf_prior[mask_region(welldf_prior,region)].volume.sum()]) # generate dataframe for regression with data >= 2010 # add the number of quakes per region Y_post.append(eq_df_post[mask_region(eq_df_post,region)].count().values[0]) # add the number of wells per region # add the total volume injected per region # add them with into X_post as [nb wells, volume] X_post.append([welldf_post[mask_region(welldf_post,region)].count().values[0] , welldf_post[mask_region(welldf_post,region)].volume.sum()]) X_prior = np.array(X_prior,dtype=np.float64) X_post = np.array(X_post,dtype=np.float64) Y_post = np.array(Y_post, dtype=np.float64).reshape(-1,1) Y_prior = np.array(Y_prior, dtype = np.float64).reshape(-1,1) # ------------------------------------------ # DOING THE REGRESSION # ------------------------------------------ # logging.debug(' For {} cells, Total number of quakes: prior {}, post {}'\ # .format(len(locdict.keys()),sum(X_prior[:,0]), sum(X_post[:,0]) )) reg_for = ['prior', 'post'] for reg in reg_for: if reg == 'prior': X = X_prior Y = Y_prior elif reg == 'post': X = X_post Y = Y_post clf, X_test, y_test = do_regression(X,Y,reg,locdict,lock, cv ,standardization) logging.debug('{}: For {} cells the score of RidgeCV is {} with alpha = {}'\ .format(reg,len(locdict.keys()),clf.score(X_test, y_test),clf.alpha_)) with lock: if reg == 'prior': 
best_grid_prior.append([clf,clf.score(X_test, y_test),interval]) elif reg == 'post': best_grid_post.append([clf,clf.score(X_test, y_test),interval]) return # ------------------------------------------ # GRID REGRESSION # RUNNING THIS CELL TAKES A WHILE ~ 15 MIN # ------------------------------------------ best_grid_prior = [] best_grid_post = [] # define the intervals intervals = [0.05, 0.1,0.2, 0.3, 0.4,0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.5, 2.0] # intervals = [0.4, 0.5, 0.8,0.9, 1.0, 1.5] # split randomely the letters in batch for the various threads all_batch = split_in_batch(intervals) # Vary the standardization and find optimum for standardization in ['None','scaler','MinMaxScaler'] : # parallelize the loop of interval threads = [] lock = threading.Lock() for thread_id in range(num_threads): interval = all_batch[thread_id] t = threading.Thread(target = do_grid_regression, \ args = (eq_df, welldf, interval, lock ,5,standardization)) threads.append(t) map(lambda t:t.start(), threads) map(lambda t: t.join(), threads) prior_dic, post_dic = print_best_score_append_dictionary('grid') grid_regression_dic = {'prior': prior_dic, 'post': post_dic } results_dic['grid_regression'] = grid_regression_dic print results_dic def do_grid_interarrival_regression(eq_df, welldf, intervals, lock ,cv = 5, standardization = None): global best_grid_prior global best_grid_post for interval in intervals: # Get dictionary for the partitioned state locdict = partition_state(interval) # Filter by time eq_df_prior = eq_df[eq_df.year < 2010] welldf_prior = welldf[welldf.year < 2010] welldf_prior.reset_index(inplace=True) welldf_post = welldf[welldf.year >= 2010] welldf_post.reset_index(inplace=True) eq_df_post = eq_df[eq_df.year >= 2010] X_prior = [] X_post = [] Y_prior = [] Y_post = [] for region in locdict.keys(): # DO THIS FOR PRIOR # get mask for the cluster_id mask = mask_region(eq_df_prior,region) Y_prior_append = get_hours_between( eq_df_prior[ mask] ) if len(Y_prior_append) != 0: Y_prior_append = [1.0/y for y in Y_prior_append] else: Y_prior_append = [0.0] for y in Y_prior_append: Y_prior.append(y) # add the number of wells per region # add the total volume injected per region # add them with into X_prior as [nb wells, volume] X_prior_append = [welldf_prior[mask_region(welldf_prior,region)].count().values[0] , welldf_prior[mask_region(welldf_prior,region)].volume.sum()] for i in range(len(Y_prior_append)): X_prior.append(X_prior_append) # DO THIS FOR POST # get mask for the cluster_id mask = mask_region(eq_df_post,region) Y_post_append = get_hours_between( eq_df_post[ mask] ) # logging.debug('Y_post {}'.format(Y_post)) if len(Y_post_append) != 0: Y_post_append = [1.0/y for y in Y_post_append if y!=0] else: Y_post_append = [0.0] for y in Y_post_append: Y_post.append(y) # add the number of wells per region # add the total volume injected per region # add them with into X_post as [nb wells, volume] X_post_append = [welldf_post[mask_region(welldf_post,region)].count().values[0] , welldf_post[mask_region(welldf_post,region)].volume.sum()] for i in range(len(Y_post_append)): X_post.append(X_post_append) X_prior = np.array(X_prior,dtype=np.float64) X_post = np.array(X_post,dtype=np.float64) Y_post = np.array(Y_post, dtype=np.float64).reshape(-1,1) Y_prior = np.array(Y_prior, dtype = np.float64).reshape(-1,1) # ------------------------------------------ # DOING THE REGRESSION # ------------------------------------------ # logging.debug(' For {} cells, Total number of quakes: prior {}, post {}'\ # 
.format(len(locdict.keys()),sum(X_prior[:,0]), sum(X_post[:,0]) )) reg_for = ['prior', 'post'] for reg in reg_for: if reg == 'prior': X = X_prior Y = Y_prior elif reg == 'post': X = X_post Y = Y_post clf, X_test, y_test = do_regression(X,Y,reg,locdict,lock, cv ,standardization) logging.debug('{}: For {} cells the score of RidgeCV is {} with alpha = {}'\ .format(reg,len(locdict.keys()),clf.score(X_test, y_test),clf.alpha_)) with lock: if reg == 'prior': best_grid_prior.append([clf,clf.score(X_test, y_test), interval]) elif reg == 'post': best_grid_post.append([clf,clf.score(X_test, y_test),interval]) return # ------------------------------------------ # GRID 1/INTERARRIVAL REGRESSION # THIS CELL TAKES ABOUT 10-20 MIN TO RUN # ------------------------------------------ best_grid_prior = [] best_grid_post = [] # define the intervals # intervals = [0.05, 0.1,0.2, 0.3, 0.4,0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.5, 2.0] intervals = [0.8,0.9, 1.0, 1.5] # split randomely the letters in batch for the various threads all_batch = split_in_batch(intervals) # Vary the standardization and find optimum for standardization in ['None','scaler','MinMaxScaler'] : # parallelize the loop of interval threads = [] lock = threading.Lock() for thread_id in range(num_threads): interval = all_batch[thread_id] t = threading.Thread(target = do_grid_interarrival_regression, \ args = (eq_df, welldf, interval, lock ,5,standardization)) threads.append(t) map(lambda t:t.start(), threads) map(lambda t: t.join(), threads) prior_dic, post_dic = print_best_score_append_dictionary('grid') regression_grid_interarrival = {'prior': prior_dic, 'post': post_dic } results_dic['regression_grid_interarrival'] = regression_grid_interarrival print results_dic def compute_dbscan(eq_df,mask,period, eps, min_samples): # Create a subset of the dataframe to work with subset = eq_df[mask] # ------------------------------------------------------------------------ # COMPUTE DBSCAN # ------------------------------------------------------------------------ X = [] for (lat,longi) in zip(subset.latitude,subset.longitude): X.append((lat,longi)) # implement the sparse matrix of distance X_dist = np.zeros((len(X),len(X)), dtype = np.float64) # select on index for i1 in range(len(X)): # loop over all the other indices for i2 in range(len(X)): # now find the distance if i1 < i2: # if i1 = i2 , distance = 0 X_dist[i1,i2] = great_circle(X[i1],X[i2]).km # fill the symetric part of the matrix # since distance(x1, x2) = distance(x2, x1) X_dist[i2,i1] = X_dist[i1,i2] db = DBSCAN(eps= eps , min_samples= min_samples , metric = 'precomputed').fit(X_dist) labels = db.labels_ # Number of clusters in labels, ignoring noise if present. 
# note that noise is when the label is -1 n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) # print('Estimated number of clusters: %d' % n_clusters_) return db, n_clusters_ def add_column_dataframe(eq_df,mask,eps,period, db): # ------------------------------------------------------------------------ # APPEND COLUMN IN DATAFRAME # ------------------------------------------------------------------------ col_name = 'cluster_' + period + '_eps_' + str(eps) eq_df[col_name] = np.zeros((eq_df.shape[0])) c = 0 for i in range(eq_df.shape[0]): if mask[i]: # put the label in the corresponding column eq_df.loc[i,col_name] = db.labels_[c] c += 1 else: eq_df.loc[i,col_name] = - 10 return eq_df def plot_cluster_M(Period, distance_to_search_for_neighboring_points): eps = distance_to_search_for_neighboring_points if Period == 'before 2010': period = 'prior' elif Period == 'after 2010': period = 'post' col_name = 'cluster_' + period + '_eps_' + str(eps) various_labels = list(set(eq_df[col_name].values) - set([-10])) # now plot fig = plt.figure(figsize= (11,9)) ax = fig.add_subplot(111) # --- # PLOT THE CLUSTERS # --- n_clusters_ = len(list(various_labels)) - 1 for i in range(0,len(list(various_labels))): if list(various_labels)[i]!=-1: ax.scatter(eq_df.longitude[eq_df[col_name] == list(various_labels)[i]], \ eq_df.latitude[eq_df[col_name] == list(various_labels)[i]],label=list(various_labels)[i], color = sns.color_palette('hls',n_clusters_)[i], alpha =1, lw = 4); else: ax.scatter(eq_df.longitude[eq_df[col_name] == list(various_labels)[i]],\ eq_df.latitude[eq_df[col_name] == list(various_labels)[i]],label='noise', color = 'k', alpha =0.1, lw=3); csfont = {'fontsize':25} ax.set_ylabel('Latitude',**csfont); ax.set_xlabel('Longitude',**csfont); ax.axis(**csfont) ax.set_title("Number of clusters: {}".format(n_clusters_),**csfont); # --- # PLOT SOME CITIES # --- # add a couple of cities cities = pd.read_csv('./tempdata/OK_cities.txt', sep = '\t') # add cities mask_towns = (cities.Name == 'Oklahoma City') | (cities.Name == 'Prague') | (cities.Name == 'Tulsa') \ | (cities.Name == 'Ardmore') | (cities.Name == 'Stillwater') \ | (cities.Name == 'Enid') | (cities.Name == 'Jones') \ | (cities.Name == 'Cherookee') | (cities.Name == 'Perry') cities_list = ['Oklahoma City','Prague','Tulsa', \ 'Ardmore','Stillwater', 'Enid', 'Jones', 'Cherookee', 'Perry'] for city in cities_list: ax.annotate(city,\ xy=(cities[cities.Name == city].Longitude.values[0] ,\ cities[cities.Name == city].Latitude.values[0]),\ size=20) ax.scatter(cities[mask_towns].Longitude.values[:], cities[mask_towns].Latitude.values[:], marker = '*', s= 100) ax.set_ylim([34,37]) ax.set_xlim([-99.2,-96]) ax.tick_params(labelsize=20) plt.show() return # THIS CELL TAKES ABOUT 15 MIN TO RUN # import libraries for DBSCAN from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.preprocessing import StandardScaler # import library for distance calculation from geopy.distance import great_circle # ------------------------------------------------------------------------ # LOAD THE DATA, GET A SUBSET MAGNITUDE > 3 AND COUNTY = LINCOLN # ------------------------------------------------------------------------ eq_df = pd.DataFrame.from_csv('./tempdata/earthquakes_catalog.csv',sep = '|') mask = (eq_df.prefmag >= 3.5) & (eq_df.prefmag < 2010) # eps = the distance to search for neighboring points # min_samples = the minimum the point required to create an individual cluster for eps in range(5,30): # compute_dbscan(eq_df,mask,'prior', eps = eps, 
min_samples = 20) db, n_clusters_ = compute_dbscan(eq_df,mask,'prior', eps = eps , min_samples = 20) eq_df = add_column_dataframe(eq_df,mask,eps, 'prior', db) db, n_clusters_ = compute_dbscan(eq_df,mask,'post', eps = eps , min_samples = 20) eq_df = add_column_dataframe(eq_df,mask,eps, 'post', db) eq_df = pd.DataFrame.from_csv('./tempdata/earthquakes_catalog_treated.csv',sep = '|') from IPython.html.widgets import interact global eq_df interact(plot_cluster_M, Period = ('before 2010', 'after 2010') ,distance_to_search_for_neighboring_points=(5,29,1)); def cluster_centroid(eq_df, mask): n = eq_df[ mask].shape[0] sum_lon = eq_df[ mask].longitude.sum() sum_lat = eq_df[mask].latitude.sum() return (sum_lat/n, sum_lon/n ) def get_furthest_distance(eq_df, mask, centroid): furthest_point = None furthest_dist = None for point0, point1 in zip(eq_df[mask].latitude,eq_df[mask].longitude): point = (point0, point1) dist = great_circle( centroid, point ).km if (furthest_dist is None) or (dist > furthest_dist): furthest_dist = dist furthest_point = point return furthest_dist def get_cluster_nwells_volume(welldf, centroid, radius): n_wells = 0 volume = 0 for (i, coords) in enumerate(zip(welldf.latitude,welldf.longitude)): if great_circle((coords[0],coords[1]),centroid ) < radius: n_wells += 1 volume += welldf.loc[i, 'volume'] return [n_wells, volume] def mask_cluster(df, period, eps, cluster_id): ''' INPUT: df = dataframe for which we want to create a mask period = 'post' or 'prior' for after or before 2010 eps = distance between events for the DBSCAN algorithm cluster_id = id of the cluster for which we want to find the mask example: we want cluster 1 from the catalog < 2010 found by DBSCAN for eps = 8: mask_cluster(eq_df, 'prior', 8, 1) ''' # reconstruct the column name col_name = 'cluster_' + period + '_eps_' + str(eps) mask_cluster = df[ col_name ] == cluster_id return mask_cluster def do_cluster_regression(eq_df, welldf, eps_s, lock ,cv = 5, standardization = None): global best_grid_prior global best_grid_post # Filter by time welldf_prior = welldf[welldf.year < 2010] welldf_prior.reset_index(inplace=True) welldf_post = welldf[welldf.year >= 2010] welldf_post.reset_index(inplace=True) for eps in eps_s: X_prior = [] X_post = [] Y_prior = [] Y_post = [] total_prior = [] total_post = [] logging.debug('eps {} from batch {}, standardization method: {}'\ .format(eps, eps_s,standardization)) # DO THIS FOR PRIOR # find the list of clusters col_name = 'cluster_' + 'prior' + '_eps_' + str(eps) clusters = list(set(eq_df[col_name].values) - set([-10])) # this is for the clusters that are not noise for cluster_id in clusters: # get mask for the cluster_id mask = mask_cluster(eq_df, 'prior', eps, cluster_id) Y_prior_append = get_hours_between( eq_df[ mask] ) for y in Y_prior_append: Y_prior.append(y) # find the centroid of the cluster centroid = cluster_centroid(eq_df, mask) # find the largest radius = largest distance between centroid and points # in the cluster radius = get_furthest_distance(eq_df, mask, centroid) # find the numbe of wells and volume within this radius X_prior_append=get_cluster_nwells_volume(welldf_prior, centroid, radius) total_prior.append(X_prior_append) for i in range(len(Y_prior_append)): X_prior.append(X_prior_append) # add the interarrival for the events classified as noise cluster_id = -1 # ------ mask = mask_cluster(eq_df, 'prior', eps, cluster_id) Y_prior_append = get_hours_between( eq_df[ mask] ) for y in Y_prior_append: Y_prior.append(y) # find the volume total_prior = 
np.array(total_prior) X_prior_append=[welldf_prior.count().values[0] - sum(total_prior[:,0]) , welldf_prior.volume.sum() - sum(total_prior[:,1]) ] for i in range(len(Y_prior_append)): X_prior.append(X_prior_append) #------ # DO THIS FOR POST # find the list of clusters col_name = 'cluster_' + 'post' + '_eps_' + str(eps) clusters = list(set(eq_df[col_name].values) - set([-10])) for cluster_id in clusters: # get mask for the cluster_id mask = mask_cluster(eq_df, 'post', eps, cluster_id) Y_post_append = get_hours_between( eq_df[ mask] ) for y in Y_post_append: Y_post.append(y) # find the centroid of the cluster centroid = cluster_centroid(eq_df, mask) # find the largest radius = largest distance between centroid and points # in the cluster radius = get_furthest_distance(eq_df, mask, centroid) # find the numbe of wells and volume within this radius X_post_append=get_cluster_nwells_volume(welldf_post, centroid, radius) total_post.append(X_post_append) for i in range(len(Y_post_append)): X_post.append(X_post_append) # add the interarrival for the events classified as noise cluster_id = -1 # ------ mask = mask_cluster(eq_df, 'post', eps, cluster_id) Y_post_append = get_hours_between( eq_df[ mask] ) for y in Y_post_append: Y_post.append(y) # find the volume total_post = np.array(total_post) X_post_append=[welldf_post.count().values[0] - sum(total_post[:,0]) , welldf_post.volume.sum() - sum(total_post[:,1]) ] for i in range(len(Y_post_append)): X_post.append(X_post_append) #------ X_prior = np.array(X_prior,dtype=np.float64) X_post = np.array(X_post,dtype=np.float64) # ------------------------------------------ # DOING THE REGRESSION # ------------------------------------------ # logging.debug(' For {} cells, Total number of quakes: prior {}, post {}'\ # .format(len(locdict.keys()),sum(X_prior[:,0]), sum(X_post[:,0]) )) reg_for = ['prior', 'post'] for reg in reg_for: if reg == 'prior': X = X_prior Y = Y_prior elif reg == 'post': X = X_post Y = Y_post # -------------------- # SPLIT INTO TRAIN AND TEST # -------------------- # Split in train - test X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) # -------------------- # STANDARDIZATION OF THE DATA -- SCALING # -------------------- if standardization == 'scaler': scaler = preprocessing.StandardScaler().fit(X_train) X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) y_train = scaler.fit_transform(y_train) y_test = scaler.transform(y_test) elif standardization == 'MinMaxScaler': min_max_scaler = preprocessing.MinMaxScaler() X_train = min_max_scaler.fit_transform(X_train) X_test = min_max_scaler.transform(X_test) y_train = min_max_scaler.fit_transform(y_train) y_test = min_max_scaler.transform(y_test) else: pass # -------------------- # OPTIMIZE CLASSIFIER WITH RIDGE REGRESSION # AND ORDINARY LEAST SQUARE REGRESSION # -------------------- # Using Ridge Regression with built-in cross validation # of the alpha parameters # note that alpha = 0 corresponds to the Ordinary Least Square Regression clf = linear_model.RidgeCV(alphas=[0.0, 0.1, 1, 10.0, 100.0, 1e3,1e4 ,1e5], cv =cv) clf.fit(X_train, y_train) logging.debug('{}: For eps = {}, score : {}'.format(reg,eps,clf.score(X_test, y_test))) with lock: if reg == 'prior': best_grid_prior.append([clf,clf.score(X_test, y_test), eps]) elif reg == 'post': best_grid_post.append([clf,clf.score(X_test, y_test),eps]) return # ------------------------------------------ # CLUSTER INTERARRIVAL REGRESSION # THIS CELL TAKES ABOUT 15 MIN TO RUN # 
------------------------------------------ best_grid_prior = [] best_grid_post = [] eps_batch = range(5,30) # eps_batch = [5,7,9,11,20] # split randomely the eps in batch for the various threads all_batch = split_in_batch(eps_batch) # Vary the standardization and find optimum for standardization in ['None','scaler','MinMaxScaler'] : # parallelize the loop of interval threads = [] lock = threading.Lock() for thread_id in range(num_threads): eps_s = all_batch[thread_id] t = threading.Thread(target = do_cluster_regression, \ args = (eq_df, welldf, eps_s, lock ,5, standardization)) threads.append(t) map(lambda t:t.start(), threads) map(lambda t: t.join(), threads) prior_dic, post_dic = print_best_score_append_dictionary('cluster') regression_cluster_interarrival = {'prior': prior_dic, 'post': post_dic } results_dic['regression_cluster_interarrival'] = regression_cluster_interarrival print results_dic # Save a dictionary into a pickle file. # import pickle # pickle.dump( results_dic, open( "./tempdata/results_dic.p", "wb" ) ) # results = {} # results['grid_regression_prior'] = results_dic['grid_regression']['prior'] # results['grid_regression_post'] = results_dic['grid_regression']['post'] # results['regression_cluster_interarrival_prior'] = results_dic['regression_cluster_interarrival']['prior'] # results['regression_cluster_interarrival_post'] = results_dic['regression_cluster_interarrival']['post'] # results['regression_grid_interarrival_prior'] = results_dic['regression_grid_interarrival']['prior'] # results['regression_grid_interarrival_post'] = results_dic['regression_grid_interarrival']['post'] # results_df = pd.DataFrame.from_dict(results, orient = 'index') # results_df.to_csv('./tempdata/results_df.csv', sep = ',') results = pd.read_csv('tempdata/results_df.csv') # Import results results = pd.read_csv('tempdata/results_df.csv') ord_res = results.sort('r2', ascending = False) print results.shape results.head(10) ord_res = results['r2'][[1,0,3,2,5,4]] post = results['r2'][[0,2,4]] prior = results['r2'][[1,3,5]] fig, ax = plt.subplots() b1 = plt.barh(range(6),ord_res, color = ["#99D699", "#B2B2B2","#99D699", "#B2B2B2","#99D699", "#B2B2B2"]) ax.set_title('Pre-2010') ax.set_xlim([0,.8]) ax.set_xlabel("R-squared") ax.set_yticks([1-.1,3-.1,5-.1]) ax.set_yticklabels(['Grid Regression','Regression with Cluster', 'Grid Regression with Interarrival']) ax.grid(False) ax.legend(b1, ['Prior-2010','Post-2010']) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We intentionally ignore some of the data relative to body and surface waves as well as other source of estimations of the magnitude to take less space in memory. A complete description of the columns can be found here. Step2: We created an interactive visualization tool on the website that can be accessed here. Step3: In the first plot above, the number of water disposal wells in Oklahoma are plotted by year. In the second plot, the total amount of water gallons disposed is plotted by year. The largest amount of water was disposed between 2006 and 2010. However, we believe that data in the few past years have not been reported or is not currently available to public use. Step4: Here we show the distribution of earthquakes in Oklahoma before 2010. Step5: We begin with some basic visual inspection of the data. We first plot a histogram of the number of earthquakes by year. Step6: This visual inspection suggests a clear difference in the number of earthquakes starting around 2010 and since water disposal through wells becomes more intense just a few years earlier makes us suspect an association between the number of earthquakes and fracking activity (specifically, based on past research, the use of disposal wells.) Step8: The visualization indeed suggests that the line for the pre-2010 data does not fit the post-2010 data very well. Step9: The constant $b$ is calculated by using least squares and maximum likelihood. The calculated value of $b$ is 1.53, which means there are more earthquakes of lower magnitude than higher magnitude. Step10: The above plot shows that the fit is not too good. The exponential curve like the histogram does decrease at an increasing rate, but overall the curve does not fit too well. Further the QQ Plot shows that the data does not appear to be well approximated by an exponential distribution. Step11: 3. Pre vs. Post 2010 Step12: 3.1 Is the difference statistically significant? Step13: With a p-value of .0003, we reject the null hypothesis and conclude that the medians differ. In addition, we perform a permutation test with replacement in case the log-transformed data does not follow a normal distribution Step14: With a p-value of 0.00015, we reject the null hypothesis and conclude the same as before. Step15: The $b$ value after 2010 is smaller than that prior to 2010, suggesting a smaller ratio of eathquakes of large magnitude to lower magnitude after 2010 than usual. However, after 2010 there are 295 annual earthquakes with magnitude greater than 3 while there were only 2 prior to 2010 annually. Step16: However, the above plot shows that while the model does better on the pre-2010 data, the post-2010 is still not well fit with a Poisson Process. Step17: 4.1 Set up useful functions for the regression analysis Step18: Because we will also do a regression to predic the time between each earthquake we introduce here a function that evaluate the time in seconds between earthquakes from a dataframe df. Step19: Here is a function that creates a mask for a region defined by bounds in latitude and longitude. The mask for the various regions are ouput of the partition_state function that partition Oklahoma state into squares of a given length. Step20: Here is the function that partition Oklahoma state into cells. 
We provide the function with an interval used to split the span between the minimum and maximum latitude and longitude. The output of the function is a dictionary whose keys are the bounds of the cells, as ( (minimum latitude, maximum latitude), (minimum longitude, maximum longitude) ), and whose value is the cell number. Step21: 4.2 The Ridge Regression Step22: 4.3 Regression and parallelization of the algorithm Step23: The function print_best_score_append_dictionary is used to print the best results of the trained classifier. We also append these results into the dictionary results_dic. Step24: 4.4 Regression over a regular grid Step25: Here we run the ridge regression on a grid. We loop over a range of grid sizes. Oklahoma state is split into 10 to 3100 cells. In each case we do the ridge regression and find the best classifier using the coefficient of determination as a metric. Generating and training the classifier on the various grids is an embarrassingly parallel process. Therefore we use the Python threading module to do this. Each thread has a couple of grids to generate and do the regression for. We use a lock to append the results into the global lists best_grid_prior and best_grid_post. This ensures that the threads do not write into these two shared lists at the same time. Step26: 4.4.2 Predicting the interarrival times Step27: Here we run the regression on 1/interarrival times using the various grids generated. Once more we take advantage of parallel computing to speed up the algorithm significantly. The best classifier, according to the coefficient of determination measure, is once more added into the results_dic dictionary. Step28: 4.5 Regression using clusters of earthquakes Step29: Once the algorithm is performed, we store the labels of the events in the earthquake catalog. Each label corresponds to the cluster to which the event belongs. If the label is -10, the event was not considered by the algorithm because we have passed a mask that filters it. If the label is -1, the event is classified as noise: it does not belong to any of the clusters. The event is either too far from all the other events or it belongs to a cluster of events that has fewer earthquakes than min_samples. Finally, the labels range from 0 to the maximum number of clusters minus one. The columns added are called, for example, 'cluster_prior_eps_8' for the events before 2010 with eps = 8, and 'cluster_post_eps_15' for the events after 2010 with eps = 15. Step30: The function plot_cluster_M plots the events on a map of Oklahoma and colors the various clusters. The events in black have been considered as noise by the clustering algorithm. The argument distance_to_search_for_neighboring_points corresponds to eps but has been renamed for the interactive plot that is detailed in the next cell. Step31: This cell runs the DBSCAN clustering algorithm for various eps. It takes approximately 10 mins. Instead of running it, you can simply load the dataframe to which the columns holding the results of the clustering algorithm have already been appended. Step32: This cell loads the results from the algorithm and creates an interactive plot in which you can visualize the clusters for various eps, both before and after 2010. Step33: 4.5.2 Set up some useful functions for regression using clusters Step34: Here we pass the dataframe with the well locations and their volume. We also provide the center of mass of a cluster and the maximum radius of the cluster, within which we find the number of wells from welldf and the total volume of water injected in this region. A minimal illustrative sketch of this counting step is given just below.
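Illustrative sketch (not part of the original notebook): the well coordinates, volumes, centroid and radius below are invented, and only the counting idea of this step is shown; the real analysis takes these quantities from welldf and from the DBSCAN clusters, using the same geopy great_circle call that appears in the code above.

# Hedged sketch of the "wells within a cluster radius" counting step.
# All numbers here are made up for illustration.
import pandas as pd
from geopy.distance import great_circle

wells = pd.DataFrame({
    "latitude": [35.48, 35.60, 36.10],
    "longitude": [-97.50, -97.40, -96.90],
    "volume": [1.2e6, 8.0e5, 2.5e6],  # hypothetical volumes in gallons
})
centroid = (35.55, -97.45)  # hypothetical cluster center of mass (lat, lon)
radius_km = 25.0            # hypothetical maximum cluster radius in km

# Keep only the wells whose great-circle distance to the centroid is within the radius.
inside = wells.apply(
    lambda w: great_circle((w.latitude, w.longitude), centroid).km <= radius_km,
    axis=1,
)
n_wells = int(inside.sum())
total_volume = wells.loc[inside, "volume"].sum()
print(n_wells, total_volume)

Comparing explicitly in kilometres via .km keeps the distance comparison unambiguous.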
Step35: Here we pass the dataframe with the well locations and their volume. We also provide the center of mass of a cluster and the maximum radius of the cluster within which we find the number of wells from welldf and the total volume of water injected in this region. Step36: Here we define a function that generates a mask for loading a specific result of the clustering algorithm written in the dataframe. Step37: 4.5.3 Regression with clusters Step38: Run the following cell to save the results from all the regressions into a dictionary, pickled as results_dic.p. Then the dictionary is converted into a dataframe for plotting. The dataframe is results_df. Step39: Run this cell if you want to load the results from this study, saved in the dataframe results_df. Step40: 4.6 Results from the regressions
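For illustration only (not part of the original notebook): the nested results dictionary described in Steps 38-40 can be flattened into a dataframe and ranked by R^2 along the following lines. The strategy names echo the keys used in the code above, but the R^2 numbers are invented.

import pandas as pd

# Hypothetical nested results, mimicking the structure built in the regression cells:
# one entry per strategy, each split into 'prior' and 'post' 2010.
results_dic = {
    "grid_regression": {"prior": {"r2": 0.32}, "post": {"r2": 0.61}},
    "regression_grid_interarrival": {"prior": {"r2": 0.05}, "post": {"r2": 0.12}},
    "regression_cluster_interarrival": {"prior": {"r2": 0.08}, "post": {"r2": 0.27}},
}

# Flatten the two levels of keys into one row label per (strategy, period) pair.
flat = {name + "_" + period: values
        for name, periods in results_dic.items()
        for period, values in periods.items()}
results_df = pd.DataFrame.from_dict(flat, orient="index")

# Rank the regressions by coefficient of determination, best first
# (sort_values assumes a reasonably recent pandas).
print(results_df.sort_values("r2", ascending=False))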
13,697
<ASSISTANT_TASK:> Python Code: %pylab inline x = numpy.array([[0,0],[-1,0.1],[0.3,-0.05],[0.7,0.3],[-0.2,-0.6],[-0.15,-0.63],[-0.25,0.55],[-0.28,0.67]]) y = numpy.array([0,0,0,0,1,1,2,2]) def plot_data(features,labels,axis,alpha=1.0): # separate features according to their class X0,X1,X2 = features[labels==0], features[labels==1], features[labels==2] # class 0 data axis.plot(X0[:,0], X0[:,1], 'o', color='green', markersize=12, alpha=alpha) # class 1 data axis.plot(X1[:,0], X1[:,1], 'o', color='red', markersize=12, alpha=alpha) # class 2 data axis.plot(X2[:,0], X2[:,1], 'o', color='blue', markersize=12, alpha=alpha) # set axes limits axis.set_xlim(-1.5,1.5) axis.set_ylim(-1.5,1.5) axis.set_aspect('equal') axis.set_xlabel('x') axis.set_ylabel('y') figure,axis = pyplot.subplots(1,1) plot_data(x,y,axis) axis.set_title('Toy data set') pyplot.show() def make_covariance_ellipse(covariance): import matplotlib.patches as patches import scipy.linalg as linalg # the ellipse is centered at (0,0) mean = numpy.array([0,0]) # eigenvalue decomposition of the covariance matrix (w are eigenvalues and v eigenvectors), # keeping only the real part w,v = linalg.eigh(covariance) # normalize the eigenvector corresponding to the largest eigenvalue u = v[0]/linalg.norm(v[0]) # angle in degrees angle = 180.0/numpy.pi*numpy.arctan(u[1]/u[0]) # fill Gaussian ellipse at 2 standard deviation ellipse = patches.Ellipse(mean, 2*w[0]**0.5, 2*w[1]**0.5, 180+angle, color='orange', alpha=0.3) return ellipse # represent the Euclidean distance figure,axis = pyplot.subplots(1,1) plot_data(x,y,axis) ellipse = make_covariance_ellipse(numpy.eye(2)) axis.add_artist(ellipse) axis.set_title('Euclidean distance') pyplot.show() x.T.shape from modshogun import RealFeatures, MulticlassLabels features = RealFeatures(x.T) labels = MulticlassLabels(y.astype(numpy.float64)) from modshogun import LMNN # number of target neighbours per example k = 1 lmnn = LMNN(features,labels,k) # set an initial transform as a start point of the optimization init_transform = numpy.eye(2) lmnn.set_maxiter(5000) lmnn.train(init_transform) # get the linear transform from LMNN L = lmnn.get_linear_transform() # square the linear transform to obtain the Mahalanobis distance matrix M = numpy.matrix(numpy.dot(L.T,L)) # represent the distance given by LMNN figure,axis = pyplot.subplots(1,1) plot_data(x,y,axis) ellipse = make_covariance_ellipse(M.I) axis.add_artist(ellipse) axis.set_title('LMNN distance') pyplot.show() # project original data using L lx = numpy.dot(L,x.T) # represent the data in the projected space figure,axis = pyplot.subplots(1,2, figsize=(10,5)) plot_data(lx.T,y,axis[0]) ellipse0 = make_covariance_ellipse(numpy.eye(2)) axis[0].add_artist(ellipse0) axis[0].set_title('LMNN\'s linear transform') ellipse1 = make_covariance_ellipse(numpy.eye(2)) plot_data(x,y,axis[1],1) axis[1].add_artist(ellipse1) axis[1].set_title('original') statistics = lmnn.get_statistics() pyplot.plot(statistics.obj.get()) pyplot.grid(True) pyplot.xlabel('Number of iterations') pyplot.ylabel('LMNN objective') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: That is, there are eight feature vectors where each of them belongs to one out of three different classes (identified by either 0, 1, or 2). Let us have a look at this data Step2: In the figure above, we can see that two of the classes are represented by two points that are, for each of these classes, very close to each other. The third class, however, has four points that are close to each other with respect to the y-axis, but spread along the x-axis. Step3: A possible workaround to improve the performance of kNN in a data set like this would be to input to the kNN routine a distance measure. For instance, in the example above a good distance measure would give more weight to the y-direction than to the x-direction to account for the large spread along the x-axis. Nonetheless, it would be nicer (and, in fact, much more useful in practice) if this distance could be learnt automatically from the data at hand. Actually, LMNN is based upon this principle Step4: Secondly, perform LMNN training Step5: LMNN is an iterative algorithm. The argument given to train represents the initial state of the solution. By default, if no argument is given, then LMNN uses PCA to obtain this initial value. Step6: Beyond the main idea
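For illustration only (not part of the original notebook): the metric LMNN learns is the Mahalanobis distance defined by M = L^T L, and measuring with it is the same as projecting the points with L and taking the ordinary Euclidean distance. A small plain-NumPy check, with an invented transform L:

import numpy as np

# Hypothetical linear transform; in the notebook above L would come from
# lmnn.get_linear_transform(). This one simply stretches the y-direction.
L = np.array([[1.0, 0.0],
              [0.0, 2.5]])
M = L.T.dot(L)  # Mahalanobis matrix M = L^T L

a = np.array([0.0, 0.0])
b = np.array([0.3, 0.2])
diff = a - b

d_mahalanobis = np.sqrt(diff.dot(M).dot(diff))     # sqrt((a-b)^T M (a-b))
d_projected = np.linalg.norm(L.dot(a) - L.dot(b))  # Euclidean distance after projecting with L

print(d_mahalanobis, d_projected)  # the two values agree

With a transform like this, differences along y count more than differences along x, which is exactly the kind of weighting the description argues would help kNN on this toy data.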
13,698
<ASSISTANT_TASK:> Python Code: import skbeam.core.correlation as corr from skbeam.core.correlation import two_time_corr, two_time_state_to_results import skbeam.core.roi as roi import skbeam.core.utils as utils from xray_vision.mpl_plotting.roi import show_label_array_on_image import numpy as np import time as ttime import matplotlib.pyplot as plt %matplotlib notebook # multi-tau scheme info real_data_levels = 7 real_data_bufs = 8 real_data = np.load("100_500_NIPA_GEL.npy") avg_img = np.average(real_data, axis=0) # generate some circular ROIs # define the ROIs roi_start = 65 # in pixels roi_width = 9 # in pixels roi_spacing = (5.0, 4.0) x_center = 7. # in pixels y_center = (129.) # in pixels num_rings = 3 # get the edges of the rings edges = roi.ring_edges(roi_start, width=roi_width, spacing=roi_spacing, num_rings=num_rings) # get the label array from the ring shaped 3 region of interests(ROI's) labeled_roi_array = roi.rings( edges, (y_center, x_center), real_data.shape[1:]) fig, ax = plt.subplots() ax.imshow(np.sum(real_data, axis=0) / len(real_data)) show_label_array_on_image(ax, avg_img, labeled_roi_array) plt.title("ROI's on the real data") plt.show() from mpl_toolkits.axes_grid1 import ImageGrid def make_image_grid(im_shape): Create the image grid with colorbars def _add_inner_title(ax, title, loc, size=None, **kwargs): Add a title on top of the image from matplotlib.offsetbox import AnchoredText from matplotlib.patheffects import withStroke if size is None: size = dict(size=plt.rcParams['legend.fontsize']) at = AnchoredText(title, loc=loc, prop=size, pad=0., borderpad=0.5, frameon=False, **kwargs) ax.add_artist(at) at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)]) return at fig = plt.figure(None, (10, 8)) grid = ImageGrid(fig, 111, # similar to subplot(111) nrows_ncols=(1, 2), direction="row", axes_pad=0.05, add_all=True, label_mode="1", share_all=True, cbar_location="top", cbar_mode="each", cbar_size="7%", cbar_pad="1%", ) ims = [] for ax, im_title in zip(grid, ["Ring 1", "Ring 2"]): t = _add_inner_title(ax, im_title, loc=2) t.patch.set_alpha(0.5) ax.set_xlabel('t1') ax.set_ylabel('t2') im = ax.imshow(np.zeros(im_shape), cmap='viridis', origin="lower") ims.append(im) ax.cax.colorbar(im) return grid, ims def update_plots(grid, ims, g2s): Update the plot as the correlation is running for ax, im, g2 in zip(grid, ims, g2s): im.set_data(g2) ax.cax.colorbar(im) lo, hi = np.min(g2), np.max(g2) # low bound should be at least one lo = lo if lo > 1 else 1 # high bound should be at least the low bound hi = lo if hi < lo else hi im.set_clim(lo, hi) ax.figure.canvas.draw() ttime.sleep(0.01) num_levs = 1 num_bufs = real_data.shape[0] full_gen = corr.lazy_two_time(labeled_roi_array, real_data, real_data.shape[0], num_bufs, num_levs) grid, ims = make_image_grid(real_data.shape[1:]) for idx, intermediate_state1 in enumerate(full_gen): if idx % 25 == 0: print('processing %s' % idx) result1 = corr.two_time_state_to_results(intermediate_state1) update_plots(grid, ims, result1.g2) # provide a final update result1 = corr.two_time_state_to_results(intermediate_state1) update_plots(grid, ims, result1.g2) num_bufs = 8 num_levs = 6 multi_gen = corr.lazy_two_time(labeled_roi_array, real_data, real_data.shape[0], num_bufs, num_levs) grid, ims = make_image_grid(real_data.shape[1:]) for idx, intermediate_state in enumerate(multi_gen): if idx % 25 == 0: print('processing %s' % idx) m_result = corr.two_time_state_to_results(intermediate_state) update_plots(grid, ims, m_result.g2) 
ttime.sleep(0.01) #provide a final update result = corr.two_time_state_to_results(intermediate_state) update_plots(grid, ims, result.g2) import skbeam print(skbeam.__version__) <END_TASK>
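Editor's note: each entry of result1.g2 (and result.g2) above is a two-time correlation matrix C(t1, t2) for one ROI, which is why the plotting code shows it as an image. A common follow-up step is to recover an ordinary one-time g2(tau) curve by averaging the matrix along diagonals of constant lag tau = t2 - t1. The helper below is a hypothetical post-processing sketch, not part of the scikit-beam API; the assumption that each result1.g2[i] is a square (t1, t2) matrix follows from how the code above displays it.

import numpy as np

def two_time_to_one_time(C):
    # average <C(t, t + tau)> over all starting times t, for each lag tau
    n = C.shape[0]
    return np.array([np.nanmean(np.diagonal(C, offset=tau)) for tau in range(n)])

# e.g. g2_tau = two_time_to_one_time(result1.g2[0])   # one curve per ROI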
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
Step4: Brute-force correlation
Step5: Using the NIPA gel data
Step6: Multi-tau two-time correlation
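Editor's note: for the brute-force step, a naive reference implementation makes the computed quantity explicit. For a single ROI, the two-time correlation is C(t1, t2) = <I(t1) I(t2)> / (<I(t1)> <I(t2)>), with the averages taken over the ROI pixels. The sketch below is an assumed NumPy version of that definition, not the scheme scikit-beam uses internally (it omits details such as symmetric normalization); the use of real_data and labeled_roi_array from the code above is assumed. Its cost is O(T^2 * n_pixels) in time and O(T^2) in memory, which is exactly why the multi-tau scheme in Step6 is preferred for long series.

import numpy as np

def two_time_brute_force(frames, roi_mask):
    # frames: (T, rows, cols) image stack; roi_mask: boolean mask of one ROI
    roi = frames[:, roi_mask].astype(float)     # (T, n_pixels)
    mean_t = roi.mean(axis=1)                   # <I(t)> over the ROI pixels
    num = roi @ roi.T / roi.shape[1]            # <I(t1) I(t2)> over the ROI pixels
    return num / np.outer(mean_t, mean_t)

# e.g. C_ring1 = two_time_brute_force(real_data, labeled_roi_array == 1)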
13,699
<ASSISTANT_TASK:> Python Code: import numpy as np import matplotlib.pyplot as plt np.random.seed(seed=1) import math import os import random try: import torch except ModuleNotFoundError: %pip install -qq torch import torch from torch import nn from torch.nn import functional as F import requests import zipfile import hashlib import collections from IPython import display import time !mkdir figures # for saving plots # Required functions for downloading data def download(name, cache_dir=os.path.join("..", "data")): Download a file inserted into DATA_HUB, return the local filename. assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}." url, sha1_hash = DATA_HUB[name] os.makedirs(cache_dir, exist_ok=True) fname = os.path.join(cache_dir, url.split("/")[-1]) if os.path.exists(fname): sha1 = hashlib.sha1() with open(fname, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) if sha1.hexdigest() == sha1_hash: return fname # Hit cache print(f"Downloading {fname} from {url}...") r = requests.get(url, stream=True, verify=True) with open(fname, "wb") as f: f.write(r.content) return fname def download_extract(name, folder=None): Download and extract a zip/tar file. fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == ".zip": fp = zipfile.ZipFile(fname, "r") elif ext in (".tar", ".gz"): fp = tarfile.open(fname, "r") else: assert False, "Only zip/tar files can be extracted." fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir DATA_HUB = dict() DATA_URL = "http://d2l-data.s3-accelerate.amazonaws.com/" DATA_HUB["ptb"] = (DATA_URL + "ptb.zip", "319d85e578af0cdc590547f26231e4e31cdf1e42") # @save def read_ptb(): data_dir = download_extract("ptb") with open(os.path.join(data_dir, "ptb.train.txt")) as f: raw_text = f.read() return [line.split() for line in raw_text.split("\n")] sentences = read_ptb() f"# sentences: {len(sentences)}" class Vocab: Vocabulary for text. def __init__(self, tokens=None, min_freq=0, reserved_tokens=None): if tokens is None: tokens = [] if reserved_tokens is None: reserved_tokens = [] # Sort according to frequencies counter = count_corpus(tokens) self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True) # The index for the unknown token is 0 self.unk, uniq_tokens = 0, ["<unk>"] + reserved_tokens uniq_tokens += [token for token, freq in self.token_freqs if freq >= min_freq and token not in uniq_tokens] self.idx_to_token, self.token_to_idx = [], dict() for token in uniq_tokens: self.idx_to_token.append(token) self.token_to_idx[token] = len(self.idx_to_token) - 1 def __len__(self): return len(self.idx_to_token) def __getitem__(self, tokens): if not isinstance(tokens, (list, tuple)): return self.token_to_idx.get(tokens, self.unk) return [self.__getitem__(token) for token in tokens] def to_tokens(self, indices): if not isinstance(indices, (list, tuple)): return self.idx_to_token[indices] return [self.idx_to_token[index] for index in indices] def count_corpus(tokens): Count token frequencies. # Here `tokens` is a 1D list or 2D list if len(tokens) == 0 or isinstance(tokens[0], list): # Flatten a list of token lists into a list of tokens tokens = [token for line in tokens for token in line] return collections.Counter(tokens) vocab = Vocab(sentences, min_freq=10) f"vocab size: {len(vocab)}" def count_corpus(tokens): Count token frequencies. 
# Here `tokens` is a 1D list or 2D list if len(tokens) == 0 or isinstance(tokens[0], list): # Flatten a list of token lists into a list of tokens tokens = [token for line in tokens for token in line] return collections.Counter(tokens) def subsampling(sentences, vocab): # Map low frequency words into <unk> sentences = [[vocab.idx_to_token[vocab[tk]] for tk in line] for line in sentences] # Count the frequency for each word counter = count_corpus(sentences) num_tokens = sum(counter.values()) # Return True if to keep this token during subsampling def keep(token): return random.uniform(0, 1) < math.sqrt(1e-4 / counter[token] * num_tokens) # Now do the subsampling return [[tk for tk in line if keep(tk)] for line in sentences] subsampled = subsampling(sentences, vocab) def compare_counts(token): return ( f'# of "{token}": ' f"before={sum([line.count(token) for line in sentences])}, " f"after={sum([line.count(token) for line in subsampled])}" ) print(compare_counts("the")) print(compare_counts("join")) corpus = [vocab[line] for line in subsampled] print(corpus[0:3]) def get_centers_and_contexts(corpus, max_window_size): centers, contexts = [], [] for line in corpus: # Each sentence needs at least 2 words to form a "central target word # - context word" pair if len(line) < 2: continue centers += line for i in range(len(line)): # Context window centered at i window_size = random.randint(1, max_window_size) indices = list(range(max(0, i - window_size), min(len(line), i + 1 + window_size))) # Exclude the central target word from the context words indices.remove(i) contexts.append([line[idx] for idx in indices]) return centers, contexts tiny_dataset = [list(range(7)), list(range(7, 10))] print("dataset", tiny_dataset) for center, context in zip(*get_centers_and_contexts(tiny_dataset, 2)): print("center", center, "has contexts", context) all_centers, all_contexts = get_centers_and_contexts(corpus, 5) f"# center-context pairs: {len(all_centers)}" class RandomGenerator: Draw a random int in [0, n] according to n sampling weights. 
def __init__(self, sampling_weights): self.population = list(range(len(sampling_weights))) self.sampling_weights = sampling_weights self.candidates = [] self.i = 0 def draw(self): if self.i == len(self.candidates): self.candidates = random.choices(self.population, self.sampling_weights, k=10000) self.i = 0 self.i += 1 return self.candidates[self.i - 1] generator = RandomGenerator([2, 3, 4]) [generator.draw() for _ in range(10)] def get_negatives(all_contexts, corpus, K): counter = count_corpus(corpus) sampling_weights = [counter[i] ** 0.75 for i in range(len(counter))] all_negatives, generator = [], RandomGenerator(sampling_weights) for contexts in all_contexts: negatives = [] while len(negatives) < len(contexts) * K: neg = generator.draw() # Noise words cannot be context words if neg not in contexts: negatives.append(neg) all_negatives.append(negatives) return all_negatives all_negatives = get_negatives(all_contexts, corpus, 5) def batchify(data): max_len = max(len(c) + len(n) for _, c, n in data) centers, contexts_negatives, masks, labels = [], [], [], [] for center, context, negative in data: cur_len = len(context) + len(negative) centers += [center] contexts_negatives += [context + negative + [0] * (max_len - cur_len)] masks += [[1] * cur_len + [0] * (max_len - cur_len)] labels += [[1] * len(context) + [0] * (max_len - len(context))] return ( torch.tensor(centers).reshape((-1, 1)), torch.tensor(contexts_negatives), torch.tensor(masks), torch.tensor(labels), ) x_1 = (1, [2, 2], [3, 3, 3, 3]) x_2 = (1, [2, 2, 2], [3, 3]) batch = batchify((x_1, x_2)) names = ["centers", "contexts_negatives", "masks", "labels"] for name, data in zip(names, batch): print(name, "=", data) def load_data_ptb(batch_size, max_window_size, num_noise_words): num_workers = 4 # Use 4 processes to read the data. sentences = read_ptb() vocab = Vocab(sentences, min_freq=10) subsampled = subsampling(sentences, vocab) corpus = [vocab[line] for line in subsampled] all_centers, all_contexts = get_centers_and_contexts(corpus, max_window_size) all_negatives = get_negatives(all_contexts, corpus, num_noise_words) class PTBDataset(torch.utils.data.Dataset): def __init__(self, centers, contexts, negatives): assert len(centers) == len(contexts) == len(negatives) self.centers = centers self.contexts = contexts self.negatives = negatives def __getitem__(self, index): return (self.centers[index], self.contexts[index], self.negatives[index]) def __len__(self): return len(self.centers) dataset = PTBDataset(all_centers, all_contexts, all_negatives) data_iter = torch.utils.data.DataLoader( dataset, batch_size, shuffle=True, collate_fn=batchify, num_workers=num_workers ) return data_iter, vocab data_iter, vocab = load_data_ptb(512, 5, 5) for batch in data_iter: for name, data in zip(names, batch): print(name, "shape:", data.shape) break batch_size, max_window_size, num_noise_words = 512, 5, 5 data_iter, vocab = load_data_ptb(batch_size, max_window_size, num_noise_words) def skip_gram(center, contexts_and_negatives, embed_v, embed_u): v = embed_v(center) u = embed_u(contexts_and_negatives) pred = torch.bmm(v, u.permute(0, 2, 1)) return pred embed = nn.Embedding(num_embeddings=20, embedding_dim=4) print(f"Parameter embedding_weight ({embed.weight.shape}, " "dtype={embed.weight.dtype})") skip_gram(torch.ones((2, 1), dtype=torch.long), torch.ones((2, 4), dtype=torch.long), embed, embed).shape class SigmoidBCELoss(nn.Module): "BCEWithLogitLoss with masking on call." 
def __init__(self): super().__init__() def forward(self, inputs, target, mask=None): out = nn.functional.binary_cross_entropy_with_logits(inputs, target, weight=mask, reduction="none") return out.mean(dim=1) loss = SigmoidBCELoss() pred = torch.tensor([[0.5] * 4] * 2) label = torch.tensor([[1.0, 0.0, 1.0, 0.0]] * 2) mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]]) loss(pred, label, mask) loss(pred, label, mask) / mask.sum(axis=1) * mask.shape[1] embed_size = 100 net = nn.Sequential( nn.Embedding(num_embeddings=len(vocab), embedding_dim=embed_size), nn.Embedding(num_embeddings=len(vocab), embedding_dim=embed_size), ) # setting device to use device = "cuda" if torch.cuda.is_available() else "cpu" # Functions for plotting and accumulating sum def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend): Set the axes for matplotlib. axes.set_xlabel(xlabel) axes.set_ylabel(ylabel) axes.set_xscale(xscale) axes.set_yscale(yscale) axes.set_xlim(xlim) axes.set_ylim(ylim) if legend: axes.legend(legend) axes.grid() class Animator: For plotting data in animation. def __init__( self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None, xscale="linear", yscale="linear", fmts=("-", "m--", "g-.", "r:"), nrows=1, ncols=1, figsize=(3.5, 2.5), ): # Incrementally plot multiple lines if legend is None: legend = [] display.set_matplotlib_formats("svg") self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize) if nrows * ncols == 1: self.axes = [ self.axes, ] # Use a lambda function to capture arguments self.config_axes = lambda: set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend) self.X, self.Y, self.fmts = None, None, fmts def add(self, x, y): # Add multiple data points into the figure if not hasattr(y, "__len__"): y = [y] n = len(y) if not hasattr(x, "__len__"): x = [x] * n if not self.X: self.X = [[] for _ in range(n)] if not self.Y: self.Y = [[] for _ in range(n)] for i, (a, b) in enumerate(zip(x, y)): if a is not None and b is not None: self.X[i].append(a) self.Y[i].append(b) self.axes[0].cla() for x, y, fmt in zip(self.X, self.Y, self.fmts): self.axes[0].plot(x, y, fmt) self.config_axes() display.display(self.fig) display.clear_output(wait=True) class Accumulator: For accumulating sums over `n` variables. def __init__(self, n): self.data = [0.0] * n def add(self, *args): self.data = [a + float(b) for a, b in zip(self.data, args)] def reset(self): self.data = [0.0] * len(self.data) def __getitem__(self, idx): return self.data[idx] def train(net, data_iter, lr, num_epochs, device=device): def init_weights(m): if type(m) == nn.Embedding: nn.init.xavier_uniform_(m.weight) net.apply(init_weights) net = net.to(device) optimizer = torch.optim.Adam(net.parameters(), lr=lr) animator = Animator(xlabel="epoch", ylabel="loss", xlim=[1, num_epochs]) metric = Accumulator(2) # Sum of losses, no. 
of tokens for epoch in range(num_epochs): seconds, num_batches = time.time(), len(data_iter) for i, batch in enumerate(data_iter): optimizer.zero_grad() center, context_negative, mask, label = [data.to(device) for data in batch] pred = skip_gram(center, context_negative, net[0], net[1]) l = loss(pred.reshape(label.shape).float(), label.float(), mask) / mask.sum(axis=1) * mask.shape[1] l.sum().backward() optimizer.step() metric.add(l.sum(), l.numel()) if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (metric[0] / metric[1],)) print( f"loss {metric[0] / metric[1]:.3f}, " f"{metric[1] / (time.time() - seconds):.1f} tokens/sec on {str(device)}" ) lr, num_epochs = 0.01, 5 train(net, data_iter, lr, num_epochs) def get_similar_tokens(query_token, k, embed): W = embed.weight.data x = W[vocab[query_token]] # Compute the cosine similarity. Add 1e-9 for numerical stability cos = torch.mv(W, x) / torch.sqrt(torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9) topk = torch.topk(cos, k=k + 1)[1].cpu().numpy().astype("int32") for i in topk[1:]: # Remove the input words print(f"cosine sim={float(cos[i]):.3f}: {vocab.idx_to_token[i]}") get_similar_tokens("chip", 3, net[0]) get_similar_tokens("president", 3, net[0]) get_similar_tokens("dog", 3, net[0]) <END_TASK>
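Editor's note: after training, the input embedding matrix net[0].weight holds one vector per vocabulary word and can be reused outside this script. The snippet below is a hypothetical follow-up, not part of the original notebook: it saves the matrix together with the token list and answers a simple analogy query using the same cosine similarity as get_similar_tokens; the file name and example words are assumptions, and analogy quality on a corpus this small will be limited.

# Hypothetical post-training use of the learned embeddings.
W = net[0].weight.data.cpu()                  # shape (len(vocab), embed_size)
torch.save({"weight": W, "itos": vocab.idx_to_token}, "skipgram_ptb.pt")

def analogy(a, b, c, k=1):
    # find d such that vec(b) - vec(a) + vec(c) is closest to vec(d)
    x = W[vocab[b]] - W[vocab[a]] + W[vocab[c]]
    cos = torch.mv(W, x) / torch.sqrt(torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9)
    topk = torch.topk(cos, k=k + 3)[1].numpy()
    return [vocab.idx_to_token[i] for i in topk if vocab.idx_to_token[i] not in (a, b, c)][:k]

# e.g. analogy("man", "woman", "king")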
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step3: Data Step6: We make a vocabulary, replacing any word that occurs less than 10 times with unk. Step8: Mikolov suggested keeping word $w$ with probability Step9: We compare the frequency of certain common and rare words in the original and subsampled data below. Step10: Let's tokenize the subsampled data. Step11: Extracting central target words and their contexts Step12: Example. Suppose we have a corpus with 2 sentences of length 7 and 3, and we use a max context of size 2. Here are the centers and contexts. Step13: Extract context for the full dataset. Step15: Negative sampling Step16: Example. Step17: Now we generate $K$ negatives for each context. These are drawn from $p(w) \propto \text{freq}(w)^{0.75}$. Step18: Minibatching Step19: Example. We make a ragged minibatch with 2 examples, and then pad them to a standard size. Step20: Dataloader Step21: Let's print the first minibatch. Step22: Model Step23: Example. Assume the vocab size is 20 and we use $E=4$ embedding dimensions. Step24: Loss Step25: Different masks can lead to different results. Step26: If we normalize by the number of valid masked entries, then predictions with the same per-token accuracy will score the same. Step30: Training Step31: Test
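Editor's note: two formulas are left implicit in the steps above. For the subsampling step (Step8), the code keeps word w with probability sqrt(t / f(w)), where t = 1e-4 and f(w) = count(w) / N is the word's relative frequency; the implicit min(1, ...) is handled by the fact that comparing a uniform draw against a value >= 1 always keeps the word, so rare words survive while very frequent words like "the" are mostly dropped. For the negative-sampling steps, noise words are drawn from P(w) proportional to freq(w)^0.75. The small sketch below just evaluates these two quantities on a toy count table; the words and counts are made up for illustration.

import math

t = 1e-4
counts = {"the": 50_000, "learning": 300, "zygote": 12}    # hypothetical counts
N = sum(counts.values())

for w, c in counts.items():
    keep = min(1.0, math.sqrt(t / (c / N)))                 # subsampling keep-probability
    print(f"{w:10s} keep prob = {keep:.3f}")

weights = {w: c ** 0.75 for w, c in counts.items()}         # negative-sampling weights
Z = sum(weights.values())
for w in counts:
    print(f"{w:10s} negative-sampling prob = {weights[w] / Z:.3f}")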