code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Stage-wise training of per-batch attention logits ("where") jointly with a
# pretrained classifier ("what") on the blob mosaic dataset.  One learnable
# (250, 9) logit tensor is kept for each of the 12 fixed training batches.

# + id="BWIyC9Ip_bcq"
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
# %matplotlib inline

# Reproducibility: force deterministic cuDNN kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# + id="lGVy-1EllAc_"
train_data = np.load("train_blob_data.npy", allow_pickle=True)
test_data = np.load("test_blob_data.npy", allow_pickle=True)

# + id="uL771xuGZC5Q"
mosaic_list_of_images = train_data[0]["mosaic_list"]
mosaic_label = train_data[0]["mosaic_label"]
fore_idx = train_data[0]["fore_idx"]

test_mosaic_list_of_images = test_data[0]["mosaic_list"]
test_mosaic_label = test_data[0]["mosaic_label"]
test_fore_idx = test_data[0]["fore_idx"]


# + id="x2qfRXfNZCao"
class MosaicDataset1(Dataset):
    """MosaicDataset dataset.

    Serves parallel sequences of mosaics, labels and foreground-patch
    indices through the standard Dataset protocol.
    """

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list: sequence of mosaic images (9 patches each).
            mosaic_label: class label for each mosaic.
            fore_idx: index (0-8) of the foreground patch in each mosaic.
        """
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]


# + id="uf76JwkxZCT0"
batch = 250
train_dataset = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
# shuffle=False is essential here: bg[i] below holds the attention logits for
# the i-th batch, so batch composition must be identical every epoch.
train_loader = DataLoader(train_dataset, batch_size=batch, shuffle=False)

test_dataset = MosaicDataset1(test_mosaic_list_of_images, test_mosaic_label, test_fore_idx)
test_loader = DataLoader(test_dataset, batch_size=batch, shuffle=False)

# + id="DOpZfj1bq7wN"
# One learnable (250, 9) pre-softmax attention logit tensor per training batch.
bg = []
for i in range(12):
    torch.manual_seed(i)
    betag = torch.randn(250, 9)  # torch.ones((250,9))/9
    bg.append(betag.requires_grad_())

# + colab={"base_uri": "https://localhost:8080/"} id="fzb3ii4drXpu" outputId="7c00736e-7922-4ec2-ba66-126f493675a9"
bg


# + id="HbrMidFCla6h"
class Module2(nn.Module):
    """The "what" classifier: 5-d attended feature -> 3 class logits."""

    def __init__(self):
        super(Module2, self).__init__()
        self.linear1 = nn.Linear(5, 100)
        self.linear2 = nn.Linear(100, 3)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x


# + id="rRqj2VELllkX"
torch.manual_seed(1234)
what_net = Module2().double()
what_net.load_state_dict(torch.load("blob_what_net.pt"))  # pretrained weights
what_net = what_net.to("cuda")


# + id="6d8Wch99l4yB"
def attn_avg(x, beta):
    """Attention-weighted average of the 9 patch features.

    Args:
        x: (batch, 9, 5) patch features -- assumed shape, TODO confirm.
        beta: (batch, 9) pre-softmax attention logits.

    Returns:
        (y, alpha): the (batch, 5) weighted average and the (batch, 9)
        softmax attention weights.
    """
    y = torch.zeros([batch, 5], dtype=torch.float64)
    y = y.to("cuda")
    alpha = F.softmax(beta, dim=1)  # alphas
    # print(alpha[0],x[0,:])
    for i in range(9):
        alpha1 = alpha[:, i]
        y = y + torch.mul(alpha1[:, None], x[:, i])
    return y, alpha


# + id="Rz1Kpw12loV6"
def calculate_attn_loss(dataloader, what, criter):
    """Evaluate mean loss, accuracy and focus (FTPT) statistics.

    Returns:
        (mean_loss, analysis, correct, total, accuracy)
    """
    what.eval()
    r_loss = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    correct = 0
    tot = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)       # appended before the .to("cuda") move, so CPU tensors
            fidices.append(fidx)
            inputs = inputs.double()
            beta = bg[i]  # beta for ith batch
            inputs, labels, beta = inputs.to("cuda"), labels.to("cuda"), beta.to("cuda")
            avg, alpha = attn_avg(inputs, beta)
            alpha = alpha.to("cuda")
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            correct += sum(predicted == labels)
            tot += len(predicted)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss = criter(outputs, labels)
            r_loss += loss.item()
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    # print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # BUG FIX: the accumulated loss was divided by i (the LAST batch index,
    # i.e. n_batches - 1) instead of the number of batches; use i + 1.
    return r_loss / (i + 1), analysis, correct.item(), tot, correct.item() / tot


# + id="sAY-x6UAwrwE"
# for param in what_net.parameters():
#     param.requires_grad = False


# + id="_toCktPanH0S"
def analyse_data(alphas, lbls, predicted, f_idx):
    '''
    analysis data is created here

    Counts, over one set of predictions:
      amth / alth : samples whose max attention weight is >= 0.5 / < 0.5
      ftpt / ffpt / ftpf / ffpf : focus-correct x prediction-correct grid.
    '''
    batch = len(predicted)
    amth, alth, ftpt, ffpt, ftpf, ffpf = 0, 0, 0, 0, 0, 0
    for j in range(batch):
        focus = np.argmax(alphas[j])
        if alphas[j][focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        if focus == f_idx[j] and predicted[j] == lbls[j]:
            ftpt += 1
        elif focus != f_idx[j] and predicted[j] == lbls[j]:
            ffpt += 1
        elif focus == f_idx[j] and predicted[j] != lbls[j]:
            ftpf += 1
        elif focus != f_idx[j] and predicted[j] != lbls[j]:
            ffpf += 1
    # print(sum(predicted==lbls),ftpt+ffpt)
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]


# + id="S633XgMToeN3"
# One optimiser per per-batch attention-logit tensor.
optim1 = []
for i in range(12):
    optim1.append(optim.RMSprop([bg[i]], lr=10))

# + colab={"base_uri": "https://localhost:8080/"} id="qPaYaojinMTA" outputId="e2a6fd86-ac5c-408b-cf82-29df384b1aeb"
# instantiate optimizer
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.001)  # , momentum=0.9)#,nesterov=True)
criterion = nn.CrossEntropyLoss()
acti = []
analysis_data_tr = []
analysis_data_tst = []
loss_curi_tr = []
loss_curi_tst = []
epochs = 200

# calculate zeroth epoch loss and FTPT values
running_loss, anlys_data, correct, total, accuracy = calculate_attn_loss(train_loader, what_net, criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f'
      % (0, running_loss, correct, total, accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)

# training starts
for epoch in range(epochs):  # loop over the dataset multiple times
    ep_lossi = []
    running_loss = 0.0
    what_net.train()
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels, _ = data
        inputs = inputs.double()
        beta = bg[i]  # attention logits for ith batch
        inputs, labels, beta = inputs.to("cuda"), labels.to("cuda"), beta.to("cuda")

        # zero the parameter gradients (classifier AND this batch's logits)
        optimizer_what.zero_grad()
        optim1[i].zero_grad()

        # forward + backward + optimize
        avg, alpha = attn_avg(inputs, beta)
        outputs = what_net(avg)
        loss = criterion(outputs, labels)

        running_loss += loss.item()
        loss.backward(retain_graph=False)
        optimizer_what.step()
        optim1[i].step()

    # full-train-set evaluation after every epoch
    running_loss_tr, anls_data, correct, total, accuracy = calculate_attn_loss(train_loader, what_net, criterion)
    analysis_data_tr.append(anls_data)
    loss_curi_tr.append(running_loss_tr)  # loss per epoch
    print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f'
          % (epoch + 1, running_loss_tr, correct, total, accuracy))
    if running_loss_tr <= 0.08:
        break
print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)

# + id="AciJnAh5nfug"
columns = ["epochs", "argmax > 0.5", "argmax < 0.5", "focus_true_pred_true",
           "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false"]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = np.arange(0, epoch + 2)
df_train[columns[1]] = analysis_data_tr[:, -2]
df_train[columns[2]] = analysis_data_tr[:, -1]
df_train[columns[3]] = analysis_data_tr[:, 0]
df_train[columns[4]] = analysis_data_tr[:, 1]
df_train[columns[5]] = analysis_data_tr[:, 2]
df_train[columns[6]] = analysis_data_tr[:, 3]

# + colab={"base_uri": "https://localhost:8080/", "height": 436} id="NoQpS_6scRsC" outputId="6462d8bd-5104-41b0-94fd-a269826d4b9b"
df_train

# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="IMAhRdxOcVf6" outputId="99420696-33c6-45f4-cf64-b75f9042f8d7"
fig = plt.figure(figsize=(6, 6))
# 12 batches x 250 = 3000 train mosaics, so count/30 = percentage of data.
plt.plot(df_train[columns[0]], df_train[columns[3]] / 30, label="focus_true_pred_true ")
plt.plot(df_train[columns[0]], df_train[columns[4]] / 30, label="focus_false_pred_true ")
plt.plot(df_train[columns[0]], df_train[columns[5]] / 30, label="focus_true_pred_false ")
plt.plot(df_train[columns[0]], df_train[columns[6]] / 30, label="focus_false_pred_false ")
plt.title("On Train set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("percentage of data")
plt.xticks([0, 50, 100, 150, 200])
plt.show()
fig.savefig("train_analysis.pdf")
fig.savefig("train_analysis.png")

# + id="VCnS6r2_3WdU"
# Final softmaxed attention weights for every training mosaic (bg lives on CPU).
aph = []
for i in bg:
    aph.append(F.softmax(i, dim=1).detach().numpy())
aph = np.concatenate(aph, axis=0)

# NOTE(review): 'epoch': 500 is hard-coded and does not necessarily match the
# epoch at which training stopped above -- confirm before relying on it.
torch.save({
    'epoch': 500,
    'model_state_dict': what_net.state_dict(),
    # 'optimizer_state_dict': optimizer_what.state_dict(),
    "optimizer_alpha": optim1,
    "FTPT_analysis": analysis_data_tr,
    "alpha": aph,
}, "type4_what_net_500.pt")

# + colab={"base_uri": "https://localhost:8080/"} id="KVzrDOGS4UxU" outputId="39934a51-16e7-4126-ba19-57fc151f7890"
aph[0]

# + id="BwTDpx6STIPh"
1_mosaic_data_attention_experiments/3_stage_wise_training/Attention_weights_for_every_data/blob_data/both_pretrained_what/lr_10/blob_both_atttention_weights_lr_10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Configuration

# ES connector config
es_user = 'admin'
es_pass = '<PASSWORD>'
es_host = 'elasticsearch-1'
es_port = 9200
auth = (es_user, es_pass)

# samples postgres connector config
pg_user = 'test'
pg_pass = '<PASSWORD>'
pg_host = 'samples-db'
pg_port = 5432
pg_db_name = "db_samples"

# # Init and import libraries

# +
# %matplotlib inline

# data wrangling + vis
import pandas as pd
import pandas.io.sql as psql
from matplotlib import pyplot as plt
import ssl

# databases
from opensearchpy import OpenSearch
import opensearchpy.helpers
# postgres
import psycopg2 as pg
# ms sql
import pyodbc
# -

# # Init connectors (Opensearch/OpenDistro version)

# +
es = OpenSearch(
    # FIX: the port was hard-coded to 9200 here, silently ignoring the
    # es_port configuration variable defined above.
    hosts=[{'host': es_host, 'port': es_port}],
    http_compress=True,   # enables gzip compression for request bodies
    http_auth=auth,
    # client_cert = client_cert_path,
    # client_key = client_key_path,
    use_ssl=False,
    verify_certs=False,
    ssl_assert_hostname=False,
    ssl_show_warn=False
)

# check status
print('checking ES connection ...')
if not es.ping():
    raise Exception("Connection failed")
print('connected to ES')

# +
# PostgreSQL connector
#
# print('connecting to PostgreSQL ...')
pg_conn = pg.connect(database=pg_db_name, user=pg_user, password=pg_pass,
                     host=pg_host, port=pg_port)
print('connected to PostgreSQL')
# -

# # Example queries


def print_plot(cui_occ):
    """Bar-plot concept occurrence counts, most frequent first."""
    cui_names = []
    cui_counts = []
    for cui, cnt in sorted(cui_occ.items(), key=lambda kv: kv[1], reverse=True):
        cui_names.append(cui)
        cui_counts.append(cnt)
    # display the results
    plt.figure(figsize=(25, 8))
    plt.bar(range(len(cui_names)), list(cui_counts), align='center')
    plt.xticks(range(len(cui_occ)), list(cui_names), rotation=90)
    plt.tick_params(axis='x', which='major', labelsize=20)
    plt.tick_params(axis='y', which='major', labelsize=20)
    plt.show()


# ## ElasticSearch
# ### Direct data manipulations

# +
# example 1:
# retrieve the number of documents satisfying the query criteria
#
# an example query -- search for keyword 'cancer' in documents
query_body_text = {"query": {"match": {"document": "cancer"}}}
index_to_query_text = 'medical_reports_text'

documents = es.search(index=index_to_query_text, body=query_body_text)
print(documents['hits']['total'])

# +
# example 2:
# - retrieve all the documents matching the query criteria
# - calculate the CUI codes occurencies
# - visualize the results
query_body_medcat = {"query": {"match": {"nlp.source_value": "skin"}}}
index_to_query_medcat = "medical_reports_anns_medcat_medmen_cancer"

# query the elasticsearch
results = opensearchpy.helpers.scan(es, index=index_to_query_medcat, query=query_body_medcat)

# calculate the occurencies
bio_cui_occ = {}
bio_tui_arr = {}
for item in results:
    cui = item['_source']['nlp.pretty_name']
    if cui not in bio_cui_occ:
        bio_cui_occ[cui] = 1
    else:
        bio_cui_occ[cui] += 1
    tui = item['_source']['nlp.tui']
    if tui not in bio_tui_arr:
        bio_tui_arr[tui] = set()
    bio_tui_arr[tui].add(cui)

print_plot(bio_cui_occ)


# +
# example 3 (generalisation of example 2; the original label duplicated
# "example 2"):
# - retrieve all the documents matching the query criteria
# - calculate the CUI codes occurencies
# - visualize the results
def get_tui_cui_occ(es, index_name, cui_field_name, tui_field_name, query_field_name, query_phrase):
    """Scan `index_name` for docs matching `query_phrase` and count concepts.

    Returns:
        (cui_occ, tui_arr): occurrence count per CUI value, and the set of
        CUI values seen per TUI value.
    """
    # query the elasticsearch
    query = {"query": {"match": {query_field_name: query_phrase}}}
    results = opensearchpy.helpers.scan(es, index=index_name, query=query)

    # calculate the occurencies
    cui_occ = {}
    tui_arr = {}
    for item in results:
        # cui = (item['_source']['nlp.inst'], item['_source']['nlp.text'])
        cui = item['_source'][cui_field_name]
        if cui not in cui_occ:
            cui_occ[cui] = 1
        else:
            cui_occ[cui] += 1
        tui = item['_source'][tui_field_name]
        if tui not in tui_arr:
            tui_arr[tui] = set()
        tui_arr[tui].add(cui)
    return (cui_occ, tui_arr)


# +
# bioyodie_results = get_tui_cui_occ(es, 'nifi_is_doc_gp_refletters_bioyodie', 'nlp.inst', 'nlp.TUI', 'nlp.text', 'cancer')
medcat_results = get_tui_cui_occ(es, 'medical_reports_anns_medcat_medmen_cancer',
                                 'nlp.cui', 'nlp.tui', 'nlp.source_value', 'skin')

# print_plot(bioyodie_results[0])
print_plot(medcat_results[0])
# -

# ### Using Pandas

# +
# query template
#
query_body_medcat = {"query": {"match": {"nlp.source_value": "skin"}}}
index_to_query_medcat = 'medical_reports_anns_medcat_medmen_cancer'

# need to re-query ES to fetch the results
#
results = opensearchpy.helpers.scan(es, index=index_to_query_medcat, query=query_body_medcat)

# create a data frame from the results
#
es_df = pd.DataFrame.from_dict([item['_source'] for item in results])
es_df.head()

# +
# select only a subset of columns and perform filtering based on TUI and CUI
es_df_sub = es_df.filter(items=['meta.docid', 'nlp.source_value', 'nlp.cui', 'nlp.tui'])
rows = es_df_sub.loc[(es_df_sub['nlp.tui'] == 'T191')]
print(rows[0:10])
# -

# ## PostgreSQL (samples-db)

# example: read the DB and create pandas data frame
#
pg_df = pd.read_sql('SELECT docid, sampleid, dct FROM medical_reports_text', pg_conn)
pg_df.head()
services/jupyter-hub/notebooks/query-annotations-opensearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from pymatgen.core import Structure, Lattice s = Structure(Lattice.cubic(3.147), ['Mo', 'Mo'], [[0, 0, 0], [0.5, 0.5, 0.5]]) # a simple Mo bcc lattice # - # ## Compute bispectrum coefficients of structures # + from maml.describers import BispectrumCoefficients bc = BispectrumCoefficients(rcutfac=0.5, twojmax=4, element_profile={'Mo': {'r': 8, 'w': 1}}) # - bc.transform([s, s*[1, 2, 1]]) s_large = s * [5, 5, 5] # + # %%timeit -n1 -r1 _ = bc.transform([s_large] * 100) # time for calculating 100 structures # - # ## Parallel computation with progress report bc_w_parallel = BispectrumCoefficients(rcutfac=0.5, twojmax=4, element_profile={'Mo': {'r': 8, 'w': 1}}, verbose=True, # show progress n_jobs=2) # use 2 CPUs # %%timeit -n1 -r1 _ = bc_w_parallel.transform([s_large] * 100) # time for calculating 100 structures # ## With cache bc_w_cache = BispectrumCoefficients(rcutfac=0.5, twojmax=4, element_profile={'Mo': {'r': 8, 'w': 1}}, verbose=True, # show progress memory=True, # saved to local n_jobs=-1) # use all CPU # %%timeit -n1 -r1 _ = bc_w_cache.transform([s_large] * 100) # time for calculating 100 structures # We can see that the run time has been reduced substantially.
notebooks/describer/site_describer.ipynb
# --- # title: "Detecting Outliers" # author: "<NAME>" # date: 2017-12-20T11:53:49-07:00 # description: "How to detect outliers for machine learning in Python." # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # ## Preliminaries # Load libraries import numpy as np from sklearn.covariance import EllipticEnvelope from sklearn.datasets import make_blobs # ## Create Data # + # Create simulated data X, _ = make_blobs(n_samples = 10, n_features = 2, centers = 1, random_state = 1) # Replace the first observation's values with extreme values X[0,0] = 10000 X[0,1] = 10000 # - # ## Detect Outliers # # `EllipticEnvelope` assumes the data is normally distributed and based on that assumption "draws" an ellipse around the data, classifying any observation inside the ellipse as an inlier (labeled as `1`) and any observation outside the ellipse as an outlier (labeled as `-1`). A major limitation of this approach is the need to specify a `contamination` parameter which is the proportion of observations that are outliers, a value that we don't know. # + # Create detector outlier_detector = EllipticEnvelope(contamination=.1) # Fit detector outlier_detector.fit(X) # Predict outliers outlier_detector.predict(X)
docs/machine_learning/preprocessing_structured_data/detecting_outliers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={}
# # _RecoGym_: IPS vs Non-IPS
#
# In this notebook you shall find comparision of performance of two _`Agents`_:
# * _Logistic Regression_ _**without**_ IPS;
# * _Logistic Regression_ _**with**_ IPS.
#
# In all cases, an training data will be used as a _Random_ _`Agent`_ has been
# applied in the _`Environment`_.
#
# **Note:** the evaluation of the data provided in that notebook takes some time.

# + pycharm={}
import gym
import matplotlib.pyplot as plt

# %matplotlib notebook
# %config InlineBackend.figure_format = 'retina'
plt.rcParams['figure.figsize'] = [6, 3]

from reco_gym import build_agent_init, env_1_args, gather_agent_stats, plot_agent_stats

# Experiment constants.
RandomSeed = 42
TrainingDataSamples = (100, 500, 1000, 2000, 3000, 5000, 8000,
                       10000, 13000, 14000, 15000)
TestingDataSamples = 15000
StatEpochs = 5
StatEpochsNewRandomSeed = True

std_env_args = {
    **env_1_args,
    'random_seed': RandomSeed,
}

env = gym.make('reco-gym-v1')

# + [markdown] pycharm={}
# ## _Uniform_ Data Set
#
# In this experiment, we will use a uniform data set obtained after applying so
# called _Random_ _`Agent`_ i.e. the _`Agent`_ that randomply selects a
# _`Product`_.
#
# Data uniformity means that _Propensity Score_ is _**always**_ the same.
# In case of _10_ _`Products`_ $PS=\frac{1}{10}=0.1$.

# + [markdown] pycharm={}
# Import _`Agents`_.

# + pycharm={}
from agents import LogregPolyAgent, logreg_poly_args
from agents import LogregMulticlassIpsAgent, logreg_multiclass_ips_args

# + pycharm={}
# Three configurations under comparison: poly with IPS, multiclass IPS,
# and poly without IPS.
agent_inits = {
    **build_agent_init(
        'LogReg Poly IPS',
        LogregPolyAgent,
        {
            **logreg_poly_args,
            'with_ips': True,
        }
    ),
    **build_agent_init(
        'LogReg MC IPS',
        LogregMulticlassIpsAgent,
        {
            **logreg_multiclass_ips_args,
        }
    ),
    **build_agent_init(
        'LogReg Poly Non-IPS',
        LogregPolyAgent,
        {
            **logreg_poly_args,
            'with_ips': False,
        }
    ),
}

# + [markdown] pycharm={}
# Gathering statistics...

# + pycharm={}
agent_stats01 = gather_agent_stats(
    env,
    std_env_args,
    {
        'num_products': 10,
        'number_of_flips': 5,
    },
    agent_inits,
    TrainingDataSamples,
    TestingDataSamples,
    StatEpochs,
    StatEpochsNewRandomSeed
)

# + [markdown] pycharm={}
# Plotting results...

# + pycharm={}
plot_agent_stats(agent_stats01)

# + [markdown] pycharm={}
# <sup>_**Note:**_ here, _`Samples #`_ is the amount of user's data taken for training.</sup>

# + [markdown] pycharm={}
# ## _Non-Uniform_ Data Set
#
# On the contrary to the previous case, a _**non**-uniform_ data set will be
# used. The data are obtained via using _Organic User Event Counter_
# _`Agent`_, i.e. the _`Agent`_ that selects the most popular _`Product`_
# (most frequently viewed in _Organic_ _`Events`_) with the highest
# probability.

# + pycharm={}
from reco_gym import Configuration
from agents import OrganicUserEventCounterAgent, organic_user_count_args

# + [markdown] pycharm={}
# Gathering statistics...

# + pycharm={}
agent_stats02 = gather_agent_stats(
    env,
    std_env_args,
    {
        'num_products': 10,
        'number_of_flips': 5,
        # Logging policy: organic-event-counter agent with random selection.
        'agent': OrganicUserEventCounterAgent(Configuration({
            **organic_user_count_args,
            **std_env_args,
            'select_randomly': True,
        })),
    },
    agent_inits,
    TrainingDataSamples,
    TestingDataSamples,
    StatEpochs,
    StatEpochsNewRandomSeed
)

# + [markdown] pycharm={}
# Plotting results...

# + pycharm={}
plot_agent_stats(agent_stats02)

# + [markdown] pycharm={}
# <sup>_**Note:**_ here, _`Samples #`_ is the amount of user's data taken for training.</sup>

# + [markdown] pycharm={}
# # Resolution
#
# * _**Uniform**_ Data Set
#     * All kinds of _Logistic Regressions_ show almost the same performance.
# * _**Non-Uniform**_ Data Set
#     * _Logistic Regression with Multi-Classification_ (_LogReg MC IPS_) reveals a pretty good performance results under _Non-Uniform_ training data set.
#     * _Logistic Regression_ _**without**_ IPS (_LogReg Poly Non-IPS_) shows a slightly better results, rather than _Logistic Regression **with** IPS_ (_LogReg Poly IPS_).
#
# Also, in that study, it is quite evident that for _Logistic Regression_ there
# is a strong correlation: the more significant volume of training data is, the
# better _Click-Through Rare_ is.
IPS vs Non-IPS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import datajoint as dj import pandas as pd from datetime import date import numpy as np from matplotlib import pyplot as plt from scipy import signal from datetime import date,datetime,timedelta from allensdk.brain_observatory.ecephys.ecephys_project_cache import EcephysProjectCache import os # Establish connection dj.config['database.host'] = '172.16.17.32' dj.config['database.user'] = 'yonib' dj.config['database.password'] = '<PASSWORD>' dj.conn() # configure a schema for testing stuff schema = dj.schema('yonib_observatory_test',locals()) # - import os # + @schema class Genotype(dj.Lookup): definition = """ genotype:varchar(255) """ contents = zip(['Pvalb-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'Sst-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'Vip-IRES-Cre/wt;Ai32(RCL-ChR2(H134R)_EYFP)/wt', 'wt/wt']) @schema class SessionType(dj.Lookup): definition = """ session_type:varchar(255) """ contents = zip(['brain_observatory_1.1', 'functional_connectivity']) @schema class Mouse(dj.Manual): definition = """ # mouse information specimen_id: bigint # unique mouse ID --- sex:enum('M','F','U') # Sex: Male, Female, Unkown -> Genotype dob:date """ # In my present formulation, things like channel and probe counts # and area ID can be found with queries but aren't included in the # Sessions table. 
@schema class Session(dj.Manual): definition = """ session_id:bigint --- ->Mouse session_datetime:datetime ->SessionType publication_datetime:datetime has_nwb:bool isi_experiment_id:bigint """ @schema class ProbePhase(dj.Lookup): definition = """ probe_phase:varchar(255) """ contents = zip(['3a', 'PXI']) @schema class Probe(dj.Manual): definition = """ probe_id:bigint --- ->Session ->ProbePhase probe_name:varchar(10) air_channel_index:int surface_channel_index:int sampling_rate:float lfp_sampling_rate:float """ @schema class BrainStructure(dj.Lookup): definition = """ brain_structure:varchar(10) """ contents = zip(['APN', 'BMAa', 'CA1', 'CA2', 'CA3', 'COAa', 'COApm', 'CP', 'DG', 'Eth', 'HPF', 'IGL', 'IntG', 'LD', 'LGd', 'LGv', 'LP', 'LT', 'MB', 'MGd', 'MGm', 'MGv', 'MRN', 'NOT', 'OLF', 'OP', 'PF', 'PIL', 'PO', 'POL', 'POST', 'PP', 'PPT', 'PRE', 'PoT', 'ProS', 'RPF', 'RT', 'SCig', 'SCiw', 'SCop', 'SCsg', 'SCzo', 'SGN', 'SUB', 'TH', 'VIS', 'VISal', 'VISam', 'VISl', 'VISli', 'VISmma', 'VISmmp', 'VISp', 'VISpm', 'VISrl', 'VL', 'VPL', 'VPM', 'ZI', 'grey', 'nan']) @schema class Channel(dj.Manual): definition = """ channel_id:bigint --- ->Probe ->BrainStructure structure_id = null:float local_index:int probe_horizontal_position:int probe_vertical_position:int anterior_posterior_ccf_coordinate = null:float dorsal_ventral_ccf_coordinate = null:float left_right_ccf_coordinate=null:float """ @schema class Unit(dj.Manual): definition = """ unit_id:bigint --- ->Channel local_index=null:int pt_ratio = null:float amplitude = null:float amplitude_cutoff = null:float cumulative_drift = null:float d_prime = null:float duration = null:float firing_rate = null:float halfwidth = null:float isi_violations = null:float isolation_distance = null:float l_ratio = null:float max_drift = null:float nn_hit_rate = null:float nn_miss_rate = null:float presence_ratio = null:float recovery_slope = null:float repolarization_slope = null:float silhouette_score = null:float snr = null:float spread = 
null:float velocity_above = null:float velocity_below = null:float """ # I would prefer to have spiketrain data be part of the unit, # But this is going to make more sense if we don't load all NWB files @schema class SpikeTrain(dj.Manual): definition = """ ->Unit --- spike_ts:longblob """ @schema class LFP(dj.Manual): definition = """ ->Channel --- lfp_sampling_rate:float lfp:longblob """ dj.ERD(schema) # + # This notation is borrowed from the mesoscale folks. # I am assuming that it is best practices? data_directory = 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData' manifest_path = os.path.join(data_directory, "manifest.json") cache = EcephysProjectCache.from_warehouse(manifest=manifest_path) @schema class SessionCSV(dj.Manual): definition = """ session_csv:varchar(255) """ # Fix this later to get rid of For Loop @schema class SessionIngest(dj.Imported): definition = """ ->SessionCSV """ def make(self,key): # For now, there is only one session file. self.insert1({'session_csv': key['session_csv']},skip_duplicates = True) # df=pd.read_csv(key['session_csv'],index_col = 'id') for session_id,row in df.iterrows(): session_datetime = datetime.strptime(row['date_of_acquisition'], "%Y-%m-%dT%H:%M:%S%z") publication_datetime = datetime.strptime(row['published_at'], "%Y-%m-%dT%H:%M:%S%z") specimen_id = row['specimen_id'] # Add the mouse data mouse_data = {'specimen_id':row['specimen_id'], 'sex':row['sex'], 'genotype':row['genotype'], 'dob':session_datetime.date()-timedelta(row['age_in_days'])} Mouse().insert1(mouse_data,skip_duplicates = True) # Add the Session data session_data = {'session_id':session_id, 'specimen_id':row['specimen_id'], 'session_datetime':session_datetime, 'publication_datetime':publication_datetime, 'session_type':row['session_type'], 'has_nwb':row['has_nwb'], 'isi_experiment_id':row['isi_experiment_id'], } Session().insert1(session_data,skip_duplicates = True) @schema class ProbeCSV(dj.Manual): definition = """ probe_csv:varchar(255) """ # 
Fix this later to get rid of For Loop @schema class ProbeIngest(dj.Imported): definition = """ ->ProbeCSV """ def make(self,key): self.insert1({'probe_csv': key['probe_csv']},skip_duplicates = True) # df=pd.read_csv(key['probe_csv'],index_col = 'id') for probe_id,row in df.iterrows(): # Add the probe probe_data = {'probe_id':probe_id, 'session_id':row['ecephys_session_id'], 'probe_phase':row['phase'], 'probe_name':row['name'], 'air_channel_index':row['air_channel_index'], 'surface_channel_index':row['surface_channel_index'], 'sampling_rate':row['sampling_rate'], 'lfp_sampling_rate':row['lfp_sampling_rate']} Probe().insert1(probe_data,skip_duplicates = True) @schema class ChannelCSV(dj.Manual): definition = """ channel_csv:varchar(255) """ # Note the difference in the insert commands between this Channel code and the code above. # Before, tables were small enough form repeat insert calls. # Here, we needed to brake things down to a single call. # This switches it from takeing "so long yoni stopped waiting " to ~20 seconds to run. 
@schema
class ChannelIngest(dj.Imported):
    """Ingest one channels CSV: register the file, then bulk-insert all channels."""

    definition = """
    ->ChannelCSV
    """

    def make(self, key):
        # Record the CSV itself first.
        self.insert1({'channel_csv': key['channel_csv']}, skip_duplicates=True)
        df = pd.read_csv(key['channel_csv'])
        # Map AllenSDK column names onto the schema's attribute names.
        df.rename(columns={'id': 'channel_id',
                           'ecephys_probe_id': 'probe_id',
                           'ecephys_structure_acronym': 'brain_structure',
                           'ecephys_structure_id': 'structure_id'}, inplace=True)
        # Coerce dtypes so they match the table definition.
        df['brain_structure'] = df['brain_structure'].astype(str)
        df['structure_id'] = df['structure_id'].astype(float)
        df['anterior_posterior_ccf_coordinate'] = df['anterior_posterior_ccf_coordinate'].astype(float)
        df['dorsal_ventral_ccf_coordinate'] = df['dorsal_ventral_ccf_coordinate'].astype(float)
        df['left_right_ccf_coordinate'] = df['left_right_ccf_coordinate'].astype(float)
        # One bulk insert instead of per-row insert1 calls (much faster).
        df_dict = df.to_dict(orient='records')
        Channel().insert(tuple(df_dict), skip_duplicates=True)


@schema
class UnitCSV(dj.Manual):
    # Path of a units CSV file to ingest.
    definition = """
    unit_csv:varchar(255)
    """


@schema
class UnitIngest(dj.Imported):
    """Ingest sorted units.

    Note: the unit data is pulled from the AllenSDK cache rather than the CSV
    named by the key (the CSV read is commented out below).
    """

    definition = """
    ->UnitCSV
    """

    def make(self, key):
        self.insert1({'unit_csv': key['unit_csv']}, skip_duplicates=True)
        #df=pd.read_csv(key['unit_csv'])
        df = cache.get_units()
        df.reset_index(inplace=True)
        print(df.keys())
        # Map AllenSDK waveform column names onto the schema's attribute names.
        df.rename(columns={'id': 'unit_id',
                           'waveform_PT_ratio': 'pt_ratio',
                           'waveform_amplitude': 'amplitude',
                           'waveform_halfwidth': 'halfwidth',
                           'waveform_duration': 'duration',
                           'waveform_recovery_slope': 'recovery_slope',
                           'waveform_repolarization_slope': 'repolarization_slope',
                           'waveform_velocity_above': 'velocity_above',
                           'waveform_velocity_below': 'velocity_below',
                           'waveform_spread': 'spread',
                           'L_ratio': 'l_ratio',
                           'ecephys_channel_id': 'channel_id'}, inplace=True)
        df_dict = df[['unit_id', 'channel_id', 'local_index', 'pt_ratio',
                      'amplitude', 'amplitude_cutoff', 'cumulative_drift',
                      'd_prime', 'duration', 'firing_rate', 'halfwidth',
                      'isi_violations', 'isolation_distance', 'l_ratio',
                      'max_drift', 'nn_hit_rate', 'nn_miss_rate',
                      'presence_ratio', 'recovery_slope', 'repolarization_slope',
                      'silhouette_score', 'snr', 'spread', 'velocity_above',
                      'velocity_below',]].to_dict(orient='records')
        # gets anything that wasn't checkpointed
        num_records = len(df_dict)
        # Insert in batches of 10000 rows; skip_duplicates makes re-runs resume
        # past already-inserted batches.
        steps = np.append(np.arange(0, num_records, 10000), num_records)
        print(num_records)
        for ii, sss in enumerate(steps[0:-1]):
            Unit().insert(tuple(df_dict[sss:steps[ii + 1]]), skip_duplicates=True)


# There is a super annoying bug whereby if you don't draw the table,
# then it won't work
# dj.ERD(schema) is effectively the same as a "commit" call
dj.ERD(schema)
# -

# %%timeit -n 1 -r 1
SessionCSV.insert1({'session_csv': 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\sessions.csv'}, skip_duplicates=True)
SessionIngest.populate()

# %%timeit -n 1 -r 1
ProbeCSV.insert1({'probe_csv': 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\probes.csv'}, skip_duplicates=True)
ProbeIngest.populate()

Probe()

# %%timeit -n 1 -r 1
ChannelCSV.insert1({'channel_csv': 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\channels.csv'}, skip_duplicates=True)
ChannelIngest.populate()

Channel()

# %%timeit -n 1 -r 1
UnitCSV.insert1({'unit_csv': 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\units.csv'}, skip_duplicates=True)
UnitIngest.populate()


@schema
class NWBSession(dj.Manual):
    # Local NWB file holding the data for one session.
    definition = """
    nwb_file:varchar(255)
    ---
    ->Session
    """


@schema
class NWBProbeLFP(dj.Manual):
    # Local NWB file holding the LFP data for one probe.
    definition = """
    nwb_file:varchar(255)
    ->Probe
    """


# +
data_directory = 'C:/Users/yoni.browning/Documents/DataJoint/AllenData'

# This can be schematized better, but for what I am doing now it doesn't matter.
# Sessions whose NWB files have been downloaded locally.
get_session_ids = [715093703, 719161530, 721123822]

# Get the sessions
nwb_session = NWBSession()
for ii in range(0, len(get_session_ids)):
    directory = os.path.join(data_directory + '/session_' + str(get_session_ids[ii]))
    files = os.listdir(directory)
    for jj, file in enumerate(files):
        print(file)
        if 'probe' in file:
            # Probe LFP files are named like probe_<id>.nwb; parse the id from the name.
            NWBProbeLFP().insert1({'nwb_file': os.path.join(directory, file), 'probe_id': int(file.split('_')[1].split('.')[0])})
        else:
            NWBSession().insert1({'nwb_file': os.path.join(directory, file), 'session_id': get_session_ids[ii]}, skip_duplicates=True)

# Get the probe data
# -

import h5py

# Poke around the raw NWB (HDF5) file to see how spike data is laid out.
f = h5py.File('C:/Users/yoni.browning/Documents/DataJoint/AllenData/session_715093703/session_715093703.nwb', 'r')

f['units'].keys()

# spike_times is one flat vector; spike_times_index appears to hold per-unit
# offsets into it (used that way in the plot below) — TODO confirm against the
# NWB spec.
spike_times = f['units']['spike_times'][:]
spike_times_index = f['units']['spike_times_index'][:]

plt.plot(spike_times[spike_times_index[0]:spike_times_index[10]])
plt.show()

f['units']['cluster_id'][:]

df = pd.read_csv('C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData\\units.csv')

df.keys()

df = cache.get_units()
print(df.keys())
df.reset_index(inplace=True)

# +
# Same renaming as in UnitIngest.make, repeated here for interactive inspection.
df.rename(columns={'id': 'unit_id',
                   'waveform_PT_ratio': 'pt_ratio',
                   'waveform_amplitude': 'amplitude',
                   'waveform_halfwidth': 'halfwidth',
                   'waveform_duration': 'duration',
                   'waveform_recovery_slope': 'recovery_slope',
                   'waveform_repolarization_slope': 'repolarization_slope',
                   'waveform_velocity_above': 'velocity_above',
                   'waveform_velocity_below': 'velocity_below',
                   'waveform_spread': 'spread',
                   'L_ratio': 'l_ratio',
                   'ecephys_channel_id': 'channel_id'}, inplace=True)

df[['unit_id', 'channel_id', 'local_index', 'pt_ratio', 'amplitude',
    'amplitude_cutoff', 'cumulative_drift', 'd_prime', 'duration',
    'firing_rate', 'halfwidth', 'isi_violations', 'isolation_distance',
    'l_ratio', 'max_drift', 'nn_hit_rate', 'nn_miss_rate', 'presence_ratio',
    'recovery_slope', 'repolarization_slope', 'silhouette_score', 'snr',
    'spread', 'velocity_above', 'velocity_below',]]
# -

df.keys()

# Restrict Unit to the channels/probes belonging to one session.
Unit() & (Channel() & (Probe() & 'session_id = 715093703'))

Unit()
datajoint/.ipynb_checkpoints/Allen_Data_DataJoint-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.9 64-bit
#     metadata:
#       interpreter:
#         hash: 4ce0e62306dd6a5716965d4519ada776f947e6dfc145b604b11307c10277ef29
#     name: Python 3.7.9 64-bit
# ---

# +
import json

# Demonstrate JSON serialization of nested lists.
# Renamed from `list`, which shadowed the built-in type.
edges = [[280, 281, "WP"], [281, 269, "COO"]]
print(json.dumps(edges))
python/json.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from mkv.google_bert.feature_extraction import BertFeatureExtractor import tensorflow as tf # ## Configuration BERT_MODEL_FOLDER='/Users/matejkvassay/data/google_bert/uncased_L-12_H-768_A-12/' BERT_CONFIG_FILE=BERT_MODEL_FOLDER+'bert_config.json' INIT_CHECKPOINT=BERT_MODEL_FOLDER+'bert_model.ckpt' VOCAB_FILE=BERT_MODEL_FOLDER+'vocab.txt' LOWER_CASED=False LAYERS="-1"#"-1,-2,-3,-4 # = last 4 layers" LOG_LEVEL=tf.logging.ERROR # ## Init BERT feature extractor from mkv.google_bert.feature_extraction.functions.io import convert_to_input_examples extractor=BertFeatureExtractor(layers=LAYERS, bert_config_file=BERT_CONFIG_FILE, init_checkpoint=INIT_CHECKPOINT, vocab_file=VOCAB_FILE, do_lower_case=LOWER_CASED, log_verbosity=LOG_LEVEL) # ## Extract features from single sentences DATASET=('This is my second semester.'.lower(), 'Are you still there?','I\'m single sentence'.lower()) # %%time result = tuple(extractor.extract_features(DATASET)) result[0]['tokens'][0] result[0]['tokens'] result[0]['vectors'][0].shape # ## Extract features from sentence pairs DATASET=(('Pikachu is stronger than Charmander.','Charmander is weaker than Pikachu.'), ('If it burns, then it hurts.','It doesn\'t hurt even though it burns.')) # %%time result = list(extractor.extract_features(DATASET,tuples=True,include_class=True)) result[0]['cls'].shape result[0]['tokens_B'] result[0]['vectors_B']
jupyter/feature_extraction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Environment (conda_mxnet_p36)
#     language: python
#     name: conda_mxnet_p36
# ---

# # Bring Your Own Model (k-means)
# _**Hosting a Pre-Trained Model in Amazon SageMaker Algorithm Containers**_
#
# ---
#
# ---
#
# ## Contents
#
# 1. [Background](#Background)
# 1. [Setup](#Setup)
# 1. [(Optional)](#Optional)
#   1. [Data](#Data)
#   1. [Train Locally](#Train Locally)
# 1. [Convert](#Convert)
# 1. [Host](#Host)
#   1. [Confirm](#Confirm)
# 1. [Extensions](#Extensions)
#
# ---
# ## Background
#
# Amazon SageMaker includes functionality to support a hosted notebook environment, distributed, managed training, and real-time hosting. We think it works best when all three of these services are used together, but they can also be used independently. Some use cases may only require hosting. Maybe the model was trained prior to Amazon SageMaker existing, in a different service.
#
# This notebook shows how to use a pre-existing model with an Amazon SageMaker Algorithm container to quickly create a hosted endpoint for that model.
#
# ---
# ## Setup
#
# _This notebook was created and tested on an ml.m4.xlarge notebook instance._
#
# Let's start by specifying:
#
# - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
# - The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with the appropriate full IAM role arn string(s).
# + isConfigCell=true tags=["parameters"]
# Define IAM role
import boto3
import re
import sagemaker
from sagemaker import get_execution_role

role = get_execution_role()
bucket = sagemaker.Session().default_bucket()
# All artifacts for this demo live under this S3 prefix.
prefix = 'sagemaker/DEMO-kmeans-byom'
# -

import numpy as np
import sklearn.cluster
import pickle
import gzip
import urllib.request
import json
import mxnet as mx
import boto3
import time
import io
import os

# ## (Optional)
#
# _This section is only included for illustration purposes. In a real use case, you'd be bringing your model from an existing process and not need to complete these steps._
#
# ### Data
#
# For simplicity, we'll utilize the MNIST dataset. This includes roughly 70K 28 x 28 pixel images of handwritten digits from 0 to 9. More detail can be found [here](https://en.wikipedia.org/wiki/MNIST_database).

# NOTE(review): deeplearning.net has been unreliable as a mirror — confirm this
# URL still serves the file before running.
urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()

# ### Train Locally
#
# Again for simplicity, let's stick with the k-means algorithm.

kmeans = sklearn.cluster.KMeans(n_clusters=10).fit(train_set[0])

# ---
# ## Convert
#
# The model format that Amazon SageMaker's k-means container expects is an MXNet NDArray with dimensions (num_clusters, feature_dim) that contains the cluster centroids. For our current example, the 10 centroids for the MNIST digits are stored in a (10, 784) dim NumPy array called `kmeans.cluster_centers_`.
#
# _Note: model formats will differ across algorithms, but this concept is generalizable. Documentation, or just running a toy example and interrogating the resulting model artifact is the best way to understand the specific model format required for different algorithms._
#
# Let's:
# - Convert to a MXNet NDArray
# - Save to a file `model_algo-1`

centroids = mx.ndarray.array(kmeans.cluster_centers_)
mx.ndarray.save('model_algo-1', [centroids])

# - tar and gzip the model array

# !tar czvf model.tar.gz model_algo-1

# - Load to s3

boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'model.tar.gz')).upload_file('model.tar.gz')

# ---
# ## Host
#
# Start by defining our model for hosting. Amazon SageMaker Algorithm containers are published to accounts which are unique across region, so we've accounted for that here.

# +
from sagemaker.amazon.amazon_estimator import get_image_uri

# Timestamped name keeps repeated runs from colliding.
kmeans_model = 'DEMO-kmeans-byom-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())

sm = boto3.client('sagemaker')
container = get_image_uri(boto3.Session().region_name, 'kmeans')

create_model_response = sm.create_model(
    ModelName=kmeans_model,
    ExecutionRoleArn=role,
    PrimaryContainer={
        'Image': container,
        'ModelDataUrl': 's3://{}/{}/model.tar.gz'.format(bucket, prefix)})

print(create_model_response['ModelArn'])
# -

# Then setup our endpoint configuration.

# +
kmeans_endpoint_config = 'DEMO-kmeans-byom-endpoint-config-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(kmeans_endpoint_config)
create_endpoint_config_response = sm.create_endpoint_config(
    EndpointConfigName=kmeans_endpoint_config,
    ProductionVariants=[{
        'InstanceType': 'ml.m4.xlarge',
        'InitialInstanceCount': 1,
        'ModelName': kmeans_model,
        'VariantName': 'AllTraffic'}])

print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# -

# Finally, initiate our endpoints.
# +
# %%time

kmeans_endpoint = 'DEMO-kmeans-byom-endpoint-' + time.strftime("%Y%m%d%H%M", time.gmtime())
print(kmeans_endpoint)

create_endpoint_response = sm.create_endpoint(
    EndpointName=kmeans_endpoint,
    EndpointConfigName=kmeans_endpoint_config)
print(create_endpoint_response['EndpointArn'])

resp = sm.describe_endpoint(EndpointName=kmeans_endpoint)
status = resp['EndpointStatus']
print("Status: " + status)

# Block until the endpoint reaches InService (typically several minutes).
sm.get_waiter('endpoint_in_service').wait(EndpointName=kmeans_endpoint)

resp = sm.describe_endpoint(EndpointName=kmeans_endpoint)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)

if status != 'InService':
    raise Exception('Endpoint creation did not succeed')
# -

# ### Confirm
# Let's confirm that our model is producing the same results. We'll take the first 100 records from our training dataset, score them in our hosted endpoint...

def np2csv(arr) -> str:
    """Serialize a 2-D numeric array to CSV text (trailing newline stripped) for the endpoint payload."""
    csv = io.BytesIO()
    np.savetxt(csv, arr, delimiter=',', fmt='%g')
    return csv.getvalue().decode().rstrip()

# +
runtime = boto3.Session().client('runtime.sagemaker')

payload = np2csv(train_set[0][0:100])
response = runtime.invoke_endpoint(EndpointName=kmeans_endpoint,
                                   ContentType='text/csv',
                                   Body=payload)
result = json.loads(response['Body'].read().decode())
scored_labels = np.array([r['closest_cluster'] for r in result['predictions']])
# -

# ... And then compare them to the model labels from our k-means example.

scored_labels == kmeans.labels_[0:100]

# ---
#
# ## Extensions
#
# This notebook showed how to seed a pre-existing model in an already built container. This functionality could be replicated with other Amazon SageMaker Algorithms, as well as the TensorFlow and MXNet containers. Although this is certainly an easy method to bring your own model, it is not likely to provide the flexibility of bringing your own scoring container. Please refer to other example notebooks which show how to dockerize your own training and scoring container which could be modified appropriately to your use case.

# Remove endpoint to avoid stray charges
sm.delete_endpoint(EndpointName=kmeans_endpoint)
advanced_functionality/kmeans_bring_your_own_model/kmeans_bring_your_own_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from qiskit import *

# Quantum teleportation: move the state of qubit 0 onto qubit 2 using an
# entangled pair (qubits 1 and 2). 3 qubits, 3 classical bits.
circuit = QuantumCircuit(3, 3)

# %matplotlib inline
circuit.draw(output='mpl')

# Flip qubit 0 to |1> so there is a non-trivial state to teleport.
circuit.x(0)
circuit.barrier()
circuit.draw(output='mpl')

# Create a Bell pair between qubits 1 and 2.
circuit.h(1)
circuit.cx(1, 2)
circuit.draw(output='mpl')

# Rotate qubits 0 and 1 into the Bell basis before measuring them.
circuit.cx(0, 1)
circuit.h(0)
circuit.draw(output='mpl')

circuit.barrier()
circuit.measure([0, 1], [0, 1])
circuit.draw(output='mpl')

# Correction gates on qubit 2, controlled by qubits 0 and 1.
circuit.barrier()
circuit.cx(1, 2)
circuit.cz(0, 2)
circuit.draw(output='mpl')

# Read out the teleported state.
circuit.measure(2, 2)

# Run on the QASM simulator and collect measurement statistics.
sim = Aer.get_backend('qasm_simulator')
job_result = execute(circuit, backend=sim, shots=1024).result()
measurement_counts = job_result.get_counts()

from qiskit.tools.visualization import plot_histogram

plot_histogram(measurement_counts)
quantum_teleportation_algorithm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # data manipulation import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # set options pd.set_option('display.max_columns', 20) pd.set_option('precision', 2) sns.set_style('whitegrid') # preprocessing from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer from sklearn.model_selection import train_test_split # models from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor # evaluation metrics from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error # tuning from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV # - from sklearn.datasets import load_boston boston = load_boston() print(boston.DESCR) boston_df = boston.data boston_df
Projects/House Price Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 📝 Exercise 02 # # The goal of this exercise is to evaluate the impact of feature preprocessing # on a pipeline that uses a decision-tree-based classifier instead of logistic # regression. # # - The first question is to empirically evaluate whether scaling numerical # feature is helpful or not; # - The second question is to evaluate whether it is empirically better (both # from a computational and a statistical perspective) to use integer coded or # one-hot encoded categories. # + import pandas as pd adult_census = pd.read_csv("../datasets/adult-census.csv") # - target_name = "class" target = adult_census[target_name] data = adult_census.drop(columns=[target_name, "education-num"]) # As in the previous notebooks, we use the utility `make_column_selector` # to only select column with a specific data type. Besides, we list in # advance all categories for the categorical columns. 
# +
from sklearn.compose import make_column_selector as selector

# Partition the columns by dtype: object columns are treated as categorical,
# everything else as numerical.
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
# -

# ## Reference pipeline (no numerical scaling and integer-coded categories)
#
# First let's time the pipeline we used in the main notebook to serve as a
# reference:

# +
# %%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier

# Unknown categories at predict time are encoded as -1 instead of raising.
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
                                          unknown_value=-1)
# Numerical columns pass through untouched (the point of this reference run).
preprocessor = ColumnTransformer([
    ('categorical', categorical_preprocessor, categorical_columns)],
    remainder="passthrough")

model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
      f"{scores.mean():.3f} +/- {scores.std():.3f}")
# -

# ## Scaling numerical features
#
# Let's write a similar pipeline that also scales the numerical features using
# `StandardScaler` (or similar):

# +
# Write your code here.
# -

# ## One-hot encoding of categorical variables
#
# For linear models, we have observed that integer coding of categorical
# variables can be very detrimental. However for
# `HistGradientBoostingClassifier` models, it does not seem to be the case as
# the cross-validation of the reference pipeline with `OrdinalEncoder` is good.
#
# Let's see if we can get an even better accuracy with `OneHotEncoder`.
#
# Hint: `HistGradientBoostingClassifier` does not yet support sparse input
# data. You might want to use
# `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a
# dense representation as a workaround.

# +
# Write your code here.
notebooks/03_categorical_pipeline_ex_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <NAME> personnal project # # Machine Learning project: predict the species of an iris. # # ## The iris dataset # The Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by the British statistician and biologist <NAME> in his 1936 paper The use of multiple measurements in taxonomic problems as an example of linear discriminant analysis. # The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters. Based on the combination of these four features, Fisher developed a linear discriminant model to distinguish the species from each other. from IPython.display import IFrame IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200) # ## Import the iris dataset # import the dataset we previously download from sklearn.datasets import load_iris # save "bunch" object containing iris dataset and its attributes iris = load_iris() type(iris) # show the iris data print(iris.data) # show the feature print(iris.feature_names) # print integers representing the species of each observation print(iris.target) # code 0 = setosa, 1 = versicolor, 2 = virginica print(iris.target_names) # ## Checking the shape of the features and the response print(type(iris.data)) print(type(iris.target)) print(iris.data.shape) # 150 is the number of observations and 4 is the number of features print(iris.target.shape) # 150 is the shape of the response # + # store feature matrix in "X" X = iris.data # store response vector in "y" y = iris.target # - # ## Logistic regression and KNN classification model with the train/test split evaluation 
procedure # First we will split the dataset in two parts: the training set and the testing set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4) print(X_train.shape) print(X_test.shape) # Our train set has now 90 observations and the test set 40 print(y_train.shape) print(y_test.shape) # ### Logistic regression from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(solver='liblinear', multi_class='auto') logreg.fit(X_train, y_train) y_pred = logreg.predict(X_test) print(metrics.accuracy_score(y_test, y_pred)) # ### KNN for k=5 knn = KNeighborsClassifier(n_neighbors=5) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) print(metrics.accuracy_score(y_test, y_pred)) # The better testing accuracy is 96.7% with the KNN- k=5 model. # ### Can we locate an even better value for K? # # + # we will try K=1 through K=25 and record testing accuracy k_range = list(range(1, 26)) scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) scores.append(metrics.accuracy_score(y_test, y_pred)) # - # Now we will plot the testing accuracy according to the value of K # + import matplotlib.pyplot as plt # %matplotlib inline plt.plot(k_range, scores) plt.xlabel('Value of K for KNN') plt.ylabel('Testing Accuracy') # - # The training accuracy increase with the complexity of the model (value of k) # ## Downsides of train/test split? # The train/test split is very flexible and speed, the problem of this method is that it provides a high-variance estimation. The K-fold cross-validation overcomes this limitation. 
# ## K-fold cross-validation # Cross-validation is a resampling procedure used to evaluate machine learning models on a limited data sample. # The procedure has a single parameter called k that refers to the number of groups that a given data sample is to be split into. # As such, the procedure is often called k-fold cross-validation. When a specific value for k is chosen, it may be used in place of k in the reference to the model, such as k=10 becoming 10-fold cross-validation. from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics # + # read in the iris data iris = load_iris() # create X (features) and y (response) X = iris.data y = iris.target # - # The dataset contains 25 observations, we will do a 5-fold cross-validation, thus it runs for 5 iterations. # At each iteration, every observation is either in the training set or the testing set and every observation is in the testing set exactly once. 
from sklearn.model_selection import cross_val_score # 10-fold cross-validation with K=5 for KNN (the n_neighbors parameter) knn = KNeighborsClassifier(n_neighbors=5) scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy') print(scores) # use average accuracy as an estimate of out-of-sample accuracy print(scores.mean()) # search for an optimal value of K for KNN k_range = list(range(1, 31)) k_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy') k_scores.append(scores.mean()) print(k_scores) # + import matplotlib.pyplot as plt # %matplotlib inline # plot the value of K for KNN (x-axis) versus the cross-validated accuracy (y-axis) plt.plot(k_range, k_scores) plt.xlabel('Value of K for KNN') plt.ylabel('Cross-Validated Accuracy') # - # Compare using CV the best KNN model # # Put your code here -> 10-fold cross-validation with the best KNN model knn = KNeighborsClassifier(n_neighbors=20) print(cross_val_score(knn, X, y, cv=10, scoring='accuracy').mean()) # Put your code here -> 10-fold cross-validation with logistic regression from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(solver='liblinear', multi_class='auto') print(cross_val_score(logreg, X, y, cv=10, scoring='accuracy').mean()) # On the iris dataset the KNN-k=20 model is the best model for prediction with a 98% accuracy. # Advantages of cross-validation: # # More accurate estimate of out-of-sample accuracy # More "efficient" use of data (every observation is used for both training and testing) # # Advantages of train/test split: # # Runs K times faster than K-fold cross-validation # Simpler to examine the detailed results of the testing process # ## More efficient parameter using GridSearchCV # The GridSearchCV instance implements the usual estimator API: when “fitting” it on a dataset all the possible combinations of parameter values are evaluated and the best combination is retained. 
# +
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data
y = iris.target
# -

# define the parameter values that should be searched
k_range = list(range(1, 31))
print(k_range)

# create a parameter grid: map the parameter names to the values that should be searched
param_grid = dict(n_neighbors=k_range)
print(param_grid)

# instantiate the grid
# NOTE(review): this reuses the `knn` estimator instantiated in an earlier cell.
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy', return_train_score=False)

# fit the grid with data
grid.fit(X, y)

import pandas as pd
pd.DataFrame(grid.cv_results_)[['mean_test_score', 'std_test_score', 'params']]

# examine the first result
print(grid.cv_results_['params'][0])
print(grid.cv_results_['mean_test_score'][0])

# print the array of mean scores only
grid_mean_scores = grid.cv_results_['mean_test_score']
print(grid_mean_scores)

# plot the results
plt.plot(k_range, grid_mean_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')

# examine the best model
print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)

# ## Searching multiple parameters simultaneously

# - **Example:** tuning max_depth and min_samples_leaf for a DecisionTreeClassifier
# - Could tune parameters **independently**: change max_depth while leaving min_samples_leaf at its default value, and vice versa
# - But, best performance might be achieved when **neither parameter** is at its default value

# define the parameter values that should be searched
k_range = list(range(1, 31))
weight_options = ['uniform', 'distance']

# create a parameter grid: map the parameter names to the values that should be searched
param_grid = dict(n_neighbors=k_range, weights=weight_options)
print(param_grid)

# instantiate and fit the grid
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy', return_train_score=False)
grid.fit(X, y)

# view the results
pd.DataFrame(grid.cv_results_)[['mean_test_score', 'std_test_score', 'params']]

# examine the best model
print(grid.best_score_)
print(grid.best_params_)

# ## Using the best parameters to make predictions

# +
# train your model using all data and the best known parameters
knn = KNeighborsClassifier(n_neighbors=13, weights='uniform')
knn.fit(X, y)

# make a prediction on out-of-sample data
knn.predict([[3, 5, 4, 2]])
# -

# shortcut: GridSearchCV automatically refits the best model using all of the data
grid.predict([[3, 5, 4, 2]])

# ## Reducing computational expense using `RandomizedSearchCV`

# GridSearchCV can be computationally expensive, especially if you are searching over a large hyperparameter space and dealing with multiple hyperparameters. A solution to this is to use RandomizedSearchCV, in which not all hyperparameter values are tried out. Instead, a fixed number of hyperparameter settings is sampled from specified probability distributions

from sklearn.model_selection import RandomizedSearchCV

# specify "parameter distributions" rather than a "parameter grid"
param_dist = dict(n_neighbors=k_range, weights=weight_options)

# - **Important:** Specify a continuous distribution (rather than a list of values) for any continous parameters

# n_iter controls the number of searches
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5, return_train_score=False)
rand.fit(X, y)
pd.DataFrame(rand.cv_results_)[['mean_test_score', 'std_test_score', 'params']]

# examine the best model
print(rand.best_score_)
print(rand.best_params_)

# run RandomizedSearchCV 20 times (with n_iter=10) and record the best score
best_scores = []
for _ in range(20):
    rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10, return_train_score=False)
    rand.fit(X, y)
    best_scores.append(round(rand.best_score_, 3))
print(best_scores)
notebook/Iris.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Stack Overflow 2020 Developer Survey — a deep dive into DS/DA/BA respondents.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import display
# %matplotlib inline

df = pd.read_csv('./survey_results_public.csv')
df.head()
# -

# ## Data Understanding

df.shape

# Check the missing-value ratio per column.
df.isnull().mean().plot(kind='bar', figsize=(15, 8), fontsize=13)

# Compared with the 2017 survey data I analyzed before, the 2020 survey data has
# greatly improved in quality -- fewer missing values in each column.

# Columns with the most missing values; benchmark set at 45%.
most_m_columns = df.columns[df.isnull().mean() > 0.45]
most_m_columns

# The highest-missing columns are compensation related, which is understandable as
# it is a privacy matter. No significant missing values elsewhere, so no specific
# processing is needed.

df.columns

# The schema is long; only the first 30 rows are shown as illustration.
schema = pd.read_csv('./survey_results_schema.csv')
schema.head(30)

# I am interested in the age data in this survey, as I personally think it is a job
# for young to mid-career professionals. First, clean the age-related columns.

# ## Data Processing

df_dream = df[['Age', 'Age1stCode', 'YearsCodePro', 'Employment', 'Gender']]
df_dream.head()

df_dream = df_dream.dropna(subset=('Age', 'Age1stCode', 'YearsCodePro', 'Gender'), how='any')

df_dream['YearsCodePro'].unique()

# Map the one textual answer that has a clean numeric equivalent.
df_dream['YearsCodePro'] = df_dream['YearsCodePro'].replace(['Less than 1 year'], 0)

df_dream = df_dream.reset_index(drop=True)

# Drop non-numeric sentinel answers and students, which would break the int cast below.
df_dream = df_dream.loc[(df_dream['YearsCodePro'] != 'More than 50 years')
                        & (df_dream['Employment'] != 'Student')
                        & (df_dream['Age1stCode'] != 'Younger than 5 years')
                        & (df_dream['Age1stCode'] != 'Older than 85')]

df_dream['Employment'].value_counts()

df_dream['Age1stCode'].unique()

df_dream.dtypes

df_dream[['Age1stCode', 'YearsCodePro', 'Age']] = (
    df_dream[['Age1stCode', 'YearsCodePro', 'Age']].astype(int)
)
df_dream.head()

# ## Question 1. How long did it take from coding first time to coding professionally?

# Years between first code and first professional code.
df_dream['Yrs_of_Transit'] = df_dream['Age'] - df_dream['YearsCodePro'] - df_dream['Age1stCode']

# Remove some unreasonable outliers.
df_dream = df_dream[(df_dream['Yrs_of_Transit'] >= 0) & (df_dream['Yrs_of_Transit'] <= 50)]

# +
# Histogram: how many years it took people from coding the first time to coding
# professionally.
from matplotlib.pyplot import figure
figure(figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')

n, bins, patches = plt.hist(x=df_dream['Yrs_of_Transit'], bins='auto', color='#0504aa',
                            alpha=0.7, rwidth=1)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Yrs_of_Transit')
plt.ylabel('Frequency')
plt.title('How many years it took to transit coding dream into career')
# BUGFIX: '\sigma' was written as 'sigma' (missing backslash), so the label rendered
# the literal word instead of the Greek letter.
plt.text(30, 50, r'$\mu=8.6, \sigma=5.4$')
maxfreq = n.max()
# Set a clean upper y-axis limit.
plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
# -

df_dream['Yrs_of_Transit'].describe()

# It takes about 8 years for most people to transit from coding the first time to
# coding professionally. This insight may be useful for parents/schools when helping
# kids develop coding interests.

# ## Question 2. What jobs are respondents doing?

# DevType is a ';'-separated multi-select; explode it before counting.
df_job = df.DevType.str.split(';').explode().value_counts() / len(df) * 100
df_job = df_job.rename_axis('DevType').reset_index(name='Percent')

plt.figure(figsize=(15, 9))
plt.xlabel("Percentage%", size=15)
plt.ylabel("Develop Type", size=15)
plt.title("Ratio of Each Job Category (%)", size=15)
plt.xticks(size=15)
plt.yticks(size=15)
df_sorted_desc = df_job.sort_values('Percent', ascending=True)
df_sorted_desc.set_index('DevType').Percent.plot(kind='barh')

# The majority of respondents are traditional developers, but there is diversity:
# ~6% are Data or Business analysts and ~6% are Data Scientists or ML specialists.
# Since I belong to this category, I deep-dive into respondents who are BOTH:
# 1. Data or business analyst
# 2. Data scientist or machine learning specialist
#
# Questions to explore about these DS/DA/BA respondents:
# - What were their undergraduate majors in school?
# - What is their education level?
# - What % of them code as a hobby?
# - How satisfied are they with the job?
# - What job factors matter the most?
# - What tools do they use?

ds = df[(df['DevType'].str.contains('Data scientist'))
        & (df['DevType'].str.contains('Data or business analyst'))]
ds.head()

ds.shape

# ## Question 3. What were their undergraduate majors in school?

# +
df1 = ds['UndergradMajor'].value_counts() / len(ds) * 100
df1 = df1.rename_axis('Major').reset_index(name='Percent')
df1

# +
df2 = df[(df['DevType'].str.contains('Data scientist') == False)
         & (df['DevType'].str.contains('Data or business analyst') == False)]
# BUGFIX: the original recomputed this from the unfiltered `df` (df2 = df[...]
# immediately overwritten by df['UndergradMajor']...), discarding the filter above,
# so "Other Respondents" silently included the DS/DA/BA group. Use df2 as intended,
# matching the df4/df_h cells below.
df2 = df2['UndergradMajor'].value_counts() / len(df2) * 100
df2 = df2.rename_axis('Major').reset_index(name='Percent')
df2
# -

# Visualize by concatenating the two groups of respondents.
pd.concat({
    'DS/DA/BA Respondents': df1.set_index('Major').Percent,
    'Other Respondents': df2.set_index('Major').Percent
}, axis=1).plot.bar(figsize=(10, 6), fontsize=14)

# There are 13 major options in the survey. Most respondents have a Computer Science
# background, but DS/DA/BA respondents are relatively more diverse: among their top
# five (all STEM), Math/Statistics has a much higher ratio going into the DS field.
# Other non-traditional entry majors include Social Science and Business.

# ## Question 4. What is their education level?

ds = ds.reset_index(drop=True)

# +
# Shorten the verbose EdLevel answers for readable chart labels; answers not in the
# mapping become '' (same as the original per-row if/elif chain).
EDU_MAP = {
    'Some college/university study without earning a degree': 'College without degrees',
    'Bachelor’s degree (B.A., B.S., B.Eng., etc.)': 'Bachelor',
    'Other doctoral degree (Ph.D., Ed.D., etc.)': 'PhD',
    'Master’s degree (M.A., M.S., M.Eng., MBA, etc.)': 'Master',
    'Secondary school (e.g. American high school, German Realschule or Gymnasium, etc.)': 'Secondary',
    'Professional degree (JD, MD, etc.)': 'Professional/JD/MD',
    'Associate degree (A.A., A.S., etc.)': 'Associate',
    'I never completed any formal education': 'No Education',
    'Primary/elementary school': 'Primary/Elementary',
}
ds['Education'] = ds['EdLevel'].map(EDU_MAP).fillna('')
# -

ds = ds[ds['Education'] != '']

# NOTE: these are fractions (no *100), unlike the percent frames above; both sides of
# the chart below use the same scale, so the comparison is still valid.
df3 = ds['Education'].value_counts() / len(ds['Education'])
df3 = df3.rename_axis('Education').reset_index(name='Percent')
df3

df4 = df[(df['DevType'].str.contains('Data scientist') == False)
         & (df['DevType'].str.contains('Data or business analyst') == False)
         & (df['EdLevel'].notnull())]
df4 = df4.reset_index(drop=True)

# Same shortening for the comparison group. NOTE(review): unmatched '' rows are kept
# here (as in the original); verify EDU_MAP covers every EdLevel answer.
df4['Education'] = df4['EdLevel'].map(EDU_MAP).fillna('')

df4 = df4['Education'].value_counts() / len(df4['Education'])
df4 = df4.rename_axis('Education').reset_index(name='Percent')
df4

# Visualize education level in the two groups.
pd.concat({
    'DS/DA/BA Respondents': df3.set_index('Education').Percent,
    'Other Respondents': df4.set_index('Education').Percent
}, axis=1).plot.bar(figsize=(10, 6), fontsize=14)

# DS-analytics respondents have a relatively higher ratio of advanced degrees,
# including Master and PhD.

# ## Question 5. What % of people code as a hobby?

ds['Hobbyist'].value_counts() / len(ds) * 100

ds['Hobbyist'].value_counts()

# +
df_h = df[(df['DevType'].str.contains('Data scientist') == False)
          & (df['DevType'].str.contains('Data or business analyst') == False)]
df_h['Hobbyist'].value_counts() / len(df_h) * 100
# -

df_h['Hobbyist'].value_counts()

# More users in the DS/BA/DA group code as a hobby compared to the rest. Is this
# difference by chance, i.e. is job category associated with coding as a hobby?
# Chi-square test of independence:
# - Ho: no difference in the "coding as a hobby" ratio between career categories.
# - Ha: there is a difference.
# Significance level: alpha = 0.05. Build the contingency table, compute the
# chi-square statistic and its p-value, then decide.

df_h1 = ds['Hobbyist'].value_counts()
df_h1 = df_h1.rename_axis('Hobbyist').reset_index(name='Count')

df_h2 = df_h['Hobbyist'].value_counts()
df_h2 = df_h2.rename_axis('Hobbyist').reset_index(name='Count')

# Contingency table. Assumes value_counts() orders Yes before No in BOTH groups
# (true when Yes is the majority answer in both) -- TODO confirm.
chi_test = pd.DataFrame([list(df_h1['Count']), list(df_h2['Count'])],
                        index=["DS/DA/BA", "Others"],
                        columns=["Yes", "No"])
chi_test

# scipy returns (chi2, p-value, dof, expected frequencies) in one call.
from scipy.stats import chi2_contingency
chi2_contingency(chi_test)

# Print a clean output summary.
print('Chi square is ' + str(chi2_contingency(chi_test)[0])
      + ', P-value is ' + str(chi2_contingency(chi_test)[1]))

# The p-value (~0.00275) is below the 0.05 threshold, so we reject the null
# hypothesis: DS/DA/BA respondents differ from the rest in whether they code as a
# hobby, with statistical significance. Personal interest appears to be a key
# characteristic of Data Scientists and Business/Data Analysts.

df_h3 = ds['Hobbyist'].value_counts() / len(ds) * 100
df_h3 = df_h3.rename_axis('Hobbyist').reset_index(name='Percent')

df_h4 = df_h['Hobbyist'].value_counts() / len(df_h) * 100
df_h4 = df_h4.rename_axis('Hobbyist').reset_index(name='Percent')

pd.concat({
    'DS/DA/BA Respondents': df_h3.set_index('Hobbyist').Percent,
    'Other Respondents': df_h4.set_index('Hobbyist').Percent
}, axis=1).plot.bar(figsize=(10, 6), fontsize=14)

# ## Question 6. How satisfied are they with the job?

df_s1 = ds['JobSat'].value_counts() / len(ds) * 100
df_s1 = df_s1.rename_axis('JobSat').reset_index(name='Percent')
list(df_s1['JobSat'])

# NOTE(review): df_s2 is computed over ALL respondents but charted as "Other
# Respondents" -- consider filtering out the DS/DA/BA group as in Question 3.
df_s2 = df['JobSat'].value_counts() / len(df) * 100
df_s2 = df_s2.rename_axis('JobSat').reset_index(name='Percent')

pd.concat({
    'DS/DA/BA Respondents': df_s1.set_index('JobSat').Percent,
    'Other Respondents': df_s2.set_index('JobSat').Percent
}, axis=1).plot.bar(title="Job Satisfaction by Percentage%", figsize=(10, 6), fontsize=14)
plt.xlabel("Satisfaction Level", fontsize=14)
plt.ylabel("% of Group", fontsize=14)

# A higher ratio of DS/DA/BA professionals are satisfied with their jobs compared to
# other respondents.

# ## Question 7. What job factors matter the most?

# JobFactors is also a ';'-separated multi-select.
ds_rank = ds.JobFactors.str.split(';').explode().value_counts() / len(ds) * 100
ds_rank = ds_rank.rename_axis('JobFactors').reset_index(name='Percent')
ds_rank

# Top 3 factors for DS/DA/BA professionals -- useful for employers hiring such roles:
# 1. Opportunities for professional development
# 2. Flex time or a flexible schedule
# 3. Office environment/company culture

# +
# Check other respondents' job factors.
# NOTE: `df` is reloaded and re-filtered here, so every cell below this point sees
# only the non-DS/DA/BA subset.
df = pd.read_csv('./survey_results_public.csv')
df = df[(df['DevType'].str.contains('Data scientist') == False)
        & (df['DevType'].str.contains('Data or business analyst') == False)]

df_rank1 = df.JobFactors.str.split(';').explode().value_counts() / len(df) * 100
df_rank1 = df_rank1.rename_axis('JobFactors').reset_index(name='Percent')
df_rank1
# -

# Two of the other respondents' top 3 match the DS/DA/BA ranking, but they value
# languages/technologies the most, and career development is not their first priority:
# 1. Languages, frameworks, and other technologies I'd be working with
# 2. Office environment or company culture
# 3. Flex time or a flexible schedule

pd.concat({
    'DS/DA/BA Respondents': ds_rank.set_index('JobFactors').Percent,
    'Other Respondents': df_rank1.set_index('JobFactors').Percent
}, axis=1).plot.bar(title="Job Factors by Percentage%", figsize=(10, 6), fontsize=14)
plt.xlabel("JobFactors", fontsize=14)
plt.ylabel("% of Group", fontsize=14)

# The biggest gap is "Languages/Frameworks/Technologies" -- presumably traditional
# developers work with a wider toolbox and are expected to be experts in it, while
# DS/BA/DA professionals share a narrower analysis toolset, examined next.

# ## Question 8. What tools do they use and will learn next year?
# (Renumbered; the original had two headings labelled "Question 7".)

# Tools DS/DA/BA respondents use.
ds_tool = ds.MiscTechWorkedWith.str.split(';').explode().value_counts() / len(ds) * 100
ds_tool = ds_tool.rename_axis('MiscTechWorkedWith').reset_index(name='Percent')
ds_tool

# +
# Tools other respondents use (df is the non-DS/DA/BA subset here -- see the note in
# Question 7).
df_tool = df.MiscTechWorkedWith.str.split(';').explode().value_counts() / len(df) * 100
df_tool = df_tool.rename_axis('MiscTechWorkedWith').reset_index(name='Percent')
df_tool
# -

pd.concat({
    'DS/DA/BA Respondents': ds_tool.set_index('MiscTechWorkedWith').Percent,
    'Other Respondents': df_tool.set_index('MiscTechWorkedWith').Percent
}, axis=1).plot.bar(title="Frameworks/libraries/tools Use Ratio%", figsize=(15, 8), fontsize=14)
plt.xlabel("Frameworks/libraries/tools", fontsize=14)
plt.ylabel("% of Group", fontsize=14)

# Pandas and TensorFlow are popular among DS/DA/BA respondents, which makes sense as
# they support data analysis and machine learning work. Large gaps also exist in
# Keras, Apache Spark, Hadoop, and Torch/PyTorch -- big-data and NLP tools heavy in
# Data Science. Meanwhile, Node.js and .NET/.NET Core are favored by others.

ds.MiscTechDesireNextYear.str.split(';').explode().value_counts().plot(
    kind='bar', figsize=(15, 8), fontsize=14)

# For next year, many DS respondents plan to learn Torch/PyTorch, which ranked 9th in
# currently used tools -- an important note for anyone keeping up with the industry.

# ## Insights Recap
# 1. Many people first attempted to code ~8 years before becoming professional
#    developers -- momentum and persistence are keys to success.
# 2. Most 2020 StackOverflow respondents are traditional developers; about 6% are
#    data scientists / business analysts / data analysts.
# 3. Most respondents have a CS background, but DS/DA/BA respondents are more
#    diverse; Math/Statistics is a favored entry major, followed by CS.
# 4. DS respondents have a relatively higher ratio of advanced degrees (Master, PhD).
# 5. DS respondents differ from the rest in coding as a hobby, with statistical
#    significance.
# 6. The DS/DA/BA job family is relatively happy with their jobs.
# 7. DS/DA/BA professionals mainly use big-data/ML tools (with Python) while other
#    respondents primarily write JavaScript; a learning trend toward Torch/PyTorch is
#    surging among DS/DA/BA professionals.
2020Survey.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Day 4 — baseline price models for the men's shoes dataset.

# +
import pandas as pd
import numpy as np

from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# -

# cd "/content/drive/MyDrive/Colab Notebooks/dw_transform"

# ls data

df = pd.read_csv("data/men_shoes.csv", low_memory=False)
df.shape

df.columns

# Baseline 1: predict the global mean price for every row.
mean_price = np.mean(df["prices_amountmin"])
mean_price

y_true = df["prices_amountmin"]
y_pred = [mean_price] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)

# Inspect the price distribution, raw and log-transformed.
df["prices_amountmin"].hist(bins=100)

np.log(df["prices_amountmin"] + 1).hist(bins=100)

np.log1p(df["prices_amountmin"]).hist(bins=100)

np.median(y_true)

# Baseline 2: predict the median price for every row.
y_true = df["prices_amountmin"]
y_pred = [np.median(y_true)] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)

# Baseline 3: mean in log-space, mapped back via expm1.
y_true = df["prices_amountmin"]
price_log_mean = np.expm1(np.mean(np.log1p(y_true)))
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)

np.mean(np.log1p(y_true))

np.exp(np.mean(np.log1p(y_true))) - 1

df.columns

df.brand.value_counts()

df["brand"].factorize()

df["brand"].factorize()[0]

# Encode brand as integer codes for the tree model.
df["brand_cat"] = df["brand"].factorize()[0]

# First model: a depth-5 decision tree on the brand category alone.
feats = ["brand_cat"]
X = df[feats].values
y = df["prices_amountmin"].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, scoring="neg_mean_absolute_error")
np.mean(scores), np.std(scores)

import sklearn
sklearn.metrics.SCORERS.keys()


def run_model(feats):
    """Cross-validate a depth-5 decision tree on the columns in `feats`.

    Uses the module-level `df`; returns (mean, std) of the negated MAE scores.
    """
    feature_matrix = df[feats].values
    target = df["prices_amountmin"].values
    regressor = DecisionTreeRegressor(max_depth=5)
    fold_scores = cross_val_score(regressor, feature_matrix, target,
                                  scoring="neg_mean_absolute_error")
    return np.mean(fold_scores), np.std(fold_scores)


run_model(["brand_cat"])

df["colors"].value_counts()

df["colors_cat"] = df["colors"].factorize()[0]
run_model(["colors_cat"])

run_model(["colors_cat", "brand_cat"])

df["manufacturer"].value_counts()

df["manufacturer_cat"] = df["manufacturer"].factorize()[0]
run_model(["manufacturer_cat"])

run_model(["manufacturer_cat", "brand_cat"])

run_model(["manufacturer_cat", "brand_cat", "colors_cat"])

# !git status

# !git add transform_one/day_4.ipynb

# !git config --global user.email "<EMAIL>"

# !git config --global user.name "<NAME>"

# !git push origin main
transform_one/day_4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Copyright 2019 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -

# Install the Pipeline SDK — this only needs to run once in the environment.

# !python3 -m pip install 'kfp>=0.1.31' --quiet
# !pip3 install tensorflow==1.14 --upgrade

# ## KubeFlow Pipelines Serving Component
# This notebook demonstrates:
#
# * saving a Keras model in a format compatible with TF-Serving, and
# * creating a pipeline that serves a trained model inside a KubeFlow cluster.
#
# Reference documentation:
# * https://www.tensorflow.org/tfx/serving/architecture
# * https://www.tensorflow.org/beta/guide/keras/saving_and_serializing
# * https://www.kubeflow.org/docs/components/serving/tfserving_new/

# ### Setup

# + tags=["parameters"]
# Set your output and project. !!!Must do before you can proceed!!!
project = 'Your-Gcp-Project-ID'  # your GCP project id
model_name = 'model-name'  # must match TF-Serving naming requirements

import time
ts = int(time.time())
model_version = str(ts)  # timestamp as version, to avoid conflicts

output = 'Your-Gcs-Path'  # a GCS bucket for asset outputs
KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:1.7.1'
# -

model_path = '%s/%s' % (output, model_name)
model_version_path = '%s/%s/%s' % (output, model_name, model_version)

# ### Load a Keras Model
# A pretrained Keras model serves as the example.

import tensorflow as tf
model = tf.keras.applications.NASNetMobile(
    input_shape=None,
    include_top=True,
    weights='imagenet',
    input_tensor=None,
    pooling=None,
    classes=1000,
)

# ### Save the Model for TF-Serve
# Export with Keras' export_saved_model. For TF-Serving the output directory must be
# structured as model_name/model_version/saved_model.

tf.keras.experimental.export_saved_model(model, model_version_path)

# ### Create a pipeline using the KFP TF-Serve component


def kubeflow_deploy_op():
    """Build the ContainerOp that deploys the exported model via the KFP deployer image."""
    deploy_args = [
        '--model-export-path', model_path,
        '--server-name', model_name,
    ]
    return dsl.ContainerOp(
        name='deploy',
        image=KUBEFLOW_DEPLOYER_IMAGE,
        arguments=deploy_args,
    )


# +
import kfp
import kfp.dsl as dsl


# The pipeline definition.
@dsl.pipeline(
    name='sample-model-deployer',
    description='Sample for deploying models using KFP model serving component'
)
def model_server():
    deploy = kubeflow_deploy_op()
# -

# Submit the pipeline for execution on the Kubeflow Pipelines cluster.

# +
kfp.Client().create_run_from_pipeline_func(model_server, arguments={})
# The link printed above leads to the run information page. (Note: a JupyterLab bug
# modifies the URL and makes the link stop working.)
samples/core/kubeflow_tf_serving/kubeflow_tf_serving.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Abhipsakumaripriyadarshinee/18cse155/blob/main/Assignment_4Disimilarity_matrix.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="yVdYn9lUg1Zt"
# Dissimilarity (distance) matrices for binary, nominal and numeric attributes
# of the UCI student performance dataset.
path = "https://raw.githubusercontent.com/chirudukuru/DMDW/main/student-mat.csv"

# + id="TykUWcwxinNc"
# All imports consolidated here (the original repeated scipy/seaborn imports
# in several cells).
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.spatial import distance
from sklearn.preprocessing import LabelEncoder

# + id="42lI6oQMiykP"
df = pd.read_csv(path)
df

# + id="ED2SodInjASh"
# Proximity measures of binary attributes.

# + id="lKm9LdLLkDRF"
df1 = df[['schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'romantic']]
df1.head()

# + id="kDDupsGJlVpJ"
# Encode yes/no as 1/0 in a single pass (was two separate replace calls).
df1 = df1.replace({'no': 0, 'yes': 1})
df1.head()

# + id="DeYb2XLGl2Sf"
# Two pairs of binary attributes as (n_students, 2) arrays.
n = np.array(df1[['schoolsup', 'famsup']]).reshape(-1, 2)
n.shape
m = np.array(df1[['internet', 'romantic']]).reshape(-1, 2)
m.shape

# + id="HCOTFS8QnDST"
# Pairwise Euclidean distances between the two attribute pairs.
dist_matrix = distance.cdist(n, m)
print(dist_matrix)

# + id="hsL3NgwJokyc"
sns.heatmap(dist_matrix)
plt.show()

# + id="gGhYsuY7ovkr"
# Nominal attributes.

# + id="mPqhX7gqqRSA"
nominal = df[['Mjob', 'Fjob', 'reason', 'guardian']]
nominal = nominal.replace('at_home', 'home')
nominal = nominal.astype('category')

# + id="8g7EPx_VsGoc"
# Label-encode each nominal column to integer codes.
lb = LabelEncoder()
nominal['Mjob'] = lb.fit_transform(nominal['Mjob'])
nominal['Fjob'] = lb.fit_transform(nominal['Fjob'])
nominal['reason'] = lb.fit_transform(nominal['reason'])
nominal['guardian'] = lb.fit_transform(nominal['guardian'])
nominal.head()

# + id="n0x5SB9guBOF"
# NOTE(fix): the original called nominal1.reshape(-1, 2) (and num1/num2 below)
# without assigning the result. ndarray.reshape returns a NEW array and is not
# in-place, so those calls were no-ops; the distances were always computed on
# the arrays as-is. The dead reshape calls are removed here -- behavior is
# unchanged.
nominal1 = np.array(nominal)
nominal2 = np.array(nominal)

# + id="1GwFzKQ_utOU"
dist_matrix = distance.cdist(nominal1, nominal2)
print(dist_matrix)

# + id="iEGGAxqfvTMV"
sns.heatmap(dist_matrix)
plt.show()

# + id="96VIa7lQvaxL"
# Numeric attributes.

# + id="J2XaEfUgw8Tr"
numeric = df[['age', 'Medu', 'Fedu', 'traveltime', 'studytime', 'failures']]
numeric.head()

# + id="Tmr_9vMrw-jw"
num1 = np.array(numeric[['age', 'failures']])
num1.shape

# + id="nffRdkLvxBCF"
num2 = np.array(numeric[['Fedu', 'Medu']])
num2.shape

# + id="RY8BFGotxDy8"
dist_matrix = distance.cdist(num1, num2)
print(dist_matrix)

# + id="8ESOe77_xItI"
dist_matrix.shape
sns.heatmap(dist_matrix)

# + id="68kdkOvqxPZF"
Assignment_4Disimilarity_matrix.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysing Simulation Runs

# +
import sys

import pandas as pd

sys.path.append("/Users/pvs262/Documents/rec-sys-dynamics/code")
from src.analysis.cluster import post_process, cluster

# All simulation runs analysed in this notebook live under this directory.
BASE_DIR = "../simulation_runs/Explore_Threshold_2/"


def getLatents(folder):
    """Load the 100 per-iteration latent DataFrames of one simulation run."""
    directory = BASE_DIR + folder
    return [pd.read_pickle(directory + '/L' + str(i) + 'pkl.gzip', compression='gzip')
            for i in range(100)]


def getResults(folder):
    """Load the 100 per-iteration clustering-result DataFrames of one run."""
    directory = BASE_DIR + folder
    return [pd.read_pickle(directory + '/R' + str(i) + 'pkl.gzip', compression='gzip')
            for i in range(100)]


def recalResults(folder):
    """Recompute clustering results from stored latents (GMM, 3 components)."""
    latents = getLatents(folder)
    results_list = []
    for latent in latents:
        cluster_obj = cluster(latent, 0.3)
        results_list.append(cluster_obj.gmm(3, covariance_type='full', df='proba', svd=False))
    return results_list


def getUI(folder):
    """Load the 5 stored user-item matrices of one run."""
    directory = BASE_DIR + folder
    return [pd.read_pickle(directory + '/UI' + str(i) + 'pkl.gzip', compression='gzip')
            for i in range(5)]


def analyse_run(sim, first_iter=0, recal=False):
    """Load one run, plot cluster percentages and print first/last clustering.

    NOTE(fix): this helper replaces 16 near-identical copy-pasted cells.

    Parameters
    ----------
    sim : str
        Folder name of the simulation run.
    first_iter : int
        Index passed to ``examine`` for the "ITERATION 1" printout. The
        original notebook inconsistently used 0 for some runs and 1 for
        others; each run keeps its original value below.
    recal : bool
        If True, recompute the clustering from the latents instead of
        loading the stored results (used for the 2BCMU runs).

    Returns
    -------
    The ``post_process`` object, kept for further interactive exploration.
    """
    latents = getLatents(sim)
    results = recalResults(sim) if recal else getResults(sim)
    UI = getUI(sim)
    run = post_process(latents, results, UI)
    run.rename_cluster(1, 99)
    run.plot_percent()
    print('ITERATION 1 CLUSTERING')
    run.examine(first_iter, 'gmm')
    print('ITERATION 99 CLUSTERING')
    run.examine(99, 'gmm')
    return run


# +
# Store images of the cluster percent plots.
# Explore
# folders = ['cosin_AN_10_0_30_100','cosin_BNC_10_0_30_100','cosin_1BCC_10_0_30_100','cosin_1BCMU_10_0_30_100','cosin_1BCLI_10_0_30_100','cosin_2BCC_10_0_30_100','cosin_2BCMU_10_0_30_100','cosin_2BCLI_10_0_30_100','mf_AN_10_0_30_100','mf_BNC_10_0_30_100','mf_1BCC_10_0_30_100','mf_1BCMU_10_0_30_100','mf_1BCLI_10_0_30_100','mf_2BCC_10_0_30_100','mf_2BCMU_10_0_30_100','mf_2BCLI_10_0_30_100']
folders = ['item_based_AN_10_0_30_100', 'item_based_BNC_10_0_30_100',
           'item_based_1BCC_10_0_30_100', 'item_based_1BCMU_10_0_30_100',
           'item_based_1BCLI_10_0_30_100', 'item_based_2BCC_10_0_30_100',
           'item_based_2BCMU_10_0_30_100', 'item_based_2BCLI_10_0_30_100']
for a in folders:
    run = post_process(getLatents(a), getResults(a), getUI(a))
    run.rename_cluster(1, 99)
    run.plot_percent(show=False, loc='../results/' + a + '.svg')
# -

# One cell per simulation; the assigned module-level variables keep each run
# available for further exploration, exactly as in the original notebook.

# ### cosin_AN_10_0_30_100
cosin_AN_10_0_30_100 = analyse_run('cosin_AN_10_0_30_100')

# ### cosin_BNC_10_0_30_100
cosin_BNC_10_0_30_100 = analyse_run('cosin_BNC_10_0_30_100')

# ### cosin_1BCC_10_0_30_100
cosin_1BCC_10_0_30_100 = analyse_run('cosin_1BCC_10_0_30_100')

# ### cosin_1BCMU_10_0_30_100
cosin_1BCMU_10_0_30_100 = analyse_run('cosin_1BCMU_10_0_30_100')

# ### cosin_1BCLI_10_0_30_100
cosin_1BCLI_10_0_30_100 = analyse_run('cosin_1BCLI_10_0_30_100')

# ### cosin_2BCC_10_0_30_100
cosin_2BCC_10_0_30_100 = analyse_run('cosin_2BCC_10_0_30_100')

# ### cosin_2BCMU_10_0_30_100
# This run recomputes its clustering from the latents (recalResults in the original).
cosin_2BCMU_10_0_30_100 = analyse_run('cosin_2BCMU_10_0_30_100', recal=True)

# ### cosin_2BCLI_10_0_30_100
cosin_2BCLI_10_0_30_100 = analyse_run('cosin_2BCLI_10_0_30_100')

# ### mf_AN_10_0_30_100
mf_AN_10_0_30_100 = analyse_run('mf_AN_10_0_30_100')

# ### mf_BNC_10_0_30_100
mf_BNC_10_0_30_100 = analyse_run('mf_BNC_10_0_30_100')

# ### mf_1BCC_10_0_30_100
mf_1BCC_10_0_30_100 = analyse_run('mf_1BCC_10_0_30_100', first_iter=1)

# ### mf_1BCMU_10_0_30_100
mf_1BCMU_10_0_30_100 = analyse_run('mf_1BCMU_10_0_30_100', first_iter=1)

# ### mf_1BCLI_10_0_30_100
mf_1BCLI_10_0_30_100 = analyse_run('mf_1BCLI_10_0_30_100', first_iter=1)

# ### mf_2BCC_10_0_30_100
mf_2BCC_10_0_30_100 = analyse_run('mf_2BCC_10_0_30_100', first_iter=1)

# ### mf_2BCMU_10_0_30_100
mf_2BCMU_10_0_30_100 = analyse_run('mf_2BCMU_10_0_30_100', first_iter=1, recal=True)

# ### mf_2BCLI_10_0_30_100
mf_2BCLI_10_0_30_100 = analyse_run('mf_2BCLI_10_0_30_100', first_iter=1)
notebooks/run_analysis_Ex2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PacBio 2019 Alzheimer Sequel2 dataset # # This notebook demonstrates the data preprocessing, import and basic vizualization of genes with Sashimi plots. As it is based on a full sized isoseq dataset, it involves some preparation, and takes several hours to complete. For a smaller example, please check the test data notebook on the [github page](https://github.com/MatthiasLienhard/isotools/blob/master/notebooks/test_data.ipynb). # # ## Preparation # 1) Prepare the working directory and download the reference and data # ``` bash # cd /my/working/directory # # create some subdirectories # mkdir -p reference alzheimer/flnc alzheimer/aligned alzheimer/pickle tables plots # # # download a reference genome (806 MB) # genome_link='ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_36/GRCh38.p13.genome.fa.gz' # wget -P reference/ -O GRCh38.p13.genome.fa.gz ${genome_link} # gunzip reference/GRCh38.p13.genome.fa.gz # # # download gencode reference annotation (46.2 MB) # gff='gencode.v36.chr_patch_hapl_scaff.annotation' # annotation_link= ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_36/${gff}.gff3.gz # wget -P reference/ ${annotation_link} # # # sort by chromosome and position # (zcat ${gff}.gff3.gz| grep ^"#" ; zcat reference/${gff}.gff3.gz|grep -v ^"#"| sort -k1,1 -k4,4n)|bgzip > reference/${gff}_sorted.gff3.gz # # create index # tabix -p gff reference/${gff}_sorted.gff3.gz # # # download the isoseq flnc data (4.1 GB) # isoseq_link='https://downloads.pacbcloud.com/public/dataset/Alzheimer2019_IsoSeq/FullLengthReads/flnc.bam' # wget -P alzheimer/flnc -O alzheimer2019_isoseq_flnc.bam ${isoseq_link} # ``` # # 2) Align the isoseq data to the reference genome using minimap2. 
# If the pacbio isoseq3 workflow is [installed](https://github.com/PacificBiosciences/IsoSeq_SA3nUP/wiki/Tutorial:-Installing-and-Running-Iso-Seq-3-using-Conda) you can use the pacbio version of minimap2 as follows:
#
# ``` sh
# #activate the isoseq3 environment (assuming it is called pacbio)
# conda activate pacbio
# n_threads=60
# sample='alzheimer2019_isoseq'
# ref='reference/GRCh38.p13.genome.fa'
# pbmm2 align ${ref} alzheimer/flnc/${sample}_flnc.bam alzheimer/aligned/${sample}_aligned.sorted.bam --preset ISOSEQ --sort -j $n_threads
# ```
#
#
# ## Data import

# +
import isotools

# NOTE(fix): the version banner previously read "isootools" (typo in the
# printed message).
print(f'This is isotools version {isotools.__version__}')

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logger = logging.getLogger('isotools')

# +
# Input files: aligned long reads, reference genome and sorted annotation.
sample = 'alzheimer2019_isoseq'
bam = f'alzheimer/aligned/{sample}_aligned.sorted.bam'
genome = 'reference/GRCh38.p13.genome.fa'
anno = 'reference/gencode.v36.chr_patch_hapl_scaff.annotation_sorted'

# +
try:
    # Try to restore previously prepared data (about 2 minutes).
    isoseq = isotools.Transcriptome.load(f'alzheimer/pickle/{sample}_isotools.pkl')
except FileNotFoundError:
    try:
        # Try to restore the pickled reference annotation.
        isoseq = isotools.Transcriptome.from_reference(anno + '.isotools.pkl')
    except FileNotFoundError:
        # Import the reference from gff3 (about 3 minutes) ...
        isoseq = isotools.Transcriptome.from_reference(anno + '.gff3.gz')
        # ... and save it, so it can be reused for other datasets/analyses.
        isoseq.save_reference(anno + '.isotools.pkl')
    # Import the long-read data and compare to the reference
    # (only one sample for this dataset; takes 30-40 minutes).
    isoseq.add_sample_from_bam(bam, sample_name='alzheimer_1', group='alzheimer', min_align_fraction=0)
    # Compute QC metrics for all transcripts (takes 15-20 minutes).
    isoseq.add_qc_metrics(genome)
    # Update the index for fast gene access by name/id.
    isoseq.make_index()
    # Save the fully prepared data for the next session.
    isoseq.save(f'alzheimer/pickle/{sample}_isotools.pkl')
# -

# ## Saturation Analysis
# Two complementary views on isoform discovery saturation:
# * The saturation plot models the expected probability of discovering a transcript
#   at a given level of confidence (number of supporting long reads), for a given
#   abundance (TPM), depending on the total number of long reads.
# * The rarefaction analysis depicts the number of distinct transcripts discovered
#   when subsampling the long reads; the slope at the right end reflects how many
#   novel transcripts deeper sequencing could still reveal.
#
# At this sequencing depth, transcripts at 1 TPM have about 90% probability of
# being covered by at least two reads, transcripts at 0.5 TPM about 60%.

# +
from isotools.plots import plot_saturation, plot_rarefaction

plt.rcParams["figure.figsize"] = (14, 7)
fig, axs = plt.subplots(1, 2)
plot_saturation(isoseq, cov_th=2, x_range=(1e4, 5e6, 1e4), ax=axs[0])
rarefaction, total = isoseq.rarefaction(min_coverage=2, tr_filter={'query': 'FSM'})
plot_rarefaction(rarefaction, total=total, ax=axs[1])
fig.tight_layout()
# -

# ## Quality control and filtering
# Isotools allows filtering genes and transcripts on user-defined criteria,
# e.g. to mark potential artifacts. Default criteria are stored as
# tag -> expression pairs, where the tag is a single word in ALLCAPS and the
# expression is evaluated on the properties of the gene or transcript.
# There are three sets of criteria: one evaluated in gene context, one for
# transcripts, and one for reference transcripts.
# For example, the expression for the INTERNAL_PRIMING flag is
# 'len(exons)==1 and downstream_A_content and downstream_A_content>.5', i.e. it
# selects mono-exon genes with more than 50% A content downstream of the
# transcript. Tags, or boolean expressions combining several tags, can be used
# in "queries" to filter reads. Users can adjust thresholds of existing
# criteria or define additional ones based on custom properties.
#
# The example below defines two additional flags for reference transcripts,
# "HIGH_SUPPORT" and "PROTEIN_CODING", based on the GENCODE annotation fields
# "transcript_support_level" and "transcript_type". All default definitions
# are printed afterwards.

# +
# Add GENCODE-specific filters in "reference" context.
isoseq.add_filter("HIGH_SUPPORT", 'transcript_support_level=="1"', context='reference')
isoseq.add_filter("PROTEIN_CODING", 'transcript_type=="protein_coding"', context='reference')

# Print every defined filter expression, grouped by context.
for context in isoseq.filter:
    print(f'\n{context}\n{"="*len(context)}')
    for tag, expression in isoseq.filter[context].items():
        print(f'- {tag}:\t{expression}')
# -

# To demonstrate filter queries, summary statistics are computed on subsets of
# the transcripts:
# * transcript length distribution for known genes ('not NOVEL_GENE'),
#   contrasted with high-support reference transcripts;
# * downstream A content for novel unspliced genes ('NOVEL_GENE and UNSPLICED')
#   versus full-splice-match transcripts ('FSM');
# * direct repeat length at known, novel canonical, and novel noncanonical
#   splice junctions, computed over all transcripts (no query).

# +
# Summary statistics on potential technical artifacts. Each helper returns a
# (DataFrame, plot-kwargs) pair; a combined known/novel A-content table is
# appended as a fifth entry.
tr_stats = [
    isoseq.transcript_length_hist(
        groups=isoseq.groups(), add_reference=True, min_coverage=2,
        tr_filter=dict(query='not NOVEL_GENE'),
        ref_filter=dict(query='HIGH_SUPPORT')),
    isoseq.downstream_a_hist(
        groups=isoseq.groups(),
        tr_filter=dict(query='FSM'),
        ref_filter=dict(query='not REF_UNSPLICED')),
    isoseq.downstream_a_hist(
        groups=isoseq.groups(),
        tr_filter=dict(query='NOVEL_GENE and UNSPLICED')),
    isoseq.direct_repeat_hist(
        groups=isoseq.groups(), bins=np.linspace(-.5, 10.5, 12)),
]
tr_stats.append((
    pd.concat([tr_stats[1][0].add_suffix(' known'),
               tr_stats[2][0].add_suffix(' novel')]),
    tr_stats[2][1],
))
# -

# The QC metrics and filter flags distinguish three main artifact types:
# * **Internal priming**: the primer binds genomic stretches of high adenosine
#   content, yielding apparent novel genes which are typically unspliced.
# * **Reverse transcriptase template switching (RTTS)**: apparent novel
#   introns, often without canonical splice sites.
# * **Truncated fragments** of transcripts, yielding apparent novel
#   transcription start or polyA sites.

# +
# Frequency of the artifact filter flags, weighted by read coverage.
f_stats = isoseq.filter_stats(
    weight_by_coverage=True, min_coverage=1,
    tags=['INTERNAL_PRIMING', 'RTTS', 'FRAGMENT'])
# Wrap long category labels over two lines for the bar plot.
f_stats[0].index = f_stats[0].index.str.replace('_', '\n')

# +
# QC plot
from isotools.plots import plot_bar, plot_distr

plt.rcParams["figure.figsize"] = (15, 15)
plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 2)
# A) transcript length
plot_distr(tr_stats[0][0], smooth=3, ax=axs[0, 0], **tr_stats[0][1])
# B) internal priming: downstream A content, known vs novel
plot_distr(tr_stats[4][0], smooth=3, ax=axs[0, 1], density=True, fill=True, **tr_stats[4][1])
# C) RTTS: direct repeat length at splice junctions
plot_distr(tr_stats[3][0], ax=axs[1, 0], density=True, **tr_stats[3][1])
# D) frequency of artifacts
# note that colors must be set in v0.2.8 - this will be fixed in v0.2.9
plot_bar(f_stats[0], ax=axs[1, 1], drop_categories=['PASS'], colors=['blue'], **f_stats[1])
fig.tight_layout()
# -

# This dataset shows a relatively high fraction of internal priming, affecting
# about 20% of the full length reads.

# ## Novel alternative splicing classification
# Isotools extends the SQANTI categories (FSM, ISM, NIC, NNC, novel gene) by
# subcategories that facilitate direct biological interpretation. The
# artificial example below covers all subcategories (except those for novel
# genes); users are encouraged to modify the exon lists to explore edge cases.
# The labels in the plots reflect the subcategories assigned by isotools.
# +
# Artificial example gene: three reference transcripts and one novel transcript
# per subcategory. Exon coordinates are [start, end] pairs on a toy axis.
ref = [[[12, 20], [30, 40], [50, 60], [70, 81]],
       [[11, 20], [35, 40], [75, 79]],
       [[10, 20], [30, 40], [50, 60], [70, 80]]]
novel = {'FSM': [[10, 20], [30, 40], [50, 60], [70, 80]],
         "5' fragment": [[33, 40], [50, 60], [70, 80]],
         "3' fragment": [[10, 20], [30, 40], [50, 55]],
         "mono exon": [[22, 35]],
         "exon skipping": [[10, 20], [50, 60], [70, 80]],
         "intron retention": [[10, 40], [50, 60], [70, 80]],
         "novel combination": [[10, 20], [35, 40], [50, 60], [70, 80]],
         "novel junction": [[10, 20], [30, 40], [50, 60], [75, 80]],
         "novel exonic TSS": [[26, 40], [50, 60], [70, 80]],
         "novel exonic PAS": [[10, 20], [30, 40], [50, 66]],
         "novel 5' splice site": [[10, 24], [30, 40], [50, 60], [70, 80]],
         "novel 3' splice site": [[10, 20], [26, 40], [50, 60], [70, 80]],
         "novel exon": [[10, 20], [30, 40], [43, 47], [50, 60], [70, 80]],
         "novel intronic TSS": [[43, 47], [50, 60], [70, 80]],
         "novel intronic PAS": [[10, 20], [30, 40], [82, 90]]}
ref = {'transcripts': [{'exons': e, 'transcript_name': f'reference {i+1}'}
                       for i, e in enumerate(ref)]}
transcripts = [{'exons': e, 'transcript_name': n} for n, e in novel.items()]
example = isotools.Gene(10, 80, {'strand': '+', 'ID': 'example', 'reference': ref,
                                 'transcripts': transcripts}, None)

f, axs = plt.subplots(2, figsize=(10, 7), gridspec_kw={'height_ratios': [1, 4]})
# SQANTI main categories, indexed by the category number returned by
# get_alternative_splicing below.
cat = ['FSM', 'ISM', 'NIC', 'NNC', 'novel gene']
sg = example.ref_segment_graph
# Classify each example transcript and relabel it with the assigned
# subcategories and main category.
# NOTE(fix): the loop variable used to be named `novel`, shadowing the `novel`
# dict defined above; renamed to avoid the shadowing.
for example_tr in example.transcripts:
    alt_splice = sg.get_alternative_splicing(example_tr['exons'])
    print(f"{example_tr['transcript_name']}: {alt_splice[1]}")
    example_tr['transcript_name'] = f"{','.join(alt_splice[1])} ({cat[alt_splice[0]]}) "
example.gene_track(ax=axs[0], x_range=[10, 90], title='')
example.gene_track(reference=False, ax=axs[1], x_range=[10, 90], title='', color='green')
for ax in axs:
    ax.get_xaxis().set_visible(False)
f.tight_layout()
# -

# During import, all isoseq transcripts were classified with this scheme.
# The next cells count the identified novel classes; note the query used to
# exclude transcripts affected by one of the 3 artifact types.
# +
# For each novelty subcategory, record the lowest (most specific) SQANTI
# category number it occurs in; FSM is dropped as it is not a novel class.
# NOTE(fix): the inner loop variable used to be named `anno`, shadowing the
# module-level `anno` annotation path defined in the import cell; renamed.
cnr = {}
for g, trid, tr in isoseq.iter_transcripts():
    for subcategory in tr['annotation'][1]:
        cnr[subcategory] = min(cnr.get(subcategory, 5), tr['annotation'][0])
del cnr['FSM']

# Alternative-splicing statistics at three stringency levels; artifacts are
# excluded by query in all three.
altsplice = [
    isoseq.altsplice_stats(groups=isoseq.groups(), weight_by_coverage=True, min_coverage=1,
                           tr_filter=dict(query="not( RTTS or FRAGMENT or INTERNAL_PRIMING)")),
    isoseq.altsplice_stats(groups=isoseq.groups(), weight_by_coverage=True, min_coverage=2,
                           tr_filter=dict(query="not( RTTS or FRAGMENT or INTERNAL_PRIMING)")),
    isoseq.altsplice_stats(groups=isoseq.groups(), weight_by_coverage=False, min_coverage=20,
                           tr_filter=dict(query="not( RTTS or FRAGMENT or INTERNAL_PRIMING)"))]

# Append the parent SQANTI category to each subcategory label and wrap long
# "splice ..." labels over two lines for plotting.
for i in range(3):
    altsplice[i][0].index = altsplice[i][0].index + [
        f'\n({cat[cnr[subcat]]})' if subcat in cnr else '' for subcat in altsplice[i][0].index]
    altsplice[i][0].index = altsplice[i][0].index.str.replace('splice ', '\nsplice ')

# +
from isotools.plots import plot_bar, plot_distr

plt.rcParams["figure.figsize"] = (20, 10)
_ = plot_bar(altsplice[0][0], bar_width=.9, ylabel='fraction of reads [%]', colors=['blue'],
             legend=False, rot=90, drop_categories=['FSM'])
# -

# ## Data exploration
# Examples of how the data can be accessed, and what information is stored on
# gene and transcript level. We focus on the 'MAPT' gene, which features
# alternative splicing and is related to Alzheimer's disease.

# +
# Access a gene of interest by name.
g = isoseq['MAPT']
print(g)
# This reveals that MAPT has 267 different variants according to isoseq.
total_cov = g.coverage.sum()
# Most variants are supported by only a few reads; count those contributing
# more than 1% of the gene's total coverage.
n_major = sum([cov > total_cov * .01 for cov in g.coverage][0])
print(f'{n_major} transcripts contribute at least 1% to that gene')

# The primary (most covered) transcript.
max_i = np.argmax(g.coverage)
print(f'The primary transcript is number {max_i} and contributes {g.coverage[0,max_i]/total_cov:.2%} ({g.coverage[0,max_i]}/{total_cov})')

# All information on a transcript is stored in a plain dict.
primary = g.transcripts[max_i]
print(f'\nThese are the infos for this transcript:')
for key, value in primary.items():
    print(f'{key}: {str(value)[:100]}{"..." if len(str(value))>100 else ""}')
# This reveals a mono exon in the 3'UTR of the gene.

# The second most covered transcript.
second_i = np.argsort(g.coverage[0])[-2]
second = g.transcripts[second_i]
print(f'\nThese are the infos for the second transcript:')
for key, value in second.items():
    print(f'{key}: {str(value)[:100]}{"..." if len(str(value))>100 else ""}')
# This reveals an FSM of reference transcript nr 7.

print(f'\nThe corresponding reference transcript: ')
for key, value in g.ref_transcripts[second["annotation"][1]["FSM"][0]].items():
    print(f'{key}: {str(value)[:100]}{"..." if len(str(value))>100 else ""}')
# -

# Iterate over transcripts, filtered with a query; show the first 11 hits only.
hits = 0
for g, trnr, tr in isoseq.iter_transcripts(query='INTERNAL_PRIMING'):
    print(f'transcript nr {trnr} of {g} with a coverage of {g.coverage[0,trnr]}')
    hits += 1
    if hits > 10:
        break

# +
# Iteration can also filter by region (chr[:start-end]), novelty category,
# or a minimum coverage.
for g, trnr, tr in isoseq.iter_transcripts(region='chr1', query='NOVEL_EXON', min_coverage=100):
    print(f'Gene {g.name} has a highly covered transcript with novel exon:')
    print(f'Transcript nr {trnr} with a coverage of {g.coverage[0,trnr]}/{g.coverage.sum()}')
    print(f'The novel exon is at position {tr["annotation"][1]["novel exon"]}')
# -

# ## Data vizualization
# Depict the MAPT gene with its two major exon skipping events as a sashimi plot.
plt.rcParams["figure.figsize"] = (20, 10)
fig, axs = isoseq['MAPT'].sashimi_figure(x_range=[45960000, 46015000])
fig.tight_layout()
docs/notebooks/isotools_alzheimer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import json

import requests
import matplotlib.pyplot as plt
import pandas as pd
# -

from fiber.cohort import Cohort
from fiber.condition import Procedure, Diagnosis
from fiber.database import read_with_progress
from fiber.database.hana import engine as hana_engine
from fiber.database.mysql import engine as mysql_engine
from fiber.utils import Timer


def slack_notification(text):
    """Post *text* to the benchmark Slack channel.

    Raises ValueError if Slack replies with a non-200 status code.
    """
    webhook_url = 'https://hooks.slack.com/services/xxxx/yyyy'
    slack_data = {'text': text}
    response = requests.post(
        webhook_url,
        data=json.dumps(slack_data),
        headers={'Content-Type': 'application/json'}
    )
    if response.status_code != 200:
        raise ValueError(
            'Request to slack returned an error %s, the response is:\n%s'
            % (response.status_code, response.text)
        )


# # `sample_cohort.values_for(Diagnosis('584.9', 'ICD-9'))`
#
# This notebook executes the benchmark for value fetching of a specific
# diagnosis. Values are fetched for a cohort of heart surgery patients
# `sample_cohort`. The queries in `build_query` emulate FIBER's translation
# process, with a limit on the number of included MRNs to control result size.
# The benchmark runs for up to 15,000 medical record numbers and reports the
# execution and fetching time on HANA and MySQL, plus the rows fetched per
# iteration.

sample_cohort = Cohort(Procedure('35.%', 'ICD-9') | Procedure('36.1%', 'ICD-9'))
hs_mrns = sample_cohort.mrns()


def build_query(mrns, limit):
    """Build equivalent HANA and MySQL diagnosis-fetching queries.

    Parameters
    ----------
    mrns : iterable of str
        Medical record numbers; only the first *limit* are inlined.
    limit : int
        Maximum number of MRNs to include.

    Returns
    -------
    (hana_query, mysql_query) tuple of SQL strings.

    NOTE: the MRN list is inlined into the SQL on purpose to emulate FIBER's
    query translation for this benchmark. String-built SQL is unsafe for
    untrusted input; here the MRNs come from the local cohort, not from users.
    """
    # NOTE(fix): originally built with a manual loop plus slicing off the last
    # comma, which produced the malformed string ')' for an empty MRN list.
    # join() yields identical output for non-empty lists and a well-formed
    # '()' otherwise.
    mrn_query = '(' + ','.join("'" + p + "'" for p in list(mrns)[0:limit]) + ')'
    hana_query = """ SELECT DISTINCT D_PERSON.MEDICAL_RECORD_NUMBER, FACT.AGE_IN_DAYS, FD_DIAGNOSIS.CONTEXT_NAME, FD_DIAGNOSIS.CONTEXT_DIAGNOSIS_CODE FROM "MSDW_2018"."FACT" JOIN "MSDW_2018"."D_PERSON" ON "MSDW_2018"."FACT"."PERSON_KEY" = "MSDW_2018"."D_PERSON"."PERSON_KEY" JOIN "MSDW_2018"."B_DIAGNOSIS" ON "MSDW_2018"."FACT"."DIAGNOSIS_GROUP_KEY" = "MSDW_2018"."B_DIAGNOSIS"."DIAGNOSIS_GROUP_KEY" JOIN "MSDW_2018"."FD_DIAGNOSIS" ON "MSDW_2018"."FD_DIAGNOSIS"."DIAGNOSIS_KEY" = "MSDW_2018"."B_DIAGNOSIS"."DIAGNOSIS_KEY" WHERE "MSDW_2018"."FD_DIAGNOSIS"."CONTEXT_NAME" LIKE 'ICD-9' AND upper("MSDW_2018"."FD_DIAGNOSIS"."CONTEXT_DIAGNOSIS_CODE") LIKE '584.9' AND "MSDW_2018"."D_PERSON"."MEDICAL_RECORD_NUMBER" IN """ + mrn_query
    mysql_query = """ SELECT DISTINCT `D_PERSON`.`MEDICAL_RECORD_NUMBER`, `FACT`.`AGE_IN_DAYS`, `FD_DIAGNOSIS`.`CONTEXT_NAME`, `FD_DIAGNOSIS`.`CONTEXT_DIAGNOSIS_CODE` FROM `FACT` INNER JOIN `D_PERSON` ON `FACT`.`PERSON_KEY` = `D_PERSON`.`PERSON_KEY` INNER JOIN `B_DIAGNOSIS` ON `FACT`.`DIAGNOSIS_GROUP_KEY` = `B_DIAGNOSIS`.`DIAGNOSIS_GROUP_KEY` INNER JOIN `FD_DIAGNOSIS` ON `FD_DIAGNOSIS`.`DIAGNOSIS_KEY` = `B_DIAGNOSIS`.`DIAGNOSIS_KEY` WHERE `FD_DIAGNOSIS`.`CONTEXT_NAME` LIKE 'ICD-9' AND upper(`FD_DIAGNOSIS`.`CONTEXT_DIAGNOSIS_CODE`) LIKE '584.9' AND `D_PERSON`.`MEDICAL_RECORD_NUMBER` IN """ + mrn_query
    return hana_query, mysql_query


def execute_benchmark(mrns, limits, query_builder):
    """Time both queries for every MRN limit.

    Returns three DataFrames: HANA runtimes, MySQL runtimes, and the number of
    rows fetched per limit. Posts a Slack notification after each iteration.
    """
    hana_benchmark_results = []
    mysql_benchmark_results = []
    number_of_rows = []
    for limit in limits:
        queries = query_builder(mrns, limit)
        with Timer() as t:
            df = read_with_progress(queries[0], hana_engine, silent=True)
        number_of_rows.append((limit, len(df)))
        hana_benchmark_results.append([limit, t.elapsed])
        with Timer() as t:
            read_with_progress(queries[1], mysql_engine, silent=True)
        mysql_benchmark_results.append([limit, t.elapsed])
        slack_notification(f'Done value fetching for {str(limit)} MRNs')
    return (
        pd.DataFrame(hana_benchmark_results, columns=['# Patients', 'Runtime in s']),
        pd.DataFrame(mysql_benchmark_results, columns=['# Patients', 'Runtime in s']),
        pd.DataFrame(number_of_rows, columns=['# Patients', '# Rows'])
    )


limits = [10, 100, 500, 1000, 5000, 10000, 15000]

hana_results, mysql_results, number_of_rows = execute_benchmark(hs_mrns, limits, build_query)

# ### Result Persisting

hana_results.to_csv('../results/value_fetching/hana.csv', index=False)
mysql_results.to_csv('../results/value_fetching/mysql.csv', index=False)

hana_results = pd.read_csv('../results/value_fetching/hana.csv')
mysql_results = pd.read_csv('../results/value_fetching/mysql.csv')

# ### Visualization

mysql_results.plot.line(x='# Patients', y='Runtime in s')

hana_results.plot.line(x='# Patients', y='Runtime in s')

# +
results = pd.merge(mysql_results, hana_results, on='# Patients')
results.rename(columns={'Runtime in s_x': 'MySQL Runtime in s', 'Runtime in s_y': 'IMDB Runtime in s'}, inplace=True)

plt.figure()
plt.plot(results['# Patients'], results['MySQL Runtime in s'], '--',
         linewidth=2, markersize=12, label='MySQL Runtime in s')
plt.plot(results['# Patients'], results['IMDB Runtime in s'], '-',
         linewidth=2, markersize=12, label='IMDB Runtime in s')
plt.yscale('log')
plt.ylabel('Runtime in s')
plt.xlabel('# Patients')
plt.xlim(0)
plt.legend()
plt.title('Fetching of Diagnoses for Cohort')
plt.savefig('../figures/value_fetching/runtime.png', dpi=600, bbox_inches="tight")
scripts/value-fetching-for-cohort.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Session-4:-Dynamic-Programming" data-toc-modified-id="Session-4:-Dynamic-Programming-1">Session 4: Dynamic Programming</a></span></li><li><span><a href="#Learning-Outcomes" data-toc-modified-id="Learning-Outcomes-2">Learning Outcomes</a></span></li><li><span><a href="#What-is-dynamic-programming?" data-toc-modified-id="What-is-dynamic-programming?-3">What is dynamic programming?</a></span></li><li><span><a href="#Fibonacci-sequence-" data-toc-modified-id="Fibonacci-sequence--4">Fibonacci sequence </a></span></li><li><span><a href="#1)-Maximum-cumulative-sum-with-the-constraint-problem" data-toc-modified-id="1)-Maximum-cumulative-sum-with-the-constraint-problem-5">1) Maximum cumulative sum with the constraint problem</a></span></li><li><span><a href="#2)-Problem-31-from-Project-Euler-(aka-make-change)" data-toc-modified-id="2)-Problem-31-from-Project-Euler-(aka-make-change)-6">2) Problem 31 from Project Euler (aka make change)</a></span></li><li><span><a href="#Takeaways" data-toc-modified-id="Takeaways-7">Takeaways</a></span></li></ul></div>
# -

# Session 4: Dynamic Programming
# -------
# <br>
#
# <center><img src="images/dynamic programming quote.png" width="75%"/></center>

# <center><h2>Learning Outcomes</h2></center>
#
# __By the end of this session, you should be able to__:
#
# - Write Python code to solve problems with dynamic programming.
# - Describe in your own words how dynamic programming works.
# - Explain the pros and cons of dynamic programming.

# What is dynamic programming?
# ------
#
# Dynamic programming is a way to effectively and efficiently solve problems.
#
# Dynamic programming finds the optimal solution by looking at all possible options once and then selecting the best solution.
#
# Dynamic programming is the best strategy when a problem has overlapping subproblems. It remembers previous solutions (via caching) and uses those previous solutions to reduce the number of calculations needed.
#
# Dynamic programming requires sequential problems.
#
# __How to solve problems with dynamic programming__:
#
# 1. Recognize there is a sequence of steps with overlapping sub-problems (hardest step)
# 1. Explicitly define how a single sub-problem is solved.
# 1. Explicitly define how that sub-problem overlaps with the next sub-problem.
# 1. Pick a data structure for the cache.
# 1. Walk through the problem, storing results in the cache.

# Fibonacci sequence
# ------
#
# The Fibonacci sequence is a cumulative sum of the last two values.
#
# A solution with dynamic programming can use just two variables as a cache since it does not need to store all of the history.

# Bug fix: the magic was written bare (`reset -fs`), which is a NameError when
# this file is executed as a script; jupytext's convention is to comment magics.
# %reset -fs

# +
# Choose to use a function instead of + symbol
from operator import add


def fib(n):
    """Return the n-th value of this Fibonacci variant (1, 2, 3, 5, 8, ...).

    Only the last two values are cached — a constant-space DP solution.
    """
    two_back, one_back = 0, 1
    for _ in range(n):
        # New values are combinations of old values
        two_back, one_back = one_back, add(one_back, two_back)
    return one_back


def print_fib_values(fib_func, fib_n=10):
    """Print a table of items 1..fib_n produced by `fib_func`."""
    print(f"{'Item':<4} {'Value':>4}")
    # Bug fix: the loop previously iterated a hard-coded range(1, 11),
    # shadowing and silently ignoring the fib_n parameter.
    for i in range(1, fib_n + 1):
        print(f"{i:<4} -> {fib_func(i):>4}")


print_fib_values(fib_func=fib)
# -

# 1) Maximum cumulative sum with the constraint problem
# -----
#
# The solution can use the same logic since it is a variation of cumulative sum.

from metakernel import register_ipython_magics

register_ipython_magics()

# +
# %%tutor

# Let's take the same idea and extend it
# Our new value is the max sum
def max_constrained(nums):
    """Get the maximum cumulative sum with the constraint of not taking two numbers in a row."""
    total_current, total_previous = 0, 0
    for n in nums:
        # Either take n (adding it to the best sum that skipped the previous
        # element) or skip n and keep the current best.
        total_previous, total_current = total_current, max(total_previous + n, total_current)
    return total_current


assert max_constrained([1, 2, 3, 1]) == 4
assert max_constrained([2, 1, 1, 2]) == 4
assert max_constrained([2, 7, 9, 3, 1]) == 12
# -

# The goal of Reinforcement Learning is to maximize the cumulative sum of rewards (e.g., points in a video game). Dynamic programming is one method to find the optimal policy to reach that goal.
#
# The biggest difference between this problem and Reinforcement Learning is that Reinforcement Learning has stochastic element(s):
#
# - Rewards are random.
# - States are random.
# - Ability to collect rewards is random.

# 2) Problem 31 from Project Euler (aka make change)
# -----
#
# In the United Kingdom the currency is made up of pound (£) and pence (p). There are eight coins in general circulation:
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p), and £2 (200p).
#
# It is possible to make £2 in the following way:
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
#
# How many different ways can £2 be made using any number of coins?
#
# https://projecteuler.net/problem=31

# +
# %%tutor

# Assign variables
target = 2  # 1, 2, 3, 4, 5, 6, 7, … 200 (set to 200 for the actual Euler answer)
coins = [1, 2, 5, 10, 20, 50, 100, 200]

print(f"{'Coin':^6} {'Value':^6} {'# of Ways':^6}")

# Use dynamic programming to build up the solution
ways = [1] + [0]*target  # Total number of ways to make change for a given value (index)

for coin in coins:  # Find ways to make change for each coin individually
    for i in range(coin, target+1):  # Keep adding ways to make change, progressively for each coin value
        ways[i] += ways[i-coin]  # Current number of ways builds on the previous number of ways
        print(f"{coin:^6} {i:^6} {ways[i]:^6}")
    # print("#"*20)

print()
print(f"For the value of {target}, there are {ways[-1]:,} total ways to make change.")
# -

# <center><h2>Takeaways</h2></center>
#
# - Dynamic Programming Pros
#     - Easy to ask and proves you can code!
#     - If it can be applied, it is guaranteed to find the optimal solution.
#     - Speeds up the solving of certain classes of problems.
# - Dynamic Programming Cons:
#     - You have to have experience with solving problems in that style.
#     - Requires memory for the cache. Your computer might not have a big enough, fast enough cache.
#     - Requires visiting every state in a sequence.
#     - Sometimes there are too many states.
#     - It is not easily parallelizable.

# <br>
# <br>
# <br>
#
# ----
04_dynamic_programming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Utils

# ## Base functions

def identity(x):
    """Identity function: return the parameter unchanged."""
    return x


# ## Image utils
#
# Please install : py-opencv

# ### Read & display images

def assert_img_path(img_path, debug=False):
    """Reject obvious non-image paths (e.g. macOS `.DS_Store` artifacts)."""
    if debug:
        print('Loading image at ' + img_path)
    if img_path.endswith('.DS_Store'):
        raise Exception('Attention, wrong image path=' + img_path)


# +
from imageio import imread


def img_read(img_path, debug=False):
    """Read an image in RGB using the imageio library."""
    assert_img_path(img_path, debug)
    return imread(img_path)


# +
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html
import numpy as np
import cv2


def img_read_cv(img_path, color_mode=cv2.IMREAD_COLOR, debug=False):
    """Read an image in BGR using cv2.

    color_mode : cv2.IMREAD_UNCHANGED (-1) | cv2.IMREAD_GRAYSCALE (0) |
                 cv2.IMREAD_COLOR (default : 1)
    """
    assert_img_path(img_path, debug)
    img = cv2.imread(img_path, color_mode)
    return img


# https://www.ccoderun.ca/programming/doxygen/opencv/group__imgproc__color__conversions.html
def img_convert_color(img, color_mode=cv2.COLOR_BGR2RGB):
    """Convert an image between color spaces (default: BGR to RGB)."""
    img = cv2.cvtColor(img, color_mode)
    return img


# +
# https://matplotlib.org/tutorials/introductory/images.html
# https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.imshow.html
# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow
from matplotlib import pyplot as plt


def img_show(img, func=identity):
    """Display an image below the cell using matplotlib; `func` may preprocess it."""
    plt.imshow(func(img))
    plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
    plt.show()


# https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
def img_show_using_color(img, color_map='gray'):
    """Display an image interpreted through a matplotlib color map.

    NOTE : IT DOESN'T CHANGE the color map, it INTERPRETS IT.
    """
    plt.imshow(img, cmap=color_map)
    plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
    plt.show()
# -

# ### Modify images

# +
from skimage.transform import resize


def resize_img(img, image_size=160):
    """Resize an image to a square of side `image_size` pixels."""
    return resize(img, (image_size, image_size), mode='reflect')
# -

# ## Object detection
#
# * More info for classifer at https://www.superdatascience.com/blogs/opencv-face-detection
# * More info for OpenCV modules at https://docs.opencv.org/3.4.1/modules.html

# +
import cv2

lbpcascade_dir = '../cascades/lbpcascades/'
haarcascade_dir = '../cascades/haarcascades/'

# NOTE(review): loaded at import time with a path relative to the notebook
# directory — cv2 yields an empty classifier (no error) if run from elsewhere.
cascade_frontalface_alt2 = cv2.CascadeClassifier(haarcascade_dir + 'haarcascade_frontalface_alt2.xml')


# https://stackoverflow.com/questions/36218385/parameters-of-detectmultiscale-in-opencv-using-python
def detect_object(img, scale_factor=1.1, min_neighbors=5, coordinate_mapper=None,
                  cascade=cascade_frontalface_alt2, debug=False):
    """Detect objects matching `cascade` in `img`.

    Returns the raw (x, y, w, h) boxes, or — when `coordinate_mapper` is
    given — the list of results of mapping each box through it.
    """
    # https://docs.opencv.org/3.4.1/d1/de5/classcv_1_1CascadeClassifier.html#aaf8181cb63968136476ec4204ffca498
    faces = cascade.detectMultiScale(img, scaleFactor=scale_factor, minNeighbors=min_neighbors)
    if debug:
        print('Item found: ', len(faces))
    # Bug fix: identity check `is None` instead of `== None`.
    if coordinate_mapper is None:
        return faces
    return [coordinate_mapper(x, y, w, h) for (x, y, w, h) in faces]


# +
def extract_object_mapper_margin(img, margin=10, debug=False):
    """Currying an object extractor from an input img and using (x, y, w, h)."""
    def extract_object_mapper(x, y, w, h):
        if debug:
            print(x, y, w, h)
        # Robustness fix: clamp at the image border — a negative start index
        # would wrap around (Python negative indexing) and return an empty
        # or wrong crop for detections near the top/left edge.
        top = max(0, y - margin // 2)
        left = max(0, x - margin // 2)
        return img[top:y + h + margin // 2, left:x + w + margin // 2, :]
    return extract_object_mapper


def highlight_object_mapper_color(img, color=(0, 255, 0), debug=False):
    """Currying an object highlighter that draws a `color` rectangle on img."""
    def highlight_object_mapper(x, y, w, h):
        if debug:
            print(x, y, w, h)
        cv2.rectangle(img, (x, y), (x+w, y+h), color, 3)
    return highlight_object_mapper
# -
notebook/image_utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Sequence sets and the Burrows-Wheeler Transform # ===== # # The <a href="https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform">Burrows-Wheeler Transform</a> (**BWT**) is traditionally used to improve the compressibility of large texts. For bioinformatics applications, it allows for very rapid searches of arbitrarily long strings for substring $W$ in $O(\ |W|\ )$ time. The BWT is generally applicable to strings of symbols that have a well-defined unique position for each symbol, such as genomes and chromosomes. # # Every possible substring of the original string of symbols can be represented by a range of BWT entries. # # [figure] # # Spiral's sequence set format extends the BWT to support multiple strings with ambiguous position. The memory efficiency of BWT representation is leveraged, making the sequence set extremely compact. This makes it suitable for searching for substrings and overlaps in large numbers of similar strings (such as sequencing reads). # # Every possible substring of every original string, as well as sequences that overlap two or more original strings, can be represented by a range of sequence set entries. # # [figure] # Conventions # ----- # # Terminology and definitions used here are similar to <a href="http://bioinformatics.oxfordjournals.org/content/25/14/1754">doi:10.1093/bioinformatics/btp324</a>, _Fast and accurate short read alignment with Burrows–Wheeler transform_ by <NAME> and <NAME>, with some important differences. 
# # We use common programming conventions relating to sets: # * The first element of a sequence is always the 0th element # * Ranges are represented as <a href="http://mathworld.wolfram.com/Half-ClosedInterval.html">half-closed intervals</a>, and use the notation [start, end) # # For example this range of integers: # # > 2, 3, 4, 5, 6 # # ...is represented by the interval [2,7). The value at index 2 is 4. There are five items in the list, equivalent to _end - start = 7 - 2 = 5_. # Definitions # ----- # # For an alphabet of symbols $\Sigma = [ A, C, G, T ] $ # # $\alpha = $ one symbol of alphabet $\Sigma$ # # ${X}$ = source string $\alpha_{0}\alpha_{1}\ldots\alpha_{n-1}$ # # $\$ =$ end of string # # ${n}$ = number of symbols in ${X}$ # # ${i} = 0,1,\ldots,{n-1}$ # # ${X}[i] = \alpha_i$ # # ${X}[i,j+1) = \alpha_i\ldots\alpha_j$ (substring) # # ${X_i} = X[i,n)$ (suffix) # # $X[n] = \$$ (string terminator) # # $S(i) = i$ th lexicographically smallest suffix (aka index into X where suffix starts) # # $B$ is the BWT string: list of symbols that precede the first symbol in the sorted suffix list # # >$B[i] = \$$ when $S(i) = 0$ # # >$B[i] = X[S(i) - 1]$ # # $W =$ a potential substring of $X$ # # Bounds: # # >$\underline{R}(W) = min\{k:W $ is the prefix of $X_{S(k)}\}$ # # >$\overline{R}(W) = max\{k:W $ is the prefix of $X_{S(k)}\}$ # # For empty string $W = \$$: # # >$\underline{R}(W) = 0$ # # >$\overline{R}(W) = n - 1$ # # _Note that Li and Durbin define $\underline{R}(W) = 1$ for empty string W, but this requires off-by-one fix-ups later._ # # Set of positions of all occurrences of $W$ in $X$: # # >$\{S(k):\underline{R}(W) <= k <= \overline{R}(W)\}$ # # Is W a substring of X? # # > If $\underline{R}(W) > \overline{R}(W)$ then $W$ is not a substring of $X$. # # > If $\underline{R}(W) = \overline{R}(W)$ then $W$ matches exactly one BWT entry. # # > If $\underline{R}(W) < \overline{R}(W)$ then $W$ matches all BWT entries between (inclusive). 
# # $SA$ interval $= \big[\ \underline{R}(W), \overline{R}(W)\ \big] =$ the range of BWT entries that match $W$ (inclusive).
#
# When using half-closed intervals, _end - start = number of matches_:
#
# $SA$ interval $= \big[\ \underline{R}(W), \overline{R}(W) + 1\ \big)$ (exclusive)
#
# Backward search in $O(\ |W|\ )$ time:
#
# >$C(\alpha) =$ the number of symbols in $X[0,n-1)$ that are lexicographically smaller than $\alpha$
#
# >$O(\alpha,i) =$ # of occurrences of $\alpha$ in $B[0,i]$ (inclusive)
#
# If $W$ is a substring of $X$:
#
# >$\underline{R}(\alpha{W}) = C(\alpha) + O(\alpha,\underline{R}(W)-1)+1$
#
# >$\overline{R}(\alpha{W}) = C(\alpha) + O(\alpha, \overline{R}(W))$

# An example BWT implementation
# -----
#
# The following code demonstrates a simple (but inefficient) method for implementing and querying the BWT. It is intended to illustrate the general BWT approach. Real implementations use vastly more efficient methods for BWT production and search.

# +
# For string X
X = "ATTGCTAC$"

# Sort all suffixes
suffixes = sorted([X[i:] for i in range(len(X))])

print "# suffix"
for i, suffix in enumerate(suffixes):
    print "{i} {suffix}".format(i=i, suffix=suffix)

# +
# Calculate S
# S[i] = starting index in X of the i-th lexicographically smallest suffix.
# X.find is unambiguous here because a '$'-terminated string has unique suffixes.
S = []
for suffix in suffixes:
    S.append(X.find(suffix))

print S

# +
# C(a) = # of symbols in X[0,n−1) that are lexicographically smaller than a.
# Precalculate the C(a) table. This lets us look up C(a) without knowing B.
Ca = {}

# all unique symbols in X except for $
# NOTE: relies on '$' (0x24) sorting before every alphabet symbol, so [1:] drops it.
symbols = ''.join(sorted(list(set(X)))[1:])

for symbol in symbols:
    smaller = [x for x in X[:-1] if x < symbol]
    print symbol + ': ' + str(smaller)
    Ca[symbol] = len(smaller)

print '\n', Ca

# +
# B: X[S(i)-1]
# B[i] is the symbol preceding the i-th sorted suffix. When S[i] == 0,
# X[S[i]-1] is X[-1] == '$', which matches the definition B[i] = $ for S(i) = 0,
# so no special case is needed.
def B(i):
    return X[S[i]-1]

# n == |X| == |B| == |S|
n = len(X)

# String representation of B
B_str = ''.join([B(i) for i in range(n)])
print B_str
print n

# +
# O(a,i): number of occurrences of a in B up to index i (inclusive)
# Linear scan per call — fine for this demo; real implementations use
# precomputed rank structures. Note O(a, -1) == 0 because B_str[:0] is empty.
def O(a, i):
    count = 0
    for base in B_str[:i+1]:
        if base == a:
            count += 1
    return count

# r('$') == 0
r_cache = {'': 0}

# r underbar: lower limit of substring W in BWT
# Backward search: extends cached bounds one symbol at a time, shortest
# suffix of w first, via r(aW) = C(a) + O(a, r(W) - 1) + 1.
def r(w):
    # Precache all substrings.
    for aW in [w[i:] for i in range(len(w))][::-1]:
        if(not aW in r_cache):
            a = aW[0]
            W = aW[1:]
            r_cache[aW] = Ca[a] + O(a, r(W) - 1) + 1
    return r_cache[w]

# by definition, R('$') == n - 1
R_cache = {'': n - 1}

# R overbar: upper limit of substring W in BWT
# Mirrors r() with the upper-bound recurrence R(aW) = C(a) + O(a, R(W)).
def R(w):
    for aW in [w[i:] for i in range(len(w))][::-1]:
        if(not aW in R_cache):
            a = aW[0]
            W = aW[1:]
            R_cache[aW] = Ca[a] + O(a, R(W))
    return R_cache[w]

# SA value: compute [i,j) for W
# Returned pair is INCLUSIVE [r, R]; r > R means "not a substring".
def calc_SA(w):
    return [r(w), R(w)]

# Print calc_SA[i,j] in [inclusive, exclusive) format
def SA(W):
    i, j = calc_SA(W)
    return "'{}': [{}, {})".format(W, i, j + 1)

# +
# Let's find SA values for some substrings
print "# suffix"
for i, suffix in enumerate(suffixes):
    print "{i} {suffix}".format(i=i, suffix=suffix)

print "\nB = " + B_str + "\n"

for symbol in symbols:
    print symbol + ':', SA(symbol)

queries = [
    'GCT',  # exactly one match
    'GA',   # not in X
    'T',    # more than one match
    '',     # empty string, full range
]

print
for q in queries:
    print "SA('" + q + "') = " + str(SA(q))

# +
# Turn BWT entry numbers back into indexes in X by looking them up in S
# (end is inclusive, hence range(begin, end + 1)).
def find_it(query):
    begin, end = calc_SA(query)
    print '\n{} is found at:'.format(query)
    for i in range(begin, end + 1):
        print ' {} ({})'.format(S[i], X[S[i]:S[i]+len(query)])

print X
find_it('A')
find_it('TG')
find_it('TAC')
# -

# The Biograph sequence set format
# -----
python/jupyter/bwt/Sequence Sets and the Burrows-Wheeler Transform.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # The physics of musical notes
# ---
# - Author: <NAME>
# - GitHub: [github.com/diegoinacio](https://github.com/diegoinacio)
# - Notebook: [musical-notes-physics.ipynb](https://github.com/diegoinacio/creative-coding-notebooks/blob/master/Sound-Design/musical-notes-physics.ipynb)
# ---
# A brief overview of how to calculate the frequency of notes and their wavelengths.

# %matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import Audio

import numpy as np
import pandas as pd

plt.rcParams['figure.figsize'] = (16, 4)


# $$
# \large f_n=f_0 \times a^{n-49}
# $$
#
# where:
# - $f_{0}$ is the reference frequency for $A_4$, usually is 440 Hz;
# - $a$ is equivalent to $\sqrt[12]{2}$ or $2^{\frac{1}{12}}$;
# - $n$ denotes the note index.

def frequencies(f0=440, no=9):
    """Equal-tempered frequency table in Hz, 12 note rows by `no` octave columns.

    Entry (row, octave) is the pitch of that chromatic note in that octave,
    referenced to f0 for A4 (piano key number 49).
    """
    keys_per_octave = 12
    semitone_ratio = 2 ** (1 / keys_per_octave)
    # Piano-style key numbers laid out note-by-octave; the -8 offset lines
    # key 49 up with A in octave 4.
    key_numbers = np.arange(no * keys_per_octave).reshape((no, keys_per_octave)).T - 8
    return f0 * semitone_ratio ** (key_numbers - 49)


def wavelengths(f, c=343):
    """Wavelength table in cm for frequencies `f` in Hz.

    c is the speed of sound in air (m/s) at a temperature of 20°C.
    """
    return (100 * c) / f


freq_table = frequencies()           # frequencies in Hz
lambda_table = wavelengths(freq_table)  # wavelengths in cm

# Stack the two 12x9 tables and re-read column-major so each octave
# contributes an adjacent (frequency, wavelength) pair of columns.
paired = np.vstack((freq_table, lambda_table)).reshape((12, -1), order='F')

column_labels = []
for octave in range(9):
    for quantity in [r'$f$', r'$\lambda$']:
        column_labels.append((f'Octave {octave}', quantity))

scale = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
notes = pd.DataFrame(paired, index=scale, columns=column_labels)
notes.columns = pd.MultiIndex.from_tuples(notes.columns, names=['Octave', 'Hz | cm'])
notes.T.round(2)
Sound-Design/musical-notes-physics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vp1099/Sentiment_Analysis_Heroku_Streamlit/blob/main/Sentiment_Analysis_Major_Project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
import pandas as pd

df = pd.read_csv('amazonreviews.tsv', sep='\t')
df.head()

# +
# importing all the required libraries
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk import tokenize

# +
# converting the reviews into lower case
df.review = df.review.apply(lambda x: x.lower())
df

# +
# !pip install contractions

# +
import contractions


# +
# Expanding contractions
def con(text):
    """Expand English contractions (e.g. "don't" -> "do not")."""
    expand = contractions.fix(text)
    return expand


df.review = df.review.apply(con)
df['review'][0]

# +
import re


def remove_sp(text):
    """Strip every character that is not alphanumeric or whitespace."""
    pattern = r'[^A-Za-z0-9\s]'
    text = re.sub(pattern, '', text)
    return text


df.review = df.review.apply(remove_sp)
df

# +
import string

punctuations = list(string.punctuation)

# +
# NOTE(review): remove_sp already deleted all punctuation characters, so this
# filter is effectively a no-op; kept to preserve the original behaviour.
df.review = df.review.apply(lambda x: " ".join(x for x in x.split() if x not in punctuations))

# +
# Removing stopwords (but keep the negations 'no'/'not' — they carry sentiment)
nltk.download('stopwords')
stopword_list = stopwords.words('english')

# +
stopword_list.remove('no')
stopword_list.remove('not')

# +
df.review = df.review.apply(lambda x: " ".join(x for x in x.split() if x not in stopword_list))
df['review'][5]

# +
nltk.download('punkt')

# +
df['review'] = df.review.apply(word_tokenize)
df['review'][0]

# +
nltk.download('wordnet')

# +
lemmatizer = WordNetLemmatizer()

# +
df['review'] = df.review.apply(lambda x: [lemmatizer.lemmatize(word) for word in x])

# +
df

# +
# NOTE(review): this turns each token list into its string repr ("['a', 'b']"),
# which is what the TF-IDF vectorizer then tokenizes — " ".join(tokens) would
# be cleaner; left as-is to preserve the trained model's behaviour.
df.review = df.review.astype(str)

# +
import numpy as np

review_list = df['review'].array
review_list.shape

# +
# Features: the preprocessed review text; labels: the sentiment column.
x = df.iloc[:, 1].values
y = df.iloc[:, 0].values

# +
# Bug fix: `sklearn.__version__` was referenced here before `import sklearn`
# had ever run, raising a NameError on a clean execution.
import sklearn
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
sklearn_version = sklearn.__version__
print(sklearn_version)

# +
import sklearn

sklearn_version = sklearn.__version__
print(sklearn_version)

# +
print(x_train.shape)
x_test.shape

# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

sklearn_version = sklearn.__version__
print(sklearn_version)

# +
# TF-IDF features feeding an SVM with probability estimates enabled
# (needed so the Streamlit app can show confidence scores).
final = Pipeline([('Vect', TfidfVectorizer()),
                  ('model', SVC(probability=True))])

# +
final.fit(x_train, y_train)

# +
y_pred = final.predict(x_test)
y_pred

# +
y_test

# +
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

sklearn_version = sklearn.__version__
print(sklearn_version)

# +
# Bug fix: sklearn's metrics expect (y_true, y_pred); the previous calls
# swapped the arguments, which transposes the confusion matrix and mislabels
# precision/recall in the report (accuracy itself is symmetric).
accuracy_score(y_test, y_pred)

# +
confusion_matrix(y_test, y_pred)

# +
print(classification_report(y_test, y_pred))

# +
x_test[1]

# +
final.predict([x_test[1]])

# +
x_test[7]

# +
final.predict([x_test[7]])

# +
x_test[17]

# +
final.predict([x_test[17]])

# +
# Persist the fitted pipeline for the Streamlit app written out below.
import pickle

pickle.dump(final, open('sentiment_analysis_model.p', 'wb'))

# +
# !pip install streamlit
# !pip install pyngrok===4.1.1
from pyngrok import ngrok

# +
# %%writefile sent-analysis-app.py
import numpy as np
import pandas as pd
import pickle
import streamlit as st
import base64
from PIL import Image
#df = pd.read_csv('/content/drive/My Drive/amazonreviews.tsv',sep='\t')
model=pickle.load(open('sentiment_analysis_model.p','rb'))
st.set_page_config(page_title="Sentiment Analysis Web App",page_icon="",layout="centered",initial_sidebar_state="expanded",)
st.title('Sentiment Analysis Model')
st.subheader('by <NAME> ')
image='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARcAAAC1CAMAAABCrku3AAABX1BMVEX////ta1r9yC442ouPj5mIiJOMjJbtaViGhpHLy8/8/PyRkZv/ySrsYlChoamWlp/tZlQu2Yfw8fHBwcb9xhu4pX+EiZ/nvlOqqrHc3N+Tipqlpa2ztLm6vcEl2ISbm6Tm5uiHkZ3sXkrS0tbs7O3IyM39xAD+8N+Vh5q5ub7+8+xg4J/xjYHscGD//P/5/vzn+e/Q9uL81GbwubKo7sn1sKKGmJf4xsDsfnDZ9ub74t/98dJS25b85MH9zV9G3pT8yz3++u/8zW783q382py97dNy5Kr8zk6e6sH81YnjcmX50Muhi5F7oZb+3oj/9uT2ta38263+23n2s6DzpJWG4rH86rv65+GQ57jxm4r90nr82qD84cL80o3pj4T72s7KfnrfuV2km49xrpRmuJKCm5fZdm34wUdKzY6xhonLooSwoIS7nYzisW3Vs2XUqXpZwJDst19vs5Sph4u/gYA/7A6dAAAdyUlEQVR4nO1dC3vTRrp2EkmWZVuyZZe1YsdyfElqJ45NSCAkIRcgFxIKlOYC6S50CwX27Nk97Lb//znfjOamuxyyPXsWvc9TYsszo5l3vuvMSM1kUqRIkSJFihQpUqT4mqBp16vXqnRutiPXhmnX63p8Z7qVTtKhalajZtv1vjV5ZzqyLDcmr5YEWq7T6UzQJVNVFDkXV8qSFbmWrMGWLSuKIkGr9qTMaIYiSao5Ya1k6KjAuZJcjE1ZkpRYXmzU4W6S9izghEBRkxHTAmAyboaXLmrPdzWPeiQnn6kb5gUPTZLR3EhyJVEPRiCxag9/hElVv1iPGiDbtveiJeOZ0hO3kowXCzqcSI/w/Y0WCIGedIQjFXrg8JLp5lrJKkWgAUrs46XuSHFyYUzGS+IO9xUmrYk9C/ASb+GSI4gXNEoJuqYklsaEvCQF5qXlvqZ1anberhGeepVKxcpYDcNuIMU0KxXU31oFeegO/IaUD/3tZFoN2661kCup23a/KzRnQHPEVFhQtJcx53Sjjm9QqdhIYuGqaGPRPXTcN81VrQ/VmM2xGnY+b/dbIi+oQ05LWg9XQT3WjXy90qLNOD+jykYtF2bYUQdAj8SfW4aMLiqyjq/CN7kyhy5hP2ipWMIVbBPANCgSlKkh99cBuwOWJ5fBDk5WCNstyWlOreHm5lTk+NA1Ra7DFUV2bL4sC+ZQA6sr97oyF0zogKxbCq7mGAgL3QZ3JCfw0kfGDxPeQp8qOJZwitVMpxnk5LSaiivLYSLWxaOU7R7rVQs5bWyHHek24Heb+iwTRQDOQJBNBBWQ8ogXhZdRdFLAcLov40GjIdboRBh5xwnKMACFNqcIvHRQn7sZ1BLRsDlcTcKF8bhNGccW6AJ2o4QXxKWCb4SlDfqLSUFjMkynmbyGJhL3CnxNP0xgVGe6FCKfGoxP0S3TsomDMvDveTxsuNCy8bgNXW+4eaFloBeksEVmXqm3TMsgc48FFEjF02VrGV1HHjmv67pgYnUFu6KczIzfHK2G/tgO4bLey/UVwgO1L8hgqxoZRx13TdE7nQamhfJiokH3rZ6uhtECHaUCIM+hrz2Z3NeE/hqaw4sypzlzCD3QsN3tZZBWiLwoNacM2Eotk0MXUHt9mUyfqWAaMC+K3c2YqF2YTw1PrJ3JCKrcUhxZ6tJBOwNSbJNUw2U7PSaPAi/oO6prOpxqBqHW6tJmgBdUCImzFuVrun3ZoUbuOUNUeiZAA+aRRUYdQfygD3hkhJeMhxc0SThuwvqLCtedC3IHN4c62HV4wR3FwtDKYIH3+CPUrKRROTTpgHDpnuK0g6GZLYuElpQXKicwv7hrSH6MDq1AeOmiUKNmxUbTVg1bPzR9hqNVCraH6EZ0hI4RCecF2V+nF+iD7vQOCR1vDvGB5cUkE40J8vGCK+koCsaFe3RAvBoiKNO
a02XSbkvw0z2HKOiJgsxATsU9kBwvRO0Lnj85QebTMpx+YzlVKNDwDSrLMbzkSRlnjGTWTMnVXIfYl2heKo70ykSMsbDO0WodyksDuT9s0N28oFsqHU2mQeGcgk27jO065cW0qZOMI6bn6CWWF71GgEKML+Ml72rOSsKLZksu4EI+XlB/lXrPwgZd5AXHiToUo0lEt29TI894QSpiyBIbiB9dvcUmCfUAyd+cWOCLeMF/XSFCIC+SyAtOTaiISeTuPl7ypBM41HHxgqx23nasLx0k6qjaEnlB6yvMYvphQt5Y6WqmhQ2B6ZhDBXvYWl8L5oWFCAl4qSDzh+ML3dFwPy8VJ9BgQI3J/YoDrIfdAF5UYn7pdyEPIAEUGf8cuqiRUkyPaqhuD0lWIC8mMkCyYmCZwoPFCbas9xuGIhudQF7QfRW93kvCi2Ng6v0GhHI2ZcHNSwenrvU66SBOjQzaQXQHpR/AC+qWbrV6kte+kAZxLTwxSr3TqROvRXjRbDnf6PTybIK9aOVlpseK4SieE13jQLISyAu+Lw534nmBvEFizbG4zsWL6dydijeOU5nWt3DYqYXYF2KaPbyQtZsW+ojckaOPOLIlvBhOZXTbViAvGbOhOpE4zV8wV1itZSfxQPkRHmEdPjjU6djxzmGfINP8SGJlMC+0Fqg7aU7C+o7yI6w1FspfLKfvuOfO7TUYqiIsLxgyzrlYtQ6t1nDSsT7cCVkOk68KYVdNrC4kQk565KyiQPqE8iM+xHBHbebqIONGQyAOck3FqJNcs5PL5Sz6gSwadxqQBkAFCy4h8uhfp4zrA27OgOZI1RYqij52e7kcycq6c3W7RuYaXc4JYSgu33HuwKph2lpzdR1S/I7zXUP1yBgsRbC63ZyeV2wyPNZRa86W8vXQdPo/E1it/zXLzv+f0VJC7enXi17NSe5b/9cd+TdDw/FQqbh4gIM6OTha+4qhSaqsSsk2fL4qaGYr9UQpUqRIkSJFihQpUqRIkSJFihQpUqRIkSLFvw8Gm5ubp6fwz+DGm17Z2vr+2cvVnaPVly+ffL+1tXJzTWv3bx8cHi5t724vHR6uHdwf3VzTQMnpD8vDqVKpCCiVpobLP5xe3lTbK1uPjvaq1WqBAH3cW320dRNt315bKpebzXJ5BqEMn8vl3eODm+Fm82xYAjay2SmCbBYYKg3PNm+g8edPpoGHaS+Anb3Hz7+s6fEx0DDjBxB1fv9L+z24N2wLlAjIltrDO1+oUg+OFqo+TiiqCztfIDQHS4GkUGp2175EaAbr7WIQJ5SaYvvs+sysXHxc8EuKS2oWdi6u1zawEkqKg+bMtZkZrIMtiUGpeF1mtnYC9CeAmWto0/0oWeHM7N6+VsfvDKNkhcvM8M41Gt+/8skKsbs+S/OnCed1dO5npYzg16bD8cQdv1x2s4JsbbENQB7J/UtxY2Ln9GCv6h78QnVv5+XLq5cPd7BvctmZo4nMzO3dspuRZnN36fz81fn50jbyTa4fZ9Ym7PhbUYWypWJ74+zF27fdwaC7+fbF2Ua7KHJTKp5O1PjKY1FYCtXC1ePF/X0naFnZ3198/NBFTaF6kTygWSu7JKJ8uHZ/xARuNF47n3FR03w1kTSeCcICcnJ26jm4o52ug6sWROb1BI2PHgrCUq1eBYiDdvFQ9FTVJ0nbPm+KrBwGWZD752Wx0HZyYrTXRc5KezlEGE5ftzkzxeXkR56O+IirhUdh9bQfq0K51WRNC26oXD4OHfHBrlBuJqmR0TaKXBCWI2zH5WsuVqWNhMTsHzEVKUw/ilKQlcdcmxIRM1oq89GGs4IgMrObjBhto8RYGcbEtJcbjJmExHBaCoWH+zGFn+9UJyFGoOU8bqyjY144GTHLjJbSnfih3suWJiFGY0pUmP5jgs58zxx3PDGHTSYsBwmaHjPHlcjGnBXZOBMlQJdM65IY3ytKS3UnTlgcPP/IavwpuuQrRstSMsUYnfMasYXvtOko3yQ0GBp
jsrgeV/ZHNshnSV2vtsrqRCYFt/kgkza9wqhsHscUfUvtxSSelxHTfhtdcHGB0RLQyy3A84AxvWTERKQcYxaRHAb8eBth7G97jRETrXnakNJy5v1p//TO2TLg7M6pr3vrRWqRImVsZa8QQsvzR6t7EMqgcG7v4YVXw6jEFFbDJYHa3LKXlrGzBtNEyzDbx94FhmMmZZEm5nWJhiPu6yNIliABgGQgixanhl6D/IYQU/LUc+MZGV/1pfv6xY4Y3xaq1YeeUG+V8vkorOmDZqASrXiWG9ACg7vrVJV8fIrYZMNzsTc48yw3ZIvtN664ZvSGENqOyAj26bR/dPX9YtqXWBcW9lzMDIigFfZCjPVol7pcV8cPtv0pZLPszomooDUj1qo2nNFnp0RN0dbbAcsNpfa6SDxVwGyEs35IZ11cOtjfCVyZKixciRRsUUkLSQjYrIuB//gweLmhuS1SMKKXt0M7flokVkJ00G959OtG8US0spskkSyGrjqwsT0QLl5Mhy3CVKdF90MdWTVwOWZM51x0KwczoYswZbEcdWThppeIS0m0uffCF2GyLg6I7c0OwwTmiujCjnDtcfgyJkqjeUGNhMnBQcwrsrC9Ldx7LWppqnkuqPI5KRkWxFBxGQpadKcdtTbVfsFLDogmFe8Ft75IKRBm/PFCOC2ABYEYKm2FAF89ouIiaNFa9EKmSAzVpGbI+t2Gf2B3QnQoiBhKa4iFeVL1WYgYWtyRHLFO1R/9TVMOhAk/iF3fPeeFibMOcUmDNhEXcahxK5kiiYTXdmAGvk/tCJ/vBxFKRIlZZKWZvPnb3vXN9+04WqA0d0vM9AbmD+sl70gHpdgF3myRs0AEpuQLCREe+cRlP9TkchSOeAtUYHzLWGOfuIy2E6x7lzkLJLduBq5qOiRkp/iV5djtAJRd8vKOwAQHvf5RXcWLC5R/zMo/8CuiA2J1BX9yHC8urmSR+rOlgKDXP9v3YowL0STulPwSx0BiusIRM3dbSWgRHfMKddXetqlisEGNk9DiInLJJ0IMLxwWiix2YblSjCYN2YN4mw4vpYC0mlgHIZBfjdciXIOnUsRyL3g8EpnsMjek5wm0aMbl1omZDgphhl41SiYuLoE5cdoY+lsngQq3o4vJxEWkgSqSZ7nBN6ZRMnERq5BsvPzK1/FR26tGG1xcst4d6qxgkQXHTBSp7Y8xdhzp2GMXnnBevLtp7u017phXHLdeuHI3Tc0LU6MDcfF7xrWf5t5f4455Zdu54g/tLtse23DZZiMvnfz0k+iasqWffjrhNrnNVI/YqKJ/GWbPGdJD+n1foOHPPxvf7Anfj74xfv4DJ4Zz6cS8oo9COMQDLe+yC7ts3DPv7kp33+1yXt7/IknffeLMMHtCVG/XZ3jJkPhUv2BqVPowPz8vnfBzHicSXPjAiOH2RCt6yGXwmhe2QjVd+Mct+datn4U9EePWLXn2IyNmgYkjzRrcTe96VICpUfmTNItwlxHzHf4++5kSwyMeqow+XqgKsAtMjbJD9MKZ+b9wHv4yj64worLcVTtS5Y9gLhc8poEnRtWf5/+r8PMtJiCFv9/6a+G/5//KC7BKNAZyqSkJyngqyNXo7izIxqe7s3eJZX43K32e2YV/P1HmGJm3iTL6FhucWEUYIlOj7AdMg8R5eYqJ+sAEqM1mlEQwG97WL7xm95nAy62/eXmp/s8tgRdmYKjhdUV2973RC00Yy58xIWWgh/Cg5JEKNd/NvvMt7o3DIjtnRCW2qjvgvGB5kZ4KvGCiuBvnkf+Zw67PIV14Znplh2+u/e3W/Oz8U25gjmbnv52/9Q9egNnZxShemEoc0kG/+/Y9+lj+p/N35tO3v7j+ihskxCGF8sI04K2wFfvTvDQv0JAdAjHzPwmbsGyRbj2EF6oBTH25mZ2u/t249fNHYR3z49Nbxt+F73u0kubVRgSqAcyELlElee/oS/mz5BiUT9I/8d/d+XfM8tIOrXi1kWLK4YXlx6d
C9FI6+fU3McjLDn/79YOQI3A7S/Lvord1sqzEQ1XB/8DA/+BKleD7XmE6MS/NEF6AgJC/AthEhfFSiuAFHX7xHoYRv/t4aXtbfxzFy7QvgXSHM9fmJQEYL7uR8lIM5iUGXygvMfhdeAnVo2GUvEzIi5iTO6B2l2WNR8myI8wLW/gcEF4C7S7zsIfJsiMMFscRbx/vj4RwNxZ8m/F1jD9ijusqOS98SfcykZ8+Ts6L4KejeeGRxyiIl6wvUXJ4YRk1iYJOvK3/0TsiT3oUAM4LC5J9UZAwIr4r5FrC9HMk5ks8B7/tZZeCRB5ZdsG7ypDNlqZOAFO+Y87CFsDQI3UUA69l4GuYhT9/EwSeIC0wGh57tRHDmweIqy+fxGTI0Zx37/kVTkNoHkBWFdrsh3X3Yl3p5MOvTyEtmn/6q5gz4p9Y0DMiZte/h+RViX0x3g3CN/yoFGvkKjBvJHaWp8JCwvz+W8lFDAS/334n8MKMNVU+Hy9kC5aHaB5H/ZuEOfn1V5RD/hbipn2NMJBUeI9d4MtSe38IAnNYBb4wVfXYYQckFW4yoRUNzHez0numTOXy57uzBo9ihJVMshvrPyBE4n7ukLQpYYnlBETl6YcTFLecfECfBZHJTrEe3fEm5QzUnngXmUINDPuVqxHJwb07JVQFWCLgWsZ8Nzubfz/TxAsvwMrsL7tBakSN1HnGB7LszU0mVyS0sPD0A7G58AeYEZYdfEtZ2ZK/9Quv4d3fS+aRBK15FOSOwCGR3R8eeiyJphUtNkjz33333V206vBe1Cq+2kKDoIANgTOyaM18y4BuHmFa3OtSIjF8p8ScCjG7QANZa+OH5H6sRtHBxYXvZhMmF7xtUwa8TFGp+EyWXWZ/ee/yTwILh94ki+Oez2byk3ZgUFyWdqr4G1t2EFjwN8FA95en2SmFUSKBEYzJYrB54YO67b3CzEr50+fPnz95HxHg4jL2XeGg241ckfiGgP+5EnZFPBNCN3KDjns99q7YJdsoKSz6WvCdsiNKIBiH8YwXAY9NCLHKgU8VBZCjPSW+OBu/Detas7wkJirwzBSdbb6BJMZ2YRBsrEYv+Vmn2/ZcC2K3p93n8Mj2ZND2USbzlngkYVhnsUmSeDyRHEILeYiCOGbx9EvwkSCRFuHAGRGXQsCJxWPfDhI/tBqGcsAmf/BBD6o2ReFU0HIMMeIuLBWXqSA1Yo65cMR3abUYEyMeZR6QawuL/qapnRXme+U8mhjXgbPtqO1pdrJHFJhRNDGlDaF1qoeB2/bCyZ7H4rUoiXGd8L4Ks7oIxM6KBzVWDqOIKW8Lbyulx0SCrC7CYCrgKFgUMa6HSIgzymbDHrR4RPeXBXkarYYTsyCe2qTbk9XAw/NUYFxpX4QqNcUNehoGhomLcBRMfPXrWdiJqWxblIzRMOAQmgt0K63wUbz6Y8iznwXXmVSqcd7ciII5ZnHKww7Yld0nMpeC6rpABabk2ufYDHzO0fusCTkTks2Gn8mmob/7pMbgKPD56VVXO/RMSDXAuiDcp0N2mc7Rud87I2FxuZ1XQbLmAd2qL75wXfY/AZr1Hm2mjxWEH8eEbtLYbuGB6/LWUcFFDXxbdcf6P5LdSe+BaGF0QScyQUfOZ1zaBGHMkvsUHfPo4cdU0aNHZPxt906qdrqBTnsTlIrFjXvusz+nhJbw05gI7AzDgifFWXxyhE/BY0wfPfbI3AXbtA0VxhFNB71WYnxwuNt00sZmc2bp2BOisANnUcea0UIBJcYbhGyevlkeol+Gy2/ueR/AeUttUMyDE3z31XcY7PnW988ATx5s+U7oXiR5oIQP0Gc+x+ODY4SD+764jepf7AMlP7AnIAKiswGG//opXcEr/hDd+gpddSkUkj/7y2kJ1SIEdnYs9pkZDmaZI58OwKBn6rJh53D9YOlC9FMTCNo0I+b7hK0zGRMWtQLB/ErTf7onGAd8wSr2gTXqk4CYUI/rxjpbjwiJdEXw8x3Vl0kenxrwAMd
7fswLZmJmmttJ3jEvRn4J3u/Bll2mihsJXr0wYI8PiEdWw3HBF3Y/xj/gKDyX7z+f6gXPosvl+Acc7/PoJkHpDH8Cwhu5BWKdRX3upy3CwYkpLMSIzGCVBX2JDNKYD7W5FC0CYyF/SkYLEMNXdovDyDcMnApPWmdjnuFjuBCeiq56XbKAwRMeCie002Nx8fYwnJnxq3LQKm8cNod8abe44X9szwEENTxFyE4lf0WD+GxNdfrZVlDIoz24Eh63L+wldF9jYWm33DxcC1pQGR2cCwlCuTzB204GQ54tZotTr/2v2hpsnolBcGk4ySs9FsU0ulo9euZ+3HPl+YNne2IEXN1J/I6ZsZhGl5sz6LUVYtPjg3PXM2zl7YleqaSJ2SIEuMPlF5ubAw1uMdIGm5svlofiCz2y7dcT/g9pxDdXoAcaCzv4PWRbWw++f7a6437RSWHhKr5BjjXPA43l3aXz44OD2wcHx/hNJ66MqXk46UuD7hTFNV3nTVvDDcCJ8xYu8UdPOpUEF9PuBQb8HjIHnlfjVP2xcTTu73oWGMrsedgZ7w+JTQvH5bJ3gYHmR+7L2fbkr8UB7D+LebkUlaUnk/+/kdaC0mgfys3z671h6nQqft07WwxKGBJhcS/2BVOFhaOQhYVojAMXGDysxHjyKNwD2xr1PjKwydd5uRTF4k41ch2z+vBarCCMPe8F8mlQhBdPgnsbpTChAZuz8SWsICwixxMkNejFfk+uzQrCOPQNduWm/7n7yXG5foLXXtyCUioWh+s38S5I7cHLI7exRSa4cHQVGNZMhvuvDmfc5ha9CXLp/PbN/O/ctM07bzam2uCFHBTbUxtv7tzc60NRvPJyx3krWWF6b+fZsz9+4SsgOca3114d7u4CPc2Z3d3DV2sHk7+aLQra4PLeixdngBcv7l0Obv7/n7eyv7//HEDfS3aTGAHG6J8bbzlFihQpUqRIkSJFihQpUqRIkSJFihQpbgxa4Mff43b/4huZZswZqK6FEFxIayg5+jmn1Ca7tQnNonFqltVKVkOrS9Zk98hk4CZd9Lfl3Cxx5xS1Hl2iosoIejeotqoY8K+J7qgoclCRcFiqrCJWW6paiSur4XtYqhLTWX9FW5UN9KGuyiG8aGbAD2Y+bporimTokqJIAbW1fh5GpuT78DlnNCbrsiVLkgJy2JWVWF46+XwLblfLTywvuiIpc/C3puRDeNHzuv9iEl4UK2M2FBl3ibUtkKwqwYxo0YKLealjoat4ygta63zsyHIrsJHYg5DAi6SCIDcIL/5O2YotdtgpkIQXxIglgyUxG0beqKB63Xo+b3c0rW7PWbYi5e1KpmHrpm7r8GvFNmAQHTufD1Q+CuAlL0HbhJecnTfqeJiWnjc6PRv61a3BDRtapmZIkm2bpm3nLNvuQecN9LOG+tOP4yUvKTrlBTeIlLdnG3Avw+6bdl6S9Jqm27me0oF7Q4FOQnnJtWDwqmXKiqzKigS1JEVWVMnUDLlhqTDtaiNTk/NwcxUYyaMJqMiyYchKxHwC1T1DyRNeGrJq52UF+p5T0W1QI2D8VPjSyBggWrLaNRW5oqlolJYs54AcWdVlObr/ulIDWe84vHRlWbFlGYjtKyp0TZUbXei+pNqaokBrFrJ5Kgw1mX1RoDm4va0onW4F9dNCYg0+SjOUhtaSlRo4NVBgZCr6YEdBtOBTBw02onFoxIL/0EgryKSCVNblfkaTlHyn25DR8FsVLK+mWUG31rqSDHIJ39Eo4R9kUU08FVG81DMGzCXmxUCDtRRVo7yABegaigFOGYivdcxMzkKKpSfjRQJFyaFWkMyi8cPQjblWBvNC7Qu6jrQZJAWkpAPGtFLB18J5Af50sF7I7vaUPC5vZFoy9v0G1vpuDq6Bn4Pm4HaYlxZQCc6vhm6m9yoVQ4602jo0CezPYV6gp3AXpLycF8e+gAY4VsaqzKEqiexLBxtZRwSQXmmZXF6WQXc8vGgwPLmLW+yDlIEPy4c
5R8pLV5YMCXip0fLUxNagn1oNnKwhwXeBFw2m08K+AIwO1MmHWH3Gi61l6tCylAfJRhWgRieIF9RMKw8zbiTlhThHFRe1UciS0VpoHn28QPN11OdMTlYs0+xGBY2Yl0wfeaUKksqWU76LxVJD81dBbHRlNy/APdzDwD3RtZh7EF5MGcm8BhrbxzU0NKoummk3L5otg+mvyJPxUlOURq6ugMUz6zmzwniBKe+0SIAAZSRDw/fRLavWiOVFkzAv0MVay6qj8qBatQrYMhs4U0wUIWBeapZjX6BpMHg91DNQwG7HzoXfgvICRREvYL/yuVbFtpDjV+wc3ATzAo6F8ALjMbsJ5UVVCS+ajiPfGr6GQuCMlgcjnOmDBa/APVHkB9bB0XdQByikdiJ4UVU0ug6uDQKGy8OtukhHJRgPMiWKbCDDqiEfZXZlFYdoMlYC9AEcl2pEhUkgAeh3cF3I15kGrgFBE/RcUXXkTZD/U+umjEaSmVNlSTXAs5qSHBNaW3N9FoV0GnoDk2Q16rWeltEqFRi4lmvUrUyugvkAu+bIiDlX0/tR8Uu3P4fb6lXw3xYqj6tqubmepmN/1K/1zD662u3XGppJSvYrPdKfvl7LRUePYJk1pw76C13Va/iuZr8+53QfRlXPkY/wuWFBWTSyaDn8/WGifptytDn9ClFT9TmwLCGB/1cLDRwdQIqwTF8pup1Kv5PkybgUKVKkSPEfjf8FVaKAa1hs3AIAAAAASUVORK5CYII=' st.image(image, caption='',use_column_width=False) #main_bg = "https://cdn.miscellaneoushi.com/1440x900/20121018/abstract%20technology%20hearts%201440x900%20wallpaper_www.miscellaneoushi.com_20.jpg" #main_bg_ext = "jpg" #st.markdown( # f""" # <style> # .reportview-container {{ # background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()}) #}} #</style> #""", #unsafe_allow_html=True #) st.markdown(""" <style> body { color: #ff0000; background-color: #001f; etc. } </style> """, unsafe_allow_html=True) st.subheader('Enter Text') message = st.text_area("","Type Here ...") if st.button('PREDICT'): disp="" a=model.predict([message])[0] if(a== 'pos'): disp = "Positive Review!" else: disp = "Negative Review!" 
st.header(f"**{a}**") q = model.predict_proba([message]) #for index, item in enumerate(df['review']): #st.write(f'{item} : {q[0][index]*100}%') st.sidebar.subheader("About App") st.sidebar.info("This web app is made as part of Sentiment Analysis Major Project") st.sidebar.info("Scroll down and type your text in the writing area") st.sidebar.info("Click on the 'Predict' button to check whether the entered text is 'Positive' or 'Negative' ") st.sidebar.info("Don't forget to rate this app") feedback = st.sidebar.slider('How much would you rate this app?',min_value=0,max_value=10,step=1) if feedback: st.header("Thank you for rating the app!") # + id="3gJgC1LZjc-y" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="44d9b95b-410c-44b9-94d1-811de157104b" # !nohup streamlit run sent-analysis-app.py & url = ngrok.connect(port='8501') url # + id="lygiMPNQj2Lz"
Sentiment-Analysis/Sentiment_Analysis_Major_Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
# Importing standard Qiskit libraries and configuring account.
# BUG fixed: QuantumRegister / ClassicalRegister are used in the quantum
# teleportation section below but were never imported, so that cell raised
# a NameError at runtime.
from qiskit import (QuantumCircuit, QuantumRegister, ClassicalRegister,
                    execute, Aer, IBMQ)
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()

# # Chapter 4 -

# +
from qiskit.visualization import plot_bloch_multivector

# Single-qubit circuit, initially in the default |0> state.
qc = QuantumCircuit(1)

# Execute the quantum circuit on the statevector simulator.
backend = Aer.get_backend('statevector_simulator')
result = execute(qc, backend).result()
stateVectorResult = result.get_statevector(qc)

# Display the Bloch sphere
plot_bloch_multivector(stateVectorResult)
# -

# Place the qubit in a superposition state by adding a Hadamard (H) gate
qc.h(0)
# Draw the circuit
qc.draw()

# +
# Execute the circuit again and plot the result on the Bloch sphere
result = execute(qc, backend).result()
# Get the state vector results of the circuit
stateVectorResult = result.get_statevector(qc)
# Display the Bloch sphere
plot_bloch_multivector(stateVectorResult)
# -

# Reset the circuit
qc = QuantumCircuit(1)
# Rotate the qubit from 0 to 1 using the X (NOT) gate
qc.x(0)
# Add a Hadamard gate
qc.h(0)
# Draw the circuit
qc.draw()

# Reset the circuit: 1 qubit, 1 classical bit
qc = QuantumCircuit(1, 1)
# Add a Hadamard gate
qc.h(0)
# Create a measurement circuit with 1 qubit and 1 bit
measurement_circuit = QuantumCircuit(1, 1)
# measure() maps the qubit to the classical bit by their index values
measurement_circuit.measure(0, 0)
# Concatenate the circuits together.
# BUG fixed: the original used `qc + measurement_circuit`, which relies on the
# deprecated (and later removed) circuit "+" operator; compose() is the
# supported equivalent and yields the same combined circuit.
full_circuit = qc.compose(measurement_circuit)
# Draw the full circuit
full_circuit.draw()

# Execute on the QASM simulator and print the measurement counts
backend = Aer.get_backend('qasm_simulator')
result = execute(full_circuit, backend, shots=1000).result()
counts = result.get_counts(full_circuit)
print(counts)

# ## Understanding Entanglement

# Create a circuit with 2 qubits and 2 classical bits
qc = QuantumCircuit(2, 2)
# Add an H gate to each qubit -> four outcomes, roughly uniform
qc.h(0)
qc.h(1)
# Measure the qubits to the classical bits
qc.measure([0, 1], [0, 1])
# Draw the circuit
qc.draw()

# Execute the circuit and plot the counts
backend = Aer.get_backend('qasm_simulator')
result = execute(qc, backend, shots=1000).result()
counts = result.get_counts(qc)
plot_histogram(counts)

# Bell state: H on qubit 0 then CNOT(control=0, target=1) entangles the pair,
# so only '00' and '11' should appear in the counts.
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
qc.draw()

# Execute the circuit again and plot the counts
result = execute(qc, backend, shots=1000).result()
counts = result.get_counts(qc)
plot_histogram(counts)

# ## Quantum Teleportation

q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)

# Prepare the state to teleport on qubit 0
qc.x(0)
qc.z(0)
qc.barrier()
# Create the shared Bell pair between qubits 1 and 2
qc.h(1)
qc.cx(1, 2)
qc.barrier()
# Bell measurement of qubits 0 and 1
qc.cx(0, 1)
qc.h(0)
qc.measure(0, 0)
qc.measure(1, 1)
qc.barrier()
qc.cx(1, 2)
qc.barrier()
# NOTE(review): the Z/X corrections below are applied unconditionally rather
# than classically controlled on the measurement results (e.g. via c_if);
# kept as in the book example -- confirm against the chapter text.
qc.z(2)
qc.x(2)
qc.measure(2, 2)
qc.draw(output='mpl')

backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1024)
job_result = job.result()
results = job_result.get_counts(qc)
plot_histogram(results)

import qiskit.tools.jupyter
# %qiskit_version_table
Chapter04/Chapter 4 - Understanding Quantum Computation Basics.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Data preprocessing and trajectory analysis

# + jupyter={"outputs_hidden": true}
library(data.table)    # fread
library(dplyr)
library(geosphere)     # distHaversine: great-circle distance between GPS fixes
library(lubridate)     # day()/month()/year() extraction from timestamps
library(robfilter)     # robreg.filter: robust smoothing of the distance series
library(ggplot2)       # plotting
library(ggpubr)        # ggarrange/ggexport: combine and save plots
library(ggmap)         # get_stamenmap/ggmap: plot points on a basemap
library(RColorBrewer)
# -

# # Input location data

# Read data from folder.
# Suggestion: process one year at a time (one year's table has 94363502 rows for green).
YEARLIST = c('19')
MONTHlIST = c("04")  # NOTE(review): lower-case 'l' in the name is historical; kept for compatibility

# FOR FULL TABLE
DISTANCE_FILEPATH = "F:/data/raw/vehicle-location/"

# Add day / month / year columns derived from the trxtime timestamp.
add_dd_mm_yy_cols = function(df) {
  df$day   = day(df$trxtime)
  df$month = month(df$trxtime)
  df$year  = year(df$trxtime)
  return(df)
}

# Read heavy-rail raw location data for one (year, month).
# (assign("dh", ...) replaced by plain `=` -- identical effect in function scope.)
get_heavy_rail_trajectories = function(year, month){
  dh = fread(paste(DISTANCE_FILEPATH,
                   paste("heavyrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  dh = add_dd_mm_yy_cols(dh)
  return(dh)
}

# Read light-rail raw location data for one (year, month).
get_light_rail_trajectories = function(year, month){
  dg = fread(paste(DISTANCE_FILEPATH,
                   paste("lightrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  dg = add_dd_mm_yy_cols(dg)
  return(dg)
}

# Get the raw tables
df_heavyrail = get_heavy_rail_trajectories(YEARLIST, MONTHlIST)
df_lightrail = get_light_rail_trajectories(YEARLIST, MONTHlIST)

# # Table subset by trajectory index

# Subset the month table to a single day, keyed for fast trajectory lookups.
get_day_trajectories = function(df, dayid){
  day_df = df[day == dayid, .SD, keyby = .(trainid, vehicleid, routeid)]
  # print(paste("Number of observations", nrow(day_df), "on day", dayid))
  return(day_df)
}

# Create the trajectory index table: one row per unique consist on that day.
get_unique_trajectory_indices = function(day_df) {
  trajectory_index_df = unique(day_df[, .(trainid, vehicleid, routeid, car1, car2, car3)])
  # print(paste("Number of unique trajectories extracted: ", dim(trajectory_index_df)[1]))
  return(trajectory_index_df)
}

# Subset the day table to the index-th unique light-rail trajectory.
# car2/car3 may be NA (1- or 2-car consists); NA == NA is not TRUE in R, so
# NAs are temporarily replaced by the sentinel 9999999 for the equality
# filter and restored afterwards.
extract_unique_trajectory_light = function(day_df, traj_index_df, index){
  day_df[["car2"]][is.na(day_df[["car2"]])] <- 9999999
  day_df[["car3"]][is.na(day_df[["car3"]])] <- 9999999
  traj_index_df[["car2"]][is.na(traj_index_df[["car2"]])] <- 9999999
  traj_index_df[["car3"]][is.na(traj_index_df[["car3"]])] <- 9999999
  trajectory = day_df[trainid == traj_index_df[index, 1][[1]] &
                      vehicleid == traj_index_df[index, 2][[1]] &
                      # routeid == traj_index_df[index, 3][[1]] &
                      car1 == traj_index_df[index, 4][[1]] &
                      car2 == traj_index_df[index, 5][[1]] &
                      car3 == traj_index_df[index, 6][[1]], ][order(trxtime)]
  # Keep the first observation of any duplicated timestamp.
  clean_trajectory = trajectory[, .SD[1], by = trxtime]
  clean_trajectory[["car2"]][clean_trajectory[["car2"]] == 9999999] = NA
  clean_trajectory[["car3"]][clean_trajectory[["car3"]] == 9999999] = NA
  # Convert trxtime to a POSIXct timestamp in the local (Boston) timezone.
  options(tz = "America/New_York")
  clean_trajectory$time = as.POSIXct(clean_trajectory$trxtime, tz = getOption("tz"))
  return(clean_trajectory)
}

# Subset the day table to the index-th unique heavy-rail trajectory.
# Same NA-sentinel trick as the light-rail variant, applied to the
# heavy-rail identifier columns.
extract_unique_trajectory_heavy = function(day_df, traj_index_df, index){
  day_df[["heavyrailbranchid"]][is.na(day_df[["heavyrailbranchid"]])] <- 9999999
  day_df[["tripid"]][is.na(day_df[["tripid"]])] <- 9999999
  day_df[["vehicleid"]][is.na(day_df[["vehicleid"]])] <- 9999999
  traj_index_df[["heavyrailbranchid"]][is.na(traj_index_df[["heavyrailbranchid"]])] <- 9999999
  traj_index_df[["tripid"]][is.na(traj_index_df[["tripid"]])] <- 9999999
  traj_index_df[["vehicleid"]][is.na(traj_index_df[["vehicleid"]])] <- 9999999
  trajectory = day_df[trainid == traj_index_df[index, 1][[1]] &
                      vehicleid == traj_index_df[index, 2][[1]] &
                      lineid == traj_index_df[index, 3][[1]] &
                      heavyrailbranchid == traj_index_df[index, 4][[1]] &
                      tripid == traj_index_df[index, 5][[1]], ][order(trxtime)]
  # Keep the first observation of any duplicated timestamp.
  clean_trajectory = trajectory[, .SD[1], by = trxtime]
  clean_trajectory[["heavyrailbranchid"]][clean_trajectory[["heavyrailbranchid"]] == 9999999] = NA
  clean_trajectory[["tripid"]][clean_trajectory[["tripid"]] == 9999999] = NA
  clean_trajectory[["vehicleid"]][clean_trajectory[["vehicleid"]] == 9999999] = NA
  # Convert trxtime to a POSIXct timestamp in the local (Boston) timezone.
  options(tz = "America/New_York")
  clean_trajectory$time = as.POSIXct(clean_trajectory$trxtime, tz = getOption("tz"))
  return(clean_trajectory)
}

# Generate the selected light-rail trajectory table for (day, index).
light_subset = function(df, day, index){
  day_df = get_day_trajectories(df, day)
  trajectory_index_df = get_unique_trajectory_indices(day_df)
  clean_trajectory_df = extract_unique_trajectory_light(day_df, trajectory_index_df, index)
  return(clean_trajectory_df)
}

df = light_subset(df_lightrail, 1, 3)

# + jupyter={"outputs_hidden": true}
df
# -

# Generate the selected heavy-rail trajectory table for (day, index).
# Returns both the cleaned trajectory and the index table.
heavy_subset = function(df, day, index){
  day_df = get_day_trajectories(df, day)
  trajectory_index_df = get_unique_trajectory_indices(day_df)
  clean_trajectory_df = extract_unique_trajectory_heavy(day_df, trajectory_index_df, index)
  return(list(clean_trajectory = clean_trajectory_df, index = trajectory_index_df))
}

# BUG fixed: light_subset() returns the trajectory table directly, so the
# original `light_subset(...)$clean_trajectory` always yielded NULL (only
# heavy_subset() returns a list with a $clean_trajectory element).
df_test = light_subset(df_lightrail, 2, 7)

# + jupyter={"source_hidden": true}
# NOTE(review): legacy helper kept for reference. It calls
# extract_unique_trajectory() and preprocess_data(), which are not defined in
# this notebook, and uses the magrittr %<>% operator without
# library(magrittr); it will not run as-is.
compute_day_trajectories = function(month_df, dd) {
  df_dd = get_day_trajectories(month_df, dd)
  traj_indices_dd = get_unique_trajectory_indices(df_dd)
  print(head(traj_indices_dd))
  num_traj = nrow(traj_indices_dd)
  for (tt in seq(num_traj)[1:5]) {  # ideally this should be for the whole sequence
    traj = extract_unique_trajectory(df_dd, traj_indices_dd, tt)
    traj$trajid = tt  # add a new column
    traj %<>% preprocess_data() %>% compute_distance() %>% compute_speed_acceleration()
    if (tt == 1) {
      processed_traj_df = traj
    } else {
      processed_traj_df = rbind(processed_traj_df, traj)
    }
  }
  return(processed_traj_df)
}
# -

# Case 1: raw series -- no filtering.
case_1 = function(clean_trajectory){
  clean_trajectory = compute_time_interval(clean_trajectory)
  clean_trajectory = compute_distance(clean_trajectory)
  clean_trajectory = compute_speed_acceleration(clean_trajectory)
  clean_trajectory = compute_cumulative_time_distance(clean_trajectory)
  trajectory_plot(clean_trajectory)
  return(clean_trajectory)
}

# Case 2: smooth the distance series with a robust median filter of width w.
case_2 = function(clean_trajectory, w){
  clean_trajectory = compute_time_interval(clean_trajectory)
  clean_trajectory = compute_distance(clean_trajectory)
  filter = robreg.filter(clean_trajectory$dist_meters, width = w,
                         online = FALSE, method = "MED")
  clean_trajectory$dist_meters = filter$level$MED
  clean_trajectory = compute_speed_acceleration(clean_trajectory)
  clean_trajectory = compute_cumulative_time_distance(clean_trajectory)
  trajectory_plot(clean_trajectory)
  return(clean_trajectory)
}

# +
# Case 3: drop very long gaps (>= 2500 s) between consecutive observations.
case_3 = function(clean_trajectory){
  clean_trajectory = data.table(clean_trajectory)
  clean_trajectory = compute_time_interval(clean_trajectory)
  clean_trajectory = clean_trajectory[interval_seconds < 2500]
  clean_trajectory = compute_time_interval(clean_trajectory)  # recompute after dropping rows
  clean_trajectory = compute_distance(clean_trajectory)
  clean_trajectory = compute_speed_acceleration(clean_trajectory)
  clean_trajectory = compute_cumulative_time_distance(clean_trajectory)
  trajectory_plot(clean_trajectory)
  return(clean_trajectory)
}
# -

# Case 4: drop very short intervals (<= 5 s), then median-filter the distances.
case_4 = function(clean_trajectory, w){
  clean_trajectory = data.table(clean_trajectory)
  clean_trajectory = compute_time_interval(clean_trajectory)
  clean_trajectory = clean_trajectory[interval_seconds > 5]
  clean_trajectory = compute_time_interval(clean_trajectory)  # recompute after dropping rows
  clean_trajectory = compute_distance(clean_trajectory)
  filter = robreg.filter(clean_trajectory$dist_meters, width = w,
                         online = FALSE, method = "MED")
  clean_trajectory$dist_meters = filter$level$MED
  clean_trajectory = compute_speed_acceleration(clean_trajectory)
  clean_trajectory = compute_cumulative_time_distance(clean_trajectory)
  trajectory_plot(clean_trajectory)
  return(clean_trajectory)
}

# + jupyter={"outputs_hidden": true}
df
# -

# Case 5: remove outlier speeds/accelerations after dropping sub-second gaps.
# Returns the trajectory_plot() result (no explicit return, as in the original).
case_5 = function(clean_trajectory){
  clean_trajectory = data.table(clean_trajectory)
  clean_trajectory = compute_time_interval(clean_trajectory)
  # Remove short time interval observations
  clean_trajectory = clean_trajectory[interval_seconds > 1]
  clean_trajectory = compute_time_interval(clean_trajectory)
  clean_trajectory = compute_distance(clean_trajectory)
  clean_trajectory = compute_speed_acceleration(clean_trajectory)
  clean_trajectory = compute_cumulative_time_distance(clean_trajectory)
  # Remove outlier speed / acceleration observations
  clean_trajectory = clean_trajectory[speed_kph < 120]
  clean_trajectory = clean_trajectory[accel_mps2 > -6 & accel_mps2 < 6]
  # NOTE(review): the author left a commented-out "redo the calculation"
  # block here (re-running the compute_* chain after outlier removal);
  # intentionally not enabled because it would change the reported stats.
  trajectory_plot(clean_trajectory)
}

# + jupyter={"outputs_hidden": true}
case_5(df)
# -

# NOTE(review): the df_test_2_case* objects are produced interactively via the
# case_*() functions above; this call errors unless they exist in the session.
d_compare_table = compare_table(df_test_2_case1, df_test_2_case2_5, df_test_2_case2_7,
                                df_test_2_case2_9, df_test_2_case3, df_test_2_case4_5,
                                df_test_2_case4_7, df_test_2_case4_9)

# # Function used for trajectory investigation

# Plot one trajectory: (a) time-series facets, (b) histograms, (c) a map,
# combined into a single figure that is also exported as a PNG.
trajectory_plot = function(df){
  p_title = title(df)
  # (b) Histograms of distance / interval / speed / acceleration.
  clean_hist = melt(df, id.vars = c("time","month","day"),
                    measure.vars = c("dist_meters","interval_seconds",
                                     "speed_kph","accel_mps2"))
  # Name new labels for the histogram facets
  levels(clean_hist$variable) = c("Distance (meters)", "Time interval (s)",
                                  "Speed (km/h)", "Acceleration (meters/s^2)")
  p_hist = ggplot(clean_hist, aes(x = value)) +
    geom_histogram(color = "black", fill = "lightblue") +
    facet_wrap(~variable, ncol = 2, scales = "free") +
    labs(title = "(b) Histograms") +
    theme(strip.text = element_text(size = rel(2)),
          # title
          axis.title.x = element_blank(),
          axis.title.y = element_blank(),
          title = element_text(size = 20),
          # axis label
          axis.text.x = element_text(size = 15),
          axis.text.y = element_text(size = 15),
          # space between facet plots
          panel.spacing = unit(2, "lines"))
  # (a) Time-series facets.
  clean_trajectory_melt = melt(df, id.vars = c("time","month","day"),
                               measure.vars = c("dist_meters","cumdist_km","interval_seconds",
                                                "cumtime_hrs","speed_kph","accel_mps2"))
  # Name new labels for the facet plot
  levels(clean_trajectory_melt$variable) = c("Distance (meters)", "Cumulative distance (km)",
                                             "Time interval (s)", "Cumulative time (hrs)",
                                             "Speed (km/h)", "Acceleration (meters/s^2)")
  p_main = ggplot(clean_trajectory_melt, aes(x = time, y = value)) +
    geom_point(colour = "darkorange") +
    facet_wrap(~variable, ncol = 2, scales = "free_y") +
    labs(title = "(a) Time series ", x = "Time") +
    theme(strip.text = element_text(size = rel(2)),
          axis.title.x = element_blank(),
          axis.title.y = element_blank(),
          title = element_text(size = 20),
          axis.text.x = element_text(size = 15),
          axis.text.y = element_text(size = 15),
          panel.spacing = unit(2, "lines"))
  # (c) Trajectory map on a Stamen basemap bounded by the trajectory extent.
  ma <- get_stamenmap(bbox = c(left = min(df$lon), bottom = min(df$lat),
                               right = max(df$lon), top = max(df$lat)), zoom = 14)
  map_label = pretty(df$time, 5)
  p_map = ggmap(ma, darken = c(0.6, "white")) +
    geom_point(data = df, aes(x = lon, y = lat, color = as.numeric(time), alpha = 0.7),
               size = 4, shape = 16) +
    # Annotate implausibly fast fixes (> 160 km/h) with their speed value.
    geom_text(data = df, aes(label = ifelse(speed_kph > 160, round(speed_kph, 0), '')),
              hjust = 0, vjust = 0, size = 4) +
    scale_color_gradient(low = "red", breaks = as.integer(map_label),
                         labels = format(map_label, "%H:%M")) +
    labs(title = "(c) Map", x = "Lon", y = "Lat", color = "Time") +
    theme(axis.title.x = element_text(size = 20),
          axis.title.y = element_text(size = 20),
          title = element_text(size = 20),
          axis.text.x = element_text(size = 15, angle = 90),
          axis.text.y = element_text(size = 15),
          legend.text = element_text(size = 15),
          plot.caption = element_text(hjust = 0, margin = unit(c(-15,0,0,0), "mm"))) +
    guides(alpha = FALSE, size = FALSE)
  # Stack (b) and (c), then place next to (a), with the title row on top.
  p1 = ggarrange(p_hist, p_map, nrow = 2, ncol = 1, heights = c(15,10), widths = c(20,50))
  tgrob <- text_grob(p_title, size = 30)
  plot_0 <- as_ggplot(tgrob) + theme(plot.margin = margin(0,3,0,0, "cm"))
  options(repr.plot.width = 20, repr.plot.height = 10)
  p_plot = ggarrange(p_main, p1, nrow = 1, ncol = 2, widths = c(20,15))
  p = ggarrange(plot_0, p_plot, nrow = 2, ncol = 1, heights = c(5,20)) %>%
    ggexport(filename = "../../figures/case_green_example.png", width = 1500, height = 700)
  print(p)
  return(p)
}

# Compute the time interval (seconds) between consecutive observations.
# Note: the original wrote `1:n-1`, which R parses as 0:(n-1); index 0 is
# silently dropped, so it is equivalent to the explicit 1:(n-1) used here.
compute_time_interval <- function(d) {
  d$interval_seconds = NA
  n <- nrow(d)
  if (n >= 2) {
    d$interval_seconds[2:n] = as.numeric(difftime(d$trxtime[2:n], d$trxtime[1:(n-1)],
                                                  units = "secs"))
  }
  return(d)
}

# Compute the great-circle distance (meters) between consecutive fixes.
compute_distance <- function(d) {
  d$dist_meters = NA
  n <- nrow(d)
  if (n >= 2) {
    # Interval distance via the Haversine formula
    d$dist_meters[2:n] = distHaversine(cbind(d$lon[1:(n-1)], d$lat[1:(n-1)]),
                                       cbind(d$lon[2:n], d$lat[2:n]))
  }
  return(d)
}

# Compute speed (m/s and km/h) and acceleration (m/s^2) from the
# per-interval distance and time columns.
compute_speed_acceleration <- function(d) {
  d$speed_mps = NA
  d$speed_kph = NA
  d$accel_mps2 = NA
  n <- nrow(d)
  if (n >= 2) {
    d$speed_mps[2:n] = d$dist_meters[2:n] / d$interval_seconds[2:n]
    # Convert speed to km/h
    d$speed_kph[2:n] = d$speed_mps[2:n] * 3.6
    d$accel_mps2[2:n] = (d$speed_mps[2:n] - d$speed_mps[1:(n-1)]) / d$interval_seconds[2:n]
  }
  return(d)
}

# Calculate cumulative distance (km) and time (hours).
compute_cumulative_time_distance = function(d){
  df = d
  # cumsum() has no na.rm argument, so NAs are zeroed on a working copy;
  # the copy is discarded and only the cumulative columns are kept.
  df[is.na(df)] <- 0
  df = df %>% mutate(cumdist = cumsum(dist_meters)) %>%
       mutate(cumtime = cumsum(interval_seconds))
  d$cumdist_km = df$cumdist / 1000
  d$cumtime_hrs = df$cumtime / 3600
  return(d)
}

# Build the title string for a trajectory plot.
# BUG fixed: the notebook defined title() twice (a light-rail and a heavy-rail
# variant); the second definition silently replaced the first, so light-rail
# plots failed (no lineid column). The variants are merged here and dispatched
# on the presence of the heavy-rail lineid column.
title = function(clean_trajectory){
  if ("lineid" %in% names(clean_trajectory)) {
    # Heavy rail: lineid 1 = Red, 2 = Blue, 3 = Orange.
    if (unique(clean_trajectory$lineid) == 1){
      linetype = paste("Line:", "Red")
    } else if (unique(clean_trajectory$lineid) == 2){
      linetype = paste("Line:", "Blue")
    } else if (unique(clean_trajectory$lineid) == 3){
      linetype = paste("Line:", "Orange")
    }
    trainid = paste("Trainid:", unique(clean_trajectory$trainid))
    vehicleid = paste("Vehicleid:", unique(clean_trajectory$vehicleid))
    branchid = paste("Branchid:", unique(clean_trajectory$heavyrailbranchid))
    tripid = paste("Tripid:", unique(clean_trajectory$tripid))
    month = unique(clean_trajectory$month)
    day = unique(clean_trajectory$day)
    date = paste("Date:", paste(month, day, sep = "-"))
    df_title = paste(linetype, trainid, vehicleid, branchid, tripid, date, sep = "|")
  } else {
    # Light rail: the Green line, consists identified by car ids.
    linetype = paste("Line:", "Green")
    trainid = paste("Trainid:", unique(clean_trajectory$trainid))
    vehicleid = paste("Vehicleid:", unique(clean_trajectory$vehicleid))
    # routeid = paste("Routeid:", unique(clean_trajectory$routeid))
    car1id = paste("Car1id:", unique(clean_trajectory$car1))
    car2id = paste("Car2id:", unique(clean_trajectory$car2))
    car3id = paste("Car3id:", unique(clean_trajectory$car3))
    month = unique(clean_trajectory$month)
    day = unique(clean_trajectory$day)
    date = paste("Date:", paste(month, day, sep = "-"))
    df_title = paste(linetype, trainid, vehicleid, car1id, car2id, car3id, date, sep = "|")
  }
  print(df_title)
  return(df_title)
}

# Summarise speed / acceleration / totals for the eight preprocessing cases
# into one comparison data.frame (one row per case).
# BUG fixed: the original executed `comparison_table[,2:11] = round(...)` at
# the top level BEFORE compare_table was ever called, referencing a variable
# that did not exist; the intended rounding now happens inside the function.
# The ~80 repeated median/max/min/mean calls are collapsed via a helper.
compare_table = function(case_1_test, case_2_test_5, case_2_test_7, case_2_test_9,
                         case_3_test, case_4_test_5, case_4_test_7, case_4_test_9){
  cases = list(case_1_test, case_2_test_5, case_2_test_7, case_2_test_9,
               case_3_test, case_4_test_5, case_4_test_7, case_4_test_9)
  # Apply summary function f to column col of every case table (NA-safe).
  col_stat = function(f, col) sapply(cases, function(d) f(d[[col]], na.rm = TRUE))
  comparison_table = data.frame(
    case = c("case 1", "case 2_5", "case 2_7", "case 2_9",
             "case 3", "case 4_5", "case 4_7", "case 4_9"),
    speed_median        = col_stat(median, "speed_kph"),
    speed_max           = col_stat(max,    "speed_kph"),
    speed_min           = col_stat(min,    "speed_kph"),
    speed_mean          = col_stat(mean,   "speed_kph"),
    acceleration_median = col_stat(median, "accel_mps2"),
    acceleration_max    = col_stat(max,    "accel_mps2"),
    acceleration_min    = col_stat(min,    "accel_mps2"),
    acceleration_mean   = col_stat(mean,   "accel_mps2"),
    total_distance_km   = sapply(cases, function(d) sum(d$dist_meters, na.rm = TRUE) / 1000),
    total_time_hrs      = sapply(cases, function(d) sum(d$interval_seconds, na.rm = TRUE) / 3600)
  )
  comparison_table[, 2:11] = round(comparison_table[, 2:11], 2)
  return(comparison_table)
}

# NOTE(review): distance_filter() is not defined anywhere in this notebook;
# the call is kept commented out to avoid an immediate error.
# case_3_test = distance_filter(clean_trajectory)

# ## Data preprocessing

# ## something which will be used or not (this part will be deleted when this script has been done)

# aggregate_trajectory_table
# Build one combined (trxtime, trainid, lineid, lat, lon) table from the
# light-rail and heavy-rail raw files for one (year, month).
aggregate_line_trajectories = function(year, month){
  dg = fread(paste(DISTANCE_FILEPATH,
                   paste("lightrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  dh = fread(paste(DISTANCE_FILEPATH,
                   paste("heavyrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  # Combine the original tables into a single one for analysis
  dg = subset(dg, select = c(trxtime, trainid, lineid, lat, lon))
  dh = subset(dh, select = c(trxtime, trainid, lineid, lat, lon))
  df = rbind(dg, dh)
  return(df)
}

# Fixed file path read.
# BUG fixed: the original pasted DISTANCE_FILEPATH twice and had unbalanced
# parentheses (a syntax error). NOTE(review): `month` and `year` must exist in
# the session for this top-level call to run -- TODO confirm intended scope.
df = fread(paste(DISTANCE_FILEPATH,
                 paste("heavyrail", "trajectories", month, year, ".csv",
                       sep = "-", collapse = ""),
                 sep = ""))

# +
# Duplicate helper definitions kept from a scratch cell (they shadow the
# identical functions defined earlier in the notebook).
add_dd_mm_yy_cols = function(df) {
  df$day   = day(df$trxtime)
  df$month = month(df$trxtime)
  df$year  = year(df$trxtime)
  return(df)
}

# BUG fixed: same doubled-DISTANCE_FILEPATH / unbalanced-parenthesis error as
# above; now reads the heavy-rail file with a single path prefix.
get_heavy_rail_trajectories = function(year, month){
  df = fread(paste(DISTANCE_FILEPATH,
                   paste("heavyrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  df = add_dd_mm_yy_cols(df)
  return(df)
}

get_light_rail_trajectories = function(year, month){
  dg = fread(paste(DISTANCE_FILEPATH,
                   paste("lightrail", "trajectories", month, year, ".csv",
                         sep = "-", collapse = ""),
                   sep = ""))
  dg = add_dd_mm_yy_cols(dg)
  return(dg)
}
bin/jupyter/vehicle-trajectory-computation-investigations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scratch notebook exercising pycoin's BIP32 hierarchical key derivation.
# The '<KEY>' placeholders stand in for redacted extended-key strings.
from pycoin.key.key_from_text import key_from_text  # NOTE(review): unused; kept for interactive use
from pycoin.key import Key
from pycoin.key.BIP32Node import BIP32Node

# BUG fixed: the original first statement was `mKey.from_text('<KEY>')`, but
# `mKey` is never defined anywhere in this notebook, so the cell raised a
# NameError. BIP32Node.from_text is the constructor actually used below, so
# the stray call is removed.
master = BIP32Node.from_text('<KEY>')

# Derive the child key at path 0/0/0 and display it.
k1 = master.subkey_for_path('0/0/0')
k1

# Equivalent single-step derivation via Key.
Key.from_text('<KEY>').subkey(0)

# One level deeper than k1.
master.subkey_for_path('0/0/0/0')
notebooks/Bip32.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # The database object is responsible for maintaining the # snapshots (versions) of datasets that are manipulated using # openclean operators in this notebook. from openclean_notebook import DB # Define a base directory on the local file system where all # data is stored. The create=True flag will erase any data # that previously exists in the base directory. db = DB(basedir='.openclean', create=True) # + from openclean.data.load import dataset # # Load full dataset 'VDH-COVID-19-PublicUseDataset-Cases' from the data.virginia.gov portal. df = db.load_dataset(source='./data/bre9-aqqr.tsv.gz', name='covid-cases').checkout() # + # Add a user-defined function that operates on two columns and # that takes an additional parameter as input. # # When registering the function we can specify the number of # input columns the function operates on. The convention is that # the first n arguments of the registered function will receive # their values from the n input columns that the user selects. # Additional parameters will be called as keyword arguments. # # If the collables argument is not given when the function is # registered the names of the first n function arguments are # used as the defaults. 
from openclean.engine.object.function import String


@db.register.eval(
    name='concat',
    label='Concat Columns',
    description='Concatenate values from two columns',
    columns=2,
    collabels=['Left Column', 'Right Column'],
    parameters=[String(name='delim', label='Delimiter', default=':')]
)
def concat_columns(value1, value2, delim):
    """Join the two column values into one string separated by delim."""
    pieces = (value1, delim, value2)
    return '{}{}{}'.format(*pieces)


# +
# This is an example for a user-defined function that operates
# on a single input column but generates a pair of values for
# two output columns. The number of generated output columns is
# declared via the 'outputs' parameter at registration time.

from openclean.engine.object.function import String


@db.register.eval(
    name='split',
    label='Split Column',
    description='Split values from one column',
    columns=1,
    collabels=['Input Column'],
    outputs=2,
    parameters=[String(name='delim', label='Split delimiter', default=':')]
)
def split_column(value, delim):
    """Split a column value at the first occurrence of the delimiter.

    Returns a (head, tail) pair; tail is None when the delimiter does
    not occur in the value.
    """
    try:
        cut = value.index(delim)
    except ValueError:
        # Delimiter absent: keep the full value, pad second output with None.
        return value, None
    return value[:cut], value[cut + len(delim):]


# +
# Print serialization of the function library that will be made
# available to the Spreadsheet view.
import json

library_doc = json.dumps(db.library_dict(), indent=4)
print(library_doc)
# -

df.head()

# +
# Apply the concat function to update column 'Locality' with the
# concatenated values from column 'Locality' and 'FIPS'.
#
# Note that the 'columns' argument of the update function refers
# to the column(s) that are being updated. If the updated column
# is different from the input columns (like in this example) the
# input column names have to be specified using the 'sources' parameter.
# Additional arguments that are passed evaluated update function
# (i.e., in this case 'delim') are specified as a dictionary via
# the 'args' parameter.
# Update 'Locality' in place with 'Locality' + '-' + 'FIPS'.
df = db\
    .dataset('covid-cases')\
    .update(
        columns='Locality',  # Column that is being updated
        func=db.library.functions().get(name='concat'),  # Function that generates updated values
        args={'delim': '-'},  # Parameters (in addition to the input columns) for the update function
        sources=['Locality', 'FIPS']  # Names of input columns (if different from output column)
    )
# -

df.head()

# +
# Insert a new column from multiple input columns.
df = db\
    .dataset('covid-cases')\
    .insert(
        names='VDH Health District-FIPS',  # Column that is being inserted
        values=db.library.functions().get(name='concat'),  # Function that generates column values
        args={'delim': '/'},  # Parameters (in addition to the input columns) for the column values function
        sources=['VDH Health District', 'FIPS']  # Names of input columns (if different from output column)
    )
# -

df.head()

# +
# Split VDH Health District on first occurrence of a blank space
# character. Insert the resulting values as two new columns.
df = db\
    .dataset('covid-cases')\
    .insert(
        names=['VDH Health', 'District'],  # Column that is being inserted
        values=db.library.functions().get(name='split'),  # Function that generates column values
        args={'delim': ' '},  # Parameters (in addition to the input columns) for the column values function
        sources=['VDH Health District']  # Names of input columns (if different from output column)
    )
# -

df.head()
examples/notebooks/Function Signature Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # What else is in an evolved name? Exploring Evolvable Specificity with SignalGP # %matplotlib inline import os import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from scipy.stats import spearmanr data_dir = "../data/" mape_data_fpath = os.path.join(data_dir, "mape.csv") evo_dom_data_fpath = os.path.join(data_dir, "evo_dom.csv") # Load data mape_df = pd.read_csv(mape_data_fpath, dtype={"run_id":str,"agent_id":str,"update":int,"distraction_sigs":bool,"fitness":float,"fun_cnt":int,"fun_used":float,"inst_entropy":float,"sim_thresh":float}) evo_df = pd.read_csv(evo_dom_data_fpath, dtype={"run_id":str,"sim_thresh":float,"distraction_sigs":bool,"update":int,"fitness":float}) # + # evo_df = evo_df[evo_df["distraction_sigs"] == True] # a = evo_df[evo_df["update"] == 10000] # a87 = a[a["sim_thresh"] == 0.875] # a75 = a[a["sim_thresh"] == 0.75] # + # a87 = a87[a87["fitness"] == 256] # a87 # + # a75 = a75[a75["fitness"] == 256] # len(a75) # - # ## MAP-Elites Visualization # Organize data a bit min_fitness = 256 # - Filter data by min fitness mape_df = mape_df[mape_df["fitness"] >= min_fitness] # - Separate by distraction signals vs. 
# no distraction signals (continuation of the section note above)
mape_df_DS0 = mape_df[mape_df["distraction_sigs"] == False]
mape_df_DS1 = mape_df[mape_df["distraction_sigs"] == True]

# +
# Set a few constants: axis limits for the similarity-threshold /
# functions-used plane and the font sizes shared by both joint plots.
min_sim_thresh = 0.0
max_sim_thresh = 1.0
min_fun_used = 0
max_fun_used = 32
xy_label_fs = 18
xy_tick_fs = 14
cmap = sns.cubehelix_palette(as_cmap=True)
# -

# ### The importance of inexactness
# KDE joint distribution of similarity threshold vs. number of functions used
# for solutions evolved WITHOUT distraction signals.
# NOTE(review): indentation within the `with` block was reconstructed (the
# original cell layout was lost); stat_func / shade / shade_lowest target an
# older seaborn release and are deprecated in current versions — confirm the
# pinned seaborn version before re-running.
fig = plt.figure(1)
fig.set_size_inches(7,7)
with sns.axes_style("white"):
    g = sns.jointplot(data=mape_df_DS0, x="sim_thresh", y="fun_used", kind="kde",
                      xlim=(min_sim_thresh, max_sim_thresh), ylim=(min_fun_used, max_fun_used),
                      stat_func=None, shade=True, cmap=cmap, shade_lowest=False, color="Grey")
    g.set_axis_labels("Similarity Threshold", "Functions Used")
    ax = g.ax_joint
    ax.xaxis.label.set_fontsize(xy_label_fs)
    ax.yaxis.label.set_fontsize(xy_label_fs)
    for tick in ax.get_xticklabels():
        tick.set_fontsize(xy_tick_fs)
    for tick in ax.get_yticklabels():
        tick.set_fontsize(xy_tick_fs)
    plt.savefig("mape_DS0.png", bbox_inches='tight')
    plt.savefig("mape_DS0.pdf", format='pdf', bbox_inches='tight')

# ### The value of not listening
# Same joint plot for solutions evolved WITH distraction signals.
fig = plt.figure(1)
fig.set_size_inches(7,7)
with sns.axes_style("white"):
    g = sns.jointplot(data=mape_df_DS1, x="sim_thresh", y="fun_used", kind="kde",
                      xlim=(min_sim_thresh, max_sim_thresh), ylim=(min_fun_used, max_fun_used),
                      stat_func=None, shade=True, cmap=cmap, shade_lowest=False, color="Grey")
    g.set_axis_labels("Similarity Threshold", "Functions Used")
    ax = g.ax_joint
    ax.xaxis.label.set_fontsize(xy_label_fs)
    ax.yaxis.label.set_fontsize(xy_label_fs)
    for tick in ax.get_xticklabels():
        tick.set_fontsize(xy_tick_fs)
    for tick in ax.get_yticklabels():
        tick.set_fontsize(xy_tick_fs)
    plt.savefig("mape_DS1.png", bbox_inches='tight')
    plt.savefig("mape_DS1.pdf", format='pdf', bbox_inches='tight')

# ## Dominant performance during evolution run

# Organize data a bit
# - Separate by distraction signals vs.
# no distraction signals (continuation of the section note above)
evo_df_DS0 = evo_df[evo_df["distraction_sigs"] == False]
evo_df_DS1 = evo_df[evo_df["distraction_sigs"] == True]

# +
# Some constants: similarity-threshold tick labels (as percentages), fitness
# axis bounds, and font sizes shared by the box/swarm plot figures below.
labels = ["0.0%", "12.5%", "25.0%", "37.5%", "50.0%", "62.5%", "75.0%", "87.5%", "100.0%"]
min_sim_thresh = 0.0
max_sim_thresh = 1.0
min_fitness = 0.0
max_fitness = 256.0
x_tick_fs = 18
y_tick_fs = 18
y_label_fs = 24
x_label_fs = 24
# -

# ### Importance of inexactness
# Dominant fitness by similarity threshold (no distraction signals),
# side-by-side at generation 1,000 (panel A) and 10,000 (panel B).
# NOTE(review): indentation within the `with` block was reconstructed;
# the original cell layout was lost.

# +
fig = plt.figure(1)
gridspec.GridSpec(1,12)
fig.set_size_inches(21, 7)
with sns.axes_style("darkgrid"):
    ax1 = plt.subplot2grid((1,12), (0,0), colspan=6)
    p1 = sns.boxplot(x="sim_thresh", y="fitness", data=evo_df_DS0[evo_df_DS0["update"] == 1000], ax=ax1)
    sns.swarmplot(x="sim_thresh", y="fitness", data=evo_df_DS0[evo_df_DS0["update"] == 1000], ax=ax1, color=".1")
    ax1.set_xticklabels(labels)
    ax1.set_xlabel("(A) Generation 1,000")
    ax1.set_ylabel("Fitness")
    ax1.set_ylim(min_fitness, max_fitness + 10)
    for tick in ax1.get_yticklabels():
        tick.set_fontsize(y_tick_fs)
    for tick in ax1.get_xticklabels():
        tick.set_fontsize(x_tick_fs)
    ax1.yaxis.label.set_fontsize(y_label_fs)
    ax1.xaxis.label.set_fontsize(x_label_fs)
    ax1.xaxis.set_label_position('top')

    ax2 = plt.subplot2grid((1,12), (0,6), colspan=6)
    p2 = sns.boxplot(x="sim_thresh", y="fitness", data=evo_df_DS0[evo_df_DS0["update"] == 10000], ax=ax2)
    sns.swarmplot(x="sim_thresh", y="fitness", data=evo_df_DS0[evo_df_DS0["update"] == 10000], ax=ax2, color=".1")
    ax2.set_xticklabels(labels)
    ax2.set_xlabel("(B) Generation 10,000")
    ax2.set_ylabel("")
    plt.setp(ax2.get_yticklabels(), visible = False)
    ax2.set_ylim(min_fitness, max_fitness + 10)
    for tick in ax2.get_yticklabels():
        tick.set_fontsize(y_tick_fs)
    for tick in ax2.get_xticklabels():
        tick.set_fontsize(x_tick_fs)
    ax2.yaxis.label.set_fontsize(y_label_fs)
    ax2.xaxis.label.set_fontsize(x_label_fs)
    ax2.xaxis.set_label_position('top')

    plt.savefig("evo_dom_DS0.png", bbox_inches='tight')
    plt.savefig("evo_dom_DS0.pdf", format='pdf', bbox_inches='tight')
# -

# ### Value
# of not listening (continuation of the section header above)
# Same dominant-fitness figure for the DISTRACTION-SIGNAL condition.
# NOTE(review): indentation within the `with` block was reconstructed;
# the original cell layout was lost.

# +
fig = plt.figure(1)
gridspec.GridSpec(1,12)
fig.set_size_inches(21, 7)
with sns.axes_style("darkgrid"):
    ax1 = plt.subplot2grid((1,12), (0,0), colspan=6)
    p1 = sns.boxplot(x="sim_thresh", y="fitness", data=evo_df_DS1[evo_df_DS1["update"] == 1000], ax=ax1)
    sns.swarmplot(x="sim_thresh", y="fitness", data=evo_df_DS1[evo_df_DS1["update"] == 1000], ax=ax1, color=".1")
    ax1.set_xticklabels(labels)
    ax1.set_xlabel("(A) Generation 1,000")
    ax1.set_ylabel("Fitness")
    ax1.set_ylim(min_fitness, max_fitness + 10)
    for tick in ax1.get_yticklabels():
        tick.set_fontsize(y_tick_fs)
    for tick in ax1.get_xticklabels():
        tick.set_fontsize(x_tick_fs)
    ax1.yaxis.label.set_fontsize(y_label_fs)
    ax1.xaxis.label.set_fontsize(x_label_fs)
    ax1.xaxis.set_label_position('top')

    ax2 = plt.subplot2grid((1,12), (0,6), colspan=6)
    p2 = sns.boxplot(x="sim_thresh", y="fitness", data=evo_df_DS1[evo_df_DS1["update"] == 10000], ax=ax2)
    sns.swarmplot(x="sim_thresh", y="fitness", data=evo_df_DS1[evo_df_DS1["update"] == 10000], ax=ax2, color=".1")
    ax2.set_xticklabels(labels)
    ax2.set_xlabel("(B) Generation 10,000")
    ax2.set_ylabel("")
    plt.setp(ax2.get_yticklabels(), visible = False)
    ax2.set_ylim(min_fitness, max_fitness + 10)
    for tick in ax2.get_yticklabels():
        tick.set_fontsize(y_tick_fs)
    for tick in ax2.get_xticklabels():
        tick.set_fontsize(x_tick_fs)
    ax2.yaxis.label.set_fontsize(y_label_fs)
    ax2.xaxis.label.set_fontsize(x_label_fs)
    ax2.xaxis.set_label_position('top')

    plt.savefig("evo_dom_DS1.png", bbox_inches='tight')
    plt.savefig("evo_dom_DS1.pdf", format='pdf', bbox_inches='tight')
# -
analysis/GPTP-Exploring-Evolvable-Specificity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # test 101 # ## laws of motion # ### law of inertia # $ y= mx + c$ # # $ a = \frac {F}{M}$ # Equation: # # $x^2 + bx+c=0$ # Newton's gravitational force equation : # $ F= \frac{G m_1 m_2}{r^2}$ # Coulomb Interaction: # $ F= \frac{k q_1 q_2}{r^2}$ # Lennard-Jones Potential: # ![image](home/Github/lennard-jones_potential.png) # ![image](~/Github/lennard-jones_potential.png) # ![image](lennard-jones_potential.png)
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linear Models - Part 2 # # In the previous chapter, we learned the concepts behind linear and logistic regression. These concepts included: # # 1. continuity of linear models with using the mean as an estimate. # 2. interpretation of the coefficients of linear models (linear regression and logistic regression) with numeric and binary categorical features as well as simple interaction terms. # 3. application of the Bootstrap to linear models. # # In this chapter, we're going to delve more deeply into linear models including: # # 1. how to make estimates and evaluate them. # 2. evaluating *residuals* (short for "residual error"). # 3. extending the models to multiple features of both kinds and how to build models using domain knowledge and metaheuristics. # 4. transforming variables to improve performance and or interpretation of the model. # # We will defer a full discussion of model evaluation until the next chapter. # # Most of this discussion is taken from <NAME> and <NAME>'s, *Data Analysis Using Regression and Multi-Level/Hierarchical Models*. # + [markdown] nbsphinx-toctree={} # 1. [Building Linear Models](building.ipynb) # 2. [Residuals](residuals.ipynb) # 3. [Transformations](transformations.ipynb) # 4. [Example](example.ipynb) # 5. [Conclusion](conclusion2.ipynb)
fundamentals_2018.9/linear/part2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py3] # language: python # name: conda-env-py3-py # --- # # Deep Learning & Art: Neural Style Transfer # # Welcome to the second assignment of this week. In this assignment, you will learn about Neural Style Transfer. This algorithm was created by Gatys et al. (2015) (https://arxiv.org/abs/1508.06576). # # **In this assignment, you will:** # - Implement the neural style transfer algorithm # - Generate novel artistic images using your algorithm # # Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values! # + import os import sys import scipy.io import scipy.misc import matplotlib.pyplot as plt from matplotlib.pyplot import imshow from PIL import Image from nst_utils import * import numpy as np import tensorflow as tf # %matplotlib inline # - # ## 1 - Problem Statement # # Neural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely, a "content" image (C) and a "style" image (S), to create a "generated" image (G). The generated image G combines the "content" of the image C with the "style" of image S. # # In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by <NAME>, a leader of the impressionist movement (style image S). # <img src="images/louvre_generated.png" style="width:750px;height:200px;"> # # Let's see how you can do this. # ## 2 - Transfer Learning # # Neural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning. 
# # Following the original NST paper (https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the earlier layers) and high level features (at the deeper layers). # # Run the following code to load parameters from the VGG model. This may take a few seconds. model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat") print(model) # The model is stored in a python dictionary where each variable name is the key and the corresponding value is a tensor containing that variable's value. To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this: # ```python # model["input"].assign(image) # ``` # This assigns the image as an input to the model. After this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows: # ```python # sess.run(model["conv4_2"]) # ``` # ## 3 - Neural Style Transfer # # We will build the NST algorithm in three steps: # # - Build the content cost function $J_{content}(C,G)$ # - Build the style cost function $J_{style}(S,G)$ # - Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$. # # ### 3.1 - Computing the content cost # # In our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre. 
content_image = scipy.misc.imread("images/louvre.jpg") imshow(content_image) # The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds. # # **3.1.1 - How do you ensure the generated image G matches the content of the image C?** # # As we saw in lecture, the earlier (shallower) layers of a ConvNet tend to detect lower-level features such as edges and simple textures, and the later (deeper) layers tend to detect higher-level features such as more complex textures as well as object classes. # # We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. In practice, you'll get the most visually pleasing results if you choose a layer in the middle of the network--neither too shallow nor too deep. (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.) # # So, suppose you have picked one particular hidden layer to use. Now, set the image C as the input to the pretrained VGG network, and run forward propagation. Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be a $n_H \times n_W \times n_C$ tensor. Repeat this process with the image G: Set G as the input, and run forward propagation. Let $a^{(G)}$ be the corresponding hidden layer activation. We will define the content cost function as: # # $$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$ # # Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. 
# For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the volumes corresponding to a hidden layer's
# activations. In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll
# these 3D volumes into a 2D matrix, as shown below. (Technically this unrolling step isn't needed to
# compute $J_{content}$, but it will be good practice for when you do need to carry out a similar
# operation later for computing the style cost $J_{style}$.)
#
# <img src="images/NST_LOSS.png" style="width:800px;height:400px;">
#
# **Exercise:** Compute the "content cost" using TensorFlow.
#
# **Instructions**: The 3 steps to implement this function are:
# 1. Retrieve dimensions from a_G:
#     - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
# 2. Unroll a_C and a_G as explained in the picture above
#     - If you are stuck, take a look at [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape).
# 3. Compute the content cost:
#     - If you are stuck, take a look at [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract).

# +
# GRADED FUNCTION: compute_content_cost

def compute_content_cost(a_C, a_G):
    """
    Computes the content cost

    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G

    Returns:
    J_content -- scalar that you compute using equation 1 above.
    """
    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line).
    # (Was `m, n_H, n_W, n_C = None`, which raised at call time.)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()

    # Reshape a_C and a_G (≈2 lines) from (1, n_H, n_W, n_C) into (n_C, n_H*n_W)
    a_C_unrolled = tf.transpose(tf.reshape(a_C, [n_H * n_W, n_C]))
    a_G_unrolled = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))

    # compute the cost with tensorflow (≈1 line): sum of squared differences,
    # normalized by 1 / (4 * n_H * n_W * n_C), as in equation (1)
    J_content = tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))) / (4 * n_H * n_W * n_C)
    ### END CODE HERE ###

    return J_content


# +
tf.reset_default_graph()

with tf.Session() as test:
    tf.set_random_seed(1)
    a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_content = compute_content_cost(a_C, a_G)
    print("J_content = " + str(J_content.eval()))
# -

# **Expected Output**:
#
# <table>
#     <tr>
#         <td>
#             **J_content**
#         </td>
#         <td>
#             6.76559
#         </td>
#     </tr>
# </table>

# <font color='blue'>
# **What you should remember**:
# - The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are.
# - When we minimize the content cost later, this will help make sure $G$ has similar content as $C$.

# ### 3.2 - Computing the style cost
#
# For our running example, we will use the following style image:
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image)

# This painting was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.
#
# Let's see how you can now define a "style" cost function $J_{style}(S,G)$.

# ### 3.2.1 - Style matrix
#
# The style matrix is also called a "Gram matrix." In linear algebra, the Gram matrix G of a set of vectors
# $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are
# ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. In other words, $G_{ij}$ compares how
# similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product,
# and thus for $G_{ij}$ to be large.
#
# Note that there is an unfortunate collision in the variable names used here.
We are following common terminology used in the literature, but $G$ is used to denote the Style matrix (or Gram matrix) as well as to denote the generated image $G$. We will try to make sure which $G$ we are referring to is always clear from the context. # # In NST, you can compute the Style matrix by multiplying the "unrolled" filter matrix with their transpose: # # <img src="images/NST_GM.png" style="width:900px;height:300px;"> # # The result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters. The value $G_{ij}$ measures how similar the activations of filter $i$ are to the activations of filter $j$. # # One important part of the gram matrix is that the diagonal elements such as $G_{ii}$ also measures how active filter $i$ is. For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{ii}$ measures how common vertical textures are in the image as a whole: If $G_{ii}$ is large, this means that the image has a lot of vertical texture. # # By capturing the prevalence of different types of features ($G_{ii}$), as well as how much different features occur together ($G_{ij}$), the Style matrix $G$ measures the style of an image. # # **Exercise**: # Using TensorFlow, implement a function that computes the Gram matrix of a matrix A. The formula is: The gram matrix of A is $G_A = AA^T$. If you are stuck, take a look at [Hint 1](https://www.tensorflow.org/api_docs/python/tf/matmul) and [Hint 2](https://www.tensorflow.org/api_docs/python/tf/transpose). 
# +
# GRADED FUNCTION: gram_matrix

def gram_matrix(A):
    """
    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    GA -- Gram matrix of A, of shape (n_C, n_C)
    """
    ### START CODE HERE ### (≈1 line)
    # G_A = A A^T: entry (i, j) is the dot product of filter i's and
    # filter j's unrolled activations. (Was `GA = None`.)
    GA = tf.matmul(A, tf.transpose(A))
    ### END CODE HERE ###

    return GA


# +
tf.reset_default_graph()

with tf.Session() as test:
    tf.set_random_seed(1)
    A = tf.random_normal([3, 2*1], mean=1, stddev=4)
    GA = gram_matrix(A)
    print("GA = " + str(GA.eval()))
# -

# **Expected Output**:
#
# <table>
#     <tr>
#         <td>
#             **GA**
#         </td>
#         <td>
#             [[ 6.42230511 -4.42912197 -2.09668207] <br>
#             [ -4.42912197 19.46583748 19.56387138] <br>
#             [ -2.09668207 19.56387138 20.6864624 ]]
#         </td>
#     </tr>
# </table>

# ### 3.2.2 - Style cost
# After generating the Style matrix (Gram matrix), your goal will be to minimize the distance between the
# Gram matrix of the "style" image S and that of the "generated" image G. For now, we are using only a
# single hidden layer $a^{[l]}$, and the corresponding style cost for this layer is defined as:
#
# $$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{ij} - G^{(G)}_{ij})^2\tag{2} $$
#
# where $G^{(S)}$ and $G^{(G)}$ are respectively the Gram matrices of the "style" image and the "generated"
# image, computed using the hidden layer activations for a particular hidden layer in the network.
#
# **Exercise**: Compute the style cost for a single layer.
#
# **Instructions**: The 3 steps to implement this function are:
# 1. Retrieve dimensions from the hidden layer activations a_G:
#     - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
# 2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above.
#     - You may find [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape) useful.
# 3. Compute the Style matrix of the images S and G.
# (Use the function you had previously written.)
# 4. Compute the Style cost:
#     - You may find [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract) useful.

# +
# GRADED FUNCTION: compute_layer_style_cost

def compute_layer_style_cost(a_S, a_G):
    """
    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G

    Returns:
    J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
    """
    ### START CODE HERE ###
    # Retrieve dimensions from a_G (≈1 line).
    # (The `= None` template placeholders below are replaced with the
    # implementation described in the instructions above.)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()

    # Reshape the images to have them of shape (n_H*n_W, n_C), then transpose
    # to (n_C, n_H*n_W) so each row holds one filter's unrolled activations (≈2 lines)
    a_S = tf.transpose(tf.reshape(a_S, [n_H * n_W, n_C]))
    a_G = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))

    # Computing gram_matrices for both images S and G (≈2 lines)
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)

    # Computing the loss (≈1 line): squared Frobenius distance between the
    # Gram matrices, normalized as in equation (2)
    J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS, GG))) / (4 * (n_C ** 2) * ((n_H * n_W) ** 2))
    ### END CODE HERE ###

    return J_style_layer


# +
tf.reset_default_graph()

with tf.Session() as test:
    tf.set_random_seed(1)
    a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_style_layer = compute_layer_style_cost(a_S, a_G)
    print("J_style_layer = " + str(J_style_layer.eval()))
# -

# **Expected Output**:
#
# <table>
#     <tr>
#         <td>
#             **J_style_layer**
#         </td>
#         <td>
#             9.19028
#         </td>
#     </tr>
# </table>

# ### 3.2.3 Style Weights
#
# So far you have captured the style from only one layer. We'll get better results if we "merge" style
# costs from several different layers. After completing this exercise, feel free to come back and
# experiment with different weights to see how it changes the generated image $G$.
# But for now, this is a pretty reasonable default:

# Style layers: pairs of (layer name in the VGG model dict, weight
# $\lambda^{[l]}$ applied when summing the per-layer style costs).
# Weights are uniform (0.2) across these five conv layers.
STYLE_LAYERS = [
    ('conv1_1', 0.2),
    ('conv2_1', 0.2),
    ('conv3_1', 0.2),
    ('conv4_1', 0.2),
    ('conv5_1', 0.2)]

# You can combine the style costs for different layers as follows:
#
# $$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$
#
# where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`.
#
# We've implemented a compute_style_cost(...) function. It simply calls your `compute_layer_style_cost(...)`
# several times, and weights their results using the values in `STYLE_LAYERS`. Read over it to make sure
# you understand what it's doing.
#
# <!--
# 2. Loop over (layer_name, coeff) from STYLE_LAYERS:
#     a. Select the output tensor of the current layer. As an example, to call the tensor from the "conv1_1" layer you would do: out = model["conv1_1"]
#     b. Get the style of the style image from the current layer by running the session on the tensor "out"
#     c. Get a tensor representing the style of the generated image from the current layer. It is just "out".
#     d. Now that you have both styles. Use the function you've implemented above to compute the style_cost for the current layer
#     e. Add (style_cost x coeff) of the current layer to overall style cost (J_style)
# 3. Return J_style, which should now be the sum of the (style_cost x coeff) for each layer.
# !--> # def compute_style_cost(model, STYLE_LAYERS): """ Computes the overall style cost from several chosen layers Arguments: model -- our tensorflow model STYLE_LAYERS -- A python list containing: - the names of the layers we would like to extract style from - a coefficient for each of them Returns: J_style -- tensor representing a scalar value, style cost defined above by equation (2) """ # initialize the overall style cost J_style = 0 for layer_name, coeff in STYLE_LAYERS: # Select the output tensor of the currently selected layer out = model[layer_name] # Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out a_S = sess.run(out) # Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name] # and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that # when we run the session, this will be the activations drawn from the appropriate layer, with G as input. a_G = out # Compute style_cost for the current layer J_style_layer = compute_layer_style_cost(a_S, a_G) # Add coeff * J_style_layer of this layer to overall style cost J_style += coeff * J_style_layer return J_style # **Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below. # # <!-- # How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. 
# In contrast, if you want the generated image to strongly follow the style image, try choosing smaller
# weights for deeper layers and larger weights for the first layers
# !-->
#
#
# <font color='blue'>
# **What you should remember**:
# - The style of an image can be represented using the Gram matrix of a hidden layer's activations.
#   However, we get even better results combining this representation from multiple different layers.
#   This is in contrast to the content representation, where usually using just a single hidden layer
#   is sufficient.
# - Minimizing the style cost will cause the image $G$ to follow the style of the image $S$.
# </font color='blue'>
#
#
# ### 3.3 - Defining the total cost to optimize

# Finally, let's create a cost function that minimizes both the style and the content cost. The formula is:
#
# $$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$
#
# **Exercise**: Implement the total cost function which includes both the content cost and the style cost.

# +
# GRADED FUNCTION: total_cost

def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Computes the total cost function

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost as defined by the formula above.
    """
    ### START CODE HERE ### (≈1 line)
    # Weighted sum of the two costs. (Was `J = None`, which made every
    # downstream use of the total cost fail.)
    J = alpha * J_content + beta * J_style
    ### END CODE HERE ###

    return J


# +
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(3)
    J_content = np.random.randn()
    J_style = np.random.randn()
    J = total_cost(J_content, J_style)
    print("J = " + str(J))
# -

# **Expected Output**:
#
# <table>
#     <tr>
#         <td>
#             **J**
#         </td>
#         <td>
#             35.34667875478276
#         </td>
#     </tr>
# </table>

# <font color='blue'>
# **What you should remember**:
# - The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$
# - $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style

# ## 4 - Solving the optimization problem

# Finally, let's put everything together to implement Neural Style Transfer!
#
#
# Here's what the program will have to do:
# <font color='purple'>
#
# 1. Create an Interactive Session
# 2. Load the content image
# 3. Load the style image
# 4. Randomly initialize the image to be generated
# 5. Load the VGG-19 model
# 6. Build the TensorFlow graph:
#     - Run the content image through the VGG model and compute the content cost
#     - Run the style image through the VGG model and compute the style cost
#     - Compute the total cost
#     - Define the optimizer and the learning rate
# 7. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated
#    image at every step.
#
# </font>

# Lets go through the individual steps in detail.

# You've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with
# respect to $G$. To do so, your program has to reset the graph and use an
# "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)". Unlike a
# regular session, the "Interactive Session" installs itself as the default session to build a graph.
# This allows you to run variables without constantly needing to refer to the session object, which
# simplifies the code.
# # Lets start the interactive session. # + # Reset the graph tf.reset_default_graph() # Start interactive session sess = tf.InteractiveSession() # - # Let's load, reshape, and normalize our "content" image (the Louvre museum picture): content_image = scipy.misc.imread("images/louvre_small.jpg") content_image = reshape_and_normalize_image(content_image) # Let's load, reshape and normalize our "style" image (<NAME>'s painting): style_image = scipy.misc.imread("images/monet.jpg") style_image = reshape_and_normalize_image(style_image) # Now, we initialize the "generated" image as a noisy image created from the content_image. By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. (Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.) generated_image = generate_noise_image(content_image) imshow(generated_image[0]) # Next, as explained in part (2), let's load the VGG16 model. model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat") # To get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following: # # 1. Assign the content image to be the input to the VGG model. # 2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2". # 3. Set a_G to be the tensor giving the hidden layer activation for the same layer. # 4. Compute the content cost using a_C and a_G. # + # Assign the content image to be the input of the VGG model. 
sess.run(model['input'].assign(content_image))

# Select the output tensor of layer conv4_2
out = model['conv4_2']

# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)

# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out

# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# -

# **Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.

# +
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))

# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
# -

# **Exercise**: Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. Use `alpha = 10` and `beta = 40`.

### START CODE HERE ### (1 line)
# Weighted combination of content and style costs, as requested by the exercise.
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
### END CODE HERE ###

# You'd previously learned how to set up the Adam optimizer in TensorFlow. Lets do that here, using a learning rate of 2.0. [See reference](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)

# +
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)

# define train_step (1 line)
train_step = optimizer.minimize(J)
# -

# **Exercise**: Implement the model_nn() function which initializes the variables of the tensorflow graph, assigns the input image (initial generated image) as the input of the VGG16 model and runs the train_step for a large number of steps.
def model_nn(sess, input_image, num_iterations = 200):
    """
    Run the style-transfer optimization loop on the already-built graph.

    Arguments:
    sess -- TensorFlow (interactive) session in which the graph, cost J and
            train_step have already been defined
    input_image -- initial generated image, assigned as the model input
    num_iterations -- number of optimization steps to run

    Returns:
    generated_image -- the final generated image (current value of model['input'])
    """

    # Initialize global variables (you need to run the session on the initializer)
    ### START CODE HERE ### (1 line)
    sess.run(tf.global_variables_initializer())
    ### END CODE HERE ###

    # Run the noisy input image (initial generated image) through the model. Use assign().
    ### START CODE HERE ### (1 line)
    sess.run(model['input'].assign(input_image))
    ### END CODE HERE ###

    for i in range(num_iterations):

        # Run the session on the train_step to minimize the total cost
        ### START CODE HERE ### (1 line)
        sess.run(train_step)
        ### END CODE HERE ###

        # Compute the generated image by running the session on the current model['input']
        ### START CODE HERE ### (1 line)
        generated_image = sess.run(model['input'])
        ### END CODE HERE ###

        # Print every 20 iterations.
        if i%20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))

            # save current generated image in the "/output" directory
            save_image("output/" + str(i) + ".png", generated_image)

    # save last generated image
    save_image('output/generated_image.jpg', generated_image)

    return generated_image

# Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.

model_nn(sess, generated_image)

# **Expected Output**:
#
# <table>
#     <tr>
#         <td>
#             **Iteration 0 : **
#         </td>
#         <td>
#            total cost = 5.05035e+09 <br>
#            content cost = 7877.67 <br>
#            style cost = 1.26257e+08
#         </td>
#     </tr>
#
# </table>

# You're done! After running this, in the upper bar of the notebook click on "File" and then "Open". Go to the "/output" directory to see all the saved images. Open "generated_image" to see the generated image!
:)
#
# You should see something like the image presented below on the right:
#
# <img src="images/louvre_generated.png" style="width:800px;height:300px;">
#
# We didn't want you to wait too long to see an initial result, and so had set the hyperparameters accordingly. To get the best looking results, running the optimization algorithm longer (and perhaps with a smaller learning rate) might work better. After completing and submitting this assignment, we encourage you to come back and play more with this notebook, and see if you can generate even better looking images.

# Here are a few other examples:
#
# - The beautiful ruins of the ancient city of Persepolis (Iran) with the style of Van Gogh (The Starry Night)
# <img src="images/perspolis_vangogh.png" style="width:750px;height:300px;">
#
# - The tomb of Cyrus the great in Pasargadae with the style of a Ceramic Kashi from Ispahan.
# <img src="images/pasargad_kashi.png" style="width:750px;height:300px;">
#
# - A scientific study of a turbulent fluid with the style of an abstract blue fluid painting.
# <img src="images/circle_abstract.png" style="width:750px;height:300px;">

# ## 5 - Test with your own image (Optional/Ungraded)
# ## 5. 使用你自己的图片进行测试
# 要求图片是(255\*300),并且修改喂入的C和S的图片。
# 重新运行这个notebook里的代码,也可以重启kernel
#
# 你也可以调整超参数,哪一层能更好的代表风格呢?在STYLE_LAYERS里可以修改不同层占的比重,运行算法需要迭代多少次呢?num_iterations,关于风格代价和内容代价应该如何设置相对的占比呢?alpha/beta
#
# ## 6. 总结
# 现在你已经可以使用神经风格迁移来生成艺术图了,并且也是第一次建立模型来优化算法的输入内容的像素值而不再是超参数,深度学习有很多的模型,这只是其中的一种。
#
# <font color='blue'>
# What you should remember:
# - Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image
# - It uses representations (hidden layer activations) based on a pretrained ConvNet.
# - The content cost function is computed using one hidden layer's activations.
# - The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.
# - Optimizing the total cost function results in synthesizing new images.

# ## 6 - Conclusion
#
# Great job on completing this assignment! You are now able to use Neural Style Transfer to generate artistic images. This is also your first time building a model in which the optimization algorithm updates the pixel values rather than the neural network's parameters. Deep learning has many different types of models and this is only one of them!
#
# <font color='blue'>
# What you should remember:
# - Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image
# - It uses representations (hidden layer activations) based on a pretrained ConvNet.
# - The content cost function is computed using one hidden layer's activations.
# - The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.
# - Optimizing the total cost function results in synthesizing new images.
#
#
#
# This was the final programming exercise of this course. Congratulations--you've finished all the programming exercises of this course on Convolutional Networks! We hope to also see you in Course 5, on Sequence models!
#
# ### References:
#
# The Neural Style Transfer algorithm was due to Gatys et al. (2015). Harish Narayanan and Github user "log0" also have highly readable write-ups from which we drew inspiration. The pre-trained network used in this implementation is a VGG network, which is due to Simonyan and Zisserman (2015). Pre-trained weights were from the work of the MathConvNet team.
#
# - Leon A. Gatys, Alexander S. Ecker, Matthias Bethge, (2015). A Neural Algorithm of Artistic Style (https://arxiv.org/abs/1508.06576)
# - Harish Narayanan, Convolutional neural networks for artistic style transfer. https://harishnarayanan.org/writing/artistic-style-transfer/
# - Log0, TensorFlow Implementation of "A Neural Algorithm of Artistic Style".
http://www.chioka.in/tensorflow-implementation-neural-algorithm-of-artistic-style
# - Karen Simonyan and Andrew Zisserman (2015). Very deep convolutional networks for large-scale image recognition (https://arxiv.org/pdf/1409.1556.pdf)
# - MatConvNet. http://www.vlfeat.org/matconvnet/pretrained/
#
course4/week4/Neural Style Transfer/Art Generation with Neural Style Transfer - v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] Collapsed="false" # <img src='./img/EU-Copernicus-EUM_3Logos.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='50%'></img> # + [markdown] Collapsed="false" # <br> # + [markdown] Collapsed="false" # # LTPy functions # - # This notebook lists all `functions` that are defined and used throughout the `LTPy course`. # The following functions are listed: # # **[Data loading and re-shaping functions](#load_reshape)** # * [generate_xr_from_1D_vec](#generate_xr_from_1D_vec) # * [load_l2_data_xr](#load_l2_data_xr) # * [generate_geographical_subset](#generate_geographical_subset) # * [generate_masked_array](#generate_masked_array) # * [load_masked_l2_da](#load_masked_l2_da) # * [select_channels_for_rgb](#rgb_channels) # * [normalize](#normalize) # * [slstr_frp_gridding](#slstr_frp_gridding) # * [df_subset](#df_subset) # # **[Data visualization functions](#visualization)** # * [visualize_scatter](#visualize_scatter) # * [visualize_pcolormesh](#visualize_pcolormesh) # * [visualize_s3_pcolormesh](#visualize_s3_pcolormesh) # * [visualize_s3_frp](#visualize_s3_frp) # * [viusalize_s3_aod](#visualize_s3_aod) # <hr> # #### Load required libraries # + Collapsed="false" import os from matplotlib import pyplot as plt import xarray as xr from netCDF4 import Dataset import numpy as np import glob from matplotlib import pyplot as plt import matplotlib.colors from matplotlib.colors import LogNorm import cartopy.crs as ccrs from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import cartopy.feature as cfeature import matplotlib.cm as cm import warnings warnings.simplefilter(action = "ignore", category = RuntimeWarning) warnings.simplefilter(action = "ignore", category = FutureWarning) # - # <hr> # ## <a id="load_reshape"></a>Data 
loading and re-shaping functions # ### <a id='generate_xr_from_1D_vec'></a>`generate_xr_from_1D_vec` def generate_xr_from_1D_vec(file, lat_path, lon_path, variable, parameter_name, longname, no_of_dims, unit): """ Takes a netCDF4.Dataset or xarray.DataArray object and returns a xarray.DataArray object with latitude / longitude information as coordinate information Parameters: file(netCDF4 data file or xarray.Dataset): AC SAF or IASI Level 2 data file, loaded a netCDF4.Dataset or xarray.DataArray lat_path(str): internal path of the data file to the latitude information, e.g. 'GEOLOCATION/LatitudeCentre' lon_path(str): internal path of the data file to the longitude information, e.g. 'GEOLOCATION/LongitudeCentre' variable(array): extracted variable of interested parameter_name(str): parameter name, preferably extracted from the data file longname(str): Long name of the parameter, preferably extracted from the data file no_of_dims(int): Define the number of dimensions of your input array unit(str): Unit of the parameter, preferably extracted from the data file Returns: 1 or 2-dimensional (depending on the given number of dimensions) xarray.DataArray with latitude / longitude information as coordinate information """ latitude = file[lat_path] longitude = file[lon_path] param = variable if (no_of_dims==1): param_da = xr.DataArray( param[:], dims=('ground_pixel'), coords={ 'latitude': ('ground_pixel', latitude[:]), 'longitude': ('ground_pixel', longitude[:]) }, attrs={'long_name': longname, 'units': unit}, name=parameter_name ) else: param_da = xr.DataArray( param[:], dims=["x","y"], coords={ 'latitude':(['x','y'],latitude[:]), 'longitude':(['x','y'],longitude[:]) }, attrs={'long_name': longname, 'units': unit}, name=parameter_name ) return param_da # + [markdown] Collapsed="false" # ### <a id='load_l2_data_xr'></a>`load_l2_data_xr` # - def load_l2_data_xr(directory, internal_filepath, parameter, lat_path, lon_path, no_of_dims, paramname, unit, longname): """ Loads a 
Metop-A/B Level 2 dataset in HDF format and returns a xarray.DataArray with all the ground pixels of all directory files. Uses function 'generate_xr_from_1D_vec' to generate the xarray.DataArray. Parameters: directory(str): directory where the HDF files are stored internal_filepath(str): internal path of the data file that is of interest, e.g. TOTAL_COLUMNS parameter(str): paramter that is of interest, e.g. NO2 lat_path(str): name of latitude variable lon_path(str): name of longitude variable no_of_dims(int): number of dimensions of input array paramname(str): name of parameter unit(str): unit of the parameter, preferably taken from the data file longname(str): longname of the parameter, preferably taken from the data file Returns: 1 or 2-dimensional xarray.DataArray with latitude / longitude information as coordinate information """ fileList = glob.glob(directory+'/*') datasets = [] for i in fileList: tmp=Dataset(i) param=tmp[internal_filepath+'/'+parameter] da_tmp= generate_xr_from_1D_vec(tmp,lat_path, lon_path, param, paramname, longname, no_of_dims, unit) if(no_of_dims==1): datasets.append(da_tmp) else: da_tmp_st = da_tmp.stack(ground_pixel=('x','y')) datasets.append(da_tmp_st) return xr.concat(datasets, dim='ground_pixel') # + [markdown] Collapsed="false" # ### <a id='generate_geographical_subset'></a>`generate_geographical_subset` # + Collapsed="false" def generate_geographical_subset(xarray, latmin, latmax, lonmin, lonmax, reassign=False): """ Generates a geographical subset of a xarray.DataArray and if kwarg reassign=True, shifts the longitude grid from a 0-360 to a -180 to 180 deg grid. Parameters: xarray(xarray.DataArray): a xarray DataArray with latitude and longitude coordinates latmin, latmax, lonmin, lonmax(int): lat/lon boundaries of the geographical subset reassign(boolean): default is False Returns: Geographical subset of a xarray.DataArray. 
""" if(reassign): xarray = xarray.assign_coords(longitude=(((xarray.longitude + 180) % 360) - 180)) return xarray.where((xarray.latitude < latmax) & (xarray.latitude > latmin) & (xarray.longitude < lonmax) & (xarray.longitude > lonmin),drop=True) # + [markdown] Collapsed="false" # ### <a id='generate_masked_array'></a>`generate_masked_array` # + Collapsed="false" def generate_masked_array(xarray, mask, threshold, operator, drop=True): """ Applies a mask (e.g. a cloud mask) onto a given xarray.DataArray, based on a given threshold and operator. Parameters: xarray(xarray DataArray): a three-dimensional xarray.DataArray object mask(xarray DataArray): 1-dimensional xarray.DataArray, e.g. cloud fraction values threshold(float): any number specifying the threshold operator(str): operator how to mask the array, e.g. '<', '>' or '!=' drop(boolean): default is True Returns: Masked xarray.DataArray with NaN values dropped, if kwarg drop equals True """ if(operator=='<'): cloud_mask = xr.where(mask < threshold, 1, 0) #Generate cloud mask with value 1 for the pixels we want to keep elif(operator=='!='): cloud_mask = xr.where(mask != threshold, 1, 0) elif(operator=='>'): cloud_mask = xr.where(mask > threshold, 1, 0) else: cloud_mask = xr.where(mask == threshold, 1, 0) xarray_masked = xr.where(cloud_mask ==1, xarray, np.nan) #Apply mask onto the DataArray xarray_masked.attrs = xarray.attrs #Set DataArray attributes if(drop): return xarray_masked[~np.isnan(xarray_masked)] #Return masked DataArray else: return xarray_masked # + [markdown] Collapsed="false" # ### <a id='load_masked_l2_da'></a>`load_masked_l2_da` # + Collapsed="false" def load_masked_l2_da(directory, internal_filepath, parameter, lat_path, lon_path, no_of_dims, paramname, longname, unit, threshold, operator): """ Loads a Metop-A/B Gome-2 Level 2 data and cloud fraction information and returns a masked xarray.DataArray. It combines the functions `load_l2_data_xr` and `generate_masked_array`. 
Parameters: directory(str): Path to directory with Level 2 data files. internal_filepath(str): Internal file path under which the parameters are strored, e.g. TOTAL_COLUMNS parameter(str): atmospheric parameter, e.g. NO2 lat_path(str): name of the latitude variable within the file lon_path(str): path to the longitude variable within the file no_of_dims(int): specify the number of dimensions, 1 or 2 paramname(str): parameter name longname(str): long name of the parameter that shall be used unit(str): unit of the parameter threshold(float): any number specifying the threshold operator(str): operator how to mask the xarray.DataArray, e.g. '<', '>' or '!=' Returns: Masked xarray.DataArray keeping NaN values (drop=False) """ da = load_l2_data_xr(directory, internal_filepath, parameter, lat_path, lon_path, no_of_dims, paramname, unit, longname) cloud_fraction = load_l2_data_xr(directory, 'CLOUD_PROPERTIES', 'CloudFraction', lat_path, lon_path, no_of_dims, 'CloudFraction', unit='-', longname='Cloud Fraction') return generate_masked_array(da, cloud_fraction, threshold, operator, drop=False) # - # ### <a id='rgb_channels'></a> `select_channels_for_rgb` def select_channels_for_rgb(xarray, red_channel, green_channel, blue_channel): """ Selects the channels / bands of a multi-dimensional xarray for red, green and blue composite based on Sentinel-3 OLCI Level 1B data. Parameters: xarray(xarray.Dataset): xarray.Dataset object that stores the different channels / bands. red_channel(str): Name of red channel to be selected green_channel(str): Name of green channel to be selected blue_channel(str): Name of blue channel to be selected Returns: Three xarray DataArray objects with selected channels / bands """ return xarray[red_channel], xarray[green_channel], xarray[blue_channel] # ## <a id='normalize'></a> `normalize` def normalize(array): """ Normalizes a numpy array / xarray.DataArray object to values between 0 and 1. 
Parameters: xarray(numpy array or xarray.DataArray): xarray.DataArray or numpy array object whose values should be normalized. Returns: xarray.DataArray with normalized values """ array_min, array_max = array.min(), array.max() return ((array - array_min)/(array_max - array_min)) # ### <a id='slstr_frp_gridding'></a>`slstr_frp_gridding` def slstr_frp_gridding(parameter_array, parameter, lat_min, lat_max, lon_min, lon_max, sampling_lat_FRP_grid, sampling_lon_FRP_grid, n_fire, lat_frp, lon_frp, **kwargs): """ Produces gridded data of Sentinel-3 SLSTR NRT Fire Radiative Power Data Parameters: parameter_array(xarray.DataArray): xarray.DataArray with extracted data variable of fire occurences parameter(str): NRT S3 FRP channel - either `mwir`, `swir` or `swir_nosaa` lat_min, lat_max, lon_min, lon_max(float): Floats of geographical bounding box sampling_lat_FRP_grid, sampling_long_FRP_grid(float): Float of grid cell size n_fire(int): Number of fire occurences lat_frp(xarray.DataArray): Latitude values of occurred fire events lon_frp(xarray.DataArray): Longitude values of occurred fire events **kwargs: additional keyword arguments to be added. Required for parameter `swir_nosaa`, where the function requires the xarray.DataArray with the SAA FLAG information. Returns: the gridded xarray.Data Array and latitude and longitude grid information """ n_lat = int( (np.float32(lat_max) - np.float32(lat_min)) / sampling_lat_FRP_grid ) + 1 # Number of rows per latitude sampling n_lon = int( (np.float32(lon_max) - np.float32(lon_min)) / sampling_lon_FRP_grid ) + 1 # Number of lines per longitude sampling slstr_frp_gridded = np.zeros( [n_lat, n_lon], dtype='float32' ) - 9999. lat_grid = np.zeros( [n_lat, n_lon], dtype='float32' ) - 9999. lon_grid = np.zeros( [n_lat, n_lon], dtype='float32' ) - 9999. 
if (n_fire >= 0): # Loop on i_lat: begins for i_lat in range(n_lat): # Loop on i_lon: begins for i_lon in range(n_lon): lat_grid[i_lat, i_lon] = lat_min + np.float32(i_lat) * sampling_lat_FRP_grid + sampling_lat_FRP_grid / 2. lon_grid[i_lat, i_lon] = lon_min + np.float32(i_lon) * sampling_lon_FRP_grid + sampling_lon_FRP_grid / 2. # Gridded SLSTR FRP MWIR Night - All days if(parameter=='swir_nosaa'): FLAG_FRP_SWIR_SAA_nc = kwargs.get('flag', None) mask_grid = np.where( (lat_frp[:] >= lat_min + np.float32(i_lat) * sampling_lat_FRP_grid) & (lat_frp[:] < lat_min + np.float32(i_lat+1) * sampling_lat_FRP_grid) & (lon_frp[:] >= lon_min + np.float32(i_lon) * sampling_lon_FRP_grid) & (lon_frp[:] < lon_min + np.float32(i_lon+1) * sampling_lon_FRP_grid) & (parameter_array[:] != -1.) & (FLAG_FRP_SWIR_SAA_nc[:] == 0), False, True) else: mask_grid = np.where( (lat_frp[:] >= lat_min + np.float32(i_lat) * sampling_lat_FRP_grid) & (lat_frp[:] < lat_min + np.float32(i_lat+1) * sampling_lat_FRP_grid) & (lon_frp[:] >= lon_min + np.float32(i_lon) * sampling_lon_FRP_grid) & (lon_frp[:] < lon_min + np.float32(i_lon+1) * sampling_lon_FRP_grid) & (parameter_array[:] != -1.), False, True) masked_slstr_frp_grid = np.ma.array(parameter_array[:], mask=mask_grid) if len(masked_slstr_frp_grid.compressed()) != 0: slstr_frp_gridded[i_lat, i_lon] = np.sum(masked_slstr_frp_grid.compressed()) return slstr_frp_gridded, lat_grid, lon_grid # ### <a id='df_subset'></a>`df_subset` def df_subset(df,low_bound1, high_bound1, low_bound2, high_bound2): """ Creates a subset of a pandas.DataFrame object with time-series information Parameters: df(pandas.DataFrame): pandas.DataFrame with time-series information low_bound1(str): dateTime string, e.g. '2018-11-30' high_bound1(str): dateTime string, e.g. '2018-12-01' low_bound2(str): dateTime string, e.g. '2019-12-30' high_bound2(str): dateTime string, e.g. 
'2020-01-15' Returns: the subsetted time-series as pandas.DataFrame object """ return df[(df.index>low_bound1) & (df.index<high_bound1)], df[(df.index>low_bound2) & (df.index<high_bound2)] # <hr> # ## <a id="visualization"></a>Data visualization functions # + [markdown] Collapsed="false" # ### <a id='visualize_scatter'></a>`visualize_scatter` # + Collapsed="false" def visualize_scatter(xr_dataarray, conversion_factor, projection, vmin, vmax, point_size, color_scale, unit, title): """ Visualizes a xarray.DataArray in a given projection using matplotlib's scatter function. Parameters: xr_dataarray(xarray.DataArray): a one-dimensional xarray DataArray object with latitude and longitude information as coordinates conversion_factor(int): any number to convert the DataArray values projection(str): choose one of cartopy's projection, e.g. ccrs.PlateCarree() vmin(int): minimum number on visualisation legend vmax(int): maximum number on visualisation legend point_size(int): size of marker, e.g. 5 color_scale(str): string taken from matplotlib's color ramp reference unit(str): define the unit to be added to the color bar title(str): define title of the plot """ fig, ax = plt.subplots(figsize=(40, 10)) ax = plt.axes(projection=projection) ax.coastlines() if (projection==ccrs.PlateCarree()): gl = ax.gridlines(draw_labels=True, linestyle='--') gl.top_labels=False gl.right_labels=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':14} gl.ylabel_style={'size':14} # plot pixel positions img = ax.scatter( xr_dataarray.longitude.data, xr_dataarray.latitude.data, c=xr_dataarray.data*conversion_factor, cmap=plt.cm.get_cmap(color_scale), marker='o', s=point_size, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax ) plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel("Longitude", fontsize=16) plt.ylabel("Latitude", fontsize=16) cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.04, pad=0.1) cbar.set_label(unit, 
fontsize=16) cbar.ax.tick_params(labelsize=14) ax.set_title(title, fontsize=20, pad=20.0) plt.show() # + [markdown] Collapsed="false" # ### <a id='visualize_pcolormesh'></a>`visualize_pcolormesh` # + Collapsed="false" def visualize_pcolormesh(data_array, longitude, latitude, projection, color_scale, unit, long_name, vmin, vmax, set_global=True, lonmin=-180, lonmax=180, latmin=-90, latmax=90): """ Visualizes a xarray.DataArray with matplotlib's pcolormesh function. Parameters: data_array(xarray.DataArray): xarray.DataArray holding the data values longitude(xarray.DataArray): xarray.DataArray holding the longitude values latitude(xarray.DataArray): xarray.DataArray holding the latitude values projection(str): a projection provided by the cartopy library, e.g. ccrs.PlateCarree() color_scale(str): string taken from matplotlib's color ramp reference unit(str): the unit of the parameter, taken from the NetCDF file if possible long_name(str): long name of the parameter, taken from the NetCDF file if possible vmin(int): minimum number on visualisation legend vmax(int): maximum number on visualisation legend set_global(boolean): optional kwarg, default is True lonmin,lonmax,latmin,latmax(float): optional kwarg, set geographic extent is set_global kwarg is set to False """ fig=plt.figure(figsize=(20, 10)) ax = plt.axes(projection=projection) img = plt.pcolormesh(longitude, latitude, data_array, cmap=plt.get_cmap(color_scale), transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, shading='auto') ax.add_feature(cfeature.BORDERS, edgecolor='black', linewidth=1) ax.add_feature(cfeature.COASTLINE, edgecolor='black', linewidth=1) if (projection==ccrs.PlateCarree()): ax.set_extent([lonmin, lonmax, latmin, latmax], projection) gl = ax.gridlines(draw_labels=True, linestyle='--') gl.top_labels=False gl.right_labels=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':14} gl.ylabel_style={'size':14} if(set_global): ax.set_global() 
ax.gridlines() cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.04, pad=0.1) cbar.set_label(unit, fontsize=16) cbar.ax.tick_params(labelsize=14) ax.set_title(long_name, fontsize=20, pad=20.0) # plt.show() return fig, ax # - # ### <a id='visualize_s3_pcolormesh'></a>`visualize_s3_pcolormesh` def visualize_s3_pcolormesh(color_array, array, latitude, longitude, title): """ Visualizes a xarray.DataArray or numpy.MaskedArray (Sentinel-3 OLCI Level 1 data) with matplotlib's pcolormesh function as RGB image. Parameters: color_array (numpy.MaskedArray): any numpy.MaskedArray, e.g. loaded with the NetCDF library and the Dataset function array(numpy.Array): numpy.Array to get dimensions of the resulting plot longitude (numpy.Array): array with longitude values latitude (numpy.Array) : array with latitude values title (str): title of the resulting plot """ fig=plt.figure(figsize=(20, 12)) ax=plt.axes(projection=ccrs.Mercator()) ax.coastlines() gl = ax.gridlines(draw_labels=True, linestyle='--') gl.top_labels=False gl.right_labels=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':14} gl.ylabel_style={'size':14} img1 = plt.pcolormesh(longitude, latitude, array*np.nan, color=color_array, clip_on = True, edgecolors=None, zorder=0, transform=ccrs.PlateCarree()) ax.set_title(title, fontsize=20, pad=20.0) plt.show() # ### <a id='visualize_s3_frp'></a> `visualize_s3_frp` def visualize_s3_frp(data, lat, lon, unit, longname, textstr_1, textstr_2, vmax): """ Visualizes a numpy.Array (Sentinel-3 SLSTR NRT FRP data) with matplotlib's pcolormesh function and adds two text boxes to the plot. Parameters: data(numpy.MaskedArray): any numpy MaskedArray, e.g. 
loaded with the NetCDF library and the Dataset function lat(numpy.Array): array with longitude values lon(numpy.Array) : array with latitude values unit(str): unit of the resulting plot longname(str): Longname to be used as title textstr_1(str): String to fill box 1 textstr_2(str): String to fill box 2 vmax(float): Maximum value of color scale """ fig=plt.figure(figsize=(20, 15)) ax = plt.axes(projection=ccrs.PlateCarree()) img = plt.pcolormesh(lon, lat, data, cmap=cm.autumn_r, transform=ccrs.PlateCarree(), vmin=0, vmax=vmax) ax.add_feature(cfeature.BORDERS, edgecolor='black', linewidth=1) ax.add_feature(cfeature.COASTLINE, edgecolor='black', linewidth=1) gl = ax.gridlines(draw_labels=True, linestyle='--') gl.bottom_labels=False gl.right_labels=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':14} gl.ylabel_style={'size':14} cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.029, pad=0.025) cbar.set_label(unit, fontsize=16) cbar.ax.tick_params(labelsize=14) ax.set_title(longname, fontsize=20, pad=40.0) props = dict(boxstyle='square', facecolor='white', alpha=0.5) # place a text box on the right side of the plot ax.text(1.1, 0.9, textstr_1, transform=ax.transAxes, fontsize=16, verticalalignment='top', bbox=props) props = dict(boxstyle='square', facecolor='white', alpha=0.5) # place a text box in upper left in axes coords ax.text(1.1, 0.85, textstr_2, transform=ax.transAxes, fontsize=16, verticalalignment='top', bbox=props) plt.show() # ### <a id='visualize_s3_aod'></a> `visualize_s3_aod` def visualize_s3_aod(aod_ocean, aod_land, latitude, longitude, title, unit, vmin, vmax, color_scale, projection): """ Visualizes two xarray.DataArrays from the Sentinel-3 SLSTR NRT AOD dataset onto the same plot with matplotlib's pcolormesh function. 
Parameters: aod_ocean(xarray.DataArray): xarray.DataArray with the Aerosol Optical Depth for ocean values aod_land(xarray.DataArray): xarray.DataArray with Aerosol Optical Depth for land values longitude(xarray.DataArray): xarray.DataArray holding the longitude values latitude(xarray.DataArray): xarray.DataArray holding the latitude values title(str): title of the resulting plot unit(str): unit of the resulting plot vmin(int): minimum number on visualisation legend vmax(int): maximum number on visualisation legend color_scale(str): string taken from matplotlib's color ramp reference projection(str): a projection provided by the cartopy library, e.g. ccrs.PlateCarree() """ fig=plt.figure(figsize=(12, 12)) ax=plt.axes(projection=projection) ax.coastlines(linewidth=1.5, linestyle='solid', color='k', zorder=10) gl = ax.gridlines(draw_labels=True, linestyle='--') gl.top_labels=False gl.right_labels=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':12} gl.ylabel_style={'size':12} img1 = plt.pcolormesh(longitude, latitude, aod_ocean, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=color_scale) img2 = plt.pcolormesh(longitude, latitude, aod_land, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=color_scale) ax.set_title(title, fontsize=20, pad=20.0) cbar = fig.colorbar(img1, ax=ax, orientation='vertical', fraction=0.04, pad=0.05) cbar.set_label(unit, fontsize=16) cbar.ax.tick_params(labelsize=14) plt.show() # + [markdown] Collapsed="false" # <hr> # + [markdown] Collapsed="false" # <img src='./img/copernicus_logo.png' alt='Logo EU Copernicus' align='right' width='20%'><br><br><br><br> # # <p style="text-align:right;">This project is licensed under the <a href="./LICENSE">MIT License</a> and is developed under a Copernicus contract.
90_workshops/202109_EMS_short_course/functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="8NddOVMxgFp0"
# # Genetic Algorithm
#
# The genetic algorithm is a nature-inspired algorithm based on natural selection, in which the fittest individuals of a population are selected to reproduce the next generation. 1st of a 3-part series on evolutionary computation (Part 2 - [Neuroevolution](https://jetnew.io/posts/2020/11/neuroevolution/), Part 3 - [Novelty Search](https://jetnew.io/posts/2020/11/novelty-search/)).
#
# The genetic algorithm consists of 5 processes:
# 1. Initial population
# 2. Fitness function
# 3. Selection
# 4. Crossing-over
# 5. Mutation
#
# Terminology:
# - Population refers to the set of individuals (solutions).
# - Individual is defined by its chromosome (set of parameters/variables).
# - Fitness function refers to the performance measure of an individual.
# - Selection refers to the selection of the fittest.
# - Crossing-over refers to a swapping of segments of 2 parents' genes, producing a child individual with a new gene combination.
# - Mutation is a random perturbation of genes based on a probability.

# + [markdown] id="uokXBJZdqrZr"
# # Optimization Problem: Linear Regression
#
# Evolutionary algorithms can serve as "black box" optimisation algorithms without needing to solve the objective function analytically. To illustrate that evolutionary algorithms can optimise, the simple linear regression problem is used. Define a linear function:
# $$y = mx + c + \epsilon$$
# to be modelled by a linear regression model, where $m=1$, $c=0$, $\epsilon\sim N(0,1)$ represents gradient, y-intercept and Gaussian noise respectively.
# - import numpy as np import matplotlib.pyplot as plt np.random.seed(0) # for reproducibility # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="T0M8rFcdk4Zj" outputId="10bc8dfb-92ce-485d-8d2b-f4c240ed5661" X = np.linspace(0, 10, 100) y = X + np.random.normal(0, 1, 100) plt.title("Linear function with Gaussian noise") plt.plot(X, y, '.') plt.show() # + [markdown] id="97C4UZhtr7L1" # # Process 1: Generate the initial population of individuals. # # Each individual (solution/model) is defined by a set of parameters. Hyperparameters to be specified, which are variables that are not updated at every iteration of optimisation, are the population size (number of individuals in the population at any point in time) and the number of parameters that defines an individual. The initial population's parameters can be zero-initialised or random-initialised. For your interest, there also exists many other initialisation methods to be used depending on context, such as the He initialisation and Xavier initialisation. The set of parameters that defines each individual is biologically analogous to the individual's genome (or gene or chromosome, depending on the computational process). # + colab={"base_uri": "https://localhost:8080/"} id="iK9Ma2AjgA8y" outputId="8627c4f1-efea-406d-fc73-8af96e5d9e1d" population_size = 10 num_parameters = 2 # initial_population = np.zeros(shape=(population_size, num_parameters)) # zero initialisation initial_population = np.random.normal(0, 1, size=(population_size, num_parameters)) # random normal initialisation initial_population # + [markdown] id="OZ6sphgrtThH" # # Process 2: Compute the fitness of all individuals. # # Another 2 hyperparameters are in the form of functions - the solution and the fitness function. The solution is a model that uses the individual's parameters to compute the output $y$ given input $X$. For simplicity, we use the polynomial regression model (with 2 parameters, it is a simple linear regression model). 
# The fitness function measures the performance of an individual solution. The evolutionary analogy of the fitness function of an organism would be, for example, its survivability and/or reproductive success. Because we want to model the linear function with Gaussian noise dataset, the negative mean squared error (MSE) is used as the fitness function to determine how well the solution models the dataset. Because the fitness function is to be maximised, MSE is negated so that a higher fitness value reflects a smaller error.

# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="e8eDiF7Ikg1w" outputId="c0f6bd68-65f1-4472-8efd-0b01c836cba2"
def solution(params):
    """Polynomial regression model: y_hat = sum_i params[i] * X**i.

    With 2 parameters this is simple linear regression c + m*x.
    Evaluates against the module-level input array `X`.
    """
    return np.sum([params[i] * X**i for i in range(len(params))], axis=0)

def fitness_function(params):
    """Negative mean squared error of `params` against the dataset (X, y).

    Negated because fitness is maximised; identical in value to the
    original -np.sum(abs(err)**2)/len(X): np.mean == sum/len, and the
    abs() was redundant before squaring real-valued residuals.
    """
    return -np.mean((y - solution(params)) ** 2)

def plot_data():
    # Scatter the raw dataset.
    plt.plot(X, y, '.')

def plot_individual(individual):
    # Overlay one candidate model's predictions in grey.
    plt.plot(X, solution(individual), '.', c='grey')

def plot_population(population):
    # Overlay every candidate in the population.
    for individual in population:
        plot_individual(individual)

individual = initial_population[0]
fitness_score = fitness_function(individual)

plot_data()
plot_individual(individual)
plt.show()

plot_data()
plot_population(initial_population)
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="sxZP-JuIpg9b" outputId="aa17c82b-2fa2-4622-c31a-a1db6bfb4023"
def compute_fitness(population):
    """Return a 1-D array of fitness scores, one per row of `population`."""
    return np.array([fitness_function(individual) for individual in population])

fitness_scores = compute_fitness(initial_population)
fitness_scores

# + [markdown] id="Pnw8PioJvKId"
# # Process 3: Select the fittest individuals.
#
# Like natural selection, select the top $k$ percentage of individuals with the highest fitness scores, where $k$ is a hyperparameter, to form the parent subpopulation that will reproduce to form the next generation of the population later.
# + colab={"base_uri": "https://localhost:8080/"} id="zh5kELbDm7qA" outputId="224525fc-d6d4-41cc-99d0-c4e90b93648b"
def get_fittest(population, fitness_scores):
    """Return the single row of `population` with the highest fitness."""
    return population[fitness_scores.argmax(), :]

def select_fittest(population, fitness_scores, k=0.5):
    """Return the top-k fraction of `population`, ranked by fitness.

    Keeps at least one individual: for small populations int(len * k)
    can round down to 0, and the original `[-0:]` slice then returned
    the WHOLE population instead of the intended top fraction.
    """
    n_keep = max(1, int(len(population) * k))
    return population[np.argsort(fitness_scores)[-n_keep:], :]

parent_subpopulation = select_fittest(initial_population, fitness_scores, k=0.2)
parent_subpopulation, compute_fitness(parent_subpopulation)

# + [markdown] id="RVg3IoixvHt-"
# # Process 4: Perform crossing-over between parents to produce children.
#
# Crossing-over is a biological process that exchanges genetic material to result in new combinations of genetic material. For the benefit of non-biology students, much detail has been abstracted out, so for your interest, refer to chromosomal crossovers. In the genetic algorithm, crossing-over is performed during reproduction by swapping a segment of parameters of one parent with another parent. For example, take 2 parents defined by 4 parameters:
#
# $$P1 = [A1, A2, A3, A4], P2 = [B1, B2, B3, B4]$$
#
# A crossing-over at the index 3 will result in a child:
#
# $$C = [A1, A2, B3, B4]$$
#
# There exists other methods of genetic exchange to introduce variance in the population gene pool, such as swapping elements instead of segments.
# + colab={"base_uri": "https://localhost:8080/"} id="X-f1uYE_stb6" outputId="7a553215-b3f1-4730-c4f5-3f4cc95c5dca"
def perform_crossingover(subpopulation):
    """Refill the population to `population_size` (module-level) by breeding.

    Each child takes a leading segment of genes from one random parent
    and the trailing segment from another (segment crossover). The cut
    point is sampled in [0, num_genes], so "no swap" children are possible.
    """
    num_genes = subpopulation.shape[1]  # derived, not the global num_parameters
    children = []
    for _ in range(population_size - len(subpopulation)):
        parents = subpopulation[np.random.randint(0, len(subpopulation), 2)]
        gene_indices = np.zeros(num_genes).astype(int)
        gene_indices[np.random.randint(num_genes + 1):] = 1  # segment swap
        child = parents[gene_indices, np.arange(num_genes)]
        children.append(child)
    if not children:
        # Population already at target size: np.append with an empty
        # children array would raise, so return the parents unchanged.
        return subpopulation
    return np.append(subpopulation, np.array(children), axis=0)

next_population = perform_crossingover(parent_subpopulation)
next_population, compute_fitness(next_population)

# + [markdown] id="xJK73lPdySEA"
# # Process 5: Perform mutation on the population.
#
# A mutation is defined as a change in the DNA sequence. While the exact differences between DNA, gene and chromosome in the genetic algorithm are not maintained, inspiration is drawn from mutation in biology that usually worsens fitness but can occasionally improve fitness of the individual. To perform mutation on the population parameters, add Gaussian noise $\epsilon\sim N(0, \sigma)$ to the individuals' parameters, where $\sigma$ is the standard deviation hyperparameter.

# + colab={"base_uri": "https://localhost:8080/"} id="hXruXC9v1Ewr" outputId="8465c8c4-3f39-478f-ca86-cd33a249c314"
def perform_mutation(population, sigma=0.1):
    """Add i.i.d. Gaussian noise N(0, sigma) to every parameter."""
    return population + np.random.normal(0, sigma, population.shape) # Gaussian noise

mutated_population = perform_mutation(next_population, sigma=0.01)
mutated_population, compute_fitness(mutated_population)

# + [markdown] id="zWU9ebTyzW6w"
# # The Genetic Algorithm: All 5 Processes Together
#
# By combining the 5 processes together, we construct the genetic algorithm and run it to find a solution that models the linear function well.
#
# Genetic Algorithm:
# 1. Generate the initial population of individuals.
# 2. Repeat until convergence:
#   1. Compute fitness of the population.
#   2.
# Select the fittest individuals (parent subpopulation).
#   3. Perform crossing-over between parents to produce children.
#   4. Perform mutation on the population.
# 3. Select the fittest individual of the population as the solution.

# + colab={"base_uri": "https://localhost:8080/", "height": 314, "referenced_widgets": ["908b5c225b6644e69a9ef11002a5c8cb", "b6280ded6ae143eb8b18d7aca45a6bda", "02ced57e35994aa99c52ae3d4c2244a4", "a4ae4fea44f04df59b90ecdd5014e2c3", "41c9ef1f4773480ab9b3520b71a396ad", "a9c9a17f7e7b4fd1bacfb929f15108ab", "bd03c2e50be748b2a9f0a21b967fd862", "df95f6b37547421b9b315c08196ee5b0"]} id="N0LM-lWu10Zy" outputId="4fd3f55b-9846-4809-a08f-c5458dd523b6"
# Define hyperparameters of the genetic algorithm.
# NOTE: these rebind the module-level population_size/num_parameters used
# by perform_crossingover, so this cell fully re-configures the run.
population_size = 20
num_parameters = 2
num_generations = 50
top_k = 0.5          # fraction of the population kept as parents
mutation_sigma = 0.01

# Process 1: Generate the initial population of individuals.
population = np.random.normal(0, 1, size=(population_size, num_parameters))

# Misc: Experimental tracking
scores = []      # best fitness per generation
solutions = []   # best individual's predictions per generation (for animation)

# Iterate the process over multiple generations of populations.
for i in range(num_generations):
    # Process 2: Compute the fitness of all individuals.
    fitness_scores = compute_fitness(population)

    # Process 3: Select the fittest individuals.
    fittest_subpopulation = select_fittest(population, fitness_scores, k=top_k)

    # Misc: Experimental tracking
    fittest = get_fittest(population, fitness_scores)
    solutions.append(solution(fittest))
    scores.append(fitness_function(fittest))

    # Process 4: Perform crossing-over between parents to produce children.
    children = perform_crossingover(fittest_subpopulation)

    # Process 5: Perform mutation on the population.
    population = perform_mutation(children, sigma=mutation_sigma)

# Misc: Experimental tracking -- best fitness (negative MSE) per generation.
plt.plot(np.arange(num_generations), scores)
plt.show()

# + [markdown] id="u_9BkWiX09GH"
# # Experiment Result
#
# The fittest individual in the final population is a reasonably well-fit linear regression model.
# The rest of the population have a lower fitness score but are quite well-fit as well.

# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="SwKNAbVe7SoA" outputId="e56585db-7382-4812-b29d-24f64a0ef131"
fitness_score = fitness_function(fittest)
y_pred = solution(fittest)  # NOTE(review): computed but not used below

# Best individual of the final generation against the data.
plot_data()
plot_individual(fittest)
plt.show()

# Whole surviving parent subpopulation against the data.
plot_data()
plot_population(fittest_subpopulation)
plt.show()

# + [markdown] id="kMLfrP581V7-"
# By visualising the fittest model at each generation (iteration) of the genetic algorithm, notice that virtually instantly, the linear regression model fits to the dataset. In fact, linear regression is too simple a problem to realise the effectiveness of the genetic algorithm. Nonetheless, the reason for using linear regression is to bring focus to the genetic algorithm without the overhead of needing to understand the model. For a more complex application of the genetic algorithm using neural networks, refer to [Part 2](https://jetnew.io/posts/2020/11/neuroevolution/) of the Evolutionary Computation series on Neuroevolution. For an evolutionary strategy based on novelty applied on reinforcement learning, refer to [Part 3](https://jetnew.io/posts/2020/11/novelty-search/).

# + id="inhWaZ2s19gi"
# %%capture
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
plot_data()
ga_line = ax.plot([], [])[0]  # line artist updated each animation frame
ax.set_xlim(min(X), max(X))
ax.set_ylim(min(y), max(y))

def animate(i):
    # Frame i shows the best solution recorded at generation i.
    ga_line.set_data(X, solutions[i])
    ax.set_xlabel(f'Gen {i+1}')
    return ga_line, ax

ani = FuncAnimation(fig, animate, frames=np.arange(0, num_generations), interval=80, repeat=False)
# -

ani.save('../images/genetic-algorithm/genetic_algorithm.gif')

# <img src="../images/genetic-algorithm/genetic_algorithm.gif">
_jupyter/genetic_algorithm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data-X: Airbnb Price Trend Predictor
# Predicts the percentage change in price for an Airbnb in San Francisco a year from now based on historical data
# ___
# ### Dependencies
# None
# ### Imports

# +
# Import Python packages
import os
from pathlib import Path

# Import Standard ML packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# sklearn.externals.joblib was removed in scikit-learn 0.23+; fall back to it
# only on old installs so the notebook runs on both.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
import sklearn.linear_model as linear_model
from sklearn import ensemble
import sklearn.metrics as metrics
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler

sns.set(style="whitegrid", palette="muted")
plt.rcParams['figure.figsize'] = (12, 9)
plt.rcParams['font.size'] = 14
# %matplotlib inline
# -

# ### Import Datasets

may18_df = pd.read_csv("../raw_datasets/sf_airbnb_may_18.csv")
nov18_df = pd.read_csv("../raw_datasets/sf_airbnb_nov_18.csv")
nov17_df = pd.read_csv("../raw_datasets/sf_airbnb_nov_17.csv")
nov16_df = pd.read_csv("../raw_datasets/sf_airbnb_nov_16.csv")

# ### Data Cleaning/Transformation

# +
def select_columns(df, *columns):
    """Return only the named columns of `df`."""
    return df.loc[:, columns]

def dollar_to_float(df, *columns):
    """Strip '$' and ',' from the named string columns and cast to float64.

    regex=True is required: since pandas 2.0 str.replace defaults to literal
    matching, which would look for the literal text "[$,]" and strip nothing.
    """
    for c in columns:
        df[c] = df[c].str.replace(r'[$,]', '', regex=True).astype("float64")
    return df

def one_hot_encoding(df, *columns):
    """Replace each named categorical column with its one-hot dummies."""
    for c in columns:
        hot = pd.get_dummies(df[c], prefix=c)
        df = pd.concat([df, hot], axis=1)
        df.drop(c, axis=1, inplace=True)
    return df

def fill_na_with_median(df, *columns):
    """Impute NaNs in the named columns with that column's median."""
    for c in columns:
        df.loc[df[c].isnull(), c] = df.loc[df[c].notnull(), c].median()
    return df

def clean_df(df):
    """Full feature pipeline: select, impute, de-dollar, one-hot encode."""
    df = df.copy()
    return (
        df.set_index("id")
        .pipe(
            select_columns,
            "price", "longitude", "latitude", "accommodates",
            "bedrooms", "bathrooms", "beds", "room_type",
            "neighbourhood_cleansed", "zipcode"
        )
        .pipe(fill_na_with_median, "bathrooms", "beds")
        .pipe(dollar_to_float, "price")
        .pipe(one_hot_encoding, "room_type", "neighbourhood_cleansed", "zipcode")
    )

def clean_df_price(df):
    """Minimal pipeline keeping only the float-converted price column."""
    df = df.copy()
    return (
        df.set_index("id")
        .pipe(select_columns, "price")
        .pipe(dollar_to_float, "price")
    )

def merge_dfs(*dfs):
    """Inner-join any number of frames on their (listing id) index."""
    for i, df in enumerate(dfs):
        if i == 0:
            merged_df = df
        else:
            merged_df = merged_df.merge(df, left_index=True, right_index=True)
    return merged_df

# +
curr_df = clean_df(nov18_df)
past_df = clean_df_price(nov16_df)

merged = merge_dfs(past_df, curr_df)
# merge suffixes: price_x is the Nov-16 price, price_y the Nov-18 price.
merged.rename(
    index=str,
    columns={"price_x": "price_past", "price_y": "price_curr"},
    inplace=True
)
# Target: two-year relative price change; the current price is then dropped
# so the model only sees past-price + listing features.
merged["price_percent_change"] = (merged["price_curr"] - merged["price_past"]) / merged["price_past"]
merged = merged.drop(["price_curr"], axis=1)
merged.head()
# -

# ### Model Training/Evaluation

def scale_X(X):
    """Standardise features to zero mean / unit variance."""
    scaler = StandardScaler()
    scaler.fit(X)
    return scaler.transform(X)

X_df = merged.drop("price_percent_change", axis=1)
X = X_df.pipe(scale_X)
Y = merged["price_percent_change"]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=8)

# +
err_func = metrics.median_absolute_error

# One grid search per model family, all scored by (negated) median absolute
# error so GridSearchCV maximises the right quantity.
estimators = [
    GridSearchCV(
        estimator=linear_model.LinearRegression(),
        param_grid={"fit_intercept": [True, False]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=linear_model.Lasso(),
        param_grid={"alpha": [1, 5, 10, 20], "fit_intercept": [True, False]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=linear_model.Ridge(fit_intercept=True),
        param_grid={"alpha": [1, 5, 10, 20], "fit_intercept": [True, False]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=linear_model.OrthogonalMatchingPursuit(),
        param_grid={"fit_intercept": [True]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=linear_model.BayesianRidge(),
        param_grid={
            "alpha_1": [1.e-6, 1],
            "alpha_2": [1.e-6, 1],
            "lambda_1": [1.e-6, 1],
            "lambda_2": [1.e-6, 1],
            "fit_intercept": [True, False]
        },
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=linear_model.ElasticNet(),
        param_grid={"alpha": [1, 5, 10, 20], "l1_ratio": [0.3, 0.5, 0.7]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=ensemble.RandomForestRegressor(),
        param_grid={"n_estimators": [5, 10, 20]},
        scoring=make_scorer(err_func, greater_is_better=False)
    ),
    GridSearchCV(
        estimator=ensemble.GradientBoostingRegressor(),
        # NOTE(review): sklearn >= 1.0 renamed these losses to
        # "squared_error"/"absolute_error" -- confirm the target sklearn version.
        param_grid={"loss": ["ls", "lad"], "learning_rate": [0.05, 0.1, 0.2]},
        scoring=make_scorer(err_func, greater_is_better=False)
    )
]

estimator_labels = np.array([
    'Linear', 'Lasso', 'Ridge', 'OMP',
    'BayesRidge', 'ElasticNet', 'RForest', 'GBoosting'
])

def get_estimator_name(e):
    """Label for estimator `e`, matched by its position in `estimators`."""
    return estimator_labels[estimators.index(e)]

estimator_errs = np.array([])
best_model = None
min_err = float("inf")

# Fit every grid search, score on the held-out split, and keep the best.
for e in estimators:
    e.fit(X_train, y_train)
    y_pred = e.predict(X_test)
    curr_err = err_func(y_test, y_pred)
    estimator_errs = np.append(estimator_errs, curr_err)
    print(f"""{get_estimator_name(e)}: {e.best_params_}, Error: {curr_err}""")
    if curr_err < min_err:
        min_err = curr_err
        best_model = e

print(f"""
Best Estimator: {get_estimator_name(best_model)}
MAE: {min_err}
""")

x_vals = np.arange(len(estimator_errs))
sorted_indices = np.argsort(estimator_errs)
plt.figure(figsize=(8, 6))
plt.title("Estimator Median Absolute Error")
plt.xlabel('Estimator')
plt.ylabel('Median Absolute Error')
plt.bar(x_vals, estimator_errs[sorted_indices], align='center')
plt.xticks(x_vals, estimator_labels[sorted_indices])
plt.savefig('../plots/Airbnb Price Trend MAE.png', bbox_inches='tight')
plt.show()
# -

best_model.fit(X_train, y_train)

plt.title(f"""Best Model ({get_estimator_name(best_model)}) Residuals""")
# Histogram of held-out residuals for the winning model, clipped to +/-50%.
plt.xlabel("Residuals (% Change)")
plt.ylabel("Count")
plt.xlim((-0.5, 0.5))
plt.hist((y_test - best_model.predict(X_test)).values, bins=250)
plt.savefig('../plots/Airbnb Price Trend Residual.png', bbox_inches='tight')
plt.show()

# ### Export Models

# Persist the feature matrix and target for downstream notebooks...
export_path = "../exported_models/airbnb_price_trends.hdf"
X_df.to_hdf(export_path, "X_df")
Y.to_hdf(export_path, "Y")

# ...and the fitted best estimator (trailing ';' suppresses notebook output).
export_path = "../exported_models/airbnb_price_trends.pkl"
joblib.dump(best_model, export_path);
ipython_notebooks/.ipynb_checkpoints/airbnb_price_trends-checkpoint.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.0
#     language: julia
#     name: julia-1.6
# ---

# # Check if gradient is correct numerically
#
# We will check by numerical forward difference implemented in `ForwardDiff.jl`

using MendelIHT
using ForwardDiff
using Random
using LinearAlgebra

# ## First simulate data

# +
# Simulate an n-by-p SNP design matrix with a k-sparse true model over r
# correlated traits, returning (X as Float64, covariates Z', true B, true Σ, Y').
function simulate_multivariate_sparse(
    n::Int, p::Int, k::Int, r::Int; seed::Int=2021)
    # set seed
    Random.seed!(seed)
    # simulate `.bed` file with no missing data
    x = simulate_random_snparray(undef, n, p)
    xla = convert(Matrix{Float64}, x, impute=true, center=true, scale=true)
    n, p = size(x)
    # intercept is the only nongenetic covariate
    z = ones(n, 1)
    intercepts = randn(r)' # each trait have different intercept
    # simulate response y, true model b, and the correct non-0 positions of b
    Y, true_Σ, true_b, correct_position = simulate_random_response(xla, k, r, Zu=z*intercepts, overlap=0);
    # NOTE: Z and Y are returned transposed (traits/covariates in rows).
    return xla, Matrix(z'), true_b, true_Σ, Matrix(Y')
end

n = 100
p = 1000
k = 10
r = 2
seed = 2021
xla, Z, true_b, Σ, Y = simulate_multivariate_sparse(n, p, k, r, seed=seed)
X = transpose(xla);
# -

# ## Loglikelihood and gradient functions according to math
#
# Evaluate gradient and loglikelihood where $B_{ij} = 0$ and $\Gamma$ is identity.
# +
# Multivariate-normal loglikelihood (up to a constant) as a function of the
# coefficient matrix B, with precision matrix Γ and data X, Y taken from
# the enclosing scope: (n/2) logdet(Γ) - (1/2) tr(Γ R Rᵀ), R = Y - BX.
function loglikelihood_B(B)
    resid = Y - B * X
    n = size(resid, 2)  # local n shadows the global sample size here
    return n/2 * logdet(Γ) - 0.5 * tr(Γ*resid*resid')
end

# Analytic gradient of loglikelihood_B with respect to B: Γ (Y - BX) Xᵀ.
function grad_B(B)
    return Γ * (Y - B * X) * X'
end

# +
# Same loglikelihood, now viewed as a function of the precision matrix Γ
# (B fixed from the enclosing scope).
function loglikelihood_Γ(Γ)
    resid = Y - B * X
    n = size(resid, 2)
    return n/2 * logdet(Γ) - 0.5 * tr(Γ*resid*resid')
end

# Analytic gradient with respect to Γ: (n/2) Γ⁻¹ - (1/2) R Rᵀ.
# NOTE: `0.5n` reads the GLOBAL n from the simulation cell (no local n here);
# it matches size(resid, 2) for this data.
function grad_Γ(Γ)
    resid = Y - B * X
    return 0.5n * inv(Γ) - 0.5 * resid * resid'
end
# -

# ### Actual gradient with respect to B

# Evaluate at B = 0 with Γ = I, per the markdown above.
B = zeros(r, p)
Γ = Matrix{Float64}(I, r, r)
@show loglikelihood_B(B)
∇B = grad_B(B)

# ### Numerical gradient with respect to B

g = x -> ForwardDiff.gradient(loglikelihood_B, x)
g(B)

# ### Actual gradient with respect to $\Gamma$

B = zeros(r, p)
Γ = Matrix{Float64}(I, r, r)
@show loglikelihood_Γ(Γ)
∇Γ = grad_Γ(Γ)

# ### Numerical gradient with respect to $\Gamma$

g = x -> ForwardDiff.gradient(loglikelihood_Γ, x)
g(Γ)
test/multivariate_gradient.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Yes Reg No Adam No Decay run modules/venv_setup.ipynb run modules/module.ipynb run modules/model.ipynb run modules/data_loader.ipynb # + with open('saved_data/broad_search/reg1_adam0_decay0', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg1_adam0_decay0_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 2. Yes Reg No Adam Yes Decay # + with open('saved_data/broad_search/reg1_adam0_decay1', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg1_adam0_decay1_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 3. Yes Reg Yes Adam No Decay # + with open('saved_data/broad_search/reg1_adam1_decay0', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg1_adam1_decay0_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 4. Yes Reg Yes Adam Yes Decay # + with open('saved_data/broad_search/reg1_adam1_decay1', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg1_adam1_decay1_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 5. 
No Reg No Adam Yes Decay # + with open('saved_data/broad_search/reg0_adam0_decay1', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg0_adam0_decay1_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 6. No Reg Yes Adam No Decay # + with open('saved_data/broad_search/reg0_adam1_decay0', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg0_adam1_decay0_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # - # # 7. No Reg Yes Adam Yes Decay # + with open('saved_data/broad_search/reg0_adam1_decay1', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t # + with open('saved_data/broad_search/reg0_adam1_decay1_lr01', 'rb') as f: all_costs, all_accs, params, all_accs_test, all_costs_test, o, a, c, o_t, a_t, c_t = pickle.loads(f.read()) a, a_t
saved_data/broad_search/Results Summary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # # Word2Vec # 在许多自然语言处理任务中,单词通常由他们的 [tf-idf]() 分值来表示。这种分值(只是一个标量scalar)只能表征出一个词在一个文档中的重要程度,但无法给出任何语义层面的解释,意思就是你只是一个标量数值,能够显示出单词的一种重要程度或者权重,但是却无法表达出单词的语义,含义往往和词的上下文以及词的近义词反义词等有关系。Word2Vec 是一种能够采用神经网络模型,它对无标签的语料库进行训练,最终得出每个单词的一个词向量(也就是说这个单词通过向量vector来表征),这种词向量可以理解成是对单词在相关语料库所表现出来的隐含语义的一种编码。词向量表示方式是非常有用,主要体现在一下两点: # 1. 我们能通过计算余弦相似度来度量两个单词的语义相似度。所有单词已经在同一个高维度空间,在这个维度空间,我们可以采用各种数学定义上的距离度量,来度量两个单词的语义相似度,如欧式距离、马氏距离、余弦距离、汉明距离、曼哈顿距离、皮尔逊相关系数。 # 2. 我们可以使用词向量作为特征向量做各种有监督NLP机器学习任务(有机器学习基础的话就知道,所有机器学习模型的输入最后都变成了一个多维特征向量),比如文档分类,命名实体标注,文本情感分析。在词向量中隐含的语义信息能够作为机器学习输入数据的强有力的特征,传统机器学习的输入数据的每个维度特征,有很大一部分是人为设计的,不是靠机器单纯学出来的,比如某个维度表示词的频率,词的词性(adj,adv,noun.),词的表意(good or bad or neutral),词的长度,词的音节,音素等等,需要大量的语言学家设计出这些特征并做提取。 # # 你可能会问“我们怎么才能高效的学习到单词的语义呢?”. 确实,目前为止还无法科学解释为什么能够学出来,因为使用了神经网络炼金术-:),但是试验效果出奇的好,而且直觉上来讲,近义词(synonyms)在空间上距离相近,反义词(antonyms)在空间上距离远。更惊喜的是,词向量还能够进行类比推理,这个是论文中的经典例子“Woman is to queen as man is to king”. 
结果在训练出来的词向量里面可以直接得出以下公式: # $$ # v_{queen} - v_{woman} + v_{man} \approx v_{king} # $$ # 这里 $v_{queen}$, $v_{woman}$, $v_{man}$ 和 $v_{king}$ 分别表示各自词的词向量。这些实验结果强烈的暗示了词向量技术是对他们代表的单词在某个隐含语义空间的编码。 # # 下面我来讲解Word2Vec常用的两个模型算法,skip-gram 和 continuous bag-of-words(CBOW) 模型。这两种模型都是单隐层的神经网络模型,并通过神经网络的经典学习方法反向传播(backpropagation)和随机梯度下降(stochastic gradient descent)进行学习。 # - skip-gram 是一个根据一个输入词预测他上下文单词的模型 # - cbow 是给定上下文单词,预测中心单词的模型 # + [markdown] deletable=true editable=true # ## The Skip-Gram Model # 我们需要使用数学符号来定义和描述我们的模型以及输入。skip-gram 的输入是单个单词 $w_I$ 输出是这个单词的上下文 $\{ w_{O,1},...,w_{O,C} \}$, 其中 $C$ 表示的是单词上下文的窗口大小,就是以该单词为中心,左右取多少个上下文单词。比如,"I drove my car to the store", 如果输入单词取 car, $C=3$,那么我们的输出上下文就是 {"I", "drove", "my", "to", "the", "store"}, 所有这些单词都是通过 one-hot 编码的,他们都是长度为$V$(表示训练所使用的词汇表的长度),相应单词索引位置值为1,其他位置值为0的向量。这些训练数据都来自纯文本,我们有无穷无尽的文本可以拿来训练。 # # 接下来我们就来定义模型: # $X$代表输入单词 $w_I$ 的one-hot编码输入,$\{y_1,...,y_C\}$ 代表输出上下文的one-hot编码,每个都是one-hot编码。$V \times N$ 矩阵 $W$ 是输入层和隐藏层之间的全连接矩阵,$W$第 i 行就代表与其对应的第 i 个单词的词向量。你可以试试用 one-hot 编码输入$X$和该矩阵做点乘 $W^TX$ 就明白了。隐层和输出层之间也有一个 $N \times V$ 矩阵 $W^{'}$, 中间的隐层含有 $N$ 个节点,也是最后词向量的长度大小。 # # ![skip-gram-arch](skip-gram-arch.png) # # + [markdown] deletable=true editable=true # 输入层到隐藏层计算方式: # $$ # \mathbf{h} = W^T \cdot X = v_{w_I}^T # $$ # 输出层是C个单词,属于C个多项分布$\{y_1,...,y_C\}$, 针对此的概率公式是: # $$ # P(w_{c,j}|w_I) = y_{c,j} = \frac{\exp (u_{c,j})}{\sum_{k=1}^{V} \exp(u_{c,k})} # $$ # 这里的关键就是,我们的损失函数和常见的softmax多分类交叉熵稍微有点不同,因为这里又多增加了一维参数C,代表C个输出上下文单词的分布,他们在我们的模型考虑中相互独立,实际上并不是独立的,但是为了简化,我们就认为他们是独立的。这些输出单词共享权重$W'$, $u_{c,j} = u_{j} ={v^{'}_{w_j}}^T \cdot \mathbf{h}$, $v^{'}_{w_j}$ 代表 $W'$的第j列,他也是一种词向量,但是这个词向量意义和矩阵$W$ 代表的意义不同。 # # 最后我们尝试推一下最后的交叉熵损失,论文直接给出了一个很突兀的公式,其实这个公式应该是下面这个公式化简出来的。 # # # 关于模型公式和求导反向传播详细推导,可以参考论文[word2vec Parameter Learning Explained](http://wiki.hacksmeta.com/static/pdf/word2vec-Parameter-Learning-Explained-5.pdf),求导非常容易出错而且对于矩阵求导,维度很容易搞错,还好有TensorFlow等工具可以自动差分求导,我们就不用管求导了。 # # # skip-gram 是一个根据一个输入词预测他上下文单词的模型,第一个矩阵目的是要将one-hot 
编码的输入词,映射到词向量空间,也就是说第一个矩阵学习的是词向量空间。其后,第二个矩阵的目的是将词向量再映射回one-hot编码(但并不是原来输入词的,而是输入词上下文的),但是这个 $W'$ 不是简单的 $W^T$, 而是新引入一个矩阵,他学习的其实是输入单词向量和其上下文单词向量之间的协同关系,而不是学习的词向量,这也是我们使用第一个矩阵作为最后学习到的词向量的原因。 # + [markdown] deletable=true editable=true # ## 几个问题 # ### 为什么Word2Vec 不使用正则项? # 因为Word2Vec的目的并不是为了让这个模型去适应语料库意外的语料,也不是用来再去预测其他的未见过测用例,他只是为了训练出当前语料库中所有单词的词向量。只需要拟合当前数据即可,不需要泛化。 # ### 为什么Word2Vec 隐藏层没有使用激活函数? # 目的不是为了泛化,也不需要激活,激活是为了 # # ### Word2Vec 两个矩阵能够使用同一个吗?也就是说 W.T = W' # 目前网上没人思考过这个问题,如果我们得到了第一个矩阵是词嵌入矩阵,他的每一行代表的是对应单词的词向量,然后采用计算隐层向量(他其实就是某个词向量)和其他所有词向量的距离,用这个距离来度量输入词和其他词之间的关系,以此再进行softmax概率分布来评估误差,但是你不能还是用第一个矩阵来学习,因为第一个矩阵是用来学习词嵌入的,第二个矩阵是用来学习词嵌入和他的上下文单词关系的,这是两个不同的学习对象,因此我们需要再引入一个新矩阵来学习。 # # 目前我认为如果不能的话,应该是这个原因。但是如果可以的话,我觉得也说得通,这得做实验了。 # # ### 为什么取第一个矩阵做词向量而不是第二个矩阵? # 目前网上没人思考过这个问题,目前的解释认为从one-hot 输入到隐层其实是编码进入词向量空间,第一层映射是真实的拿到词向量,也就是说第一个矩阵才是学习我们需要的词向量的矩阵,从隐层到输出是解码到one-hot,但是第二层映射解码出来的目的不是变会原来输入单词的one-hot,而是尽可能匹配出上下文单词的one-hot。也即是说第二层映射过程中发生的是距离计算和度量,第二个矩阵学习到的是某个词向量和他上下文距离远近的关系。 # + [markdown] deletable=true editable=true # ## Continous bag of words model # cbow 是给定上下文单词,预测中心单词的模型,和skip-gram刚好是相反的。 # ![cbow](cbow-arch.png) # # + [markdown] deletable=true editable=true # 这里关键一点是,我们现在的输入变成多个向量了,如何得到一个隐层向量呢?论文中简单粗暴的用了平均方法: # # \begin{align*} # \mathbf{h} &= \frac{1}{C} W^T(x_{1} +x_2+...+x_{C}) \\ &= \frac{1}{C}(v_{w_1} + v_{w_2}+...+v_{w_C})^T # \end{align*} # 然后,从隐层到输出层,直接使用矩阵 $W'$ 相乘,这样可以得到线性和之后进行softmax操作: # # $$ # \mathbf{u} = W^{'T} \cdot h # $$ # 然后我们计算softmax概率公式: # $$ # P(w_j|w_I) = y_j = \frac{\exp(u_j)}{\sum\limits_{k\in V} \exp(u_k)} = \frac # {\exp(v_{w_j}^{'T}\cdot v_{w_I})}{\sum\limits_{k\in V} \exp(v_{w_k}^{'T}\cdot v_{w_I})} # $$ # 最后我们采用交叉熵计算损失: # # + [markdown] deletable=true editable=true # ## 优化 # 当拥有10000个单词的词汇表,我们如果想嵌入300维的词向量,那么我们的输入-隐层权重矩阵和隐层-输出层的权重矩阵都会有 10000 x 300 = 300万个权重,在如此庞大的神经网络中进行梯度下降是相当慢的,模型参数和数据量都是百万级亿级别,模型和数据量都太大了。而且容易学到一些不重要的词汇。 # # ### 抽样率和负采样(Negative Sampling) # Word2Vec的作者在它的第二篇论文中强调了这些问题,下面是作者在第二篇论文中的三个创新: # - 将常见的单词组合(word 
pairs)或者词组作为单个“words”来处理。 # - 对高频次单词进行抽样来减少训练样本的个数。 # - 对优化目标采用“negative sampling”方法,这样每个训练样本的训练只会更新一小部分的模型权重,从而降低计算负担。 # #### 词组做成一个单词 # 就是把一些常见的词组当成一个单词来看待,而不是分开看待,比如“New York” 可以不拆开,他是一个地名。这样学习到的语义更加有意义。 # #### 高频抽样 # 文档中有些词可能出现频率非常高,英文中单词"the" 的概率几乎非常大,中文的"的"字出现概率也很大,他们提供的信息量并不强,过多的加入训练反而会影响训练结果。 # ![negative-sample](negative-sample.jpg) # # 我们在采样的生成输入数据的过程中可以以某个概率保留这个词,词频越大的词,确定性越强,往往说明信息含量低,即信息熵比较低,保留的概率越低,我们可以定义保留概率 # $$ # P(w_i) = (\sqrt \frac{f(w_i)}{0.001} + 1) \times \frac{0.001}{f(w_i)} # $$ # 其中,$f(w_i)$ 表示的是一个词的词频的函数,一般我们取得就是词频率。 # #### 负采样 # word2vec里面一个重点都是one-hot 编码导致的词汇表维度非常大,而大的词汇表又会导致我们的参数矩阵非常大,再加上训练语料都是数亿级别,训练非常慢。我们模型训练过程中拟合one-hot编码有一个特点,就是我们只是希望输出单词上对应索引位置的值接近最大,其他地方都是0,所以我们可以选择只更新哪些高频词汇影响的部分权值矩阵。 # # 比如,当我们用训练样本 ( input word: "fox",output word: "quick") 来训练我们的神经网络时,“ fox”和“quick”都是经过one-hot编码的。如果我们的vocabulary大小为10000时,在输出层,我们期望对应“quick”单词的那个神经元结点输出1,其余9999个都应该输出0。在这里,这9999个我们期望输出为0的神经元结点所对应的单词我们称为“negative” word。 # # 为了加快训练速度,我们可以采样negative word,采样根据词频来采,这次是词频越高,被采样的概率越大。代码中采样负单词的采样概率公式如下: # $$ # P(w_i) = \frac{f(w_i)^{0.75}}{\sum_{j=0}^n(f(w_i)^{0.75})} # $$ # # ### 层次softmax(Hierarchical Softmax) # 首先我们根据语料库中单词的词频,采用Huffman树将单词进行Huffman编码,构造出一颗Huffman树,Huffman树的每个叶子节点都是词汇表中的单词,中间节点都是一个小二分类器,采用sigmoid激活函数进行二分。 # ![huffman-tree](huffman-tree.png) # # 在输出层我们不在直接使用softmax对所有的词汇计算加权和,这会需要$N \times N \times V$计算量,这里往往 $V \gg N$。而采用层次二分法,对输出的隐层向量h,我们从Huffman树的根节点开始,做二分模型,沿着内部节点一直走到叶子节点,这个时候我们的计算量是$N \times N \times \log V$ # # $$ # P(+) = \sigma(x_w^T\theta) = \frac{1}{1+e^{-x_w^T\theta}} # $$ # # ![word2vec-hierarchical-softmax](word2vec-hierarchical-softmax.png) # + [markdown] deletable=true editable=true # # 参考 # - [Word2vec数学原理全家桶](http://shomy.top/2017/07/28/word2vec-all/) # - [Word2Vec Tutorial - The Skip-Gram Model](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) # - [Word2Vec Tutorial Part 2 - Negative Sampling](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/) # - [基于Hierarchical 
Softmax的模型](http://www.cnblogs.com/pinard/p/7243513.html) # - [An Intuitive Understanding of Word Embeddings: From Count Vectors to Word2Vec](https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/) # + [markdown] deletable=true editable=true # # 基于TensorFlow实现Skip-Gram # + deletable=true editable=true from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import random import jieba import numpy as np from six.moves import xrange import tensorflow as tf import datetime as dt # + deletable=true editable=true import jieba import re def get_stop_words(filename='./data/stopwords.txt'): stop_words = [] with open(filename, 'r', encoding='utf-8') as f: stopwords = [line[:-1] for line in f] stop_words = set(stop_words) print("获取{n}个停用词".format(n=len(stop_words))) return stop_words def get_sentences(content): content = content.decode("utf8") eng_tokens = ['!', ',', '.', '?', ';', ':', ' '] zh_tokens = ['!', ',', '。', '?', ';', ':', ' '] zh_tokens = [token.decode('utf8') for token in zh_tokens] sentences = [] line = "" for ch in content: line += ch if ch in eng_tokens or ch in zh_tokens: if len(line) > 0: sentences.append(line.replace(ch, "").encode("utf8")) line = "" if len(line) > 0: sentences.append(line.encode("utf8")) return sentences def get_raw_sentence(sentence): puncts = "[\s+\.\!\/_,$%^*()<>+\"\'\”\“]+|[+——!,。?、~@#¥%……&*()《》]+" text = re.sub(puncts.decode("utf8"), "".decode("utf8"), sentence.decode("utf8")) return text def get_words_list(filename="./data/doupocangqiong.txt"): words_list = [] with open(filename, 'r') as f: for line in f: line = line.replace('\n', '').replace(' ', '') if re.match(r'^https?:/{2}\w.+$', line) or re.match(r'^ftp:/{2}\w.+$', line): continue # 先分句 sentences = get_sentences(line) for sentence in sentences: # 去除句子中的特殊标点符号 sentence = get_raw_sentence(sentence) words = list(jieba.cut(sentence, cut_all=False)) if len(sentence) > 0 else [] 
words_list.extend(words) return words_list # + deletable=true editable=true words = get_words_list() # + deletable=true editable=true for word in words[:10]: print(word) # + deletable=true editable=true def build_dataset(words, n_words): counter = [['UNK', -1]] counter.extend(collections.Counter(words).most_common(n_words - 1)) word2index = {} for word, _ in counter: word2index[word] = len(word2index) word_num_data = [] unk_count = 0 for word in words: if word in word2index: index = word2index[word] else: index = 0 unk_count += 1 word_num_data.append(index) counter[0][1] = unk_count index2word = dict(zip(word2index.values(), word2index.keys())) return word_num_data, counter, word2index, index2word def collect_data(vocabulary_size = 10000): words = get_words_list() data, count, word2index, index2word = build_dataset(words, vocabulary_size) del words return data, count, word2index, index2word # + [markdown] deletable=true editable=true # 对于输入的文档单词序列表,需要处理成数字序列才能训练,上述就是拿到词汇表的过程,`word2index` 就是根据词查其索引的词汇表,`index2word` 是根据词的索引查词,`word_num_data` 是训练语料,但是是词对应的索引号构成的序列。`counter` 就是词频表。 # + deletable=true editable=true data_index = 0 def generate_batch(data, batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) context = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [skip_window centor_word skip_window] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window targets_to_avoid = [skip_window] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) targets_to_avoid.append(target) batch[i * num_skips + j] = buffer[skip_window] # this is the input word context[i * num_skips + j, 0] = buffer[target] # these are the context words 
buffer.append(data[data_index]) data_index = (data_index+1)%len(data) data_index = (data_index + len(data) - span) % len(data) return batch, context # + [markdown] deletable=true editable=true # 比如在 5-gram 中,"the cat sat on the", 假设输入词是 "sat", 那么上下文单词会从`['the', 'cat', 'on', 'the']` 随机选取。选取个数由参数 `num_skips` 来确定. `skip_window` 就是窗口大小,这里 5-gram 就是 2. `span` 看公式就能知道是什么意思了,就是整个一次输入和上线文总总长度。 # # 这里在采样生成训练数据的时候,我们采用的是滑动窗口,一个队列就可以完成这个算法了。整个数据流data的全局索引由全局变量 `data_index` 来控制。 # # 注意训练skip-gram 的时候,往往和论文中那个模型示例图不一样的地方是,不是输入一个词,并不会同时输出所有的上下文单词,而是只选择一个输出,这可以简化训练过程,而且不影响训练结果,因为我们认为这是独立事件。 # # train/test/validate # # - vocabulary_size = 5000 n_sampled = 100 data, counter, word2index, index2word = collect_data(vocabulary_size) # + deletable=true editable=true batch_size = 128 embedding_size = 256 # 词向量长度 skip_window = 1 num_skips = 2 # 验证集 # 从0-100 选取16个整数,对一个的是100个最高频词的索引,用这些词来评估模型学习进度 valid_size = 10 valid_window = 100 #valid_examples = np.random.choice(valid_window, valid_size, replace=False) valid_words = [u'萧炎', u'灵魂', u'火焰', u'天阶', u'云岚宗', u'乌坦城', u'惊诧', u'强者', u'实力', u'斗气'] valid_examples = [word2index[word] for word in valid_words] num_sampled = 64 word2index print(u'\u8427\u85b0\u513f') print(u'\u836f\u8001') # + deletable=true editable=true graph = tf.Graph() with graph.as_default(): # 设置模型输入输出和验证集 placeholder train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_context = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # 设置embeddings tensor, input and hidden layer embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) # shape = (V, N) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # shape (N, ) # hidden layer and output weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/math.sqrt(embedding_size))) # shape (V, N) biases = tf.Variable(tf.zeros([vocabulary_size])) hidden_out = tf.matmul(embed, tf.transpose(weights)) + 
biases # loss train_one_hot = tf.one_hot(train_context, vocabulary_size) # cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hidden_out, labels=train_one_hot)) # negative sampling 损失负采样更新权重, 这里需要注意的是,既然你只取输出label的一部分来计算损坏,那么损失看起来自然比全部label一起计算损失要小很多 cross_entropy = tf.reduce_mean(tf.nn.sampled_softmax_loss(weights, biases, train_context, embed, n_sampled, vocabulary_size)) # algorithm sgd optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(cross_entropy) # 使用余弦距离评价验证集和对应词向量之间的相似度 norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) init = tf.global_variables_initializer() # + [markdown] deletable=true editable=true # $$ # similarity = cos(\theta) = \frac{\textbf{A}\cdot\textbf{B}}{\parallel\textbf{A}\parallel_2 \parallel \textbf{B} \parallel_2} # $$ # # + deletable=true editable=true with graph.as_default(): saver = tf.train.Saver() # 文件存储 def run(graph, num_steps): config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(graph=graph, config=config) as session: init.run() print("Initialized") average_loss = 0 for step in range(num_steps): batch_inputs, batch_context = generate_batch(data, batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_context: batch_context} _, loss_val = session.run([optimizer, cross_entropy], feed_dict=feed_dict) average_loss += loss_val if step % 2000 == 0: if step > 0: average_loss /= 2000 print("Average loss at step ", step, ': ', average_loss) average_loss = 0 if step % 10000 == 0: sim = similarity.eval() for i in range(valid_size): valid_word = index2word[valid_examples[i]] top_k = 8 nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = 'Nearest to %s: ' % valid_word for k in range(top_k): close_word = index2word[nearest[k]] 
log_str = '%s %s,' % (log_str, close_word) print(log_str) final_embeddings = normalized_embeddings.eval() save_path = saver.save(session, "checkpoints/wv.model") return final_embeddings # + deletable=true editable=true num_steps = 2000000 softmax_start_time = dt.datetime.now() final_embeddings = run(graph, num_steps=num_steps) softmax_end_time = dt.datetime.now() print("Softmax method took {} minutes to run 100 iterations".format((softmax_end_time-softmax_start_time).total_seconds())) with graph.as_default(): # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([vocabulary_size])) nce_loss = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_context, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size)) optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(nce_loss) # Add variable initializer. init = tf.global_variables_initializer() # + deletable=true editable=true # Step 6: Visualize the embeddings. 
def plot_with_labels(low_dim_embs, labels, filename='tsne3.png',fonts=None): assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings" plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate(label, fontproperties=fonts, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.show() plt.savefig(filename,dpi=800) try: from sklearn.manifold import TSNE import matplotlib.pyplot as plt import matplotlib as mpl # %matplotlib inline mpl.rcParams['font.sans-serif'] = 'simhei' tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) plot_only = 500 low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [index2word[i] for i in xrange(plot_only)] plot_with_labels(low_dim_embs, labels) except ImportError: print("Please install sklearn, matplotlib, and scipy to visualize embeddings.") # + deletable=true editable=true import tensorflow as tf input = tf.Variable(tf.random_normal([100, 28, 28, 1])) filter = tf.Variable(tf.random_normal([5, 5, 1, 6])) sess = tf.Session() # + deletable=true editable=true from tensorflow.python.client import device_lib device_lib.list_local_devices() # + deletable=true editable=true
cs224n/word2vec/Word2Vec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Denoising Autoencoders And Where To Find Them # # Today we're going to train deep autoencoders and deploy them to faces and search for similar images. # # Our new test subjects are human faces from the [lfw dataset](http://vis-www.cs.umass.edu/lfw/). # # Import stuff import sys sys.path.append("..") import grading import download_utils import numpy as np from sklearn.model_selection import train_test_split from lfw_dataset import load_lfw_dataset # # Load dataset # Dataset was downloaded for you. Relevant links (just in case): # - http://www.cs.columbia.edu/CAVE/databases/pubfig/download/lfw_attributes.txt # - http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz # - http://vis-www.cs.umass.edu/lfw/lfw.tgz # we downloaded them for you, just link them here download_utils.link_week_4_resources() # + X, attr = load_lfw_dataset(use_raw=True,dimx=38,dimy=38) X = X.astype('float32') / 255.0 img_shape = X.shape[1:] X_train, X_test = train_test_split(X, test_size=0.1, random_state=42) # + # %matplotlib inline import matplotlib.pyplot as plt plt.title('sample image') for i in range(6): plt.subplot(2,3,i+1) plt.imshow(X[i]) print("X shape:",X.shape) print("attr shape:",attr.shape) # - # ### Autoencoder architecture # # Let's design autoencoder as two sequential keras models: the encoder and decoder respectively. # # We will then use symbolic API to apply and train those models. # # <img src="http://nghiaho.com/wp-content/uploads/2012/12/autoencoder_network1.png" width=640px> # # import tensorflow as tf import keras, keras.layers as L s = keras.backend.get_session() # ## First step: PCA # # Principial Component Analysis is a popular dimensionality reduction method. 
# # Under the hood, PCA decomposes the (centered) object-feature matrix $X$
# into two smaller matrices $W$ and $\hat W$ minimising the _mean squared error_:
#
# $$\|(X W) \hat{W} - X\|^2_2 \to_{W, \hat{W}} \min$$
# - $X \in \mathbb{R}^{n \times m}$ - object matrix (**centered**);
# - $W \in \mathbb{R}^{m \times d}$ - matrix of direct transformation;
# - $\hat{W} \in \mathbb{R}^{d \times m}$ - matrix of reverse transformation;
# - $n$ samples, $m$ original dimensions and $d$ target dimensions;
#
# Geometrically, we look for the $d$ "natural" axes along which most of the
# variance occurs.
#
# ![](https://upload.wikimedia.org/wikipedia/commons/thumb/9/90/PCA_fish.png/256px-PCA_fish.png)
#
# PCA can be seen as a special case of an autoencoder:
#
# * __Encoder__: X -> Dense(d units) -> code
# * __Decoder__: code -> Dense(m units) -> X
#
# where Dense is a fully-connected layer with linear activation:
# $f(X) = W \cdot X + \vec b$. The bias term handles "centering" the matrix,
# i.e. subtracting the mean.


def build_pca_autoencoder(img_shape, code_size=32):
    """Build the linear (PCA-equivalent) encoder/decoder pair described above.

    Images are flattened on the way in and reshaped back on the way out so
    both halves stay compatible with image-shaped tensors.
    """
    encoder = keras.models.Sequential([
        L.InputLayer(img_shape),
        L.Flatten(),             # image -> flat vector
        L.Dense(code_size),      # the actual linear encoder
    ])

    decoder = keras.models.Sequential([
        L.InputLayer((code_size,)),
        L.Dense(np.prod(img_shape)),  # linear decoder: height*width*3 units
        L.Reshape(img_shape),         # flat vector -> image
    ])

    return encoder, decoder


# Meld them together into one model

# +
encoder, decoder = build_pca_autoencoder(img_shape, code_size=32)

inp = L.Input(img_shape)
code = encoder(inp)
reconstruction = decoder(code)

autoencoder = keras.models.Model(inp, reconstruction)
autoencoder.compile('adamax', 'mse')
# -

autoencoder.fit(x=X_train, y=X_train, epochs=32,
                validation_data=[X_test, X_test])


def visualize(img, encoder, decoder):
    """Draws original, encoded and decoded images"""
    latent = encoder.predict(img[None])[0]
    decoded = decoder.predict(latent[None])[0]

    panels = [
        ("Original", img),
        ("Code", latent.reshape([latent.shape[-1] // 2, -1])),
        ("Reconstructed", decoded.clip(0, 1)),
    ]
    for pos, (title, picture) in enumerate(panels):
        plt.subplot(1, 3, pos + 1)
        plt.title(title)
        plt.imshow(picture)
    plt.show()


# +
score = autoencoder.evaluate(X_test, X_test, verbose=0)
print("Final MSE:", score)

for i in range(5):
    img = X_test[i]
    visualize(img, encoder, decoder)
# -

# ### Going deeper
#
# PCA is neat but surely we can do better. This time we want you to build a
# deep autoencoder by... stacking more layers.
#
# In particular, your encoder and decoder should be at least 3 layers deep
# each. You can use any nonlinearity you want and any number of hidden units
# in non-bottleneck layers provided you can actually afford training it.
#
# ![layers](https://pbs.twimg.com/media/CYggEo-VAAACg_n.png:small)
#
# A few sanity checks:
# * There shouldn't be any hidden layer smaller than bottleneck (encoder output).
# * Don't forget to insert nonlinearities between intermediate dense layers.
# * Convolutional layers are allowed but not required. To undo convolution use L.Deconv2D, pooling - L.UpSampling2D. # * Adding activation after bottleneck is allowed, but not strictly necessary. def build_deep_autoencoder(img_shape,code_size=32): """PCA's deeper brother. See instructions above""" H,W,C = img_shape encoder = keras.models.Sequential() encoder.add(L.InputLayer(img_shape)) <Your code: define encoder as per instructions above> decoder = keras.models.Sequential() decoder.add(L.InputLayer((code_size,))) <Your code: define encoder as per instructions above> return encoder,decoder # + #Check autoencoder shapes along different code_sizes get_dim = lambda layer: np.prod(layer.output_shape[1:]) for code_size in [1,8,32,128,512,1024]: encoder,decoder = build_deep_autoencoder(img_shape,code_size=code_size) print("Testing code size %i" % code_size) assert encoder.output_shape[1:]==(code_size,),"encoder must output a code of required size" assert decoder.output_shape[1:]==img_shape, "decoder must output an image of valid shape" assert len(encoder.trainable_weights)>=6, "encoder must contain at least 3 dense layers" assert len(decoder.trainable_weights)>=6, "decoder must contain at least 3 dense layers" for layer in encoder.layers + decoder.layers: assert get_dim(layer) >= code_size, "Encoder layer %s is smaller than bottleneck (%i units)"%(layer.name,get_dim(layer)) print("All tests passed!") # - # __Hint:__ if you're getting "Encoder layer is smaller than bottleneck" error, use code_size when defining intermediate layers. # # For example, such layer may have code_size*2 units. # + encoder,decoder = build_deep_autoencoder(img_shape,code_size=32) inp = L.Input(img_shape) code = encoder(inp) reconstruction = decoder(code) autoencoder = keras.models.Model(inp,reconstruction) autoencoder.compile('adamax','mse') # - # Training may take some 20 minutes. 
autoencoder.fit(x=X_train,y=X_train,epochs=32, validation_data=[X_test,X_test]) reconstruction_mse = autoencoder.evaluate(X_test,X_test,verbose=0) assert reconstruction_mse <= 0.005, "Compression is too lossy. See tips below." assert len(encoder.output_shape)==2 and encoder.output_shape[1]==32, "Make sure encoder has code_size units" print("Final MSE:", reconstruction_mse) for i in range(5): img = X_test[i] visualize(img,encoder,decoder) # __Tips:__ If you keep getting "Compression to lossy" error, there's a few things you might try: # # * Make sure it converged. Some architectures need way more than 32 epochs to converge. They may fluctuate a lot, but eventually they're going to get good enough to pass. You may train your network for as long as you want. # # * Complexity. If you already have, like, 152 layers and still not passing threshold, you may wish to start from something simpler instead and go in small incremental steps. # # * Architecture. You can use any combination of layers (including convolutions, normalization, etc) as long as __encoder output only stores 32 numbers per training object__. # # A cunning learner can circumvent this last limitation by using some manual encoding strategy, but he is strongly recommended to avoid that. # ## Denoising AutoEncoder # # Let's now make our model into a denoising autoencoder. # # We'll keep your model architecture, but change the way it trains. In particular, we'll corrupt it's input data randomly before each epoch. # # There are many strategies to apply noise. We'll implement two popular one: adding gaussian noise and using dropout. 
def apply_gaussian_noise(X,sigma=0.1): """ adds noise from normal distribution with standard deviation sigma :param X: image tensor of shape [batch,height,width,3] """ <your code here> return X + noise #noise tests theoretical_std = (X[:100].std()**2 + 0.5**2)**.5 our_std = apply_gaussian_noise(X[:100],sigma=0.5).std() assert abs(theoretical_std - our_std) < 0.01, "Standard deviation does not match it's required value. Make sure you use sigma as std." assert abs(apply_gaussian_noise(X[:100],sigma=0.5).mean() - X[:100].mean()) < 0.01, "Mean has changed. Please add zero-mean noise" plt.subplot(1,4,1) plt.imshow(X[0]) plt.subplot(1,4,2) plt.imshow(apply_gaussian_noise(X[:1],sigma=0.01)[0]) plt.subplot(1,4,3) plt.imshow(apply_gaussian_noise(X[:1],sigma=0.1)[0]) plt.subplot(1,4,4) plt.imshow(apply_gaussian_noise(X[:1],sigma=0.5)[0]) # + encoder,decoder = build_deep_autoencoder(img_shape,code_size=512) assert encoder.output_shape[1:]==(512,), "encoder must output a code of required size" inp = L.Input(img_shape) code = encoder(inp) reconstruction = decoder(code) autoencoder = keras.models.Model(inp,reconstruction) autoencoder.compile('adamax','mse') # - for i in range(50): print("Epoch %i/50, Generating corrupted samples..."%i) X_train_noise = apply_gaussian_noise(X_train) X_test_noise = apply_gaussian_noise(X_test) autoencoder.fit(x=X_train_noise,y=X_train,epochs=1, validation_data=[X_test_noise,X_test]) # __Note:__ if it hasn't yet converged, increase the number of iterations. # # __Bonus:__ replace gaussian noise with masking random rectangles on image. 
denoising_mse = autoencoder.evaluate(apply_gaussian_noise(X_test),X_test,verbose=0) print("Final MSE:", denoising_mse) for i in range(5): img = X_test[i] visualize(img,encoder,decoder) encoder.save("./encoder.h5") decoder.save("./decoder.h5") # ### Submit to Coursera from submit import submit_autoencoder submission = build_deep_autoencoder(img_shape,code_size=71) submit_autoencoder(submission, reconstruction_mse, 2<email>, <token>) # ### Image retrieval with autoencoders # # So we've just trained a network that converts image into itself imperfectly. This task is not that useful in and of itself, but it has a number of awesome side-effects. Let's see it in action. # # First thing we can do is image retrieval aka image search. We we give it an image and find similar images in latent space. # # To speed up retrieval process, we shall use Locality-Sensitive Hashing on top of encoded vectors. We'll use scikit-learn's implementation for simplicity. In practical scenario, you may want to use [specialized libraries](https://erikbern.com/2015/07/04/benchmark-of-approximate-nearest-neighbor-libraries.html) for better performance and customization. 
images = X_train codes = <encode all images> assert len(codes) == len(images) from sklearn.neighbors import LSHForest lshf = LSHForest(n_estimators=50).fit(codes) def get_similar(image, n_neighbors=5): assert image.ndim==3,"image must be [batch,height,width,3]" code = encoder.predict(image[None]) (distances,),(idx,) = lshf.kneighbors(code,n_neighbors=n_neighbors) return distances,images[idx] def show_similar(image): distances,neighbors = get_similar(image,n_neighbors=11) plt.figure(figsize=[8,6]) plt.subplot(3,4,1) plt.imshow(image) plt.title("Original image") for i in range(11): plt.subplot(3,4,i+2) plt.imshow(neighbors[i]) plt.title("Dist=%.3f"%distances[i]) plt.show() #smiles show_similar(X_test[2]) #ethnicity show_similar(X_test[500]) #glasses show_similar(X_test[66]) # ## Bonus: cheap image morphing # # + for _ in range(5): image1,image2 = X_test[np.random.randint(0,len(X_test),size=2)] code1, code2 = encoder.predict(np.stack([image1,image2])) plt.figure(figsize=[10,4]) for i,a in enumerate(np.linspace(0,1,num=7)): output_code = code1*(1-a) + code2*(a) output_image = decoder.predict(output_code[None])[0] plt.subplot(1,7,i+1) plt.imshow(output_image) plt.title("a=%.2f"%a) plt.show() # - # Of course there's a lot more you can do with autoencoders. # # If you want to generate images from scratch, however, we recommend you our honor track seminar about generative adversarial networks.
1 Introduction to Deep Learning/week4/Autoencoders-task.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: carecur # language: python # name: carecur # --- # # Example: Predicting Daily Curtailment Events # # Process Overview: # # 1. Label curtailment events (i.e. define a decision boundary) # 2. Partition historic data into training and test sets # 1. Spring Season (Feb-May) # 3. Fit a statistical model to the training data # 4. Predict against the test data # 5. Evaluate the performance of the model against known labels in the test data. # # + import pandas as pd from dask import dataframe as dd import statsmodels.formula.api as smf import statsmodels.api as sm from src.conf import settings # - # ## Early Spring # # In our exploratory data analysis, we found evidence of seasonality being a strong factor in influencing curtailment intensity, being particularly noticeable in early spring. Here we exam only Feb - May curtailment to derive a model accordingly. # + # Read curtailment data, aggregate to dailies. df = pd.concat( [ pd.read_parquet( settings.DATA_DIR / f"processed/caiso/{y}.parquet" ) for y in range(2017, 2020) ] ) df.columns = df.columns.str.lower().str.replace(" ", "_") df.index = df.index.tz_convert("US/Pacific") df = df.groupby(pd.Grouper(freq="D")).sum() # Hourly resampling is a bad idea because hour-to-hour effects are co-correlated # df = df[columns].groupby(pd.Grouper(freq="H")).sum() df.reset_index(inplace=True) # - df.dtypes df.head() # Subset curtailments data to relevant time range df = df[df["timestamp"].dt.month.isin(range(2,6))] # Analysis Period of Dataset df["timestamp"].describe() # + # Label Data - based on our EDA, we might start by "guessing" a threshold of importance of .05 # Later methods will be less biased, and allow for more variance. # TODO: Try to find natural clusterings through an unsupervised process to label the dataset, and try to predict those labels. 
# A day is a "curtailment event" when > 10% of solar output was curtailed.
df["curtailment_event"] = pd.Categorical(df["solar_curtailment"]/df["solar"] > .1)
# NOTE(review): weekday 5/6 are Saturday/Sunday, so this flag actually marks
# *weekends* despite the name. The model only needs a binary indicator, so the
# fit is unaffected, but the name is misleading -- confirm intent before
# renaming (the model formulas below reference `is_weekday`).
df["is_weekday"] = pd.Categorical(df["timestamp"].dt.weekday.isin([5, 6]))
# -

# Merge Weather Data
# Use Day-Ahead forecasts

forecasts = [
    *(settings.DATA_DIR / f"interim/gfs/ca/gfs_3_201[7-9][01][2-5]*_0000_{i*3:03}.parquet" for i in range(5, 10))
]
dayahead_weather = dd.read_parquet(forecasts).compute()
dayahead_weather["timestamp"] = dayahead_weather["valid_time"].dt.tz_localize("UTC").dt.tz_convert("US/Pacific")

dayahead_weather.head()

dayahead_weather.iloc[0]

dayahead_weather.dtypes

# Take an average over all datapoints (no weighting)
# FIXME: 100 We need to be more thoughtful about how to integrate this data.
# It should be weighted somehow, or perhaps certain locations are expressed as their own IV.
# Look into Uitlity CZs
dayahead_hourly = dayahead_weather.groupby(pd.Grouper(key="timestamp", freq="H"))[["t", "dswrf", "uswrf", "gust", "SUNSD"]].mean().interpolate().reset_index()

dayahead_daily = dayahead_weather.groupby(pd.Grouper(key="timestamp", freq="D")).agg({"t": "mean", "dswrf": "mean", "uswrf": "mean", "SUNSD": "sum"})

data = df.merge(dayahead_daily, on="timestamp", how="inner")

data

# # Single Model Run

# 70/30 train/test split by random sampling of days.
test_data = data.sample(int(len(data)*.3//1))
training_data = data[~data.index.isin(test_data.index)]

model = "C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + t"

result = smf.glm(
    model,
    training_data,
    family=sm.families.Binomial()
).fit()

result.summary()

# +
predictions = result.predict(test_data.drop(columns=["curtailment_event"]))
predictions.name = "probability"
predictions = test_data.merge(predictions, left_index=True, right_index=True)

cutoff = .7

# BUG FIX: the original swapped the false_positives / false_negatives labels
# (a predicted-positive with actual False is a false POSITIVE, not a false
# negative), so the printed "precision" was actually recall. Confusion-matrix
# cells are now derived once per predicted class, with .get(..., 0) so an
# absent class does not raise KeyError.
positive_preds = predictions.query("probability > @cutoff")["curtailment_event"].value_counts()
negative_preds = predictions.query("probability <= @cutoff")["curtailment_event"].value_counts()

true_positives = positive_preds.get(True, 0)
false_positives = positive_preds.get(False, 0)
true_negatives = negative_preds.get(False, 0)
false_negatives = negative_preds.get(True, 0)

accuracy = (true_positives+true_negatives)/len(predictions)
precision = true_positives / (true_positives + false_positives)
print(f"Accuracy: {accuracy}; Precision: {precision}")
# -

# Base rate of positive labels in the test set.
predictions["curtailment_event"].astype(bool).sum()/len(predictions["curtailment_event"])

1 - predictions["probability"].mean()

# # Multi-Model Run
#
# TODO:
#
# - Try a k-fold approach instead. Divide the training set into K folds (without resampling), use k-1 folds as the training data and withold 1 fold. Run the model k times witholding the next fold. Calculate your accuracy metrics based on the witheld data as the "test" data within each folded run.
#
# Other Notes:
#
# - Folding is difficult to do when predicting low-frequency events. Each fold may contain a consecutive set of rows that contain no curtailment events, effectively limiting our predictions to only consider or attempt to predict 1 type of event.
def simulate(model, data, cutoff=.8):
    """Fit a binomial GLM on a random 80/20 split and score the held-out 20%.

    Parameters
    ----------
    model : str
        Patsy formula with ``C(curtailment_event)`` on the left-hand side.
    data : pandas.DataFrame
        Must contain a ``curtailment_event`` column plus every term in `model`.
    cutoff : float
        Probability threshold above which a row counts as a predicted event.

    Returns
    -------
    dict
        ``results`` (the fitted statsmodels results object), ``accuracy`` and
        ``precision`` scalars computed on the held-out rows.
    """
    test_data = data.sample(int(len(data)*.2//1))
    training_data = data[~data.index.isin(test_data.index)]
    result = smf.glm(
        model,
        training_data,
        family=sm.families.Binomial()
    ).fit()

    predictions = result.predict(test_data.drop(columns=["curtailment_event"]))
    predictions.name = "probability"
    # NOTE(review): this flips the predicted probability, while the
    # single-model run above does not — presumably to orient the score as
    # P(event=True) given how patsy codes the categorical response. Confirm
    # the orientation against `result.predict`'s reference level.
    predictions = 1 - predictions
    predictions = test_data.merge(predictions, left_index=True, right_index=True)

    positive_predictions = predictions.query("probability > @cutoff")["curtailment_event"].value_counts()
    negative_predictions = predictions.query("probability <= @cutoff")["curtailment_event"].value_counts()
    # `.get(label, 0)` replaces the original mix of `.loc[True]` and `[True]`:
    # one uniform, robust lookup that falls back to 0 for a missing class.
    true_positives = positive_predictions.get(True, 0)
    false_positives = positive_predictions.get(False, 0)
    true_negatives = negative_predictions.get(False, 0)

    accuracy = (true_positives + true_negatives) / len(predictions)
    # Guard against a draw where nothing is predicted positive (0/0 would raise).
    predicted_positive = true_positives + false_positives
    precision = true_positives / predicted_positive if predicted_positive else float("nan")
    return {"results": result, "accuracy": accuracy, "precision": precision}


def run_trials(model, data, cutoff=.8, n=100):
    """Run `simulate` `n` times and print the mean accuracy/precision.

    Returns ``(runs, frame)``: the list of per-run dicts from `simulate` and a
    DataFrame built from them — matching the ``modelN`` / ``resultsN`` pairs
    the original copy-pasted cells produced.
    """
    runs = [simulate(model, data, cutoff=cutoff) for _ in range(n)]
    frame = pd.DataFrame(runs)
    print("Accuracy : {}".format(frame["accuracy"].mean()),
          "Precision : {}".format(frame["precision"].mean()))
    return runs, frame


# Run this 100 times:
model1, results1 = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + load + t + dswrf", data, cutoff=.7)

model1[1]["results"].summary()

# Run this 100 times:
model2, results2 = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + load", data, cutoff=.7)

# Run this 100 times:
model3, results3 = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + t * dswrf", data, cutoff=.8)

# Run this 100 times:
model4, results4 = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + t + dswrf", data, cutoff=.7)

# Run this 100 times:
model5, results5 = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + SUNSD", data, cutoff=.7)

# Run this 100 times:
model_, results_ = run_trials("C(curtailment_event) ~ C(timestamp.dt.month) + C(is_weekday) + t", data, cutoff=.7)

# # Visualize Results
#
# TODO:
#
# - Histogram of probabilities on days where there is or isn't a "substantial" curtailment event

model_[0]

# +
import altair as alt

alt.Chart()
# -
notebooks/1-ttu-logistic-weather.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Qoqo # Quantum Operation Quantum Operation # Yes, we use [reduplication](https://en.wikipedia.org/wiki/Reduplication) # # What Qoqo is # * A toolkit to represent quantum operations and circuits # * A thin runtime to run quantum measurements # * A way to serialize quantum circuits and measurement information # * A set of optional interfaces to devices, simulators and toolkits # # What Qoqo is not # * A decomposer translating circuits to a specific set of gates # * A quantum circuit optimizer # * A collection of quantum algorithms # # + from qoqo.operations import RotateZ, RotateX gate1 = RotateZ(qubit=0, theta=1) gate2 = RotateX(qubit=0, theta=1) # multiplied = gate1.mul(gate2) print("Multiplied gate: ", gate1) # - # ## A simple circuit and measurement # # We show the construction of a simple entangling circuit and an observable measurement based on this circuit # ### Entangling circuit snippet # Similar to many other toolkits the unitary entangling circuit can be constructed by adding operations to a circuit # + from qoqo import Circuit from qoqo import operations as ops circuit_snippet = Circuit() circuit_snippet += ops.Hadamard(qubit=0) circuit_snippet += ops.CNOT(control=0, target=1) print(circuit_snippet) print(len(circuit_snippet)) print(circuit_snippet.get_operation_types()) assert len(circuit_snippet) == 2 assert circuit_snippet.get_operation_types() == set(['Hadamard', 'CNOT']) # - # ### Measuring qubits # Qoqo uses classical registers for the readout. We need to add a classical register definition to the circuit and a measurement statement. # The number of projective measurements can be directly set in the circuit. # The simulation and measurement of the circuit is handled by the qoqo_quest interface (in this example). 
# + from qoqo_quest import Backend from qoqo import Circuit from qoqo import operations as ops circuit = Circuit() circuit += ops.DefinitionBit(name='ro', length=2, is_output=True) circuit += ops.Hadamard(qubit=0) circuit += ops.CNOT(control=0, target=1) circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=10, qubit_mapping=None) backend = Backend(number_qubits=2) (result_bit_registers, result_float_registers, result_complex_registers) = backend.run_circuit(circuit) for single_projective_measurement in result_bit_registers['ro'] : print(single_projective_measurement) assert len(result_bit_registers['ro']) == 10 # - # ### Measuring Observables # Qoqo includes the direct evaluation of projective measurements to an observable measurement e.g. 3 * < Z0 > + < Z0 Z1 > # The measurement is defined by a set of expectation values of a product of pauli operators and a matrix that combines the expectation values # + from qoqo.measurements import PauliZProductInput, PauliZProduct from qoqo import QuantumProgram from qoqo_quest import Backend from qoqo import Circuit from qoqo import operations as ops import numpy as np import scipy.sparse as sp circuit = Circuit() circuit += ops.DefinitionBit(name='ro', length=2, is_output=True) circuit += ops.PauliX(qubit=0) #circuit += ops.Hadamard(qubit=0) circuit += ops.CNOT(control=0, target=1) circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=10, qubit_mapping=None) measurement_input = PauliZProductInput(number_qubits=2, use_flipped_measurement=False) index0 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0]) index1 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0,1]) # From readout 'ro' measure two pauli products 0: < Z0 > and 1: < Z0 Z1 > measurement_input.add_linear_exp_val(name="example", linear={0:3.0, 1: 1.0}) # One expectation value: 3 * pauli_product0 + 1 * pauli_product1 measurement = PauliZProduct(input=measurement_input, 
circuits=[circuit], constant_circuit=None ) backend = Backend(number_qubits=2) program = QuantumProgram(measurement=measurement, input_parameter_names=[]) res = program.run(backend)["example"] print("Result of QuantumProgram", res) assert res > -4.0 * 10 assert res < 4.0 * 10 # - # ### De/Serializing the quantum program # # Same procedure as introduced in the example before, but now the measurement, and afterwards the quantum program, are serialized to and de-serialized from json. The measurement result is compared before and after the de/-serialization. # + from qoqo.measurements import PauliZProductInput, PauliZProduct from qoqo import QuantumProgram from qoqo_quest import Backend from qoqo import Circuit from qoqo import operations as ops import numpy as np import scipy.sparse as sp circuit = Circuit() circuit += ops.DefinitionBit(name='ro', length=2, is_output=True) circuit += ops.PauliX(qubit=0) circuit += ops.CNOT(control=0, target=1) circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=10, qubit_mapping=None) measurement_input = PauliZProductInput(number_qubits=2, use_flipped_measurement=False) index0 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0]) index1 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0,1]) # From readout 'ro' measure two pauli products 0: < Z0 > and 1: < Z0 Z1 > measurement_input.add_linear_exp_val(name="example", linear={0:3.0, 1: 1.0}) # One expectation value: 3 * pauli_product0 + 1 * pauli_product1 measurement = PauliZProduct(input=measurement_input, circuits=[circuit], constant_circuit=None ) backend = Backend(number_qubits=2) program = QuantumProgram(measurement=measurement, input_parameter_names=[]) measurement_json = measurement.to_json() assert measurement_json != "" measurement_new = PauliZProduct.from_json(measurement_json) print("De/Serialization of PauliZProduct performed successfully.") program_json = program.to_json() assert program_json != "" 
program_new = QuantumProgram.from_json(program_json) print("De/Serialization of QuantumProgram performed successfully.") # - # ## Fine control over decoherence # Qoqo allows full control over decoherence by placing decoherence operations in the circuit on the same level as gates. # Example: Letting only one qubit decay. # The backend automatically switches from statevector simulation to density matrix simulation in the presence of noise. # + from qoqo import QuantumProgram from qoqo_quest import Backend from qoqo import Circuit from qoqo import operations as ops damping = 0.1 number_measurements = 100 circuit = Circuit() circuit += ops.DefinitionBit(name='ro', length=2, is_output=True) circuit += ops.PauliX(qubit=0) circuit += ops.PauliX(qubit=1) circuit += ops.PragmaDamping(qubit=0, gate_time=1, rate=damping) circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=number_measurements, qubit_mapping=None) print(circuit) backend = Backend(number_qubits=2) (result_bit_registers, result_float_registers, result_complex_registers) = backend.run_circuit(circuit) sum_test = np.array([0.0, 0.0]) for single_projective_measurement in result_bit_registers['ro']: #print(single_projective_measurement) sum_test += single_projective_measurement scaled_result = sum_test/number_measurements print("Scaled result", scaled_result) assert len(scaled_result) == 2 # - # ## Symbolic parameters # In many cases, operation parameters depend on a symbolic parameter of the whole quantum program (time in time-evolution, overrotation, variational parameters...) # Qoqo allows the fast calculation of symbolic parameter expressions. # Expressions are provided in string form. # QuantumProgram can automatically replace symbolic parameters using call parameters. 
# ### Writing the symbolic circuit and replacing symbolic parameters # + from qoqo import Circuit from qoqo import operations as ops circuit = Circuit() print('Symbolic circuit') circuit += ops.RotateX(qubit=0, theta='3*time+offset') print(circuit) circuit2 = circuit.substitute_parameters({'time': 1/3, 'offset':1}) print('After substitution') print(circuit2) # - # ### Symbolic parameters in a full quantum program # + from qoqo.measurements import PauliZProductInput, PauliZProduct from qoqo import QuantumProgram from qoqo_quest import Backend from qoqo import Circuit from qoqo import operations as ops import numpy as np import scipy.sparse as sp number_measurements = 100000 circuit = Circuit() circuit += ops.DefinitionBit(name='ro', length=2, is_output=True) circuit += ops.RotateX(qubit=0, theta='3*time+offset') circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=number_measurements, qubit_mapping=None) measurement_input = PauliZProductInput(number_qubits=2, use_flipped_measurement=False) index0 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0]) index1 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[0,1]) # From readout 'ro' measure two pauli products 0: < Z0 > and 1: < Z0 Z1 > measurement_input.add_linear_exp_val(name="example", linear={0:3.0, 1: 1.0}) # One expectation value: 3 * pauli_product0 + 1 * pauli_product1 measurement = PauliZProduct(input=measurement_input, circuits=[circuit], constant_circuit=None ) backend = Backend(number_qubits=2) program = QuantumProgram(measurement=measurement, input_parameter_names=['time', 'offset']) # The symbolic parameter is the free parameter result = program.run(backend,[0.5, 0]) print("Result", result) assert len(result) == 1 # - # ## Testing scaling performance with qoqo_mock # Quantum simulators cannot simulate systems with a significant number of qubits fast enough to benchmark qoqo with a large number of qubits and operations. 
# The qoqo_mock interface can be used to benchmark qoqo without simulating a quantum computer.

# +
from qoqo.measurements import PauliZProductInput, PauliZProduct
from qoqo import QuantumProgram
from qoqo_mock import MockedBackend
from qoqo import Circuit
from qoqo import operations as ops
import numpy as np
import timeit

# Default values are small to reduce load for automated testing uncomment values to test large systems
number_measurements = 10  # 1000
number_operations = 100  # 1000000
number_qubits = 5  # 500

circuit = Circuit()
circuit += ops.DefinitionBit(name='ro', length=number_qubits, is_output=True)
# Build a random circuit of `number_operations` gates.
# FIX: target qubits are drawn from the configured register size; the original
# hard-coded randint(0, 500, ...), addressing qubits far outside the 5-qubit
# register used for automated testing.
for i, q in zip(np.random.randint(0, 4, number_operations), np.random.randint(0, number_qubits, number_operations)):
    if i == 0:
        circuit += ops.RotateX(qubit=q, theta="4*theta_x")
    if i == 1:
        circuit += ops.RotateY(qubit=q, theta="2*theta_y")
    if i == 2:
        circuit += ops.RotateZ(qubit=q, theta="3*theta_z")
    # FIX: randint(0, 4) yields 0..3, so the original `i == 4` branch was
    # unreachable and no ControlledPauliZ was ever added. The extra `q != 0`
    # guard avoids a degenerate gate whose control and target coincide.
    if i == 3 and q != 0:
        circuit += ops.ControlledPauliZ(qubit=q, control=0)
circuit += ops.PragmaRepeatedMeasurement(readout='ro', number_measurements=number_measurements, qubit_mapping=None)

pp_dict = dict()
measurement_input = PauliZProductInput(number_qubits=number_qubits, use_flipped_measurement=False)
for i in range(number_qubits):
    index0 = measurement_input.add_pauliz_product(readout="ro", pauli_product_mask=[i])
    # FIX: record each qubit's pauli-product index. The original wrote
    # `pp_dict[number_qubits] = i`, overwriting one single key every pass.
    pp_dict[i] = index0

measurement_input.add_linear_exp_val(name="example", linear={0:1.0})

measurement = PauliZProduct(input=measurement_input,
                            circuits=[circuit],
                            constant_circuit=None
                            )

backend = MockedBackend(number_qubits=number_qubits)
program = QuantumProgram(measurement=measurement, input_parameter_names=['theta_x', 'theta_y', 'theta_z'])
res = program.run(backend, [0, 1, 2])
print("Result", res)

time_taken = timeit.timeit('program.run(backend, [0,1,2])', globals=globals(), number=1)
print("Time taken", time_taken)

assert len(res) == 1
assert time_taken < 30
qoqo/Intro_to_qoqo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="duWPWgPsdhUu" # # Fill-Mask using RoBERTa large model # # - # This Code Template is to perform Mask filling operation in python using HuggingFace library. In this template RoBERTa Model is utilized.RoBERTa is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. # ### Required Packages # + id="3fFR9LMeDv7y" # !pip install transformers # + id="8o7AEzWoEPeS" from transformers import pipeline # + [markdown] id="5FwilAgkdyqI" # ### RoBERTA # # RoBERTa is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. # # More precisely, it was pretrained with the Masked language modeling (MLM) objective. Taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. 
# # + colab={"base_uri": "https://localhost:8080/", "height": 262, "referenced_widgets": ["8bf396b898e24a2f945efe3807de8dd7", "c230175a5b684c57a0c55a47b8b6cdf7", "5b9b62d0841b429f83839baafff293e5", "6065b893d74043628aa321a77b8cb37d", "e0e4622086d047ff902f136138d8731f", "33ce2690901a413a9f035612737d2088", "2ca3ac29a8164f4691b9b42e72ff47ba", "<KEY>", "<KEY>", "1cc4695c06c94e2485d10cbd1134b629", "2a3e6a621cac46239527ae511416fae2", "fc376a8e0fba41d78caf1fe25a0af2fd", "12472e3da45e48e4b4e62f028ae15ee4", "<KEY>", "<KEY>", "bec577df41f94994816ff3bd27ff29c4", "59cec1adba33489bba291171a59fd10f", "0cc6d837b5484c1a9989944dc12cee2f", "<KEY>", "225a10e2bfe942afb26eeeba7dd94454", "<KEY>", "<KEY>", "b3bed7cbabe548de8954bd3f77af364f", "48caaffde165444e81309c8660c8fe3a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "1038acdf371e48519a13243cdaba7c51", "<KEY>", "<KEY>", "4aaace3540d44d4b9295581275444595", "0ddd7c3d124f4f4c8a920be370fe8437", "<KEY>", "d153a40400d449269d37d92f610a4f39", "ebaab8c08ccf425db0a565e49368ebbf", "<KEY>", "c631f9adb9554a068f5febe023aafbb1", "<KEY>"]} id="mGJLtvlNER7p" outputId="8865af79-8823-424d-a273-b8afcc180cc9" unmasker = pipeline('fill-mask', model='roberta-large') # + [markdown] id="N-4zGKpmgu4a" # # # # ### Fill Mask # + colab={"base_uri": "https://localhost:8080/"} id="OIudVNThEVYR" outputId="cdf40cee-db2d-4b35-ef96-1fc1974e0a6e" unmasker("how are is that <mask> even.")
Natural Language Processing/NLP/FillMaskRoBERTa_Large.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import json import time import os import csv from scipy.stats import linregress # Import API key from api_keys import api_key # set up the query URL #api_key = api_keys.api_key url = 'http://api.openweathermap.org/data/2.5/weather?units=Imperial&appid=' + api_key print(url) # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = (os.path.join('cities.csv')) # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # + # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) # change to 1500 at end lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # replace space with a '%20' to correct the URL city = city.replace(' ', '%20') # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count & check lat lon output #len(cities) #cities #print([lat_lng for lat_lng in zip(lats, lngs)]) # - # PERFORM TEST API CALL response = requests.get(f'{url}&q={city}').json() # test response #response # + # Create emtpty lists to append all of the called API data into cityName = [] country = [] date = [] tempF = [] humidity = [] clouds = [] windMPH = [] lat = [] lon = [] # initiate call counter count = 1 # Logging statement print(f'Retrieving Data') 
print(f'-------------------------------')

# Request current weather for every generated city, appending each field to
# its list; cities the API does not know are skipped.
for city in cities:
    # Set up a try loop in case coordinates generated have no city associated
    try:
        response = requests.get(f'{url}&q={city}').json()
        cityName.append(response['name'])
        clouds.append(response['clouds']['all'])
        country.append(response['sys']['country'])
        date.append(response['dt'])
        humidity.append(response['main']['humidity'])
        tempF.append(response['main']['temp_max'])
        lat.append(response['coord']['lat'])
        lon.append(response['coord']['lon'])
        windMPH.append(response['wind']['speed'])
        cityRecord = (response['name'])
        print(f'Record Processing {count} | {cityRecord}')
        print(f'{url}&q={city}')
        # Increase counter by one
        count = count + 1
        # Wait a second in loop to not over exceed rate limit
        time.sleep(1.01)
    # FIX: narrowed from a bare `except:` so Ctrl-C / SystemExit still stop
    # the loop; a missing key ("city not found") or request error is skipped.
    except Exception:
        print('City not found...skipping...')
        # If no record found "skip" to next call
        continue

# +
# FIX: the original dict literal listed the 'Humidity' key twice; duplicate
# keys in a literal silently collapse, so the redundant entry is dropped.
weatherDictionary = {'Country': country,
                     'City': cityName,
                     'Date': date,
                     'Temperature': tempF,
                     'Cloud Density': clouds,
                     'Humidity': humidity,
                     'Wind(MPH)': windMPH,
                     'Latitude': lat,
                     'Longitude': lon}
weatherDF = pd.DataFrame(weatherDictionary)
weatherDF.to_csv(os.path.join('weatherData.csv'))
weatherDF.head()


# +
def _scatter_vs_latitude(column, title, ylabel, filename):
    """Scatter `weatherDF[column]` against latitude, save the figure, show it."""
    plt.scatter(weatherDF['Latitude'], weatherDF[column], marker='o', s=10)
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Latitude')
    plt.grid(True)
    plt.savefig(os.path.join(filename))
    plt.show()


_scatter_vs_latitude('Temperature', 'City Latitude vs. Max Temperature',
                     'Max. Temperature (F)', 'temperature-VS-latitude.png')

# +
_scatter_vs_latitude('Humidity', 'City Latitude vs. Humidity',
                     'Humidity (%)', 'humidity-VS-latitude.png')

# +
# (filename typo 'cloudDeinsity' kept so existing artifacts keep their names)
_scatter_vs_latitude('Cloud Density', 'City Latitude vs. Cloudiness',
                     'Cloudiness (%)', 'cloudDeinsity-VS-latitude.png')

# +
_scatter_vs_latitude('Wind(MPH)', 'City Latitude vs. Wind Speed',
                     'Wind Speed (MPH)', 'windSpeed-VS-latitude.png')
# -

north = weatherDF[weatherDF['Latitude'] >= 0]
south = weatherDF[weatherDF['Latitude'] < 0]


# +
def _regress_and_plot(x, y, ylabel, facecolor, filename):
    """Scatter `y` vs `x`, overlay the least-squares fit, save, print stats.

    Replaces eight copy-pasted regression cells; behavior per cell is
    unchanged apart from the fixes noted inline.
    """
    slope, intercept, r_value, p_value, std_err = linregress(x, y)
    regression = slope * x + intercept
    plt.scatter(x, y, edgecolors="black", facecolors=facecolor)
    plt.plot(x, regression, "--", color="black")
    plt.xlabel("Latitude")
    plt.ylabel(ylabel)
    # NOTE(review): `b=` was renamed `visible=` in matplotlib 3.6; kept as-is
    # for compatibility with the environment this notebook was written in.
    plt.grid(b=True, which="major", axis="both", linestyle="-", color="lightgrey")
    plt.savefig(os.path.join(filename))
    # FIX: linregress returns the correlation coefficient r; the original
    # printed r itself while labeling it "r-squared". Square it.
    print(f"The r-squared is {r_value**2}")
    print(f"The regression expression is: Y = {slope}*x + {intercept}")
    print(f"The p-value is {p_value}")
    plt.show()


_regress_and_plot(north["Latitude"], north["Temperature"], "Max Temperature",
                  "tomato", 'Regression(north)_Lat_vs_MaxTemp(fig5).png')

# +
_regress_and_plot(south["Latitude"], south["Temperature"], "Max Temperature (F)",
                  "plum", 'Regression(south)_Lat_vs_MaxTemp(fig6).png')

# +
_regress_and_plot(north["Latitude"], north["Humidity"], "Humidity",
                  "skyblue", 'Regression(north)_Lat_vs_Humidity(fig7).png')

# +
_regress_and_plot(south["Latitude"], south["Humidity"], "Humidity",
                  "dodgerblue", 'Regression(south)_Lat_vs_Humidity(fig8).png')

# +
_regress_and_plot(north["Latitude"], north["Cloud Density"], "Cloudiness",
                  "seagreen", 'Regression(north)_Lat_vs_Cloudiness(fig9).png')

# +
_regress_and_plot(south["Latitude"], south["Cloud Density"], "Cloudiness",
                  "lime", 'Regression(south)_Lat_vs_Cloudiness(fig10).png')

# +
_regress_and_plot(north["Latitude"], north["Wind(MPH)"], "Wind Speed",
                  "gold", 'Regression(north)_Lat_vs_Wind_Speed(fig11).png')

# +
# FIX: the original saved this southern-hemisphere figure over the northern
# one ('Regression(north)_..._(fig11).png'); give it its own south/fig12 name.
_regress_and_plot(south["Latitude"], south["Wind(MPH)"], "Wind Speed",
                  "khaki", 'Regression(south)_Lat_vs_Wind_Speed(fig12).png')
# -

# WeatherPy
#
# Analysis
#
# Weather becomes warmer when approaching the equator and pulling in data at this time of year shows that the Southern Hemisphere on average is warmer than the Northern, due to the tilt of the earth.
# There is no strong correlation between latitude and cloudiness or wind.
# It is interesting to see that there does not appear to be any correlation between Latitude and Humidity either, despite a clear correlation between Latitude and Temperature.
WeatherPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![AWS Data Wrangler](_static/logo.png "AWS Data Wrangler")](https://github.com/awslabs/aws-data-wrangler) # # Amazon S3 # ## Table of Contents # * [1. CSV files](#1.-CSV-files) # * [1.1 Writing CSV files](#1.1-Writing-CSV-files) # * [1.2 Reading single CSV file](#1.2-Reading-single-CSV-file) # * [1.3 Reading multiple CSV files](#1.3-Reading-multiple-CSV-files) # * [1.3.1 Reading CSV by list](#1.3.1-Reading-CSV-by-list) # * [1.3.2 Reading CSV by prefix](#1.3.2-Reading-CSV-by-prefix) # * [2. JSON files](#2.-JSON-files) # * [2.1 Writing JSON files](#2.1-Writing-JSON-files) # * [2.2 Reading single JSON file](#2.2-Reading-single-JSON-file) # * [2.3 Reading multiple JSON files](#2.3-Reading-multiple-JSON-files) # * [2.3.1 Reading JSON by list](#2.3.1-Reading-JSON-by-list) # * [2.3.2 Reading JSON by prefix](#2.3.2-Reading-JSON-by-prefix) # * [3. Parquet files](#3.-Parquet-files) # * [3.1 Writing Parquet files](#3.1-Writing-Parquet-files) # * [3.2 Reading single Parquet file](#3.2-Reading-single-Parquet-file) # * [3.3 Reading multiple Parquet files](#3.3-Reading-multiple-Parquet-files) # * [3.3.1 Reading Parquet by list](#3.3.1-Reading-Parquet-by-list) # * [3.3.2 Reading Parquet by prefix](#3.3.2-Reading-Parquet-by-prefix) # * [4. Fixed-width formatted files (only read)](#4.-Fixed-width-formatted-files-%28only-read%29) # * [4.1 Reading single FWF file](#4.1-Reading-single-FWF-file) # * [4.2 Reading multiple FWF files](#4.2-Reading-multiple-FWF-files) # * [4.2.1 Reading FWF by list](#4.2.1-Reading-FWF-by-list) # * [4.2.2 Reading FWF by prefix](#4.2.2-Reading-FWF-by-prefix) # * [5. Excel files](#5.-Excel-files) # * [5.1 Writing Excel file](#5.1-Writing-Excel-file) # * [5.2 Reading Excel file](#5.2-Reading-Excel-file) # * [6. 
Reading with lastModified filter](#6.-Reading-with-lastModified-filter) # * [6.1 Define the Date time with UTC Timezone](#6.1-Define-the-Date-time-with-UTC-Timezone) # * [6.2 Define the Date time and specify the Timezone](#6.2-Define-the-Date-time-and-specify-the-Timezone) # * [6.3 Read json using the LastModified filters](#6.3-Read-json-using-the-LastModified-filters) # * [7. Download Objects](#7.-Download-objects) # * [7.1 Download object to a file path](#7.1-Download-object-to-a-file-path) # * [7.2 Download object to a file-like object in binary mode](#7.2-Download-object-to-a-file-like-object-in-binary-mode) # * [8. Upload Objects](#8.-Upload-objects) # * [8.1 Upload object from a file path](#8.1-Upload-object-from-a-file-path) # * [8.2 Upload object from a file-like object in binary mode](#8.2-Upload-object-from-a-file-like-object-in-binary-mode) # * [9. Delete objects](#7.-Delete-objects) # # + import awswrangler as wr import pandas as pd import boto3 import pytz from datetime import datetime df1 = pd.DataFrame({ "id": [1, 2], "name": ["foo", "boo"] }) df2 = pd.DataFrame({ "id": [3], "name": ["bar"] }) # - # ## Enter your bucket name: import getpass bucket = getpass.getpass() # # 1. CSV files # ## 1.1 Writing CSV files # + path1 = f"s3://{bucket}/csv/file1.csv" path2 = f"s3://{bucket}/csv/file2.csv" wr.s3.to_csv(df1, path1, index=False) wr.s3.to_csv(df2, path2, index=False); # - # ## 1.2 Reading single CSV file wr.s3.read_csv([path1]) # ## 1.3 Reading multiple CSV files # ### 1.3.1 Reading CSV by list wr.s3.read_csv([path1, path2]) # ### 1.3.2 Reading CSV by prefix wr.s3.read_csv(f"s3://{bucket}/csv/") # # 2. 
JSON files # ## 2.1 Writing JSON files # + path1 = f"s3://{bucket}/json/file1.json" path2 = f"s3://{bucket}/json/file2.json" wr.s3.to_json(df1, path1) wr.s3.to_json(df2, path2) # - # ## 2.2 Reading single JSON file wr.s3.read_json([path1]) # ## 2.3 Reading multiple JSON files # ### 2.3.1 Reading JSON by list wr.s3.read_json([path1, path2]) # ### 2.3.2 Reading JSON by prefix wr.s3.read_json(f"s3://{bucket}/json/") # # 3. Parquet files # For more complex features releated to Parquet Dataset check the tutorial number 4. # ## 3.1 Writing Parquet files # + path1 = f"s3://{bucket}/parquet/file1.parquet" path2 = f"s3://{bucket}/parquet/file2.parquet" wr.s3.to_parquet(df1, path1) wr.s3.to_parquet(df2, path2); # - # ## 3.2 Reading single Parquet file wr.s3.read_parquet([path1]) # ## 3.3 Reading multiple Parquet files # ### 3.3.1 Reading Parquet by list wr.s3.read_parquet([path1, path2]) # ### 3.3.2 Reading Parquet by prefix wr.s3.read_parquet(f"s3://{bucket}/parquet/") # # 4. Fixed-width formatted files (only read) # As of today, Pandas doesn't implement a `to_fwf` functionality, so let's manually write two files: # + content = "1 Herfelingen 27-12-18\n"\ "2 Lambusart 14-06-18\n"\ "3 Spormaggiore 15-04-18" boto3.client("s3").put_object(Body=content, Bucket=bucket, Key="fwf/file1.txt") content = "4 Buizingen 05-09-19\n"\ "5 <NAME> 04-09-19" boto3.client("s3").put_object(Body=content, Bucket=bucket, Key="fwf/file2.txt") path1 = f"s3://{bucket}/fwf/file1.txt" path2 = f"s3://{bucket}/fwf/file2.txt" # - # ## 4.1 Reading single FWF file wr.s3.read_fwf([path1], names=["id", "name", "date"]) # ## 4.2 Reading multiple FWF files # ### 4.2.1 Reading FWF by list wr.s3.read_fwf([path1, path2], names=["id", "name", "date"]) # ### 4.2.2 Reading FWF by prefix wr.s3.read_fwf(f"s3://{bucket}/fwf/", names=["id", "name", "date"]) # # 5. 
Excel files # ## 5.1 Writing Excel file # + path = f"s3://{bucket}/file0.xlsx" wr.s3.to_excel(df1, path, index=False) # - # ## 5.2 Reading Excel file wr.s3.read_excel(path) # # 6. Reading with lastModified filter # Specify the filter by LastModified Date. # # The filter needs to be specified as datime with time zone # # Internally the path needs to be listed, after that the filter is applied. # # The filter compare the s3 content with the variables lastModified_begin and lastModified_end # # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html # ### 6.1 Define the Date time with UTC Timezone # + begin = datetime.strptime("20-07-31 20:30", "%y-%m-%d %H:%M") end = datetime.strptime("21-07-31 20:30", "%y-%m-%d %H:%M") begin_utc = pytz.utc.localize(begin) end_utc = pytz.utc.localize(end) # - # ### 6.2 Define the Date time and specify the Timezone # + begin = datetime.strptime("20-07-31 20:30", "%y-%m-%d %H:%M") end = datetime.strptime("21-07-31 20:30", "%y-%m-%d %H:%M") timezone = pytz.timezone("America/Los_Angeles") begin_Los_Angeles = timezone.localize(begin) end_Los_Angeles = timezone.localize(end) # - # ### 6.3 Read json using the LastModified filters wr.s3.read_fwf(f"s3://{bucket}/fwf/", names=["id", "name", "date"], last_modified_begin=begin_utc, last_modified_end=end_utc) wr.s3.read_json(f"s3://{bucket}/json/", last_modified_begin=begin_utc, last_modified_end=end_utc) wr.s3.read_csv(f"s3://{bucket}/csv/", last_modified_begin=begin_utc, last_modified_end=end_utc) wr.s3.read_parquet(f"s3://{bucket}/parquet/", last_modified_begin=begin_utc, last_modified_end=end_utc); # ## 7. Download objects # + [markdown] pycharm={"name": "#%% md\n"} # Objects can be downloaded from S3 using either a path to a local file or a file-like object in binary mode. 
# + [markdown] pycharm={"name": "#%% md\n"} # ### 7.1 Download object to a file path # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} local_file_dir = getpass.getpass() # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} import os path1 = f"s3://{bucket}/csv/file1.csv" local_file = os.path.join(local_file_dir, "file1.csv") wr.s3.download(path=path1, local_file=local_file) pd.read_csv(local_file) # + [markdown] pycharm={"name": "#%% md\n"} # ### 7.2 Download object to a file-like object in binary mode # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} path2 = f"s3://{bucket}/csv/file2.csv" local_file = os.path.join(local_file_dir, "file2.csv") with open(local_file, mode="wb") as local_f: wr.s3.download(path=path2, local_file=local_f) pd.read_csv(local_file) # + [markdown] pycharm={"name": "#%% md\n"} # ## 8. Upload objects # + [markdown] pycharm={"name": "#%% md\n"} # Objects can be uploaded to S3 using either a path to a local file or a file-like object in binary mode. # + [markdown] pycharm={"name": "#%% md\n"} # ## 8.1 Upload object from a file path # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} local_file = os.path.join(local_file_dir, "file1.csv") wr.s3.upload(local_file=local_file, path=path1) wr.s3.read_csv(path1) # + [markdown] pycharm={"name": "#%% md\n"} # ## 8.2 Upload object from a file-like object in binary mode # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} local_file = os.path.join(local_file_dir, "file2.csv") with open(local_file, "rb") as local_f: wr.s3.upload(local_file=local_f, path=path2) wr.s3.read_csv(path2) # - # # 9. Delete objects # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} wr.s3.delete_objects(f"s3://{bucket}/")
tutorials/003 - Amazon S3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/google/jax-md/blob/simulation_refactor_2/notebooks/nvt_simulation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="kNDQ02qlDnNW" cellView="form" #@title Imports & Utils # !pip install jax-md import numpy as onp from jax.config import config ; config.update('jax_enable_x64', True) import jax.numpy as np from jax import random from jax import jit from jax import lax from jax import ops import time from jax_md import space, smap, energy, minimize, quantity, simulate import matplotlib import matplotlib.pyplot as plt import seaborn as sns sns.set_style(style='white') def format_plot(x, y): plt.xlabel(x, fontsize=20) plt.ylabel(y, fontsize=20) def finalize_plot(shape=(1, 1)): plt.gcf().set_size_inches( shape[0] * 1.5 * plt.gcf().get_size_inches()[1], shape[1] * 1.5 * plt.gcf().get_size_inches()[1]) plt.tight_layout() # + [markdown] id="uxCDg0ioWh70" # # Constant Temperature Simulation # + [markdown] id="dFK0Dew5MwXt" # Here we demonstrate some code to run a simulation at in the NVT ensemble. We start off by setting up some parameters of the simulation. This will include a temperature schedule that will start off at a high temperature and then instantaneously quench to a lower temperature. # + id="yGdrjCj1Wm9J" N = 400 dimension = 2 box_size = quantity.box_size_at_number_density(N, 0.8, 2) dt = 5e-3 displacement, shift = space.periodic(box_size) kT = lambda t: np.where(t < 5000.0 * dt, 0.1, 0.01) # + [markdown] id="uFrxHj12M3X1" # Next we need to generate some random positions as well as particle sizes. 
# + id="rEic-SLigNIa" key = random.PRNGKey(0) # + id="5ApXCCgdWm9O" key, split = random.split(key) R = box_size * random.uniform(split, (N, dimension), dtype=np.float64) # The system ought to be a 50:50 mixture of two types of particles, one # large and one small. sigma = np.array([[1.0, 1.2], [1.2, 1.4]]) N_2 = int(N / 2) species = np.where(np.arange(N) < N_2, 0, 1) # + [markdown] id="hjPUEPcwM6jc" # Then we need to construct our simulation operators. # + id="B1xXV_FmWm9Q" energy_fn = energy.soft_sphere_pair(displacement, species=species, sigma=sigma) init, apply = simulate.nvt_nose_hoover(energy_fn, shift, dt, kT(0.)) state = init(key, R) # + [markdown] id="hJRvIrfiNAgV" # Now let's actually do the simulation. To do this we'll write a small function that performs a single step of the simulation. This function will keep track of the temperature, the extended Hamiltonian of the Nose-Hoover dynamics, and the current particle positions. # + id="1rtYEp_LP35v" write_every = 100 def step_fn(i, state_and_log): state, log = state_and_log t = i * dt # Log information about the simulation. T = quantity.temperature(state.velocity) log['kT'] = ops.index_update(log['kT'], i, T) H = simulate.nvt_nose_hoover_invariant(energy_fn, state, kT(t)) log['H'] = ops.index_update(log['H'], i, H) # Record positions every `write_every` steps. log['position'] = lax.cond(i % write_every == 0, lambda p: ops.index_update(p, i // write_every, state.position), lambda p: p, log['position']) # Take a simulation step. state = apply(state, kT=kT(t)) return state, log # + [markdown] id="asUbxPn9lU6H" # To run our simulation we'll use `lax.fori_loop` which will execute the simulation a single call from python. 
# + id="psqU1uEPWm9T" steps = 10000 log = { 'kT': np.zeros((steps,)), 'H': np.zeros((steps,)), 'position': np.zeros((steps // write_every,) + R.shape) } state, log = lax.fori_loop(0, steps, step_fn, (state, log)) R = state.position # + [markdown] id="0hhEEuojNFht" # Now, let's plot the temperature as a function of time. We see that the temperature tracks the goal temperature with some fluctuations. # + id="jiJOwCAPirFn" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="69cb51fa-cb3a-4f0e-e521-1a8736b3b67f" t = onp.arange(0, steps) * dt plt.plot(t, log['kT'], linewidth=3) plt.plot(t, kT(t), linewidth=3) format_plot('$t$', '$T$') finalize_plot() # + [markdown] id="kTvH7BpVQtkm" # Now let's plot the Hamiltonian of the system. We see that it is invariant apart from changes to the temperature, as expected. # + id="xDsowzLKQo3Z" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="d748438a-6d66-4bef-9c43-57cf5c7b41be" plt.plot(t, log['H'], linewidth=3) format_plot('$t$', '$H$') finalize_plot() # + [markdown] id="qtroRWfgqcT4" # Now let's plot a snapshot of the system. # + id="Yq4Rz3eMqcTh" colab={"base_uri": "https://localhost:8080/", "height": 873} outputId="b7cec16c-7941-4465-cc3f-ff8024de598c" ms = 65 R_plt = onp.array(state.position) plt.plot(R_plt[:N_2, 0], R_plt[:N_2, 1], 'o', markersize=ms * 0.5) plt.plot(R_plt[N_2:, 0], R_plt[N_2:, 1], 'o', markersize=ms * 0.7) plt.xlim([0, np.max(R[:, 0])]) plt.ylim([0, np.max(R[:, 1])]) plt.axis('off') finalize_plot((2, 2)) # + [markdown] id="l4RLeAhVmA-_" # If we want, we can also draw an animation of the simulation using JAX MD's renderer. 
# + colab={"base_uri": "https://localhost:8080/", "height": 922} id="79CiYF_aTgTq" outputId="11aafee1-9c6e-43b0-97e8-b2d205f119b2" from jax_md.colab_tools import renderer diameters = sigma[species, species] colors = np.where(species[:, None], np.array([[1.0, 0.5, 0.01]]), np.array([[0.35, 0.65, 0.85]])) renderer.render(box_size, { 'particles': renderer.Disk(log['position'], diameters, colors) }, resolution=(700, 700)) # + [markdown] id="2cuLb1IQnnZo" # ## Larger Simulation with Neighbor Lists # + [markdown] id="MQQWnGuxnvZf" # We can use neighbor lists to run a much larger version of this simulation. As their name suggests, neighbor lists are lists of particles nearby a central particle. By keeping track of neighbors, we can compute the energy of the system much more efficiently. This becomes increasingly true as the simulation gets larger. # + id="5Zzf9meloDql" N = 10000 box_size = quantity.box_size_at_number_density(N, 0.8, 2) displacement, shift = space.periodic(box_size) kT = lambda t: np.where(t < 50.0, 0.1, 0.01) # + [markdown] id="lcnnfeoQoDqm" # As before we randomly initialize the system. # + id="qEFpYkomoDqm" key, split = random.split(key) R = box_size * random.uniform(split, (N, dimension), dtype=np.float64) sigma = np.array([[1.0, 1.2], [1.2, 1.4]]) N_2 = int(N / 2) species = np.where(np.arange(N) < N_2, 0, 1) # + [markdown] id="WJWuZXFfoDqm" # Then we need to construct our simulation operators. This time we use the `energy.soft_sphere_neighbor_fn` to create two functions: one that constructs lists of neighbors and one that computes the energy. # + id="2O3OLWRaoDqm" neighbor_fn, energy_fn = energy.soft_sphere_neighbor_list(displacement, box_size, species=species, sigma=sigma) init, apply = simulate.nvt_nose_hoover(energy_fn, shift, dt, kT(0.), tau=200*dt) nbrs = neighbor_fn(R) state = init(key, R, neighbor=nbrs) # + [markdown] id="xY4dnk53oDqm" # Now let's actually do the simulation. This time our simulation step function will also update the neighbors. 
As above, we will also only record position data every hundred steps. # + id="OibU3VOQoDqm" write_every = 100 def step_fn(i, state_nbrs_log): state, nbrs, log = state_nbrs_log t = i * dt # Log information about the simulation. T = quantity.temperature(state.velocity) log['kT'] = ops.index_update(log['kT'], i, T) H = simulate.nvt_nose_hoover_invariant(energy_fn, state, kT(t), neighbor=nbrs) log['H'] = ops.index_update(log['H'], i, H) # Record positions every `write_every` steps. log['position'] = lax.cond(i % write_every == 0, lambda p: ops.index_update(p, i // write_every, state.position), lambda p: p, log['position']) # Take a simulation step. state = apply(state, kT=kT(t), neighbor=nbrs) nbrs = neighbor_fn(state.position, nbrs) return state, nbrs, log # + [markdown] id="7GyNMRbboDqm" # To run our simulation we'll use `lax.fori_loop` which will execute the simulation a single call from python. # + id="QGCAGy-ooDqm" steps = 20000 log = { 'kT': np.zeros((steps,)), 'H': np.zeros((steps,)), 'position': np.zeros((steps // write_every,) + R.shape) } state, nbrs, log = lax.fori_loop(0, steps, step_fn, (state, nbrs, log)) R = state.position # + [markdown] id="87EOPGJPoDqm" # Now, let's plot the temperature as a function of time. We see that the temperature tracks the goal temperature with some fluctuations. # + colab={"base_uri": "https://localhost:8080/", "height": 441} id="QMIkiaO6oDqm" outputId="85833498-adc0-4c68-dacc-6cbb94255c2a" t = onp.arange(0, steps) * dt plt.plot(t, log['kT'], linewidth=3) plt.plot(t, kT(t), linewidth=3) format_plot('$t$', '$T$') finalize_plot() # + [markdown] id="Rlq_kaeNoDqo" # Now let's plot the Hamiltonian of the system. We see that it is invariant apart from changes to the temperature, as expected. 
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="LN5irdwUoDqo" outputId="0aac2adb-caed-413e-9ad5-a7b1254c6b78" plt.plot(t, log['H'], linewidth=3) format_plot('$t$', '$H$') finalize_plot() # + [markdown] id="EoG1KAFyoDqo" # Now let's plot a snapshot of the system. # + colab={"base_uri": "https://localhost:8080/", "height": 873} id="JfV79m3ioDqo" outputId="345d68de-6000-4c4d-f8e1-1942d921f3d5" ms = 10 R_plt = onp.array(state.position) plt.plot(R_plt[:N_2, 0], R_plt[:N_2, 1], 'o', markersize=ms * 0.5) plt.plot(R_plt[N_2:, 0], R_plt[N_2:, 1], 'o', markersize=ms * 0.7) plt.xlim([0, np.max(R[:, 0])]) plt.ylim([0, np.max(R[:, 1])]) plt.axis('off') finalize_plot((2, 2)) # + [markdown] id="tmtKpLp6oDqo" # If we want, we can also draw an animation of the simulation using JAX MD's renderer. # + colab={"base_uri": "https://localhost:8080/", "height": 922} id="TFz5LNV5oDqo" outputId="88df7d41-0d28-41be-a1b8-37c35fbd1eb2" from jax_md.colab_tools import renderer diameters = sigma[species, species] colors = np.where(species[:, None], np.array([[1.0, 0.5, 0.01]]), np.array([[0.35, 0.65, 0.85]])) renderer.render(box_size, { 'particles': renderer.Disk(log['position'], diameters, colors) }, buffer_size=20, resolution=(700, 700)) # + [markdown] id="8E59mHRb4-3R" # Finally, let's plot the velocity distribution compared with its theoretical prediction. # + id="SCKwEVc_5BEk" colab={"base_uri": "https://localhost:8080/"} outputId="eb16b593-439f-485e-9cad-2bb96fc9c191" V_flat = onp.reshape(onp.array(state.velocity), (-1,)) occ, bins = onp.histogram(V_flat, bins=100, normed=True) # + id="q9JJPIIq5DmG" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="611ae855-d584-4176-df63-89528f2ae186" T_cur = kT(steps * dt) plt.semilogy(bins[:-1], occ, 'o') plt.semilogy( bins[:-1], 1.0 / np.sqrt(2 * np.pi * T_cur) * onp.exp(-1/(2 * T_cur) * bins[:-1] ** 2), linewidth=3) format_plot('t', 'T') finalize_plot()
notebooks/nvt_simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <div class="contentcontainer med left" style="margin-left: -50px;"> # <dl class="dl-horizontal"> # <dt>Title</dt> <dd> Regression selection</dd> # <dt>Description</dt> <dd>A linked streams example demonstrating how to the Selection1D stream to tap on a datapoint and reveal a regression plot. Highlights how custom interactivity can be used to reveal more information about a dataset.</dd> # <dt>Backends</dt> <dd> Bokeh</dd> # <dt>Tags</dt> <dd> streams, linked, tap selection</dd> # </dl> # </div> import numpy as np import holoviews as hv from holoviews.streams import Selection1D from scipy import stats hv.extension('bokeh') # + # %%opts Scatter [color_index=2 tools=['tap', 'hover'] width=600] {+framewise} (marker='triangle' cmap='Set1' size=10) # %%opts Overlay [toolbar='above' legend_position='right'] Curve (line_color='black') {+framewise} def gen_samples(N, corr=0.8): xx = np.array([-0.51, 51.2]) yy = np.array([0.33, 51.6]) means = [xx.mean(), yy.mean()] stds = [xx.std() / 3, yy.std() / 3] covs = [[stds[0]**2 , stds[0]*stds[1]*corr], [stds[0]*stds[1]*corr, stds[1]**2]] return np.random.multivariate_normal(means, covs, N) data = [('Week %d' % (i%10), np.random.rand(), chr(65+np.random.randint(5)), i) for i in range(100)] sample_data = hv.NdOverlay({i: hv.Points(gen_samples(np.random.randint(1000, 5000), r2)) for _, r2, _, i in data}) points = hv.Scatter(data, kdims=['Date', 'r2'], vdims=['block', 'id']).redim.range(r2=(0., 1)) stream = Selection1D(source=points) empty = (hv.Points(np.random.rand(0, 2)) * hv.Curve(np.random.rand(0, 2))).relabel('No selection') def regression(index): if not index: return empty scatter = sample_data[index[0]] xs, ys = scatter['x'], scatter['y'] slope, intercep, rval, pval, std = stats.linregress(xs, ys) xs = np.linspace(*scatter.range(0)+(2,)) reg = slope*xs+intercep return (scatter * 
hv.Curve((xs, reg))).relabel('r2: %.3f' % slope) reg = hv.DynamicMap(regression, kdims=[], streams=[stream]) average = hv.Curve(points, kdims=['Date'], vdims=['r2']).aggregate(function=np.mean) points * average + reg # - # <center><img src="http://assets.holoviews.org/gifs/examples/streams/bokeh/regression_tap.gif" width=400></center>
examples/reference/streams/bokeh/Selection1D_tap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/18cse005/DMDW/blob/main/Credit_Card_Fraud_Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="SRUd3ThB10Bd" import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score # + id="ojIgy0yI92si" # loading the dataset to a Pandas DataFrame credit_card_data = pd.read_csv('/content/creditcard.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="bIAAiXdyAww7" outputId="aa7a37a6-9673-41bd-89e7-b6480a8767fb" # first 5 rows of the dataset credit_card_data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="LialLCenA0bh" outputId="a9bbd695-1e9b-40eb-e052-598af68e2e0e" credit_card_data.tail() # + colab={"base_uri": "https://localhost:8080/"} id="irrlHUEsA3Ya" outputId="3757b716-843b-4ccf-eb75-1a01428eab19" # dataset informations credit_card_data.info() # + colab={"base_uri": "https://localhost:8080/"} id="0GQAQGdyA6Uf" outputId="be5b9cf3-d08c-4ea8-d45c-986527247269" # checking the number of missing values in each column credit_card_data.isnull().sum() # + colab={"base_uri": "https://localhost:8080/"} id="GgLoPWH5A-IA" outputId="e03e0a48-a303-434c-e830-3c92c8b7807c" # distribution of legit transactions & fraudulent transactions credit_card_data['Class'].value_counts() # + [markdown] id="lWcl-gahBI10" # This Dataset is highly unblanced # # 0 --> Normal Transaction # # 1 --> fraudulent transaction # + id="RkyMSPaIBFGh" # separating the data for analysis legit = credit_card_data[credit_card_data.Class == 0] fraud = 
credit_card_data[credit_card_data.Class == 1] # + colab={"base_uri": "https://localhost:8080/"} id="BXtldU04BL8Z" outputId="6c0f9d4a-37be-4984-a195-f12113c80a10" print(legit.shape) print(fraud.shape) # + colab={"base_uri": "https://localhost:8080/"} id="FeZ3iPF8BOtk" outputId="665bc5b7-8e25-417f-e032-3c0f6a05be24" # statistical measures of the data legit.Amount.describe() # + colab={"base_uri": "https://localhost:8080/"} id="gOmpl0fgBR06" outputId="e3071b5a-8b96-4da8-af30-027b8f63dba9" fraud.Amount.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 162} id="4D733ONABUxg" outputId="50e7a7f2-d468-451b-dd36-3a0a4de4efea" # compare the values for both transactions credit_card_data.groupby('Class').mean() # + [markdown] id="frb7ZqXEBaT7" # Under-Sampling # # Build a sample dataset containing similar distribution of normal transactions and Fraudulent Transactions # # Number of Fraudulent Transactions --> 492 # + id="kzqsqMh5BXWO" legit_sample = legit.sample(n=492) # + [markdown] id="5vEVfvSEBiTG" # Concatenating two DataFrames # # # + id="DNik2_szBenK" new_dataset = pd.concat([legit_sample, fraud], axis=0) # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="5XJ2z54HBldA" outputId="32c9fb59-cb14-4ab8-d192-fcbab4b28850" new_dataset.head() # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="v6r92MY4Bn78" outputId="55ea783c-151f-410c-c921-cbe6260d1600" new_dataset.tail() # + colab={"base_uri": "https://localhost:8080/"} id="fdDeGUBGBqMB" outputId="e2a2a77c-6007-4bd2-e79f-3ed112187a51" new_dataset['Class'].value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 162} id="UjIVJWlXBsam" outputId="1d536f4a-13c9-4546-b171-56319697235f" new_dataset.groupby('Class').mean() # + [markdown] id="Q7MS02CjBxzl" # Splitting the data into Features & Targets # # # + id="baXjQcrNBu14" X = new_dataset.drop(columns='Class', axis=1) Y = new_dataset['Class'] # + colab={"base_uri": "https://localhost:8080/"} id="H6wPO_-ZB0Ec" 
outputId="48c32812-84d4-41cd-984d-6c15c7c9ed3c" print(X) # + colab={"base_uri": "https://localhost:8080/"} id="ZHMgA_t1B2Hn" outputId="f8acc828-774f-41ce-8645-bd3e8818a3d5" print(Y) # + [markdown] id="TbdHiQEXB8HP" # Split the data into Training data & Testing Data # + id="jXehn6YcB6tH" X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y, random_state=2) # + colab={"base_uri": "https://localhost:8080/"} id="FvPaaUZVB_H2" outputId="72a0a119-868f-49a1-f79b-b22f7a1ceb85" print(X.shape, X_train.shape, X_test.shape) # + [markdown] id="l59FE2g6CDmX" # Model Training # # Logistic Regression # + id="dEYQ3tfzCA9a" model = LogisticRegression() # + colab={"base_uri": "https://localhost:8080/"} id="WK75VFLMCG-F" outputId="3414884b-48e2-4de0-ce7d-ddac5fc14b7b" # training the Logistic Regression Model with Training Data model.fit(X_train, Y_train) # + [markdown] id="EICwniNKCQNA" # Model Evaluation # # Accuracy Score # + id="_GZ5iFBDCKGX" # accuracy on training data X_train_prediction = model.predict(X_train) training_data_accuracy = accuracy_score(X_train_prediction, Y_train) # + colab={"base_uri": "https://localhost:8080/"} id="rO4fKLfpCTD_" outputId="a628a2a6-a480-48a3-ee29-dd2c3a9436a6" print('Accuracy on Training data : ', training_data_accuracy) # + id="byWvRqGQCVYn" # accuracy on test data X_test_prediction = model.predict(X_test) test_data_accuracy = accuracy_score(X_test_prediction, Y_test) # + colab={"base_uri": "https://localhost:8080/"} id="nQsz5GwlCXov" outputId="cb3de397-333c-4c3a-ffd5-428587591ba0" print('Accuracy score on Test Data : ', test_data_accuracy) # + id="Z_jjADi2CZ7W"
Credit_Card_Fraud_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.5 64-bit (''dsd'': conda)' # language: python # name: python3 # --- # # Previsão de faturamento - Modelo autorregressivo ajustado ao produto `auxilio_final_de_ano` # # <sub>Projeto para a disciplina de **Estatística** (Módulo 4) do Data Science Degree (turma de julho de 2020)</sub> # ## Equipe # # * <NAME> # * <NAME> # * <NAME> # * <NAME> # # Link para [projeto no Github](https://github.com/flimao/case-previsao-faturamento) # ## Introdução # # Este notebook contém o modelo ajustado para o faturamento do produto `auxilio_final_de_ano` mês a mês. # + import os import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pickle # modelos from pmdarima.arima import auto_arima from pmdarima.arima.arima import ARIMA #from statsmodels.tsa.arima.model import ARIMA from statsmodels.graphics.tsaplots import plot_acf, plot_pacf # metricas from sklearn.metrics import mean_absolute_percentage_error as smape, mean_squared_error as smse, mean_absolute_error as smae, r2_score # importar scripts cwd = os.getcwd() os.chdir("../") import py_scripts.plots, py_scripts.transform, py_scripts.metrics os.chdir(cwd) import matplotlib as mpl mpl.rcParams['figure.dpi'] = 120 mpl.rcParams['figure.figsize'] = (10, 4) # - # ## Importação dos dados # + ts_raw = pd.read_csv(r'../data/sim_ts_limpo.csv') tsd, tswide = py_scripts.transform.pipeline(ts_raw) produtos = tswide.columns n_produtos = produtos.shape[0] # - # ## Modelo autorregressivo integrável de médias móveis com sazonalidade (`SARIMA`) # + analise_produto = 'auxilio_final_de_ano' fat = tswide[analise_produto] lancamento = tsd[analise_produto].index[0] antes_lancto = fat[:lancamento].index fat = fat.drop(index = antes_lancto) # - fat.plot(linestyle = '', marker = 'o') plt.title('Faturamento (R$ mi)') plt.show() # Para 
analisar e prever essa série temporal, é necessário um modelo mais completo. Utilizaremos aqui um modelo autorregressivo integrado de média móvel com sazonalidade - **SARIMA**. # # OBS.: o modelo completo chama-se SARIMAX; o `X` adicional permite a modelagem de variáveis exógenas. No entanto, não utilizaremos variáveis exógenas neste caso. # + # após ramp-up fat_modelo = fat.copy() train_test_split_idx = int(fat_modelo.dropna().shape[0] * 0.7 + 1) fat_train = fat_modelo[:train_test_split_idx] fat_test = fat_modelo[train_test_split_idx:] fat_train.plot(label = 'Conjunto de treino') fat_test.plot(label = 'Conjunto de teste') plt.title(f"Train test split (produto '{analise_produto}', pós ramp-up)") plt.ylabel('Faturamento (R$ bi)') plt.legend() plt.show() # - fig = plt.figure() ax = fig.gca() plot_pacf(fat_modelo.dropna(), lags = 20, method = 'ywm', ax = ax) ax.set_xlabel('Lags') ax.set_title(f"Autocorrelação parcial - faturamento do produto '{analise_produto}' ($d, D \sim 4$)") plt.show() # + # modelo = auto_arima( # y = fat_train, # start_p = 1, max_p = 3, # d = None, max_d = 5, # start_q = 1, max_q = 3, # start_P = 1, max_P = 3, # D = None, max_D = 5, # start_Q = 1, max_Q = 3, # #max_order = 6, # m = 12, # seasonal = True, # alpha = 0.05, # stepwise = True, # trace = True, # n_fits = 500, # enforce_stationarity = True, # ) # - modelo = ARIMA(order = (3, 0, 2), seasonal_order=(1, 6, 0, 12), with_intercept = False).fit(y = fat_train) modelo.summary() preds = py_scripts.plots.ajuste_grafico( modelo = modelo, produto = analise_produto, serie_treino = fat_train, serie_teste = fat_test, ci = True, in_sample = False, preds_metrics = True ) modelo.plot_diagnostics(figsize = (10,8)) plt.tight_layout() plt.show() # Vamos aplicar algumas métricas ao modelo: # + kwargs_ajuste = dict( y_true = fat_test.dropna(), y_pred = preds[fat_test.dropna().index], ) metricas = py_scripts.metrics.mostrar_metricas(**kwargs_ajuste) # - # Vamos salvar o modelo atual para o faturamento do 
produto `auxilio_final_de_ano`. # + modelo_dict = dict( modelo = modelo.fit(fat_train), serie_treino = fat_train, ) with open(r'../models/produto_auxilio.model', 'wb') as arq_modelo_auxilio: pickler = pickle.Pickler(file = arq_modelo_auxilio) pickler.dump(modelo_dict) # -
notebooks_models/produto_auxilio_arima.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Heuristic Miner # ## Step 1: Handling and import event data # + pycharm={"name": "#%%\n"} import pm4py from pm4py.objects.log.importer.xes import importer as xes_importer #log_path = os.path.join("tests", "compressed_input_data", "09_a32f0n00.xes.gz") log = xes_importer.apply("Road_Traffic_Fine_Management_Process.xes") # - # ## Step 2: Mining event log - Process Discovery # + pycharm={"name": "#%%\n"} from pm4py.algo.discovery.heuristics import algorithm as heuristics_miner heu_net = heuristics_miner.apply_heu(log, parameters={heuristics_miner.Variants.CLASSIC.value.Parameters.DEPENDENCY_THRESH: 0.99}) # - # ## Step 3: Visualize the Heuristic Net # + pycharm={"name": "#%%\n"} from pm4py.visualization.heuristics_net import visualizer as hn_visualizer gviz = hn_visualizer.apply(heu_net) hn_visualizer.view(gviz) # - # ## Step 4: Convert Heuristic Net to Petri Net and BPMN # + pycharm={"name": "#%%\n"} # To obtain a Petri Net that is based on the Heuristics Miner, the code on the right hand side can be used. Also this Petri Net can be visualized. 
from pm4py.algo.discovery.heuristics import algorithm as heuristics_miner

# Discover a Petri net (with initial/final markings) using the Heuristics Miner,
# keeping the same dependency threshold as the heuristic-net step above.
net, initial_marking, final_marking = heuristics_miner.apply(
    log,
    parameters={heuristics_miner.Variants.CLASSIC.value.Parameters.DEPENDENCY_THRESH: 0.99})

from pm4py.visualization.petri_net import visualizer as pn_visualizer
gviz = pn_visualizer.apply(net, initial_marking, final_marking)
pn_visualizer.view(gviz)

# + pycharm={"name": "#%%\n"}
# Pass the net and markings directly instead of building and unpacking a
# throwaway list with *[...].
bpmn_graph = pm4py.convert_to_bpmn(net, initial_marking, final_marking)
pm4py.view_bpmn(bpmn_graph, "png")
# -

# ## Step 5: Log-Model Evaluation

# ### Replay Fitness

# + pycharm={"name": "#%%\n"}
from pm4py.algo.evaluation.replay_fitness import algorithm as replay_fitness_evaluator
fitness = replay_fitness_evaluator.apply(log, net, initial_marking, final_marking,
                                         variant=replay_fitness_evaluator.Variants.TOKEN_BASED)

# + pycharm={"name": "#%%\n"}
fitness
# -

# ### Precision

# + pycharm={"name": "#%%\n"}
from pm4py.algo.evaluation.precision import algorithm as precision_evaluator
prec = precision_evaluator.apply(log, net, initial_marking, final_marking,
                                 variant=precision_evaluator.Variants.ETCONFORMANCE_TOKEN)

# + pycharm={"name": "#%%\n"}
prec
# -

# ### F-measure

# + pycharm={"name": "#%%\n"}
def f_measure(f, p):
    """Return the F-measure (harmonic mean) of fitness ``f`` and precision ``p``."""
    return (2 * f * p) / (f + p)

f_measure(fitness['average_trace_fitness'], prec)

# + pycharm={"name": "#%%\n"}
# %reset -f

# + pycharm={"name": "#%%\n"}
src/Road/HeuristicMiner.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Window functions with aggregations (I) # # To familiarize yourself with the window functions, you will work with the `Orders` table in this chapter. Recall that using `OVER()`, you can create a window for the entire table. To create partitions using a specific column, you need to use `OVER()` along with `PARTITION BY`. # # Instructions # # 1. Write a T-SQL query that returns the sum of `OrderPrice` by creating partitions for each `TerritoryName`. # + SELECT OrderID, TerritoryName, -- Total price for each partition SUM(OrderPrice) -- Create the window and partitions OVER(PARTITION BY TerritoryName) AS TotalPrice FROM Orders; # OrderID TerritoryName TotalPrice # 43706 Australia 1469 # 43722 Australia 1469 # 43729 Australia 1469 # 47622 Australia 1469 # 47722 Australia 1469 # 48577 Australia 1469 # 48611 Australia 1469 # 50342 Australia 1469 # 50365 Australia 1469 # 51331 Australia 1469 # 51398 Australia 1469 # 53543 Australia 1469 # 53578 Australia 1469 # 53576 Canada 2573 # ... # - # ## Window functions with aggregations (II) # # In the last exercise, you calculated the sum of all orders for each territory. In this exercise, you will calculate the number of orders in each territory. # # Instructions # # 1. Count the number of rows in each partition. # 2. Partition the table by `TerritoryName`. 
# + SELECT OrderID, TerritoryName, -- Number of rows per partition COUNT(*) -- Create the window and partitions OVER(PARTITION BY TerritoryName) AS TotalOrders FROM Orders; # OrderID TerritoryName TotalOrders # 43706 Australia 13 # 43722 Australia 13 # 43729 Australia 13 # 47622 Australia 13 # 47722 Australia 13 # 48577 Australia 13 # 48611 Australia 13 # 50342 Australia 13 # 50365 Australia 13 # 51331 Australia 13 # 51398 Australia 13 # 53543 Australia 13 # 53578 Australia 13 # 53576 Canada 37 # ... # - # ## Do you know window functions? # # Which of the following statements is _incorrect_ regarding window queries? # The standard aggregations like `SUM()`, `AVG()`, and `COUNT()` require `ORDER BY` in the `OVER()` clause. # ## First value in a window # # Suppose you want to figure out the first `OrderDate` in each territory or the last one. How would you do that? You can use the window functions `FIRST_VALUE()` and `LAST_VALUE()`, respectively! Here are the steps: # # - First, create partitions for each territory. # - Then, order by `OrderDate`. # - Finally, use the `FIRST_VALUE()` and/or `LAST_VALUE()` functions as per your requirement. # # Instructions # # 1. Write a T-SQL query that returns the first `OrderDate` by creating partitions for each `TerritoryName`. 
# + SELECT TerritoryName, OrderDate, -- Select the first value in each partition FIRST_VALUE(OrderDate) -- Create the partitions and arrange the rows OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS FirstOrder FROM Orders; # TerritoryName OrderDate FirstOrder # Australia 2015-02-23 09:00:00 2015-02-23 09:00:00 # Australia 2015-02-23 11:00:00 2015-02-23 09:00:00 # Australia 2015-02-23 12:00:00 2015-02-23 09:00:00 # Australia 2015-04-23 02:00:00 2015-02-23 09:00:00 # Australia 2015-04-24 02:00:00 2015-02-23 09:00:00 # Australia 2015-05-06 03:00:00 2015-02-23 09:00:00 # Australia 2015-05-07 05:00:00 2015-02-23 09:00:00 # Australia 2015-06-03 03:00:00 2015-02-23 09:00:00 # Australia 2015-06-03 05:00:00 2015-02-23 09:00:00 # Australia 2015-06-17 07:00:00 2015-02-23 09:00:00 # Australia 2015-06-18 04:00:00 2015-02-23 09:00:00 # Australia 2015-07-21 03:00:00 2015-02-23 09:00:00 # Australia 2015-07-21 12:00:00 2015-02-23 09:00:00 # Canada 2015-01-01 13:00:00 2015-01-01 13:00:00 # ... # - # ## Previous and next values # # What if you want to shift the values in a column by one row up or down? You can use the exact same steps as in the previous exercise but with two new functions, `LEAD()`, for the next value, and `LAG()`, for the previous value. So you follow these steps: # # - First, create partitions # - Then, order by a certain column # - Finally, use the `LEAD()` and/or `LAG()` functions as per your requirement # # Instructions # # 1. Write a T-SQL query that for each territory: # 1. Shifts the values in `OrderDate` one row down. Call this column `PreviousOrder`. # 2. Shifts the values in `OrderDate` one row up. Call this column `NextOrder`. 
_You will need to PARTITION BY the territory._ # + SELECT TerritoryName, OrderDate, -- Specify the previous OrderDate in the window LAG(OrderDate) -- Over the window, partition by territory & order by order date OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS PreviousOrder, -- Specify the next OrderDate in the window LEAD(OrderDate) -- Create the partitions and arrange the rows OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS NextOrder FROM Orders; # TerritoryName OrderDate PreviousOrder NextOrder # Australia 2015-02-23 09:00:00 null 2015-02-23 11:00:00 # Australia 2015-02-23 11:00:00 2015-02-23 09:00:00 2015-02-23 12:00:00 # Australia 2015-02-23 12:00:00 2015-02-23 11:00:00 2015-04-23 02:00:00 # ... # - # ## Creating running totals # # You usually don't have to use `ORDER BY` when using aggregations, but if you want to create running totals, you _should_ arrange your rows! In this exercise, you will create a running total of `OrderPrice`. # # Instructions # # 1. Create the window, partition by `TerritoryName` and order by `OrderDate` to calculate a running total of `OrderPrice`. # + SELECT TerritoryName, OrderDate, -- Create a running total SUM(OrderPrice) -- Create the partitions and arrange the rows OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS TerritoryTotal FROM Orders; # TerritoryName OrderDate TerritoryTotal # Australia 2015-02-23 09:00:00 48 # Australia 2015-02-23 11:00:00 83 # Australia 2015-02-23 12:00:00 313 # ... # - # ## Assigning row numbers # # Records in T-SQL are inherently unordered. Although in certain situations, you may want to assign row numbers for reference. In this exercise, you will do just that. # # Instructions # # 1. Write a T-SQL query that assigns row numbers to all records partitioned by `TerritoryName` and ordered by `OrderDate`. 
# + SELECT TerritoryName, OrderDate, -- Assign a row number ROW_NUMBER() -- Create the partitions and arrange the rows OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS OrderCount FROM Orders; # TerritoryName OrderDate OrderCount # Australia 2015-02-23 09:00:00 1 # Australia 2015-02-23 11:00:00 2 # Australia 2015-02-23 12:00:00 3 # ... # - # ## Calculating standard deviation # # Calculating the standard deviation is quite common when dealing with numeric columns. In this exercise, you will calculate the _running standard deviation_, similar to the running total you calculated in the previous lesson. # # Instructions # # 1. Create the window, partition by `TerritoryName` and order by `OrderDate` to calculate a running standard deviation of `OrderPrice`. # + SELECT OrderDate, TerritoryName, -- Calculate the standard deviation STDEV(OrderPrice) OVER(PARTITION BY TerritoryName ORDER BY OrderDate) AS StdDevPrice FROM Orders; # OrderDate TerritoryName StdDevPrice # 2015-02-23 09:00:00 Australia null # 2015-02-23 11:00:00 Australia 9.192388155425117 # 2015-02-23 12:00:00 Australia 109.02446208687908 # ... # - # ## Calculating mode (I) # # Unfortunately, there is no function to calculate the _mode_, the most recurring value in a column. To calculate the mode: # # - First, create a CTE containing an ordered count of values using `ROW_NUMBER()`. # - Write a query using the CTE to pick the value with the highest row number. # # In this exercise, you will write the CTE needed to calculate the mode of `OrderPrice`. # # Instructions # # 1. Create a CTE `ModePrice` that returns two columns (`OrderPrice` and `UnitPriceFrequency`). # 2. Write a query that returns all rows in this CTE. 
# + -- Create a CTE Called ModePrice which contains two columns WITH ModePrice (OrderPrice, UnitPriceFrequency) AS(SELECT OrderPrice, ROW_NUMBER() OVER(PARTITION BY OrderPrice ORDER BY OrderPrice) AS UnitPriceFrequency FROM Orders) -- Select everything from the CTE SELECT * FROM ModePrice; # OrderPrice UnitPriceFrequency # 3.5 1 # 3.5 2 # 3.700000047683716 1 # ... # - # ## Calculating mode (II) # # In the last exercise, you created a CTE which assigned row numbers to each unique value in `OrderPrice`. All you need to do now is to find the `OrderPrice` with the highest row number. # # Instructions # # 1. Use the CTE `ModePrice` to return the value of `OrderPrice` with the highest row number. # + -- CTE from the previous exercise WITH ModePrice (OrderPrice, UnitPriceFrequency) AS (SELECT OrderPrice, ROW_NUMBER() OVER (PARTITION BY OrderPrice ORDER BY OrderPrice) AS UnitPriceFrequency FROM Orders) -- Select the order price from the CTE SELECT OrderPrice AS ModeOrderPrice FROM ModePrice -- Select the maximum UnitPriceFrequency from the CTE WHERE UnitPriceFrequency IN (SELECT MAX(UnitPriceFrequency) From ModePrice); # ModeOrderPrice # 32
intermediate_sql_server/4_window_functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="fWfkYsCgPvqR" # # Short intro to the SCT library of AutoGraph # # **Work in progress, use with care and expect changes.** # # The `pyct` module packages the source code transformation APIs used by AutoGraph. # # This tutorial is just a preview - there is no PIP package yet, and the API has not been finalized, although most of those shown here are quite stable. # # [Run in Colab](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/pyct_tutorial.ipynb) # # Requires `tf-nightly`: # + colab={} colab_type="code" id="wq1DRamRlqoB" # !pip install tf-nightly # + [markdown] colab_type="text" id="r7Q78WIKe2cu" # ### Writing a custom code translator # # [transformer.CodeGenerator](https://github.com/tensorflow/tensorflow/blob/40802bcdb5c8a4379da2145441f51051402bd29b/tensorflow/python/autograph/pyct/transformer.py#L480) is an AST visitor that outputs a string. This makes it useful in the final stage of translating Python to another language. 
# + [markdown] colab_type="text" id="HHaCMFOpuoVx"
# Here's a toy C++ code generator written using a `transformer.CodeGenerator`, which is just a fancy subclass of [ast.NodeVisitor](https://docs.python.org/3/library/ast.html#ast.NodeVisitor):

# + colab={} colab_type="code" id="PJlTIbJlurpm"
import gast

from tensorflow.python.autograph.pyct import transformer


class BasicCppCodegen(transformer.CodeGenerator):
  """Toy visitor that emits C++-flavored source for simple Python functions."""

  def visit_Name(self, node):
    self.emit(node.id)

  def _emit_comma_separated(self, nodes):
    # Emits `a, b, c`; unlike indexing nodes[0] directly, this also handles
    # the zero-element case (no-arg functions/calls) without IndexError.
    for i, n in enumerate(nodes):
      if i:
        self.emit(', ')
      self.visit(n)

  def visit_arguments(self, node):
    self._emit_comma_separated(node.args)

  def visit_FunctionDef(self, node):
    # The toy generator renders every function with a `void` return type.
    self.emit('void {}'.format(node.name))
    self.emit('(')
    self.visit(node.args)
    self.emit(') {\n')
    self.visit_block(node.body)
    self.emit('\n}')

  def visit_Call(self, node):
    self.emit(node.func.id)
    self.emit('(')
    self._emit_comma_separated(node.args)
    self.emit(');')


# + [markdown] colab_type="text" id="chCX1A3rA9Pn"
# Another helpful API is [transpiler.GenericTranspiler](https://github.com/tensorflow/tensorflow/blob/ee7172a929cb0c3d94a094fafc60bbaa175c085d/tensorflow/python/autograph/pyct/transpiler.py#L227) which takes care of parsing:

# + colab={} colab_type="code" id="LmwWewU1Bw0B"
import gast

from tensorflow.python.autograph.pyct import transpiler


class PyToBasicCpp(transpiler.GenericTranspiler):
  """Transpiles a Python function to the toy C++ dialect generated above."""

  def get_transformed_name(self, node):
    # Name under which the transformed entity is reported.
    return 'new_f'

  def transform_ast(self, node, ctx):
    codegen = BasicCppCodegen(ctx)
    codegen.visit(node)
    return codegen.code_buffer


# + [markdown] colab_type="text" id="nUhlScyOjlYM"
# Try it on a simple function:

# + colab={} colab_type="code" id="ty9q853QvUqo"
def f(x, y):
  print(x, y)


code, _ = PyToBasicCpp().transform(f, None)
print(code)

# + [markdown] colab_type="text" id="rmRI9dG_ydE_"
# ### Helpful static analysis passes
#
# The `static_analysis` module contains various helper passes for dataflow analysis.
#
# All these passes annotate the AST.
These annotations can be extracted using [anno.getanno](https://github.com/tensorflow/tensorflow/blob/40802bcdb5c8a4379da2145441f51051402bd29b/tensorflow/python/autograph/pyct/anno.py#L111). Most of them rely on the `qual_names` annotations, which just simplify the way more complex identifiers like `a.b.c` are accessed. # # The most useful is the activity analysis which just inventories symbols read, modified, etc.: # + colab={} colab_type="code" id="GEJ30Wea4Xfy" def get_node_and_ctx(f): node, source = parser.parse_entity(f, ()) f_info = transformer.EntityInfo( name='f', source_code=source, source_file=None, future_features=(), namespace=None) ctx = transformer.Context(f_info, None, None) return node, ctx # + colab={} colab_type="code" id="BiwPJrDd0aAX" from tensorflow.python.autograph.pyct import anno from tensorflow.python.autograph.pyct import parser from tensorflow.python.autograph.pyct import qual_names from tensorflow.python.autograph.pyct.static_analysis import annos from tensorflow.python.autograph.pyct.static_analysis import activity def f(a): b = a + 1 return b node, ctx = get_node_and_ctx(f) node = qual_names.resolve(node) node = activity.resolve(node, ctx) fn_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE) # Note: tag will be changed soon. print('read:', fn_scope.read) print('modified:', fn_scope.modified) # + [markdown] colab_type="text" id="w8dBRlKkFNIP" # Another useful utility is the control flow graph builder. # # Of course, a CFG that fully accounts for all effects is impractical to build in a late-bound language like Python without creating an almost fully-connected graph. However, one can be reasonably built if we ignore the potential for functions to raise arbitrary exceptions. 
# + colab={} colab_type="code" id="KvLe9lWnFg7N" from tensorflow.python.autograph.pyct import cfg def f(a): if a > 0: return a b = -a node, ctx = get_node_and_ctx(f) node = qual_names.resolve(node) cfgs = cfg.build(node) cfgs[node] # + [markdown] colab_type="text" id="Cro-jfPA2oxR" # Other useful analyses include liveness analysis. Note that these make simplifying assumptions, because in general the CFG of a Python program is a graph that's almost complete. The only robust assumption is that execution can't jump backwards. # + colab={} colab_type="code" id="73dARy4_2oAI" from tensorflow.python.autograph.pyct import anno from tensorflow.python.autograph.pyct import cfg from tensorflow.python.autograph.pyct import qual_names from tensorflow.python.autograph.pyct.static_analysis import annos from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions from tensorflow.python.autograph.pyct.static_analysis import reaching_fndefs from tensorflow.python.autograph.pyct.static_analysis import liveness def f(a): b = a + 1 return b node, ctx = get_node_and_ctx(f) node = qual_names.resolve(node) cfgs = cfg.build(node) node = activity.resolve(node, ctx) node = reaching_definitions.resolve(node, ctx, cfgs) node = reaching_fndefs.resolve(node, ctx, cfgs) node = liveness.resolve(node, ctx, cfgs) print('live into `b = a + 1`:', anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)) print('live into `return b`:', anno.getanno(node.body[1], anno.Static.LIVE_VARS_IN)) # + [markdown] colab_type="text" id="GKSaqLbKQI_v" # ### Writing a custom Python-to-Python transpiler # # `transpiler.Py2Py` is a generic class for a Python [source-to-source compiler](https://en.wikipedia.org/wiki/Source-to-source_compiler). It operates on Python ASTs. Subclasses override its [transform_ast](https://github.com/tensorflow/tensorflow/blob/95ea3404528afcb1a74dd5f0946ea8d17beda28b/tensorflow/python/autograph/pyct/transpiler.py#L261) method. 
# # Unlike the `transformer` module, which have an AST as input/output, the `transpiler` APIs accept and return actual Python objects, handling the tasks associated with parsing, unparsing and loading of code. # + [markdown] colab_type="text" id="eicHoYlzRhnc" # Here's a transpiler that does nothing: # + colab={} colab_type="code" id="edaG6dWEPvUI" from tensorflow.python.autograph.pyct import transpiler class NoopTranspiler(transpiler.PyToPy): def get_caching_key(self, ctx): # You may return different caching keys if the transformation may generate # code versions. return 0 def get_extra_locals(self): # No locals needed for now; see below. return {} def transform_ast(self, ast, transformer_context): return ast tr = NoopTranspiler() # + [markdown] colab_type="text" id="hKxmlWeQSQyN" # The main entry point is the [transform](https://github.com/tensorflow/tensorflow/blob/95ea3404528afcb1a74dd5f0946ea8d17beda28b/tensorflow/python/autograph/pyct/transpiler.py#L384) method returns the transformed version of the input. # + colab={} colab_type="code" id="HXTIYsunSVr1" def f(x, y): return x + y new_f, module, source_map = tr.transform(f, None) new_f(1, 1) # + [markdown] colab_type="text" id="aKO42LBXw3SD" # ### Adding new variables to the transformed code # # The transformed function has the same global and local variables as the original function. 
You can of course generate local imports to add any new references into the generated code, but an easier method is to use the `get_extra_locals` method: # + colab={} colab_type="code" id="_Wl0n5I_1NJZ" from tensorflow.python.autograph.pyct import parser class HelloTranspiler(transpiler.PyToPy): def get_caching_key(self, ctx): return 0 def get_extra_locals(self): return {'name': 'you'} def transform_ast(self, ast, transformer_context): print_code = parser.parse('print("Hello", name)') ast.body = [print_code] + ast.body return ast def f(x, y): pass new_f, _, _ = HelloTranspiler().transform(f, None) _ = new_f(1, 1) # + colab={} colab_type="code" id="JcMSHJXK6pO2" import inspect print(inspect.getsource(new_f))
tensorflow/python/autograph/g3doc/pyct_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib notebook import matplotlib.pyplot as plt from colicoords import iter_subplots, save, load, IterCellPlot, AutoIterCellPlot, CellListPlot, CellPlot from colicoords.config import cfg import numpy as np import tifffile # # Cy3B-NHS cells = load('Cy3B_NHS/cells_raw.hdf5') len(cells) # First, we optimize the cells based on the binary image res = cells.optimize_mp() obj_vals = np.array([r.objective_value for r in res]) # We can histogram the objective values of the fit, large values are an indication of the cells being badly segmented or the binary image might be of two cells just after division. plt.figure(figsize=(4,4)) h = plt.hist(obj_vals, bins='fd', linewidth=0) # Using ``AutoIterCellPlot`` we can quicly inspect the cells with a high objective value. b = obj_vals > 60 aicp = AutoIterCellPlot(cells[b]) aicp.plot() # We use numpy-style boolean indexing to remove the outlying cells: cells_obj = cells[~b].copy() # In the next step, the coordinate system optimized based on the brightfield image. By using ``optimize_mp`` the workload is spread over all available CPU cores. res = cells_obj.optimize_mp('brightfield') # We can make another plot of the resulting objective values: obj_vals = np.array([r.objective_value for r in res]) gof = obj_vals / cells_obj.area plt.figure(figsize=(4,4)) h = plt.hist(obj_vals, bins='fd', linewidth=0) b = obj_vals > 0.5 cell_obj_bf = cells_obj[~b] len(cell_obj_bf) cell_obj_bf = load('Cy3B_NHS/cells_final - Copy.hdf5') len(cell_obj_bf) # Next, we use the ``measure_r`` to find measure for the radius of the cell. Here, we define the midpoint of the radial distribution of the brightfield image as the radius. This process is demonstrated below. 
# + cell = cell_obj_bf[0] cell.measure_r('brightfield', mode='mid') cp = CellPlot(cell) fig, axes = plt.subplots(1, 2, figsize=(5,2)) cp.plot_r_dist(data_name='brightfield', ax=axes[0]) axes[0].axvline(cell.radius * cfg.IMG_PIXELSIZE / 1000, color='r') cp.imshow('brightfield', ax=axes[1]) cp.plot_outline(ax=axes[1]) plt.tight_layout() # - cell_obj_bf.measure_r('brightfield', mode='mid') # Some of the brightfield radial distributions are not monotonically increasing between the minimum and maximum point and raise a warning. We check the result by histogramming the radius values and then discard outliers. plt.figure() h = plt.hist(cell_obj_bf.radius, bins='fd') b = (cell_obj_bf.radius > 4.8) * (cell_obj_bf.radius < 6.7) # To quickly visualize the final selected and coordinate optimized cells: aicp = AutoIterCellPlot(cell_obj_bf[b]) aicp.plot() save('Cy3B_NHS/cells_final.hdf5', cell_obj_bf[b]) aicp = AutoIterCellPlot(cell_obj_bf) aicp.plot() # # eGFP cells = load('eGFP/cells_raw.hdf5') len(cells) res = cells.optimize_mp() obj_vals = np.array([r.objective_value for r in res]) plt.figure(figsize=(4,4)) h = plt.hist(obj_vals, bins='fd', linewidth=0) b = obj_vals > 90 aicp = AutoIterCellPlot(cells[b]) aicp.plot() cells_obj = cells[~b].copy() res = cells_obj.optimize_mp('brightfield') obj_vals = np.array([r.objective_value for r in res]) gof = obj_vals / cells_obj.area plt.figure(figsize=(4,4)) h = plt.hist(obj_vals, bins='fd', linewidth=0) b = obj_vals > 2 cell_obj_bf = cells_obj[~b] len(cell_obj_bf) save('eGFP/cells_final.hdf5', cell_obj_bf) cell_obj_bf.measure_r('brightfield', mode='mid') plt.figure() h = plt.hist(cell_obj_bf.radius, bins='fd') b = (cell_obj_bf.radius > 3.85) * (cell_obj_bf.radius < 6.7) sum(b), len(cell_obj_bf) aicp = AutoIterCellPlot(cell_obj_bf[b]) aicp.plot() save('eGFP/cells_final.hdf5', cell_obj_bf[b])
figures/Figure_6/05_Optimize_and_filter_cell_objects.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW7 Extra Credit # # ### This extra credit assignment, worth 50 pts toward the homework score, analyzes the displacement amplitude spectrum for a small $M_L$=4.1 earthquake that occurred in Berkeley on December 4, 1998. # # ### Write python code to apply a ~$\frac{1}{f_2}$ source model with attenuation to the observed displacment amplitude spectrum to determine: # # 1. The scalar seismic moment # # 2. The corner frequency of the earthquake # # 3. The rupture area and slip # # 4. The stress drop. # # 5. Discuss your results in terms of what is typically found for earthquakes (use Lay and Wallace text as a reference). # # ### The SH Greens function solution for an anelastic halfspace is: # ### u(f)=$\frac{2 * |R_{SH}| * M_0}{4 * \pi * \rho * \beta^3 *R} \cdot \frac{1}{[1 + (\frac{f}{f_c})^2]^{(\frac{p}{2})}} \cdot e^{(\frac{-f*\pi*R}{Q*\beta})}$ # #### Where Rsh is the SH radiation pattern (eqn 8.65 Lay and Wallace), $M_0$ is the scalar moment, $\rho, \beta$, Q (range 10-100), R, f and $f_c$ (range .1 to 10 Hz) are the density, shear wave velocity, attenuation quality factor, total distance, frequency and corner frequency. The parameter p allows for adjusting the high frequency fall off rate of the spectrum. For a Brune source p=2 (a minimum value of p to conserve energy is 1.5, and typically the maximum is 3). # # #### u(f) is the given amplitude spectrum plotted below. # # #### Be sure to use CGS (cm, grams, seconds) units for all parameters. The unit for scalar moment will therefore be dyne cm. 
# # #### Develop a nested for loop to search for optimal Mo, fc and Q parameters # #Initial Setup and Subroutine Definitions - Do Not Edit import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # + #Model Parameters distance=6.8e5 #units cm depth=5.1e5 #units cm azimuth=137.8*np.pi/180 #radians strike=139*np.pi/180 #radians rake=179*np.pi/180 #radians dip=69*np.pi/180 #radians beta=3.2e5 #cm/s density=2.6 #grams/cc #Compute Total distance (R), Azimuth(phi) and takeoff angle(I) phi=strike-azimuth R=np.sqrt(distance**2 + depth**2); I=np.pi-np.arctan(distance/depth); #pi is for upgoing angle # Read Data File and Setup frequency and amplitude spectral amplitude arrays data=pd.read_csv('brkspec.txt', sep=' ', delimiter=None, header=None, names = ['Hz','AmpSpec']) freq=np.array(data["Hz"]) ampspec=np.array(data["AmpSpec"]) plt.loglog(freq,ampspec) plt.title('Berkeley Event Amplitude Spectrum') plt.xlabel('frequency (Hz)') plt.ylabel('amplitude spectrum cm/Hz') plt.savefig('brkspec.jpg') plt.show() # + ##### Write code to calculate the SH radiation pattern coefficient #Write code to fit the spectral model to the observed displacement spectrum. #This can be accomplished with a nested for loop over the scalar moment and corner frequency #parameters #Define grid search range Mo=np.arange(100.,400.,10.)*1e20 #dyne cm fc=np.arange(0.1,10.,0.05) q=np.arange(10.,100.,5.) p=np.arange(2.0,3.5,10.) #p=np.array([2.0, 2.0]) #Loop over model parameters and test for fit with data to determine best fit parameters #Plot the fit to the data, and discuss the uncertainties in the solution # - # #### This is an example of the fit that can be obtained # # <img src='brkspecfit.jpg'> # ### Questions # 1. What are the scalar seismic moment, Mw, corner frequency and Q that best fit the spectra assuming p=2.0? # # 2. How does the fit and the scalar moment, corner frequency and Q change if p=3.0? # # 3. 
The fault radius can be determed from the corner frequency where radius=0.37*beta/fc. Use the fault radius and moment to estimate the average slip and the stress drop of the earthquake # # 4. Discuss the estimated stress drop in terms of the expected range of values for earthquakes. # # 5. How well determined do you think your corner frequency and moment estimates are. How do uncertainties in those quantitites translate to uncertainty in stress drop? #Use the corner frequency to estimate the fault rupture area, the average slip on the fault #and the stress drop
HW7extracredit/hw7_extra_credit_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### This notebook is for text mining literatures related to our gene mutations # + import requests import sys import os import numpy as np import Bio import pandas as pd import json pd.set_option('display.max_colwidth', -1) # + #read in file chr3_annotated_exon-splice.txt and data = pd.read_csv("variant_burden/chr3_annotated_exon-splice.txt", sep='\t', keep_default_na=False, na_values=['.']) data_nonsynonymous = data.loc[data['ExonicFunc.ensGene'] != 'synonymous SNV'] data_nonsynonymous.dropna(thresh=2) listoftop10nonsynonymous = data_nonsynonymous['hgnc_symbol'].value_counts() #print (type(listoftop10nonsynonymous)) listoftop10nonsynonymous = listoftop10nonsynonymous.sort_index() listoftop10nonsynonymous= listoftop10nonsynonymous.nlargest(10) #print(listoftop10nonsynonymous) listoftop10nonsynonymous = listoftop10nonsynonymous.index.values.tolist() #print (listoftop10nonsynonymous) ##################### get list of nonsynonymous gene ############## df_gene = pd.DataFrame() list_snps_1 = [] for gene in listoftop10nonsynonymous: list_snp_1 = data_nonsynonymous.loc[data_nonsynonymous['hgnc_symbol'] == gene]['avsnp150'] #deduplicate list_snp_1 = list(set(list_snp_1)) str_snp = ",".join(list_snp_1) list_snps_1.append(str_snp) #print (list_snps) df_gene_list_1 = pd.DataFrame({ 'gene': listoftop10nonsynonymous, 'listofsnp': list_snps_1 }) df_gene_list_1 # + data_uncommon = data_nonsynonymous.loc[data_nonsynonymous['PopFreqMax'] < 0.05] listoftop10uncommon = data_uncommon['hgnc_symbol'].value_counts() listoftop10uncommon = listoftop10uncommon.sort_index() listoftop10uncommon= listoftop10uncommon.nlargest(10) #print (listoftop10uncommon) listoftop10uncommon = listoftop10uncommon.index.values.tolist() ####################### get list of uncommon gene 
############### df_gene = pd.DataFrame() list_snps_2 = [] for gene in listoftop10uncommon: list_snp_2 = data_nonsynonymous.loc[data_nonsynonymous['hgnc_symbol'] == gene]['avsnp150'] #deduplicate list_snp_2 = list(set(list_snp_2)) str_snp = ",".join(list_snp_2) list_snps_2.append(str_snp) #print (list_snps) df_gene_list_2 = pd.DataFrame({ 'gene': listoftop10uncommon, 'listofsnp': list_snps_2 }) df_gene_list_2 # + import os for gene in df_gene_list_1['gene']: f= open("ns/" + gene + ".txt", "w+") # snp list gene_mu_string = df_gene_list_1[df_gene_list_1['gene']==gene]["listofsnp"] print (gene_mu_string.values[0]) f.write("Related Key Words Count for Gene: " + gene + "\n") f.write("==========================================================================\n") for snp in gene_mu_string.values[0].split(","): f.write("\n") f.write("SNP: " + snp + "\n") # LitVar API to fetch related key words related to searching rsid ('gene','variant','chemical','drug') searchURLCOUNT = "https://www.ncbi.nlm.nih.gov/research/bionlp/litvar/api/v1/public/relations" paramsCOUNT = {} data_string = '{"accessions": ["litvar@' + snp + '##"], "unlimited": 0}' data_COUNT = json.loads(data_string) req = requests.post(searchURLCOUNT, params=paramsCOUNT, json=data_COUNT) # return value as json of {rsid,pmids} return_data = req.json() #print (return_data) for i in range(len(return_data)): data = return_data[i] f.write ("Category: " + data['concept'] + "\n") relations = data['relations'] f.write ("--------------------->\n") for j in range(len(relations)): #print (relations[j]) f.write ("\t" + relations[j]['name'] + "\t" + str(relations[j]['count']) + "\n") f.write("\n\n") f.write("Related literatures for Gene: " + gene + "\n") f.write("==========================================================================\n") # LitVar API to fetch related literatures for gene mutations searchURL = "https://www.ncbi.nlm.nih.gov/research/bionlp/litvar/api/v1/public/rsids2pmids" params={'rsids':gene_mu_string} 
req=requests.get(searchURL,params=params) related_lit = [] # return value as json of {rsid,pmids} return_data = req.json() for item in return_data: related_lit.append(item['pmids']) #print (item['pmids']) #print (item["rsid"]) # PubMed API to fetch metadata of literature by given pmid from Bio.Entrez import efetch from Bio import Entrez Entrez.email = '<EMAIL>' for lit in related_lit: lit_string = ",".join(str(e) for e in lit) #print (lit_string) handle = efetch(db='pubmed', id=lit_string, retmode='text', rettype='abstract') f.write (handle.read()) f.write("----------------------------------------------------------------------------\n") f.close() # - for gene in df_gene_list_2['gene']: f= open("ns_uncommon/" + gene + ".txt", "w+") # snp list gene_mu_string = df_gene_list_2.loc[df_gene_list_2['gene']==gene]["listofsnp"] print (gene_mu_string.values[0]) f.write("Related Key Words Count for Gene: " + gene + "\n") f.write("==========================================================================\n") for snp in gene_mu_string.values[0].split(","): f.write("\n") f.write("SNP: " + snp + "\n") searchURLCOUNT = "https://www.ncbi.nlm.nih.gov/research/bionlp/litvar/api/v1/public/relations" paramsCOUNT = {} #print (snp) data_string = '{"accessions": ["litvar@' + snp + '##"], "unlimited": 0}' data_COUNT = json.loads(data_string) req = requests.post(searchURLCOUNT, params=paramsCOUNT, json=data_COUNT) # return value as json of {rsid,pmids} return_data = req.json() #print (return_data) for i in range(len(return_data)): data = return_data[i] f.write ("Category: " + data['concept'] + "\n") relations = data['relations'] f.write ("--------------------->\n") for j in range(len(relations)): #print (relations[j]) f.write ("\t" + relations[j]['name'] + "\t" + str(relations[j]['count']) + "\n") f.write("\n\n") f.write("Related literatures for Gene: " + gene + "\n") f.write("==========================================================================\n") # LitVar API to fetch related 
literatures for gene mutations searchURL = "https://www.ncbi.nlm.nih.gov/research/bionlp/litvar/api/v1/public/rsids2pmids" params={'rsids':gene_mu_string} req=requests.get(searchURL,params=params) related_lit_2 = [] # return value as json of {rsid,pmids} return_data = req.json() for item in return_data: related_lit_2.append(item['pmids']) #print (item['pmids']) #print (item["rsid"]) # PubMed API to fetch metadata of literature by given pmid from Bio.Entrez import efetch from Bio import Entrez Entrez.email = '<EMAIL>' for lit in related_lit_2: lit_string = ",".join(str(e) for e in lit) #print (lit_string) handle = efetch(db='pubmed', id=lit_string, retmode='text', rettype='abstract') f.write (handle.read()) f.write("----------------------------------------------------------------------------\n") f.close()
TextMining.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <br/> # # ## Sesión 9 - Python 06 # # # Fundamentos de Programación # # + [markdown] slideshow={"slide_type": "slide"} # ## Temario # + [markdown] slideshow={"slide_type": "subslide"} # - Ejercicios sesión anterior. # - Estructuras de control. # - For # - Definición # - Ejemplos. # - List comprehension. # + [markdown] slideshow={"slide_type": "slide"} # ### Ejercicios # # Dada una lista de números [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] Realizar las siguientes operaciones: # # - Obtener el mayor número. # - Obtener el menor número. # - Obtener el promedio. # - Ordenarlos de forma descendente. # - Obtener la sumatoria de todos los valores. # - Imprimir únicamente los numeros impares. # + [markdown] slideshow={"slide_type": "subslide"} # ### Ejercicios # # Dada una lista de números [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] Realizar las siguientes operaciones: # # - Obtener el mayor número. # + slideshow={"slide_type": "fragment"} var_lista = [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] var_numeroMayor = -1000 while(len(var_lista) != 0): if ( var_lista[-1] > var_numeroMayor): var_numeroMayor = var_lista[-1] var_lista.pop() print(var_numeroMayor) # + [markdown] slideshow={"slide_type": "subslide"} # Dada una lista de números [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] Realizar las siguientes operaciones: # # - Obtener el menor número. 
# + slideshow={"slide_type": "fragment"}
# Find the smallest number: consume the list from the back, popping each
# element after comparing it against the smallest value seen so far.
var_lista = [3,6,5,4,8,7,6,10,-10,5,6,7,12,5,33,6,15,14,18,2,16,10,3,15,6,27,12,5]
# Start from positive infinity instead of an arbitrary huge magic number,
# so the sentinel is guaranteed to be larger than any real element.
var_numeroMenor = float("inf")
while(len(var_lista) != 0):
    if ( var_lista[-1] < var_numeroMenor):
        var_numeroMenor = var_lista[-1]
    var_lista.pop()
print(var_numeroMenor)

# + [markdown] slideshow={"slide_type": "subslide"}
# Given a list of numbers [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] perform the following operations:
#
# - Compute the average.

# + slideshow={"slide_type": "fragment"}
# input
var_lista = [3,6,5,4,8,7,6,10,-10]
var_total = var_posicion = 0
# process: walk the list by index, accumulating the running sum
while (var_posicion < len(var_lista)):
    var_total = var_total + var_lista[var_posicion]
    print(var_posicion, var_lista[var_posicion], var_total)
    var_posicion = var_posicion + 1
# output: arithmetic mean
print(var_total/len(var_lista))

# + [markdown] slideshow={"slide_type": "subslide"}
# Given a list of numbers [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] perform the following operations:
#
# - Sort them in descending order.

# + slideshow={"slide_type": "fragment"}
var_lista = [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5]
# in-place descending sort
var_lista.sort(reverse = True)
print(var_lista)

# + [markdown] slideshow={"slide_type": "subslide"}
# Given a list of numbers [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] perform the following operations:
#
# - Compute the sum of all the values.
# + slideshow={"slide_type": "fragment"}
var_lista = [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5]
var_lista.sort(reverse = True)
var_total = var_posicion = 0
# process: walk the list by index and keep a running total
while var_posicion < len(var_lista):
    # add the current item to the total
    var_total += var_lista[var_posicion]
    # advance one position
    var_posicion += 1
print(var_total)

# + [markdown] slideshow={"slide_type": "subslide"}
# Given a list of numbers [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5] perform the following operations:
#
# - Print only the odd numbers.

# + slideshow={"slide_type": "fragment"}
var_lista = [3,6,5,4,8,7,6,10,3,5,6,7,12,5,33,6,15,14,18,7,16,10,3,15,6,27,12,5]
var_posicion, var_salida = 0, []
# process: collect every value that is not divisible by 2
while var_posicion < len(var_lista):
    valor = var_lista[var_posicion]
    if valor % 2 != 0:
        var_salida.append(valor)
    # advance one position
    var_posicion += 1
print(var_salida)

# + [markdown] slideshow={"slide_type": "slide"}
# ## For (for each)

# + [markdown] slideshow={"slide_type": "subslide"}
# - The number of iterations is fixed in advance.
# - The condition depends on an iteration, not on a state.
# - Allows code reuse.
# - Allows nesting (nested for).

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Syntax

# + [markdown] slideshow={"slide_type": "subslide"}
# ```python
# code...
# for <variable> in <iterable> :
# ····subblock
# code...
# ```

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Example 01
#
# Print the numbers from 0 to 9

# + slideshow={"slide_type": "fragment"}
for numero in range(10):
    print(numero)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Example 02
#
# Iterate over a word

# + slideshow={"slide_type": "fragment"}
for caracter in "Mi curso de python":
    print(caracter)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Example 02
#
# Iterate over a word

# + slideshow={"slide_type": "fragment"}
for letra in "palabra":
    print(letra)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Example 03
#
# Iterate over a list

# + slideshow={"slide_type": "fragment"}
var_miLista = [1,2,3,4,5,6,"Miguel",2.25]
for elemento in var_miLista:
    print(elemento)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 03
#
# Iterate over a list containing sublists

# + slideshow={"slide_type": "fragment"}
var_miLista = [[2,25000],[3,18000,153],[1,20000]]
for fila in var_miLista:
    print(fila)
    # inner loop visits each element of the sublist
    for celda in fila:
        print(celda)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 04
#
# Compute the total of a list of products

# + slideshow={"slide_type": "fragment"}
var_total = 0
var_miLista = [[2,"25000"],[3,18000],[1,20000],[12,5000]]
# iterate over the product list
for registro in var_miLista:
    # unpack quantity and price for this product
    var_cantidad, var_precio = registro
    # accumulate quantity * price into the total
    var_total += (var_cantidad * int(var_precio))
print(var_total)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 04
#
# Print my product list in reverse

# + slideshow={"slide_type": "fragment"}
var_miLista = [1,2,3,4,5,6]
# iterate over the list back to front
for valor in reversed(var_miLista):
    print(valor)

# + [markdown] slideshow={"slide_type": "slide"}
# ### List comprehensions

# + [markdown] slideshow={"slide_type": "subslide"}
# - a different way of expressing lists
# - Idiomatic Expressions.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 01
#
# Print the numbers from 0 to 9

# + slideshow={"slide_type": "fragment"}
# equivalent to the comprehension [item for item in range(10)]
var_miLista = list(range(10))
print(var_miLista)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 02
#
# Print the first 20 even numbers

# + slideshow={"slide_type": "fragment"}
#var_miLista = [i for i in range(41) if i % 8 == 0 and i % 5 == 0]
#print(var_miLista)
var_entrada = []
var_temporal = "5,35"
# keep reading comma-separated numbers until the user enters "0,0"
while var_temporal != '0,0':
    var_temporal = input("Ingrese los datos: ")
    var_entrada.extend(int(i) for i in var_temporal.split(','))
print(var_entrada)
Week 03/9 - python 06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Ctran Breadcrumb Analysis # #### Definitions # # **reading**: A **_reading_** is a unique entry from a c-tran breadcrumb data collection source (e.g. a bus pushing data). A **_reading_** is composed of multiple fields and is considered well-formed if all fields are present and all fields are of the correct datatype. Additionally, a **_reading_** is considered unique if it's well-formed and the set of it's field values are unique throughout the entirety of the data. In practice, each reading relates to each row in a database or dataframe. # # At the time of writing, a **_reading_** contains these fields with these datatypes: # # | Field name | EVENT_NO_TRIP | OPD_DATE | VEHICLE_ID | METERS | ACT_TIME | GPS_LONGITUDE | GPS_LATITUDE | # |------------|---------------|----------|------------|---------|----------|---------------|--------------| # | Datatype | integer | date | integer | integer | integer | float w/ precision 6 | float w/ precision 6| # # # Example: # # | Field name | EVENT_NO_TRIP | OPD_DATE | VEHICLE_ID | METERS | ACT_TIME | GPS_LONGITUDE | GPS_LATITUDE | # |------------|---------------|----------|------------|---------|----------|---------------|--------------| # | Values | 152011646 | 24-FEB-20| 1776 | 77999 | 28558 | -122.579383 | 45.533608 | # # **vehicle**: A **_vehicle_** is any datasource sending valid readings over time. Additionally, it must have a unique `VEHICLE_ID` which is sent within each reading. # # **path**: A **_path_** is a line connecting two points in 2D-space by the shortest path (i.e. as the crow flies). A point is defined as an `x`-`y` coordinate pair and its units are inconsequential -- as long as they're the same for botht the `x` and `y` coordintate. 
In practice, a point looks like a longitude-latitude coordinate pair, or an x-y coordinate pair with a known origin (e.g. the bottom left corner of Clark County). It's also important to note that, for the purposes of this project, the surface of any geodetic system (e.g. a sphere or the earth) is considered 2D-space. That is, the **_path_** between two points on the earth would be "curved" as it travels across the surface of it and not "through" it. # # - route # - day # - iteration # #### Schema Definition # # For the purposes of this notebook the schema is based off of pandas however, there there is a schema that defines a table within a postgresql database which is also defined here. The only notable difference is the datatype of the `GPS_LATITUDE` and `GPS_LONGITUDE` fields. # # + from glob import glob import pandas as pd # Grab all tsv data from the `data` directory # ALL_DATA_FILES = glob("../data/**/*.csv") ALL_DATA_FILES = glob("../data/**/bos_20200301620.csv") def load_data(): ### Data columns are: ### "EVENT_NO_TRIP" "OPD_DATE" "VEHICLE_ID" "METERS" "ACT_TIME" "GPS_LONGITUDE" "GPS_LATITUDE" # Taken and modified from https://stackoverflow.com/a/21232849/4668680 return pd.concat( [pd.read_csv(_file, parse_dates=[1]) for _file in ALL_DATA_FILES], ignore_index=True ) df = load_data() # - # %timeit -n1 df = load_data() # + unique_days = df["OPD_DATE"].unique() lookup_table = {day:{} for day in unique_days} # Get dataframes for each unique vehicle for each unique day for day in unique_days: day_df = df.loc[df["OPD_DATE"] == day] unique_vehicles = day_df.VEHICLE_ID.unique() lookup_table[day] = {unique_vehicle:None} for unique_vehicle in unique_vehicles: vehicle_df = day_df.loc[day_df["VEHICLE_ID"] == unique_vehicle] lookup_table[day][unique_vehicle] = vehicle_df # + # See: https://stackoverflow.com/a/48431023/4668680 from math import sin, cos, sqrt, atan2, radians def getDistanceFromLatLonInKm(lat1,lon1,lat2,lon2): R = 6371 # Radius of the earth in km # R = 3956 
dLat = radians(lat2-lat1) dLon = radians(lon2-lon1) rLat1 = radians(lat1) rLat2 = radians(lat2) a = sin(dLat/2) * sin(dLat/2) + cos(rLat1) * cos(rLat2) * sin(dLon/2) * sin(dLon/2) c = 2 * atan2(sqrt(a), sqrt(1-a)) d = R * c # Distance in km return d def calc_velocity(dist_km, time_start, time_end): """Return 0 if time_start == time_end, avoid dividing by 0""" return dist_km / ((time_end - time_start).seconds / 3600) if time_end > time_start else 0 # + import numpy as np tmp_df = lookup_table[np.datetime64('2020-03-16T00:00:00.000000000')][1776] # Convert OPD_DATE + ACT_TIME to timestamp dates = pd.to_datetime(df.OPD_DATE, cache=True) times = pd.to_timedelta(df.ACT_TIME, unit="seconds") tmp_df["timestamp"] = dates + times # First sort by ID and timestamp: tmp_df = tmp_df.sort_values(by=['timestamp']) # print(tmp_df) # Group the sorted dataframe by ID, and grab the initial value for lat, lon, and time. tmp_df['lat0'] = tmp_df["GPS_LATITUDE"][0] tmp_df['lon0'] = tmp_df["GPS_LONGITUDE"][0] tmp_df["t0"] = tmp_df["timestamp"][0] tmp_df['dist_km'] = tmp_df.apply( lambda row: getDistanceFromLatLonInKm( lat1=row['GPS_LATITUDE'], lon1=row['GPS_LONGITUDE'], lat2=row['lat0'], lon2=row['lon0'] ), axis=1 ) tmp_df['velocity_kmh'] = tmp_df.apply( lambda row: calc_velocity( dist_km=row['dist_km'], time_start=row['t0'], time_end=row['timestamp'] ), axis=1 ) print(tmp_df) # - # print(df) UNIQUE_TRIP_NOS = df["EVENT_NO_TRIP"].unique() UNIQUE_DATES = df["OPD_DATE"].unique() UNIQUE_VEHICLES = df["VEHICLE_ID"].unique() # #### Which dates are tracked, are these weekdays or weekend days? # + from pandas.tseries.holiday import USFederalHolidayCalendar # Test all dates are weekdays unique_days = pd.Series(UNIQUE_DATES) # print(unique_days.dt.day_name()) is_all_weekdays = pd.Series([(day != "Saturday" and day != "Sunday") for day in unique_days]).all() print(is_all_weekdays) # - df.OPD_DATE.agg(["min", "max"]) # #### Are the same vehicles tracked each day or does it change from day to day? 
# +
date_group = df.groupby("OPD_DATE")
# one value per service date: how many distinct vehicles reported that day
df2 = date_group.apply(lambda day_rows: len(day_rows["VEHICLE_ID"].unique()))
# -

df2

# #### The ACT_TIME column seems to represent the time at which the event occurred. What are its units?

# Its units appear to be seconds offset from midnight; this is the min/max time in hours
df.ACT_TIME.agg(["min", "max"]).div(60 * 60)

# #### Relating to last: Can you convert this to datetime format?

# TODO: This would be more useful related to vehicle IDs
# TODO: This doesn't appear to maintain order
df["ACT_DTG"] = df.OPD_DATE + pd.to_timedelta(df.ACT_TIME, unit="s")
print(df.ACT_DTG)

# #### Are the events spaced evenly in time or are the events more frequent at specific times of day (for a given vehicle)?

# +
# What is the delta from reading to reading; is it always 5 seconds?

# +
# What is the METERS field?
# METERS keyed by vehicle id, so monotonicity can be checked per vehicle
index = pd.Index(df.VEHICLE_ID)
meters_series = pd.Series(df.METERS, index=index)
monotonic_meters = pd.Series(meters_series.groupby(level=0).is_monotonic_increasing)
print(monotonic_meters.all())
# -

# #### Which route has the largest birds-eye distance? The smallest distance?
# +
# Index by unique vehicles per day
index = pd.MultiIndex.from_frame(df[["OPD_DATE", "VEHICLE_ID"]])

wanted_columns = ["GPS_LATITUDE", "GPS_LONGITUDE"]
lat_long_df = pd.DataFrame(
    df[wanted_columns].to_numpy(),
    columns=wanted_columns,
    index=index
)

# TODO: Why does vehicle id 1002 have NaN
# Intermediate step to min/max lat/long of each vehicle (across all days)
all_v_min_max = lat_long_df.groupby("VEHICLE_ID").agg(["min", "max"]).reindex(axis=1)
print(all_v_min_max)
# idx = pd.IndexSlice
# lat_mins = all_v_min_max.loc[:,["GPS_LATITUDE"]]
# maxs = all_v_min_max.loc[:,idx[:,"max"]]
# print(lat_mins)
# -

# TODO: Use datetimes instead of strings for date comparisons
for bus_id in UNIQUE_VEHICLES:
    # `Series.is_monotonic` was deprecated in pandas 1.5 and removed in 2.0;
    # use `is_monotonic_increasing`, matching the monotonicity check in the
    # METERS cell above.
    assert df[
        (df.OPD_DATE == "20-MAR-20") & (df.VEHICLE_ID == bus_id)
    ].METERS.is_monotonic_increasing, f"{bus_id} METERS is not monotonically increasing"

import geopandas
from shapely.geometry import Polygon

# Small geopandas smoke test: three simple polygons in a GeoSeries.
p1 = Polygon([(0, 0), (1, 0), (1, 1)])
p2 = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
p3 = Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])
g = geopandas.GeoSeries([p1, p2, p3])
g
data_processing/python/info.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import numpy as np import keras from tqdm.notebook import tqdm from IPython.display import display import sklearn from matplotlib import pyplot as plt from keras.layers import LSTM, Dropout, Dense, BatchNormalization, TimeDistributed, Input, Masking, Embedding from sklearn.metrics import roc_curve, confusion_matrix from sklearn.model_selection import train_test_split from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier #https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/ # + def plot_roc_cur(fper, tper, label="", line="", title="plot title"): plt.plot(fper, tper, line, label=label) #plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title(title) plt.legend(loc='lower right') def create_roc_curve(preds, targets, label="", line="", title=""): all_fpr, all_tpr = [], [] fpr, tpr, thresholds = roc_curve(targets, preds) all_fpr.append(fpr) all_tpr.append(tpr) all_fpr = np.array(all_fpr) all_tpr = np.array(tpr) plot_roc_cur(all_fpr[0], all_tpr, label=label, line=line, title=title) # - # To do: # # - Load alle trials i stedte for data (all_readings = np.load('all_readings.npy', allow_pickle=True)) # - Ryd op i de trials. # - Generér uniform trials (3D array med [batch, samples,features]) # - Altså, cellen med #Make all trials same length - Save this cell # - batch er alle dine træningspunkter # - samples er dine time series (små udklip af eye tracking) # - features er... ja, features. # - Få LSTM til at virke med din data - skidevære med metrics. 
# - Når det virker kan du rode rundt med ordentlig klassifikation # - Evt separer saccades og fixations? # + target = 'gender' #'age' even_gender = False sample_length = 8 batch_size=10 seed = 42 epochs=100 verbose=True np.random.seed(seed) # + data = np.load('all_readings.npy', allow_pickle=True) data = [i for i in data if len(i)>sample_length] if even_gender: data = data[:-52] # + def make_trials_uniform(trials): uniform_X = [] uniform_y = [] show = True for trial in tqdm(trials): X, y = cleanup_reading(trial, show=show) show=False length = X.shape[0] for i in range(sample_length, length-length%sample_length, sample_length): uniform_X.append(X.iloc[i-sample_length:i]) uniform_y.append(y.values[0]) return np.array(uniform_X), np.array(uniform_y) def experimental(trial): trial = trial[trial['eventType']==0] trial = trial.drop('eventType', axis=1) trial = trial.replace(0,np.nan).dropna(axis=1,how="all") return trial[['duration','meanPupilDiameter']] def cleanup_reading(trial, show=False): trial = trial[trial.columns[1:]] trial = trial[trial['eye']=='left'] trial = trial.drop('eye',1) trial = trial.drop('eventIdxLeft',1) trial = trial.drop('eventIdxRight',1) fixations = trial[trial['eventType']=='fixation'] saccades = trial[trial['eventType']=='saccade'] fixations = fixations.reset_index() fixations = fixations.drop(['startSaccadeX', 'startSaccadeY', 'endSaccadeX', 'endSaccadeY', 'age', 'gender', 'index', 'eventType'], axis=1) fixations = fixations.rename(columns={"duration": "fix_duration"}) saccades = saccades.reset_index() saccades = saccades.drop(['meanX', 'meanY', 'microsaccadeCount', 'microsaccadeAmplitude', 'meanPupilDiameter', 'microsaccadePeakVelocity', 'index', 'eventType'], axis=1) saccades = saccades.rename(columns={"duration": "sac_duration"}) trial = pd.concat([fixations, saccades], axis=1) feats = trial.columns trial = trial.reset_index() trial = trial.fillna(0) if show: print("Your features") display(trial.head()) return trial[feats[:-2]], 
trial[feats[-2:]] # + np.random.shuffle(data) X,y = make_trials_uniform(data) X,y = sklearn.utils.shuffle(X, y, random_state=seed) y = y-1 if target == 'gender': y = y[:,0] #Only tage gender else: y = y[:,1] y = y.reshape(-1, 1) print(f'X shape: {X.shape}, y shape: {y.shape}') X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=seed) # + model = keras.Sequential() model.add(Input(shape=(X_train[0].shape))) model.add(BatchNormalization()) model.add(LSTM(46, return_sequences=True)) model.add(LSTM(600, return_sequences=True)) model.add(Dense(720, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1, activation="sigmoid")) model.compile(loss="binary_crossentropy" , metrics=[keras.metrics.binary_accuracy] , optimizer="adam") model.summary() # - callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(X_train, y_train, validation_split=0.1, epochs=epochs, batch_size=batch_size, callbacks=[callback]) fig=plt.figure(figsize=(8,6)) plt.plot(history.history['loss'], label="loss"); plt.plot(history.history['binary_accuracy'], label="acc"); plt.plot(history.history['val_loss'], '--', label="val loss"); plt.plot(history.history['val_binary_accuracy'], '--', label="val acc"); plt.legend(); # + dummy = DummyClassifier() dummy.fit(X_train, y_train) create_roc_curve(model.predict(X_test).mean(axis=1), y_test, label="LSTM") create_roc_curve(dummy.predict(X_test), y_test, label="Baseline", title="ROC Curve - TüEyeQ") # - model.evaluate(X_test, y_test) best_acc accuracies = [i[1].history['val_accuracy'][-1] for i in models] best = 0 best_ind = 0 best_model = None for ind, model in enumerate(models): model = model[0] acc = model.evaluate(X_test, y_test) if acc[1] > best: best = acc[1] best_ind = ind best_model = model best_model.summary() # Final architecture models[95][0].summary() # + fig = plt.figure(figsize=(12,5)) hist = models[29][1].history plt.subplot(1,2,1) plt.title("Training Accuracy") 
plt.plot(history.history['binary_accuracy'], label="Accuracy") plt.plot(history.history['val_binary_accuracy'], '--', label="Validation Accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend() plt.subplot(1,2,2) plt.title("Training Loss") plt.plot(history.history['loss'], label="Loss") plt.plot(history.history['val_loss'], '--', label="Validation Loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.legend(); # - model.evaluate(X_test,y_test) np.save('LSTM_tueyeq_pred', (model.predict(X_test).mean(axis=1), y_test)) # + #np.save('doves_turk', np.concatenate()) # -
LSTM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] id="YabibwdJWJUB" colab_type="text" # # 第3章 最尤推定法:確率を用いた推定理論 # + [markdown] id="B48z-a6kWJUB" colab_type="text" # ## 「03-maximum_likelihood.py」の解説 # + [markdown] id="UMlYfg8-WJUC" colab_type="text" # ITエンジニアための機械学習理論入門「第3章 最尤推定法:確率を用いた推定理論」で使用しているサンプルコード「03-maximum_likelihood.py」の解説です。 # # ※ 解説用にコードの内容は少し変更しています。 # + [markdown] id="qTkiTTxtWJUD" colab_type="text" # はじめに必要なモジュールをインポートしておきます。 # # 関数 normal は、正規分布に従う乱数を生成するために利用します。 # + id="S6raToIWWJUE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} import numpy as np import matplotlib.pyplot as plt import pandas as pd from pandas import Series, DataFrame from numpy.random import normal # + [markdown] id="Oajb2XsIWJUG" colab_type="text" # 正弦関数に正規分布のノイズを載せたデータセットを生成する関数を定義します。 # # これは、0≦x≦1 の区間を等分した num 個の点 x に対して、対応する y の値を生成します。 # + id="lUa-qm78WJUI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} # データセット {x_n,y_n} (n=1...num) を用意 def create_dataset(num): dataset = DataFrame(columns=['x','y']) for i in range(num): x = float(i)/float(num-1) y = np.sin(2*np.pi*x) + normal(scale=0.3) dataset = dataset.append(Series([x,y], index=['x','y']), ignore_index=True) return dataset # + [markdown] id="d4w1t1dTWJUL" colab_type="text" # 例として、10個のデータをトレーニングセットとして生成します。 # + id="huEmCrDCWJUM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 359} outputId="41b05dde-028e-46d8-c66e-398e28a2c73b" executionInfo={"status": "ok", "timestamp": 1518327108403, "user_tz": -540, "elapsed": 777, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-QnNQI9d1-o8/AAAAAAAAAAI/AAAAAAAAZnk/pgF0J6fNn2I/s50-c-k-no/photo.jpg", 
"userId": "105109646685285082872"}} N=10 # サンプルを取得する位置 x の個数 train_set = create_dataset(N) train_set # + [markdown] id="Nt8-Jq1AWJUR" colab_type="text" # x と y の値のリストは、train_set.x と train_set.y で取得できます。 # # グラフ上にプロットすると次のようになります。 # + id="6oct2qswWJUR" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}], "base_uri": "https://localhost:8080/", "height": 364} outputId="d6b2729f-18ee-43b9-baad-7f4f1e9488d4" executionInfo={"status": "ok", "timestamp": 1518327108981, "user_tz": -540, "elapsed": 522, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-QnNQI9d1-o8/AAAAAAAAAAI/AAAAAAAAZnk/pgF0J6fNn2I/s50-c-k-no/photo.jpg", "userId": "105109646685285082872"}} plt.scatter(train_set.x, train_set.y, marker='o', color='blue') # + [markdown] id="z2Haw9TxWJUV" colab_type="text" # このデータに対して、最尤推定法でフィッティングした m 次多項式と標準偏差を決定する関数を用意します。 # # 引数 dataset と m にトレーニングセットと多項式の次数を代入すると、多項式に対応する関数 f(x) のオブジェクトと標準偏差の値が返ります。 # + id="wCb738LTWJUW" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} def resolve(dataset, m): t = dataset.y phi = DataFrame() for i in range(0,m+1): p = dataset.x**i p.name="x**%d" % i phi = pd.concat([phi,p], axis=1) tmp = np.linalg.inv(np.dot(phi.T, phi)) ws = np.dot(np.dot(tmp, phi.T), t) def f(x): y = 0.0 for i, w in enumerate(ws): y += w * (x ** i) return y sigma2 = 0.0 for index, line in dataset.iterrows(): sigma2 += (f(line.x)-line.y)**2 sigma2 /= len(dataset) return (f, np.sqrt(sigma2)) # + [markdown] id="7cjWVlOZWJUY" colab_type="text" # また、得られた関数 f(x) に対して、トレーニングセットに対する最大対数尤度を求める関数を用意します。 # + id="sBou_p_aWJUZ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} # 最大対数尤度(Maximum log likelihood)を計算 def log_likelihood(dataset, f): dev = 0.0 n = float(len(dataset)) for index, line in dataset.iterrows(): x, y = line.x, line.y dev += (y - f(x))**2 err = dev * 0.5 beta = n / dev lp = -beta*err + 
0.5*n*np.log(0.5*beta/np.pi) return lp # + [markdown] id="mv0H6RSWWJUd" colab_type="text" # これらを用いて、結果をグラフに可視化する関数が次になります。 # + id="dT3pEB9UWJUd" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} def show_result(subplot, train_set, m): f, sigma = resolve(train_set, m) subplot.set_xlim(-0.05,1.05) subplot.set_ylim(-1.5,1.5) subplot.set_title("M=%d" % m, fontsize=10) # トレーニングセットを表示 subplot.scatter(train_set.x, train_set.y, marker='o', color='blue', label=None) # 真の曲線を表示 linex = np.linspace(0,1,101) liney = np.sin(2*np.pi*linex) subplot.plot(linex, liney, color='green', linestyle='--') # 多項式近似の曲線を表示 linex = np.linspace(0,1,101) liney = f(linex) label = "Sigma=%.2f" % sigma subplot.plot(linex, liney, color='red', label=label) subplot.plot(linex, liney+sigma, color='red', linestyle='--') subplot.plot(linex, liney-sigma, color='red', linestyle='--') subplot.legend(loc=1, fontsize=10) # + [markdown] id="V9VSwWBkWJUg" colab_type="text" # 先ほど生成したトレーニングセットを用いて、0, 1, 3, 9次多項式(定数関数)でフィッティングした結果を表示します。 # + id="heYgw9RwWJUh" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 497} outputId="a8d6557f-2626-498c-c912-093d8ee95f38" executionInfo={"status": "ok", "timestamp": 1518327112291, "user_tz": -540, "elapsed": 1393, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-QnNQI9d1-o8/AAAAAAAAAAI/AAAAAAAAZnk/pgF0J6fNn2I/s50-c-k-no/photo.jpg", "userId": "105109646685285082872"}} fig = plt.figure(figsize=(10, 8)) for i, m in enumerate([0,1,3,9]): subplot = fig.add_subplot(2,2,i+1) show_result(subplot, train_set, m) # + [markdown] id="Stg8OO5dWJUm" colab_type="text" # 多項式の次数が上がるにつれてデータポイントの近くを通るようになり、標準偏差が減少していることがわかります。 # + [markdown] id="Z2pTwZjtWJUn" colab_type="text" # ここで、トレーニングセットとテストセットに対する対数尤度の変化を確認します。 # # 多項式の次数を0〜9に変化させながら、対数尤度のグラフを描く関数を用意します。(次数が10の場合は、オーバーフィッティングにより対数尤度が無限大に発散します。 # + id="aqHpuBCtWJUo" 
colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} # 多項式近似に対する最大対数尤度を計算 def show_loglikelihood_trend(train_set, test_set): df = DataFrame() train_mlh = [] test_mlh = [] for m in range(0,9): # 多項式の次数 f, sigma = resolve(train_set, m) train_mlh.append(log_likelihood(train_set, f)) test_mlh.append(log_likelihood(test_set, f)) df = pd.concat([df, DataFrame(train_mlh, columns=['Training set']), DataFrame(test_mlh, columns=['Test set'])], axis=1) df.plot(title='Log likelihood for N=%d' % N, grid=True, style=['-','--']) # + [markdown] id="GtvG7RH9WJUr" colab_type="text" # トレーニングセットとは独立に生成したテストセットを用意します。 # + id="xHFJgacvWJUr" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 359} outputId="fe7e5041-9cd3-4c66-ae97-c33f78ede4f9" executionInfo={"status": "ok", "timestamp": 1518327113487, "user_tz": -540, "elapsed": 574, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-QnNQI9d1-o8/AAAAAAAAAAI/AAAAAAAAZnk/pgF0J6fNn2I/s50-c-k-no/photo.jpg", "userId": "105109646685285082872"}} test_set = create_dataset(N) test_set # + [markdown] id="gyqVzXz9WJUv" colab_type="text" # 多項式の次数を0〜9に変化させながら、トレーニングセットとテストセットに対する対数尤度を計算して、結果をグラフ表示します。 # + id="K2zKyOYdWJUw" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 362} outputId="0dce48f3-2e4b-4a18-df79-1d055efd975e" executionInfo={"status": "ok", "timestamp": 1518327114318, "user_tz": -540, "elapsed": 783, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-QnNQI9d1-o8/AAAAAAAAAAI/AAAAAAAAZnk/pgF0J6fNn2I/s50-c-k-no/photo.jpg", "userId": "105109646685285082872"}} show_loglikelihood_trend(train_set, test_set)
03-maximum_likelihood.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_tensorflow2_p36) # language: python # name: conda_tensorflow2_p36 # --- # # Amazon SageMaker Debugger - Reacting to Cloudwatch Events from Rules # [Amazon SageMaker](https://aws.amazon.com/sagemaker/) is managed platform to build, train and host maching learning models. Amazon SageMaker Debugger is a new feature which offers the capability to debug machine learning models during training by identifying and detecting problems with the models in near real time. # # In this notebook, we'll show you how you can react off rule triggers and take some action, e.g. stop the training job through CloudWatch Events. # # ## How does Amazon SageMaker Debugger work? # # Amazon SageMaker Debugger lets you go beyond just looking at scalars like losses and accuracies during training and gives you full visibility into all tensors 'flowing through the graph' during training. Furthermore, it helps you monitor your training in near real time using rules and provides you alerts, once it has detected inconsistency in training flow. # # ### Concepts # * **Tensors**: These represent the state of the training network at intermediate points during its execution # * **Debug Hook**: Hook is the construct with which Amazon SageMaker Debugger looks into the training process and captures the tensors requested at the desired step intervals # * **Rule**: A logical construct, implemented as Python code, which helps analyze the tensors captured by the hook and report anomalies, if at all # # With these concepts in mind, let's understand the overall flow of things that Amazon SageMaker Debugger uses to orchestrate debugging. # # ### Saving tensors during training # # The tensors captured by the debug hook are stored in the S3 location specified by you. 
There are two ways you can configure Amazon SageMaker Debugger to save tensors: # # #### With no changes to your training script # If you use one of the SageMaker provided [Deep Learning Containers](https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html) for 1.15, then you don't need to make any changes to your training script for the tensors to be stored. SageMaker Debugger will use the configuration you provide through the SageMaker SDK's Tensorflow `Estimator` when creating your job to save the tensors in the fashion you specify. You can review the script we are going to use at [src/mnist_zerocodechange.py](src/mnist_zerocodechange.py). You will note that this is an untouched TensorFlow script which uses the Estimator interface. Please note that SageMaker Debugger only supports `tf.keras`, `tf.Estimator` and `tf.MonitoredSession` interfaces. Full description of support is available at [SageMaker Debugger with TensorFlow ](https://github.com/awslabs/sagemaker-debugger/tree/master/docs/tensorflow.md) # # #### Orchestrating your script to store tensors # For other containers, you need to make couple of lines of changes to your training script. SageMaker Debugger exposes a library `smdebug` which allows you to capture these tensors and save them for analysis. It's highly customizable and allows to save the specific tensors you want at different frequencies and possibly with other configurations. Refer [DeveloperGuide](https://github.com/awslabs/sagemaker-debugger/tree/master/docs) for details on how to use SageMaker Debugger library with your choice of framework in your training script. Here we have an example script orchestrated at [src/mnist_byoc](src/mnist_byoc.py). You also need to ensure that your container has the `smdebug` library installed. # # ### Analysis of tensors # # Once the tensors are saved, Amazon SageMaker Debugger can be configured to run debugging ***Rules*** on them. 
At a very broad level, a rule is Python code used to detect certain conditions during training. Some of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on. Sagemaker Debugger comes pre-packaged with certain built-in rules. Users can write their own rules using the Sagemaker Debugger APIs. You can also analyze raw tensor data outside of the Rules construct in say, a Sagemaker notebook, using Amazon Sagemaker Debugger's full set of APIs. Please refer [Analysis Developer Guide](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md) for more on these APIs. # # ### Cloudwatch Events for Rules # Rule status changes in a training job trigger CloudWatch Events. These events can be acted upon by configuring a CloudWatch Rule (different from Amazon SageMaker Debugger Rule) to trigger each time a Debugger Rule changes status. In this notebook we'll go through how you can create a CloudWatch Rule to direct Training Job State change events to a lambda function that stops the training job in case a rule triggers and has status `"IssuesFound"` # # #### Lambda Function # # * In your AWS console, go to Lambda Management Console, # * Create a new function by hitting Create Function, # * Choose the language as Python 3.7 (or higher) and put in the following sample code for stopping the training job if one of the Rule statuses is `"IssuesFound"`: # # ```python # import json # import boto3 # import logging # # logger = logging.getLogger() # logger.setLevel(logging.INFO) # # # def lambda_handler(event, context): # training_job_name = event.get("detail").get("TrainingJobName") # logging.info(f'Evaluating Debugger rules for training job: {training_job_name}') # # eval_statuses = event.get("detail").get("DebugRuleEvaluationStatuses", None) # # if eval_statuses is None or len(eval_statuses) == 0: # logging.info("Couldn't find any debug rule statuses, 
skipping...") # return { # 'statusCode': 200, # 'body': json.dumps('Nothing to do') # } # # # should only attempt stopping jobs with InProgress status # training_job_status = event.get("detail").get("TrainingJobStatus", None) # if training_job_status != 'InProgress': # logging.debug(f"Current Training job status({training_job_status}) is not 'InProgress'. Exiting") # return { # 'statusCode': 200, # 'body': json.dumps('Nothing to do') # } # # client = boto3.client('sagemaker') # # for status in eval_statuses: # logging.info(status.get("RuleEvaluationStatus") + ', RuleEvaluationStatus=' + str(status)) # if status.get("RuleEvaluationStatus") == "IssuesFound": # secondary_status = event.get("detail").get("SecondaryStatus", None) # logging.info( # f'About to stop training job, since evaluation of rule configuration {status.get("RuleConfigurationName")} resulted in "IssuesFound". ' + # f'\ntraining job "{training_job_name}" status is "{training_job_status}", secondary status is "{secondary_status}"' + # f'\nAttempting to stop training job "{training_job_name}"' # ) # try: # client.stop_training_job( # TrainingJobName=training_job_name # ) # except Exception as e: # logging.error( # "Encountered error while trying to " # "stop training job {}: {}".format( # training_job_name, str(e) # ) # ) # raise e # return None # ``` # * Create a new execution role for the Lambda, and # * In your IAM console, search for the role and attach "AmazonSageMakerFullAccess" policy to the role. This is needed for the code in your Lambda function to stop the training job. # * Basic settings > set Timeout to 30 seconds instead of 3 seconds. # # #### Create a CloudWatch Rule # # * In your AWS Console, go to CloudWatch and select Rule from the left column, # * Hit Create Rule. The console will redirect you to the Rule creation page, # * For the Service Name, select "SageMaker". # * For the Event Type, select "SageMaker Training Job State Change". 
# * In the Targets select the Lambda function you created above, and
# * For this example notebook, we'll leave everything as is.

import boto3
import os
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker.debugger import Rule, rule_configs

# +
# Training script used as the SageMaker entry point.
entrypoint_script = 'src/mnist_zerocodechange.py'

# These hyperparameters ensure that vanishing gradient will trigger for our
# tensorflow mnist script.
hyperparameters = {
    "num_epochs": "10",
    "lr": "10.00",
}

# +
# Built-in Debugger rules that are evaluated alongside the training job.
rules = [
    Rule.sagemaker(rule_configs.vanishing_gradient()),
    Rule.sagemaker(rule_configs.loss_not_decreasing()),
]

estimator = TensorFlow(
    role=sagemaker.get_execution_role(),
    base_job_name='smdebugger-demo-mnist-tensorflow',
    train_instance_count=1,
    train_instance_type='ml.m4.xlarge',
    entry_point=entrypoint_script,
    framework_version='1.15',
    train_volume_size=400,
    py_version='py3',
    train_max_run=3600,
    script_mode=True,
    hyperparameters=hyperparameters,
    ## New parameter
    rules=rules,
)

# +
# After calling fit, SageMaker will spin off 1 training job and 1 rule job
# for you.  The rule evaluation status(es) will be visible in the training
# logs at regular intervals.
# wait=False makes this a fire-and-forget call; to stream the logs in the
# notebook, leave it out.
estimator.fit(wait=False)
# -

# ## Monitoring
#
# SageMaker kicked off rule evaluation jobs, one for each of the SageMaker
# rules - `VanishingGradient` and `LossNotDecreasing` as specified in the
# estimator.
# Given that we've tweaked the hyperparameters of our training script such
# that `VanishingGradient` is bound to fire, we should expect to see the
# `TrainingJobStatus` as `Stopped` once the `RuleEvaluationStatus` for
# `VanishingGradient` changes to `IssuesFound`.

# The rule job summary gives you the summary of the rule evaluations.  You
# might have to run it a few times before you start to see all values
# populated/changing.
estimator.latest_training_job.rule_job_summary()

# +
# This utility gives the link to monitor the CW event
def _get_rule_job_name(training_job_name, rule_configuration_name, rule_job_arn):
    """Helper function to get the rule job name"""
    # Rule job names are composed of truncated prefixes of the training job
    # name and the rule configuration name, plus the trailing ARN digits.
    prefix = "{}-{}".format(training_job_name[:26], rule_configuration_name[:26])
    return "{}-{}".format(prefix, rule_job_arn[-8:])


def _get_cw_url_for_rule_job(rule_job_name, region):
    """Build the CloudWatch Logs console URL for a rule evaluation job."""
    url_template = (
        "https://{}.console.aws.amazon.com/cloudwatch/home?region={}"
        "#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};"
        "streamFilter=typeLogStreamPrefix"
    )
    return url_template.format(region, region, rule_job_name)


def get_rule_jobs_cw_urls(estimator):
    """Map each rule configuration name to its CloudWatch Logs URL."""
    region = boto3.Session().region_name
    training_job = estimator.latest_training_job
    training_job_name = training_job.describe()["TrainingJobName"]
    rule_eval_statuses = training_job.describe()["DebugRuleEvaluationStatuses"]

    result = {}
    for status in rule_eval_statuses:
        # A rule evaluation job may not have been created yet.
        if status.get("RuleEvaluationJobArn", None) is None:
            continue
        rule_job_name = _get_rule_job_name(
            training_job_name,
            status["RuleConfigurationName"],
            status["RuleEvaluationJobArn"],
        )
        result[status["RuleConfigurationName"]] = _get_cw_url_for_rule_job(rule_job_name, region)
    return result


get_rule_jobs_cw_urls(estimator)
# -

# After running the last two cells over and until `VanishingGradient` reports
# `IssuesFound`, we'll attempt to describe the `TrainingJobStatus` for our
# training job.

estimator.latest_training_job.describe()["TrainingJobStatus"]

# ## Result
#
# This notebook attempted to show a very simple setup of how you can use
# CloudWatch events for your training job to take action on rule evaluation
# status changes.  Learn more about Amazon SageMaker Debugger in the
# [GitHub Documentation](https://github.com/awslabs/sagemaker-debugger).
sagemaker-debugger/tensorflow_action_on_rule/tf-mnist-stop-training-job.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import math
import numpy as np
import matplotlib.pyplot as plt
import openrtdynamics2.lang as dy
import openrtdynamics2.py_execute as dyexe
import openrtdynamics2.targets as tg

# +
# Build a minimal system: one input scaled by a free-running counter plus a
# second, plain counter output.
dy.clear()
system = dy.enter_system()

# define system inputs
input1 = dy.system_input(
    dy.DataTypeFloat64(1),
    name='input1',
    default_value=2.0,
    value_range=[0, 25],
    title="input #1",
)

# the diagram
output = input1 * dy.counter()
output2 = dy.counter()

# define output(s)
dy.append_output(output, 'output')
dy.append_output(output2, 'output2')

# generate code
code_gen_results = dy.generate_code(template=tg.TargetCppMinimal())
# -

compiled_system = dyexe.CompiledCode(code_gen_results)

testsim = dyexe.SystemInstance(compiled_system)

sim_results = dyexe.run_batch_simulation(
    testsim,
    input_data={},
    N=10,
    output_keys=['output', 'output2'],
)

sim_results['output']

sim_results['output2']

# With the default input value of 2.0, 'output' counts in steps of two while
# 'output2' counts in steps of one.
for step, (expected, expected2) in enumerate(zip((0, 2, 4, 6), (0, 1, 2, 3))):
    assert(sim_results['output'][step] == expected)
    assert(sim_results['output2'][step] == expected2)

# # Advanced counter

# +
dy.clear()
system = dy.enter_system()

# the diagram: three triggered counters with different limit/reset settings
s1, trigger1 = dy.counter_triggered(upper_limit=dy.int32(15), reset_on_limit=False, initial_state=0)
s2, trigger2 = dy.counter_triggered(upper_limit=dy.int32(10), reset_on_limit=True, initial_state=0)
s3, trigger3 = dy.counter_triggered(upper_limit=dy.int32(10), reset_on_limit=True, initial_state=5)

# define output(s)
for signal, label in ((s1, 'signal1'), (s2, 'signal2'), (s3, 'signal3'),
                      (trigger1, 'trigger1'), (trigger2, 'trigger2'), (trigger3, 'trigger3')):
    dy.append_output(signal, label)

# generate code
code_gen_results = dy.generate_code(template=tg.TargetCppMinimal())
# -

compiled_system = dyexe.CompiledCode(code_gen_results)

sim_results = dyexe.run_batch_simulation(dyexe.SystemInstance(compiled_system), input_data={}, N=30)

# +
plt.figure()
for key in ('signal1', 'signal2', 'signal3'):
    plt.plot(sim_results[key], '+-')
plt.legend(['signal1', 'signal2', 'signal3'])
plt.show()

plt.figure()
for key in ('trigger1', 'trigger2', 'trigger3'):
    plt.plot(sim_results[key], '+')
plt.legend(['trigger1', 'trigger2', 'trigger3'])
plt.show()
# -

# NBVAL_CHECK_OUTPUT
sim_results['signal1']

# NBVAL_CHECK_OUTPUT
sim_results['signal2']

# NBVAL_CHECK_OUTPUT
sim_results['signal3']

# NBVAL_CHECK_OUTPUT
sim_results['trigger1']

# NBVAL_CHECK_OUTPUT
sim_results['trigger2']

# NBVAL_CHECK_OUTPUT
sim_results['trigger3']
tests/test_counter.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.7.0-rc2
#     language: julia
#     name: julia-1.7
# ---

# # Solve a nonlinear system
# \begin{align}
# 3x_1 - \cos(x_2x_3) - \frac{1}{2}& = 0\\
# x_1^2 - 81(x_2+0.1)^2 + \sin(x_3) +1.06& = 0\\
# e^{-x_1x_2} + 20 x_3 + \frac{10\pi-3}{3}& = 0
# \end{align}
#

using Printf
using ForwardDiff
using LinearAlgebra
using NLsolve

# ## Hand Coded Jacobian

# +
# F(x): residual vector of the nonlinear system above; a root of F is a
# solution of the system.
function F(x)
    return [3*x[1] - cos(x[2]*x[3]) - 0.5
        x[1]^2 - 81 * (x[2]+0.1)^2 + sin(x[3])+1.06
        exp(-x[1]*x[2]) + 20 * x[3] + (10 * π-3)/3];
end

# J(x): analytic 3×3 Jacobian of F, with entry J_[i,j] = ∂F_i/∂x_j.
function J(x)
    J_ = zeros(3,3);
    J_[1,1] = 3;
    J_[1,2] = x[3] * sin(x[2]*x[3]);
    J_[1,3] = x[2] * sin(x[2]*x[3]);
    J_[2,1] = 2*x[1];
    J_[2,2] =-162 * (x[2]+0.1);
    J_[2,3] = cos(x[3]);
    J_[3,1] = -x[2] * exp(-x[1]*x[2]);
    J_[3,2] = -x[1] * exp(-x[1]*x[2])
    J_[3,3] = 20;
    return J_
end
# -

F(zeros(3))

J([0., 0., 0.])

# +
# Newton's method with the hand-coded Jacobian.
# starting guess
x = zeros(3);
δ = zeros(3);

n_iters = 20;   # maximum number of Newton iterations
tol = 1e-10;    # stop once ||F(x)|| drops below this tolerance

for n in 1:n_iters
    # Newton step: solve J(x) δ = -F(x), then update x in place.
    δ .= -J(x)\F(x);
    @. x += δ;
    f_err = norm(F(x));
    @printf("%d: ||F(x)|| = %g\n",n, f_err);
    if(f_err<tol)
        break;
    end
end
@show x;
# -

# ## Automatic Differentiation

# Jacobian of F obtained by forward-mode automatic differentiation instead
# of the hand-coded derivatives.
J_auto = x-> ForwardDiff.jacobian(F, x);

J_auto(x)

# +
# Newton's method again, now using the AD Jacobian.
# starting guess
x = zeros(3);
δ = zeros(3);

n_iters = 20;
tol = 1e-10;

for n in 1:n_iters
    δ .= -J_auto(x)\F(x);
    @. x += δ;
    f_err = norm(F(x));
    @printf("%d: ||F(x)|| = %g\n",n, f_err);
    if(f_err<tol)
        break;
    end
end
@show x;
# -

# ## Solution with NLsolve

# Solve with NLsolve's default (internally differenced) Jacobian.
x = zeros(3);
soln= nlsolve(F, x)

# Solve again, supplying the AD Jacobian explicitly.
x = zeros(3);
soln= nlsolve(F,J_auto, x)
notebooks/Julia Functionality/Multidim Roots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests
from bs4 import BeautifulSoup
import pandas as pd


def arirang_news_list(num1, num2):
    """Scrape Arirang news articles with nseq ids num1..num2 (inclusive)
    and save them to '<num1>-<num2>.csv'.

    For each id, the detail page is fetched and the title, body text and
    reporter name are extracted.  Articles that fail to download or parse
    are reported and skipped.  (Previously the record was appended in a
    ``finally`` block, so a failure either raised NameError on the first
    iteration or silently duplicated the previous article's data.)
    """
    news_list = []
    for i in range(num1, num2 + 1):
        try:
            print(i)
            # Article detail page.
            url = f"https://www.arirang.com/News/News_View.asp?nseq={i}"
            resp = requests.get(url)
            html = resp.text
            # An explicit parser keeps results consistent across
            # environments (the default depends on what is installed).
            soup = BeautifulSoup(html, "html.parser")

            # Article title.
            title = soup.title.string
            # Article body: the text before 'Reporter : ' is the article,
            # the text after it is the reporter's name.  A missing marker
            # raises IndexError and the article is treated as a failure.
            parts = soup.find(id="newsText").text.split('Reporter : ')
            text = parts[0]
            reporter = parts[1]
        except Exception:
            print("error")
        else:
            # Record one successfully scraped article.
            news_list.append({
                'nseq': i,
                'url': url,
                'title': title,
                'text': text,
                'reporter': reporter,
            })

    print(len(news_list))

    # Build a DataFrame from all scraped articles and save it as CSV.
    df = pd.DataFrame(news_list)
    df.to_csv(f"{num1}-{num2}.csv")


arirang_news_list(274965, 274977)
arirang-news-scrapping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fairness # Recidivism Case Study # # Copyright 2020 <NAME> # # License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) # ## Review # # This is the third in a series of notebooks that make up a [case study on classification and algorithmic fairness](https://allendowney.github.io/RecidivismCaseStudy/). # This case study is part of the [*Elements of Data Science*](https://allendowney.github.io/ElementsOfDataScience/) curriculum. # # The goal of this case study is to explain the statistical arguments presented in two articles from 2016: # # * "[Machine Bias](https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing)", by <NAME>, <NAME>, <NAME> and <NAME>, and published by [ProPublica](https://www.propublica.org). # # * A response by <NAME>, <NAME>, <NAME> and <NAME>: "[A computer program used for bail and sentencing decisions was labeled biased against blacks. It’s actually not that clear.](https://www.washingtonpost.com/news/monkey-cage/wp/2016/10/17/can-an-algorithm-be-racist-our-analysis-is-more-cautious-than-propublicas/)", published in the Washington Post. # # I strongly encourage you to read both articles before you go on. # # Both articles are about COMPAS, a statistical tool used in the justice system to assign defendants a "risk score" that is intended to reflect the risk that they will commit another crime if released. # # The ProPublica article evaluates COMPAS as a binary classifier, and compares its error rates for black and white defendants. # # In response, the Washington Post article shows that COMPAS has the same predictive value black and white defendants. 
# And they explain that the test cannot have the same predictive value and the same error rates at the same time.
#
# [In the first notebook](https://colab.research.google.com/github/AllenDowney/RecidivismCaseStudy/blob/master/01_classification.ipynb) I replicated the analysis from the ProPublica article.
#
# [In the second notebook](https://colab.research.google.com/github/AllenDowney/RecidivismCaseStudy/blob/master/01_calibration.ipynb) I replicated the analysis from the WaPo article.
#
# In this notebook I apply the same analysis to evaluate the performance of COMPAS for male and female defendants.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# ## Data
#
# The authors of "Machine Bias" published their data and analysis in [this repository](https://github.com/propublica/compas-analysis).
# The terms of use for the data [are here](https://www.propublica.org/datastore/terms). In compliance with those terms, I am not redistributing the data. The following cell downloads the data file we'll use directly from their repository.

# +
from os.path import basename, exists


def download(url):
    """Fetch *url* into the working directory, unless the target file
    (named after the URL's final path component) is already present."""
    filename = basename(url)
    if exists(filename):
        return  # already cached locally; nothing to do
    from urllib.request import urlretrieve
    local, _ = urlretrieve(url, filename)
    print('Downloaded ' + local)


download('https://github.com/propublica/compas-analysis/raw/master/' +
         'compas-scores-two-years.csv')
# -

# The following cell reads the data file:

cp = pd.read_csv('compas-scores-two-years.csv')
cp.shape

# ## Data bias
#
# [**Note:** I wrote about data bias in the previous notebook, but I am repeating it here in case someone reads this notebook without reading the previous one.]
#
# Systems like COMPAS are trying to predict whether a defendant will *commit* another crime if released. But the dataset reports whether a defendant was *charged* with another crime.
#
# Not everyone who commits a crime gets charged (not even close).
The probability of getting charged for a particular crime depends on the type of crime and location; the presence of witnesses and their willingness to work with police; the decisions of police about where to patrol, what crimes to investigate, and who to arrest; and decisions of prosecutors about who to charge. # # It is likely that every one of these factors depends on the race of the defendant. In this dataset, the prevalence of *new charges* is higher for black defendants, but that doesn't necessarily mean that the prevalence of *new crimes* is higher. # # If the dataset is affected by racial bias in the probability of being charged, prediction algorithms like COMPAS will be biased, too. In discussions of whether and how these systems should be used in the criminal justice system, this is an important issue. # # However, I am going to put it aside *for now* in order to focus on understanding the arguments posed in the ProPublica article and the metrics they are based on. For the rest of this notebook I will take the "recidivism rates" in the dataset at face value; but I will try to be clear about that they mean (and don't mean). # ## Code # # The functions from the previous notebook are in a file called `utils.py`; the following cell downloads it if you don't already have it: download('https://raw.githubusercontent.com/AllenDowney/RecidivismCaseStudy/' + 'master/utils.py') # ## Male and female defendants # # The authors of the ProPublica article published a supplementary article, # [*How We Analyzed the COMPAS Recidivism Algorithm*](https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm), which describes their analysis in more detail. # # In the supplementary article, they briefly mention results for male and female respondents: # # > The COMPAS system unevenly predicts recidivism between genders. According to Kaplan-Meier estimates, women rated high risk recidivated at a 47.5 percent rate during two years after they were scored. 
# But men rated high risk recidivated at a much higher rate – 61.2 percent – over the same time period. This means that a high-risk woman has a much lower risk of recidivating than a high-risk man, a fact that may be overlooked by law enforcement officials interpreting the score.
#
# We can replicate this result using the methods from the previous notebooks; we don't have to do Kaplan-Meier estimation.
#
# According to the binary gender classification in this dataset, about 81% of defendants are male.

# Boolean masks selecting male and female defendants; .mean() of a boolean
# Series is the fraction of True values, i.e. each group's share.
male = (cp['sex'] == 'Male')
male.mean()

female = (cp['sex'] == 'Female')
female.mean()

# Here are the confusion matrices for male and female defendants.

# +
from utils import make_matrix

matrix_male = make_matrix(cp[male])
matrix_male
# -

matrix_female = make_matrix(cp[female])
matrix_female

# And here are the metrics:

# +
from utils import compute_metrics

metrics_male = compute_metrics(matrix_male, 'Male defendants')
metrics_male
# -

metrics_female = compute_metrics(matrix_female, 'Female defendants')
metrics_female

# The fraction of defendants charged with another crime (prevalence) is substantially higher for male defendants (47% vs 36%).
#
# Nevertheless, the error rates for the two groups are about the same.
#
# As a result, the predictive values for the two groups are substantially different:
#
# * PPV: Women classified as high risk are less likely to be charged with another crime, compared to high-risk men (51% vs 64%).
#
# * NPV: Women classified as low risk are more likely to "survive" two years without a new charge, compared to low-risk men (76% vs 67%).
#
# The difference in predictive values implies that COMPAS is not calibrated for men and women. In the next section we'll compare the calibration curves.

# ## Calibration for male and female defendants
#
# Here are the calibration curves for male and female defendants.
# +
from utils import calibration_curve, decorate

# Calibration: the actual recidivism fraction as a function of risk score,
# overall and broken out by sex.
cal_all = calibration_curve(cp)
cal_all.plot(linestyle='dotted', color='gray', label='All defendants')

cal_male = calibration_curve(cp[male])
cal_male.plot(label='Male')

cal_female = calibration_curve(cp[female])
cal_female.plot(label='Female')

# Chart title typo fixed: 'Recivism' -> 'Recidivism'.
decorate(xlabel='Risk score',
         ylabel='Fraction charged with new crime',
         title='Recidivism vs risk score, grouped by sex')

plt.legend();
# -

# For all risk scores, female defendants are substantially less likely to be charged with another crime.
#
# Or, reading the graph the other way, female defendants are given risk scores 1-2 points higher than male defendants with the same actual risk of recidivism.
#
# To the degree that COMPAS scores are used to decide which defendants are incarcerated, those decisions:
#
# * Are unfair to women.
#
# * Are less effective than they could be, if they incarcerate lower-risk women while allowing higher-risk men to go free.
#
# ## What would it take?
#
# Suppose we want to fix COMPAS so that predictive values are the same for male and female defendants. We could do that by using different thresholds for the two groups.
#
# In this section, we'll see what it would take to re-calibrate COMPAS; then we'll find out what effect that would have on error rates.
#
# From the previous notebook, `sweep_threshold` loops through possible thresholds, makes the confusion matrix for each threshold, and computes the accuracy metrics.
#
# Here are the resulting tables for all defendants, male defendants, and female defendants.

# +
from utils import sweep_threshold

table_all = sweep_threshold(cp)
# -

table_male = sweep_threshold(cp[male])

table_female = sweep_threshold(cp[female])

# As we did in the previous notebook, we can find the threshold that would make predictive value the same for both groups.
# + from utils import predictive_value matrix_all = make_matrix(cp) ppv, npv = predictive_value(matrix_all) # + from utils import crossing crossing(table_male['PPV'], ppv) # - crossing(table_male['NPV'], npv) # With a threshold near 3.4, male defendants would have the same predictive values as the general population. # # Now let's do the same computation for female defendants. crossing(table_female['PPV'], ppv) crossing(table_female['NPV'], npv) # To get the same predictive values for men and women, we would need substantially different thresholds: about 6.8 compared to 3.4. # # At those levels, the false positive rates would be very different: # + from utils import interpolate interpolate(table_male['FPR'], 3.4) # - interpolate(table_female['FPR'], 6.8) # And so would the false negative rates. interpolate(table_male['FNR'], 3.4) interpolate(table_female['FNR'], 6.8) # If the test is calibrated in terms of predictive value, it is uncalibrated in terms of error rates. # ## ROC # # In the previous notebook I defined the [receiver operating characteristic (ROC) curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic). # # The following figure shows ROC curves for male and female defendants: # + from utils import plot_roc plot_roc(table_male) plot_roc(table_female) # - # The ROC curves are nearly identical, which implies that it is possible to calibrate COMPAS equally for male and female defendants. # ## AUC # # In the previous notebook I define [concordance](https://cran.r-project.org/web/packages/survival/vignettes/concordance.pdf) and compute it by estimating the area under the ROC curve. # # The concordance (AUC) for all respondents is about 70%. # + from utils import compute_auc compute_auc(table_all) # - # For the subgroups it is slightly lower, but also near 70%. compute_auc(table_male) compute_auc(table_female) # Again, this implies that COMPAS could be calibrated. 
# ## Summary # # With respect to sex, COMPAS is fair by the criteria posed by the ProPublica article: it has the same error rates for groups with different prevalence. # # But it is unfair by the criteria of the WaPo article, which argues: # # > A risk score of seven for black defendants should mean the same thing as a score of seven for white defendants. Imagine if that were not so, and we systematically assigned whites higher risk scores than equally risky black defendants with the goal of mitigating ProPublica’s criticism. We would consider that a violation of the fundamental tenet of equal treatment. # # With respect to male and female defendants, COMPAS violates this tenet. # So who's right? We have two competing definitions of fairness, and it is mathematically impossible to satisfy them both. Is it better to have equal error rates for all groups, as COMPAS does for men and women? Or is it better to be calibrated, which implies equal predictive values? Or, since we can't have both, should the test be "tempered", allowing both error rates and predictive values to depend on prevalence? # # [In the next notebook](https://colab.research.google.com/github/AllenDowney/RecidivismCaseStudy/blob/master/04_matrix.ipynb) I explore these trade-offs in more detail.
03_fairness.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="CCQY7jpBfMur" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="z6X9omPnfO_h" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="2QQJJyDzqGRb" # # Eager execution # # + [markdown] colab_type="text" id="B1xdylywqUSX" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/eager"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/eager.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/eager.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub)에서 소스 보기</a> # </td> # </table> # + [markdown] colab_type="text" id="-agGVYp_4GWZ" # Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 # 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. 
# 이 번역에 개선할 부분이 있다면 # [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. # 문서 번역이나 리뷰에 참여하려면 # [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 # 메일을 보내주시기 바랍니다. # + [markdown] colab_type="text" id="EGjDcGxIqEfX" # # # 텐서플로의 즉시 실행은 그래프를 생성하지 않고 함수를 바로 실행하는 명령형 프로그래밍 환경입니다. # 나중에 실행하기 위해 계산가능한 그래프를 생성하는 대신에 계산값을 즉시 알려주는 연산입니다. # 이러한 기능은 텐서플로를 시작하고 모델을 디버깅하는 것을 더욱 쉽게 만들고 불필요한 상용구 코드(boilerplate code) 작성을 줄여줍니다. # 가이드를 따라하려면, 대화형 `파이썬` 해석기(interpreter)를 이용해 아래에 있는 코드 샘플을 실행하세요. # # 즉시 실행은 연구와 실험을 위한 유연한 기계학습 플랫폼으로 다음과 같은 기능을 제공합니다: # # * *직관적인 인터페이스*-코드를 자연스럽게 구조화하고 파이썬의 데이터 구조를 활용. 작은 모델과 작은 데이터를 빠르게 반복 # * *손쉬운 디버깅*-실행중인 모델을 검토하거나 변경 사항을 테스트해보기 위해서 연산을 직접 호출. 에러 확인을 위해서 표준 파이썬 디버깅 툴을 사용 # * *자연스런 흐름 제어*-그래프 제어 흐름 대신에 파이썬 제어 흐름을 사용함으로써 동적인 모델 구조의 단순화 # # 즉시 실행은 대부분의 텐서플로 연산과 GPU 가속을 지원합니다. # # Note: 일부 모델은 즉시 실행을 활성화한 경우 추가연산비용(overhead)이 증가한 경우도 있습니다.성능을 향상시키려는 노력은 계속 진행 중이지만 만약에 문제점을 찾거나 관련된 벤치마크를 공유하고 싶다면 [버그를 기록해주세요](https://github.com/tensorflow/tensorflow/issues) # + [markdown] colab_type="text" id="RBAeIwOMrYk8" # ## 설치와 기본 사용법 # + colab={} colab_type="code" id="ByNsp4VqqEfa" from __future__ import absolute_import, division, print_function, unicode_literals try: # # %tensorflow_version은 오직 Colab에서만 사용가능합니다. # %tensorflow_version 2.x #gpu except Exception: pass import tensorflow as tf import cProfile # + [markdown] colab_type="text" id="48P3-8q4qEfe" # 텐서플로 2.0에서 즉시 실행은 기본으로 활성화되어 있습니다. # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="7aFsD8csqEff" tf.executing_eagerly() # + [markdown] colab_type="text" id="x_G1zZT5qEfh" # 이제부터는 텐서플로 연산을 바로 실행할 수 있고 결과를 즉시 확인할 수 있습니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="9gsI54pbqEfj" x = [[2.]] m = tf.matmul(x, x) print("hello, {}".format(m)) # + [markdown] colab_type="text" id="ajFn6qsdqEfl" # 즉시 실행 활성화는 텐서플로 연산을 바로 평가하고 그 결과를 파이썬에게 알려주는 방식으로 동작을 변경합니다. 
# `tf.Tensor` 객체는 계산 그래프에 있는 노드를 가르키는 간접 핸들(symbolic handle) 대신에 구체적인 값을 참조합니다. # 나중에 실행하기 위해서 생성된 계산 그래프가 없기 때문에, `print()`나 디버거를 통해서 결과를 검토하기 쉽습니다. # 텐서값을 평가, 출력하거나 확인하는 것이 그래디언트(gradients)를 계산하는 흐름을 방해하지 않습니다. # # 즉시 실행은 [NumPy](http://www.numpy.org/)와 같이 잘 작동됩니다. # NumPy 연산에 `tf.Tensor`를 매개변수로 사용가능합니다. # 텐서플로 [수학 연산](https://www.tensorflow.org/api_guides/python/math_ops)은 파이썬 객체와 NumPy 배열을 `tf.Tensor` 객체로 변환합니다. # `tf.Tensor.numpy` 메서드는 객체 값을 NumPy `ndarray`로 반환합니다. # + colab={} colab_type="code" id="sTO0_5TYqz1n" a = tf.constant([[1, 2], [3, 4]]) print(a) # + colab={} colab_type="code" id="Dp14YT8Gq4r1" # 브로드캐스팅(Broadcasting) 지원 b = tf.add(a, 1) print(b) # + colab={} colab_type="code" id="69p3waMfq8cQ" # 연산자 오버로딩 지원 print(a * b) # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="Ui025t1qqEfm" # NumPy값 사용 import numpy as np c = np.multiply(a, b) print(c) # + colab={} colab_type="code" id="Tq_aFRzWrCua" # 텐서로부터 numpy 값 얻기: print(a.numpy()) # => [[1 2] # [3 4]] # + [markdown] colab_type="text" id="H08f9ss9qEft" # ## 동적인 제어 흐름 # # 즉시 실행의 가장 큰 이점은 모델을 실행하는 동안에도 호스트 언어의 모든 기능을 활용할 수 있다는 것입니다. # 그래서 다음과 같이 [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz)를 손쉽게 작성할 수 있습니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="0fudRMeUqEfu" def fizzbuzz(max_num): counter = tf.constant(0) max_num = tf.convert_to_tensor(max_num) for num in range(1, max_num.numpy()+1): num = tf.constant(num) if int(num % 3) == 0 and int(num % 5) == 0: print('FizzBuzz') elif int(num % 3) == 0: print('Fizz') elif int(num % 5) == 0: print('Buzz') else: print(num.numpy()) counter += 1 # + colab={} colab_type="code" id="P2cKknQWrJLB" fizzbuzz(15) # + [markdown] colab_type="text" id="7kA-aC3BqEfy" # 여기에 텐서값에 따른 조건절이 있고 실행중에 그 결과를 출력합니다. 
# + [markdown] colab_type="text" id="8huKpuuAwICq" # ## 즉시 훈련 # + [markdown] colab_type="text" id="mp2lCCZYrxHd" # ### 그래디언트 계산하기 # # [자동 미분](https://en.wikipedia.org/wiki/Automatic_differentiation)은 인공 신경망 훈련을 위한 # [역전파](https://en.wikipedia.org/wiki/Backpropagation)와 같은 기계학습 알고리즘을 구현하는데 유용합니다. # 즉시 실행을 사용하는 동안에는, 나중에 그래디언트를 계산하는 연산을 추적하기 위해 `tf.GradientTape`을 사용하세요. # # 즉시 실행 중에 그래디언트를 계산하고 모델 훈련에 이용하기 위해서 `tf.GradientTape`을 사용할 수 있습니다. # 특히 복잡하고 반복적인 훈련인 경우에 더 유용합니다. # # 매번 실행될 때 서로 다른 연산이 수행될 수 있기 때문에 모든 정방향(forward-pass) 연산은 "tape"에 기록됩니다. # 그다음 tape를 거꾸로 돌려 그래디언트를 계산한 후 tape를 폐기합니다. # 특정한 `tf.GradientTape`는 오직 하나의 그래디언트만을 계산할 수 있고 부가적인 호출은 실행중 에러(runtime error)를 발생시킵니다. # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="7g1yWiSXqEf-" w = tf.Variable([[1.0]]) with tf.GradientTape() as tape: loss = w * w grad = tape.gradient(loss, w) print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32) # + [markdown] colab_type="text" id="vkHs32GqweYS" # ### 모델 훈련 # # 다음 예는 표준 MNIST 손글씨 분류를 위한 다층 모델을 생성합니다. # 즉시 실행 환경에서 훈련가능한 그래프를 생성하기 위한 옵티마이저(optimizer)와 층 API를 보여줍니다. 
# + colab={} colab_type="code" id="38kymXZowhhz" # mnist 데이터 가져오기 및 포맷 맞추기 (mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data() dataset = tf.data.Dataset.from_tensor_slices( (tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32), tf.cast(mnist_labels,tf.int64))) dataset = dataset.shuffle(1000).batch(32) # + colab={} colab_type="code" id="rl1K8rOowmwT" # 모델 생성 mnist_model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu', input_shape=(None, None, 1)), tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) # + [markdown] colab_type="text" id="fvyk-HgGwxwl" # # 즉시 실행에서는 훈련을 하지 않아도 모델을 사용하고 결과를 점검할 수 있습니다: # + colab={} colab_type="code" id="BsxystjBwxLS" for images,labels in dataset.take(1): print("로짓: ", mnist_model(images[0:1]).numpy()) # + [markdown] colab_type="text" id="Y3PGa8G7qEgB" # 케라스 모델은 자체적인 훈련 메서드(fit)을 포함하고 있지만 때로는 좀 더 수정할 필요가 있습니다. # 다음은 즉시 실행을 활용한 반복적인 훈련의 예입니다: # + colab={} colab_type="code" id="bzRhM7JDnaEG" optimizer = tf.keras.optimizers.Adam() loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_history = [] # + [markdown] colab_type="text" id="tXaupYXRI2YM" # Note: 조건을 만족했는지 확인하기 위해서 `tf.debugging`에 있는 단언문(assert) 함수를 사용하세요. 이것은 즉시 실행과 그래프 실행 모두에서 동작합니다. 
# + colab={} colab_type="code" id="DDHrigtiCIA4" def train_step(images, labels): with tf.GradientTape() as tape: logits = mnist_model(images, training=True) # 결과의 형태를 확인하기 위해서 단언문 추가 tf.debugging.assert_equal(logits.shape, (32, 10)) loss_value = loss_object(labels, logits) loss_history.append(loss_value.numpy().mean()) grads = tape.gradient(loss_value, mnist_model.trainable_variables) optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables)) # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="0m1xAXrmqEgJ" def train(): for epoch in range(3): for (batch, (images, labels)) in enumerate(dataset): train_step(images, labels) print ('에포크 {} 종료'.format(epoch)) # + colab={} colab_type="code" id="C5dGz0p_nf4W" train() # + colab={} colab_type="code" id="5vG5ql_2vYB5" import matplotlib.pyplot as plt plt.plot(loss_history) plt.xlabel('Batch #') plt.ylabel('Loss [entropy]') # + [markdown] colab_type="text" id="kKpOlHPLqEgl" # ### 변수와 옵티마이저 # # `tf.Variable` 객체는 자동 미분을 쉽게 하기 위해서 학습동안 변경된 `tf.Tensor` 값을 저장합니다. # 모델 파라미터는 클래스 인스턴스 변수로 캡슐화될 수 있습니다. # # 효과적으로 모델 파라미터를 캡슐화하려면 `tf.Variable`을 `tf.GradientTape`과 함께 사용합니다. # 예를 들어, 위의 자동 미분은 다음과 같이 재작성 가능합니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="nnQLBYmEqEgm" class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() self.W = tf.Variable(5., name='weight') self.B = tf.Variable(10., name='bias') def call(self, inputs): return inputs * self.W + self.B # 약 3 * x + 2개의 점으로 구성된 실험 데이터 NUM_EXAMPLES = 2000 training_inputs = tf.random.normal([NUM_EXAMPLES]) noise = tf.random.normal([NUM_EXAMPLES]) training_outputs = training_inputs * 3 + 2 + noise # 최적화할 손실함수 def loss(model, inputs, targets): error = model(inputs) - targets return tf.reduce_mean(tf.square(error)) def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, [model.W, model.B]) # 정의: # 1. 모델 # 2. 
모델 파라미터에 대한 손실 함수의 미분 # 3. 미분에 기초한 변수 업데이트 전략 model = Model() optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) print("초기 손실: {:.3f}".format(loss(model, training_inputs, training_outputs))) # 반복 훈련 for i in range(300): grads = grad(model, training_inputs, training_outputs) optimizer.apply_gradients(zip(grads, [model.W, model.B])) if i % 20 == 0: print("스텝 {:03d}에서 손실: {:.3f}".format(i, loss(model, training_inputs, training_outputs))) print("최종 손실: {:.3f}".format(loss(model, training_inputs, training_outputs))) print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy())) # + [markdown] colab_type="text" id="rPjb8nRWqEgr" # ## 즉시 실행에서 상태를 위한 객체 사용 # # 텐서플로 1.x 그래프 실행에서, 프로그램 상태(예: 변수)는 전역 컬렉션에 저장되고 그 수명은 `tf.Session` 객체에 의해서 관리됩니다. # 반면에 즉시 실행에서 상태 객체 수명은 그와 관련된 파이썬 객체 수명에 의해서 결정됩니다. # # ### 변수는 객체입니다 # # 즉시 실행에서 변수는 그 객체의 마지막 참조가 제거될 때까지 유지되고 그 이후 삭제됩니다. # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="A2boS674qEgs" if tf.config.experimental.list_physical_devices("GPU"): with tf.device("gpu:0"): print("GPU 사용 가능") v = tf.Variable(tf.random.normal([1000, 1000])) v = None # v는 더이상 GPU 메모리를 사용하지 않음 # + [markdown] colab_type="text" id="scMjg6L6qEgv" # ### 객체 기반의 저장 # # 이번 장은 [훈련 체크포인트 가이드](./checkpoint.ipynb) 요약버전입니다. # # `tf.train.Checkpoint`는 `tf.Variable`을 체크포인트 파일로 저장하거나 체크포인트 파일에서 복구할 수 있습니다: # + colab={} colab_type="code" id="7z5xRfdHzZOQ" x = tf.Variable(10.) checkpoint = tf.train.Checkpoint(x=x) # + colab={} colab_type="code" id="IffrUVG7zyVb" x.assign(2.) # 변수에 새로운 값을 할당하고 저장 checkpoint_path = './ckpt/' checkpoint.save('./ckpt/') # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="eMT9koCoqEgw" x.assign(11.) # 저장한 후에 변수 변경 # 체크포인트로부터 값을 복구 checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path)) print(x) # => 2.0 # + [markdown] colab_type="text" id="vbFnP-yLqEgx" # 모델을 저장하거나 읽어들이기 위해서, `tf.train.Checkpoint`는 숨겨진 변수를 요구하지 않고 객체 내부 상태를 저장합니다. 
# `옵티마이저`와 `모델`, 전역 단계 상태를 기록하려면 `tf.train.Checkpoint`에 전달하면 됩니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="hWZHyAXMqEg0" import os model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10) ]) optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) checkpoint_dir = 'path/to/model_dir' if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") root = tf.train.Checkpoint(optimizer=optimizer, model=model) root.save(checkpoint_prefix) root.restore(tf.train.latest_checkpoint(checkpoint_dir)) # + [markdown] colab_type="text" id="R-ITwkBCF6GJ" # Note: 대부분의 반복 훈련 과정에서 변수는 `tf.train.Checkpoint.restore`가 호출된 이후에 생성됩니다. # 이러한 변수는 생성되자마자 복원될 것이므로 단언문을 통해 체크포인트가 완벽히 적재되었다는 것을 보장받을 수 있습니다. # 자세한 내용은 [훈련 체크포인트 가이드](./checkpoint.ipynb)를 참고하세요. # + [markdown] colab_type="text" id="3yoD0VJ7qEg3" # ### 객체 지향형 지표 # # `tf.keras.metrics`는 객체로 저장됩니다. # 새로운 데이터를 이 객체에 전달하여 지표를 수정하고 `tf.keras.metrics.result` 메서드를 사용해 그 결과를 얻습니다. # 예를 들어: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="9ccu0iAaqEg5" m = tf.keras.metrics.Mean("loss") m(0) m(5) m.result() # => 2.5 m([8, 9]) m.result() # => 5.5 # + [markdown] colab_type="text" id="aB8qWtT955pI" # ### 서머리(summary)와 텐서보드 # # [텐서보드](https://tensorflow.org/tensorboard)는 훈련과정에서 모델을 파악하거나 디버깅하고 최적화하기 위해 사용하는 시각화 도구입니다. # 텐서보드는 프로그램이 실행되는 동안 작성된 서머리 이벤트를 사용합니다. # # 즉시 실행에서 변수의 서머리 정보를 기록하기 위해서 `tf.summary`를 사용합니다. 
# 예를 들어, 다음은 매 100번째 훈련마다 `loss`의 서머리 정보를 기록합니다: # + colab={} colab_type="code" id="z6VInqhA6RH4" logdir = "./tb/" writer = tf.summary.create_file_writer(logdir) with writer.as_default(): # 또는 반복 전에 writer.set_as_default()를 호출 for i in range(1000): step = i + 1 # 실제 훈련 함수로 손실을 계산 loss = 1 - 0.001 * step if step % 100 == 0: tf.summary.scalar('손실', loss, step=step) # + colab={} colab_type="code" id="08QQD2j36TaI" # !ls tb/ # + [markdown] colab_type="text" id="xEL4yJe5qEhD" # ## 자동 미분 관련 고급편 # # ### 동적 모델 # # `tf.GradientTape`는 또한 동적인 모델에서도 사용가능합니다. # 아래 예는 [역추적 길찾기](https://wikipedia.org/wiki/Backtracking_line_search) 알고리즘의 복잡한 제어 흐름에도 불구하고, # 그래디언트가 있으며 미분 가능이 하다는 것을 제외하면 일반적인 NumPy으로 작성한 코드처럼 보입니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="L518n5dkqEhE" def line_search_step(fn, init_x, rate=1.0): with tf.GradientTape() as tape: # 변수는 자동적으로 기록되지만 텐서는 사용자가 스스로 확인해야 함 tape.watch(init_x) value = fn(init_x) grad = tape.gradient(value, init_x) grad_norm = tf.reduce_sum(grad * grad) init_value = value while value > init_value - rate * grad_norm: x = init_x - rate * grad value = fn(x) rate /= 2.0 return x, value # + [markdown] colab_type="text" id="gieGOf_DqEhK" # ### 사용자 정의 그래디언트 # # 사용자 정의 그래디언트는 그래디언트를 재정의(override)하는 가장 쉬운 방법입니다. # 정방향 함수안에서 입력값 또는 출력값, 중간값과 관련된 그래디언트를 정의해야 합니다. 
# 예를 들어 다음은 역전파 과정에서 그래디언트의 놈(norm)을 클리핑(clip)하는 가장 쉬운 방법입니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="-OwwsWUAqEhK" @tf.custom_gradient def clip_gradient_by_norm(x, norm): y = tf.identity(x) def grad_fn(dresult): return [tf.clip_by_norm(dresult, norm), None] return y, grad_fn # + [markdown] colab_type="text" id="JPLDHkF_qEhN" # 사용자 정의 그래디언트는 일반적으로 연산에 대해 수치적으로(numerically) 안정된 그래디언트를 제공하기 위해 사용됩니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="24WiLROnqEhO" def log1pexp(x): return tf.math.log(1 + tf.exp(x)) def grad_log1pexp(x): with tf.GradientTape() as tape: tape.watch(x) value = log1pexp(x) return tape.gradient(value, x) # + colab={} colab_type="code" id="n8fq69r9-B-c" # 그래디언트 계산은 x = 0일 때 잘 동작 grad_log1pexp(tf.constant(0.)).numpy() # + colab={} colab_type="code" id="_VFSU0mG-FSp" # 그러나, x = 100일 때 수치적으로 불안정하기 때문에 실패 grad_log1pexp(tf.constant(100.)).numpy() # + [markdown] colab_type="text" id="-VcTR34rqEhQ" # 여기 `log1pexp` 함수는 이론적으로 사용자 정의 그래디언트를 활용해 간결해 질 수 있습니다. # 아래 구현은 불필요한 계산을 제거함으로써 계산을 좀 더 효율적으로 하기 위해 정방향 경로안에서 계산된 `tf.exp(x)`값을 재사용합니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="Q7nvfx_-qEhS" @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.math.log(1 + e), grad def grad_log1pexp(x): with tf.GradientTape() as tape: tape.watch(x) value = log1pexp(x) return tape.gradient(value, x) # + colab={} colab_type="code" id="5gHPKMfl-Kge" # 전처럼, 그래디언트 계산은 x = 0일 때 잘 동작 grad_log1pexp(tf.constant(0.)).numpy() # + colab={} colab_type="code" id="u38MOfz3-MDE" # 그래디언트 계산은 x = 100일 때 역시 잘 동작 grad_log1pexp(tf.constant(100.)).numpy() # + [markdown] colab_type="text" id="rnZXjfQzqEhV" # ## 성능 # # 즉시 실행에서 계산은 자동으로 GPU로 분배됩니다. 
# 만약 계산 분배를 사용자가 제어하고 싶다면 그 부분을 `tf.device('/gpu:0')` 블록 (또는 CPU도 동일)으로 감싸서 실행하세요: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="Ac9Y64H-qEhX" import time def measure(x, steps): # 텐서플로는 처음 사용할 때 GPU를 초기화, 시간계산에서 제외 tf.matmul(x, x) start = time.time() for i in range(steps): x = tf.matmul(x, x) # tf.matmul는 행렬 곱셈을 완료하기 전에 결과를 반환할 수 있습니다 # (예, CUDA 스트림 대기열에 연산을 추가한 후에 결과를 반환할 수 있다). # 아래 x.numpy() 호출은 대기열에 추가된 모든 연산이 완료될 것임을 보장합니다 # (그리고 그 결과가 호스트 메모리에 복사될 것이고, # 그래서 matnul 연산시간보다는 조금 많은 연산시간이 # 포함됩니다). _ = x.numpy() end = time.time() return end - start shape = (1000, 1000) steps = 200 print("{} 크기 행렬을 자기 자신과 {}번 곱했을 때 걸리는 시간:".format(shape, steps)) # CPU에서 실행: with tf.device("/cpu:0"): print("CPU: {} 초".format(measure(tf.random.normal(shape), steps))) # GPU에서 실행, 가능하다면: if tf.config.experimental.list_physical_devices("GPU"): with tf.device("/gpu:0"): print("GPU: {} 초".format(measure(tf.random.normal(shape), steps))) else: print("GPU: 없음") # + [markdown] colab_type="text" id="RLw3IS7UqEhe" # `tf.Tensor` 객체는 실제로 그 연산을 수행할 다른 디바이스로 복사될 수 있습니다: # + attributes={"classes": ["py"], "id": ""} colab={} colab_type="code" id="ny6LX2BVqEhf" if tf.config.experimental.list_physical_devices("GPU"): x = tf.random.normal([10, 10]) x_gpu0 = x.gpu() x_cpu = x.cpu() _ = tf.matmul(x_cpu, x_cpu) # CPU에서 실행 _ = tf.matmul(x_gpu0, x_gpu0) # GPU:0에서 실행 # + [markdown] colab_type="text" id="oA_qaII3-p6c" # ### 벤치마크 # # GPU에서 학습을 하는 [ResNet50](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/resnet50) 같은 계산량이 많은 모델에서, # 즉시 실행 성능은 `tf.function` 실행과 비교될 수 있습니다. # 그러나 이러한 차이는 계산량이 작은 모델인 경우 더 커지고, 수많은 작은 연산으로 구성된 모델은 자주 반복되는 부분을 최적화하는 사례도 있습니다. # # ## 함수를 활용 # # 즉시 실행이 개발과 디버깅 과정을 좀 더 대화형(interactive)으로 만들어 주지만 # 텐서플로 1.x 형태 그래프 실행은 학습의 분산과 성능, 운영 배포에 장점을 가지고 있습니다. # 이러한 차이를 해소하기 위해서, 텐서플로 2.0에서는 `tf.function` API를 도입했습니다. # 자세한 내용은 [tf.function](./function.ipynb) 가이드를 참고하세요.
site/ko/guide/eager.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: TCC
#     language: python
#     name: tcc
# ---

from git_crawler import GitCrawler

GitCrawler(['a'], ['b'], 'a/a')

# +
import argparse
import os
#from SentiCR.SentiCR import SentiCR
from SentiSW.code.classification.classifier import Classifier
from SentiSW.code.entity.training_set_generation import get_entity
import pickle
import asyncio
import aiohttp
import time
import datetime
import numpy as np
import math
import pymongo

# GitHub API credentials, one "user:token" pair per line in auth.txt.
users = []
tokens = []
user = ''
token = ''

'''
mongo_client = pymongo.MongoClient("mongodb://localhost:27017/")
database = mongo_client["reposadb"]
pull_requests_collection = database["pull_requests"]
repositories_collection = database["repositories"]
'''

with open('auth.txt', 'r') as file:
    for line in file.readlines():
        user, token = line.split(':')
        users.append(user.replace('\n', ''))
        tokens.append(token.replace('\n', ''))

repositories = []


def init():
    """Seed the repositories collection from repositories.txt.

    Each line of repositories.txt is an "owner/name" slug; a document is
    inserted only if one with the same "_id" does not already exist.

    NOTE(review): `repositories_collection` is only defined inside the
    commented-out Mongo setup block above — calling init() as the file
    stands raises NameError until that block is re-enabled.
    """
    print('Initialising')
    with open('repositories.txt', 'r') as file:
        # Shadows the module-level `repositories` on purpose? Kept local here.
        repositories = file.read().splitlines()
    for repository in repositories:
        owner, name = repository.split("/")
        query = {"_id": "{}/{}".format(owner, name)}
        documento = list(repositories_collection.find(query))
        if len(documento) == 0:
            repositories_collection.insert_one({
                "_id": "{}/{}".format(owner, name),
                "owner": owner,
                "name": name,
                'open_pull_requests': [],
                'closed_pull_requests': []
            })


def get_tuples(texts):
    """Classify sentiment for a batch of texts with SentiSW.

    Returns one dict per text: {'sentiment': label, 'entity': entity-or-None}.
    The entity is only extracted for non-Neutral texts.
    """
    #sentiment_analyzer = Classifier(read=False, vector_method='tfidf')
    #sentiment_analyzer.save_model()
    sentiment_analyzer = Classifier(read=True, vector_method='tfidf')
    sentiments = sentiment_analyzer.get_sentiment_polarity_collection(texts)
    tuples = []
    for i, sentiment in enumerate(sentiments):
        t = {'sentiment': sentiment[0]}
        # BUG FIX: the original compared the raw prediction container
        # (`sentiment != 'Neutral'`), which is always true since the label
        # lives at sentiment[0] — so get_entity() also ran for Neutral texts.
        # Compare the extracted label, mirroring get_tuple() below.
        if t['sentiment'] != 'Neutral':
            t['entity'] = get_entity(texts[i])
        else:
            t['entity'] = None
        tuples.append(t)
    return tuples


def get_tuple(text):
    """Classify a single text; same output shape as one get_tuples() item."""
    #sentiment_analyzer = Classifier(read=False, vector_method='tfidf')
    #sentiment_analyzer.save_model()
    sentiment_analyzer = Classifier(read=True, vector_method='tfidf')
    sentiment = sentiment_analyzer.get_sentiment_polarity(text)[0]
    ret = {'sentiment': sentiment}
    if sentiment != 'Neutral':
        entity = get_entity(text)
        ret['entity'] = entity
    else:
        ret['entity'] = None
    return ret


def classify(sentences):
    """Score each sentence with a (cached) SentiCR analyzer and print results.

    NOTE(review): the `SentiCR` import at the top of the file is commented
    out, so the cache-miss branch raises NameError as the file stands.
    Loading a pickle is only safe because the file is produced locally.
    """
    saved_SentiCR_model = 'classifier_models/SentiCR_model.sav'
    if os.path.exists(saved_SentiCR_model):
        # Use `with` so the file handle is closed deterministically
        # (the original leaked the handles from bare open() calls).
        with open(saved_SentiCR_model, 'rb') as model_file:
            sentiment_analyzer = pickle.load(model_file)
        print('Loaded SentiCR model')
    else:
        sentiment_analyzer = SentiCR.SentiCR()
        with open(saved_SentiCR_model, 'wb') as model_file:
            pickle.dump(sentiment_analyzer, model_file)
        print('Saved model to file')
    for sent in sentences:
        score = sentiment_analyzer.get_sentiment_polarity(sent)
        print(sent+"\n Score: "+str(score))
.ipynb_checkpoints/main-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## In the Accelerated_Intro_WilkinsonExams folder # # ## ISSUE THESE COMMADS # # # ### git fetch # ### git checkout FETCH_HEAD -- Exam\ Week\ 2.ipynb # ### git checkout FETCH_HEAD -- Germplasm.tsv # ### git checkout FETCH_HEAD -- LocusGene.tsv # # this will retrieve the exam material without destroying your existing notes # <pre> # # # </pre> # # ## Start your mySQL Server from a terminal # (if it isn't already running) # # <code>sudo docker start course-mysql</code> # <pre> # # </pre> # Don't forget that, if you use sqlMagic, you need to execute the connection lines in your Notebook! # # <pre> # # %load_ext sql # # %config SqlMagic.autocommit=False # # %sql mysql+pymysql://root:root@127.0.0.1:3306/mysql # </pre> # # ## Create a new Python3 Jupyter Notebook in your Exam Answers folder # # commit and push this Notebook when you are finished. # # You have **2 hours and 50 minutes to complete this exam.** # # # ## Data Files # Germplasm.tsv and LocusGene.tsv contain the datasets we need for the exam. # # Our objective is to create a database to contain the data in these files, insert the data into the database, then query the database in a variety of ways. # # # # ## Problem 1: Controls # # Write a Python script that proves that the lines of data in Germplasm.tsv, and LocusGene are in the same sequence, based on the AGI Locus Code (ATxGxxxxxx). (hint: This will help you decide how to load the data into the database) # ## Problem 2: Design and create the database. # * It should have two tables - one for each of the two data files. 
# * The two tables should be linked in a 1:1 relationship # * you may use either sqlMagic or pymysql to build the database # # # # ## Problem 3: Fill the database # Using pymysql, create a Python script that reads the data from these files, and fills the database. There are a variety of strategies to accomplish this. I will give all strategies equal credit - do whichever one you are most confident with. # ## Problem 4: Create reports, written to a file # # 1. Create a report that shows the full, joined, content of the two database tables (including a header line) # # 2. Create a joined report that only includes the Genes SKOR and MAA3 # # 3. Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) # # 4. Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx) # # When creating reports 2 and 3, remember the "Don't Repeat Yourself" rule! # # All reports should be written to **the same file**. You may name the file anything you wish. # <pre> # # </pre> # ## Don't forget to commit and push your answers before you leave! # # It was wonderful to have you in my class! I hope to see you again soon! # # Good luck with your careers!! # # Mark
Exam Week 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: nlu # language: python # name: nlu # --- # + [markdown] slideshow={"slide_type": "slide"} # # Vector-space models: retrofitting # - __author__ = "<NAME>" __version__ = "CS224u, Stanford, Spring 2021" # + [markdown] slideshow={"slide_type": "slide"} # ## Contents # # 1. [Overview](#Overview) # 1. [Set-up](#Set-up) # 1. [The retrofitting model](#The-retrofitting-model) # 1. [Examples](#Examples) # 1. [Only node 0 has outgoing edges](#Only-node-0-has-outgoing-edges) # 1. [All nodes connected to all others](#All-nodes-connected-to-all-others) # 1. [As before, but now 2 has no outgoing edges](#As-before,-but-now-2-has-no-outgoing-edges) # 1. [All nodes connected to all others, but $\alpha = 0$](#All-nodes-connected-to-all-others,-but-$\alpha-=-0$) # 1. [WordNet](#WordNet) # 1. [Background on WordNet](#Background-on-WordNet) # 1. [WordNet and VSMs](#WordNet-and-VSMs) # 1. [Reproducing the WordNet synonym graph experiment](#Reproducing-the-WordNet-synonym-graph-experiment) # 1. [Other retrofitting models and ideas](#Other-retrofitting-models-and-ideas) # + [markdown] slideshow={"slide_type": "slide"} # ## Overview # # Thus far, all of the information in our word vectors has come solely from co-occurrences patterns in text. This information is often very easy to obtain – though one does need a __lot__ of text – and it is striking how rich the resulting representations can be. # # Nonetheless, it seems clear that there is important information that we will miss this way – relationships that just aren't encoded at all in co-occurrences or that get distorted by such patterns. 
# # For example, it is probably straightforward to learn representations that will support the inference that all puppies are dogs (_puppy_ entails _dog_), but it might be difficult to learn that _dog_ entails _mammal_ because of the unusual way that very broad taxonomic terms like _mammal_ are used in text. # # The question then arises: how can we bring structured information – labels – into our representations? If we can do that, then we might get the best of both worlds: the ease of using co-occurrence data and the refinement that comes from using labeled data. # # In this notebook, we look at one powerful method for doing this: the __retrofitting__ model of [Faruqui et al. 2016](http://www.aclweb.org/anthology/N15-1184). In this model, one learns (or just downloads) distributed representations for nodes in a knowledge graph and then updates those representations to bring connected nodes closer to each other. # # This is an incredibly fertile idea; the final section of the notebook reviews some recent extensions, and new ones are likely appearing all the time. # + [markdown] slideshow={"slide_type": "slide"} # ## Set-up # - from collections import defaultdict from nltk.corpus import wordnet as wn import numpy as np import os import pandas as pd import retrofitting from retrofitting import Retrofitter import utils data_home = 'data' # __Note__: To make full use of this notebook, you will need the NLTK data distribution – or, at the very least, its WordNet files. Anaconda comes with NLTK but not with its data distribution. To install that, open a Python interpreter and run # # ```import nltk; nltk.download()``` # # If you decide to download the data to a different directory than the default, then you'll have to set `NLTK_DATA` in your shell profile. (If that doesn't make sense to you, then we recommend choosing the default download directory!) 
import nltk nltk.download('wordnet') # + [markdown] slideshow={"slide_type": "slide"} # ## The retrofitting model # # For an __an existing VSM__ $\widehat{Q}$ of dimension $m \times n$, and a set of __edges__ $E$ (pairs of indices into rows in $\widehat{Q}$), the retrofitting objective is to obtain a new VSM $Q$ (also dimension $m \times n$) according to the following objective: # # $$\sum_{i=1}^{m} \left[ # \alpha_{i}\|q_{i} - \widehat{q}_{i}\|_{2}^{2} # # + # \sum_{j : (i,j) \in E}\beta_{ij}\|q_{i} - q_{j}\|_{2}^{2} # \right]$$ # # The left term encodes a pressure to stay like the original vector. The right term encodes a pressure to be more like one's neighbors. In minimizing this objective, we should be able to strike a balance between old and new, VSM and graph. # # Definitions: # # 1. $\|u - v\|_{2}^{2}$ gives the __squared euclidean distance__ from $u$ to $v$. # # 1. $\alpha$ and $\beta$ are weights we set by hand, controlling the relative strength of the two pressures. In the paper, they use $\alpha=1$ and $\beta = \frac{1}{\{j : (i, j) \in E\}}$. # + [markdown] slideshow={"slide_type": "slide"} # ## Examples # # To get a feel for what's happening, it's helpful to visualize the changes that occur in small, easily understood VSMs and graphs. The function `retrofitting.plot_retro_path` helps with this. 
# + Q_hat = pd.DataFrame( [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0]], columns=['x', 'y']) Q_hat # + [markdown] slideshow={"slide_type": "slide"} # ### Only node 0 has outgoing edges # + edges_0 = {0: {1, 2}, 1: set(), 2: set()} _ = retrofitting.plot_retro_path(Q_hat, edges_0) # + [markdown] slideshow={"slide_type": "slide"} # ### All nodes connected to all others # + edges_all = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}} _ = retrofitting.plot_retro_path(Q_hat, edges_all) # + [markdown] slideshow={"slide_type": "slide"} # ### As before, but now 2 has no outgoing edges # + edges_isolated = {0: {1, 2}, 1: {0, 2}, 2: set()} _ = retrofitting.plot_retro_path(Q_hat, edges_isolated) # + [markdown] slideshow={"slide_type": "slide"} # ### All nodes connected to all others, but $\alpha = 0$ # - _ = retrofitting.plot_retro_path( Q_hat, edges_all, retrofitter=Retrofitter(alpha=lambda x: 0)) # + [markdown] slideshow={"slide_type": "slide"} # ## WordNet # # Faruqui et al. conduct experiments on three knowledge graphs: [WordNet](https://wordnet.princeton.edu), [FrameNet](https://framenet.icsi.berkeley.edu/fndrupal/), and the [Penn Paraphrase Database (PPDB)](http://paraphrase.org/). [The repository for their paper](https://github.com/mfaruqui/retrofitting) includes the graphs that they derived for their experiments. # # Here, we'll reproduce just one of the two WordNet experiments they report, in which the graph is formed based on synonymy. # + [markdown] slideshow={"slide_type": "slide"} # ### Background on WordNet # # WordNet is an incredible, hand-built lexical resource capturing a wealth of information about English words and their inter-relationships. ([Here is a collection of WordNets in other languages.](http://globalwordnet.org)) For a detailed overview using NLTK, see [this tutorial](http://compprag.christopherpotts.net/wordnet.html). # # The core concepts: # # * A __lemma__ is something like our usual notion of __word__. Lemmas are highly sense-disambiguated. 
For instance, there are six lemmas that are consistent with the string `crane`: the bird, the machine, the poets, ... # # * A __synset__ is a collection of lemmas that are synonymous in the WordNet sense (which is WordNet-specific; words with intuitively different meanings might still be grouped together into synsets.). # # WordNet is a graph of relations between lemmas and between synsets, capturing things like hypernymy, antonymy, and many others. For the most part, the relations are defined between nouns; the graph is sparser for other areas of the lexicon. # + lems = wn.lemmas('crane', pos=None) for lem in lems: ss = lem.synset() print("="*70) print("Lemma name: {}".format(lem.name())) print("Lemma Synset: {}".format(ss)) print("Synset definition: {}".format(ss.definition())) # + [markdown] slideshow={"slide_type": "slide"} # ### WordNet and VSMs # # A central challenge of working with WordNet is that one doesn't usually encounter lemmas or synsets in the wild. One probably gets just strings, or maybe strings with part-of-speech tags. Mapping these objects to lemmas is incredibly difficult. # # For our experiments with VSMs, we simply collapse together all the senses that a given string can have. This is expedient, of course. It might also be a good choice linguistically: senses are flexible and thus hard to individuate, and we might hope that our vectors can model multiple senses at the same time. # # (That said, there is excellent work on creating sense-vectors; see [Reisinger and Mooney 2010](http://www.aclweb.org/anthology/N10-1013); [Huang et al 2012](http://www.aclweb.org/anthology/P12-1092).) 
# # The following code uses the NLTK WordNet API to create the edge dictionary we need for using the `Retrofitter` class:

# +
# Small demo of the defaultdict(set) accumulation pattern used below:
e = defaultdict(set)
s = {name for name in ['a', 'b', 'c']}
print(s)
e['test'] |= s
e
# -

def get_wordnet_edges():
    """Map each lemma name to the set of lemma names sharing a synset with it.

    Every lemma in a synset is linked to every lemma in that synset
    (including itself), accumulated across all WordNet synsets.
    """
    edges = defaultdict(set)
    for ss in wn.all_synsets():
        lem_names = {lem.name() for lem in ss.lemmas()}
        for lem in lem_names:
            edges[lem] |= lem_names
    return edges

wn_edges = get_wordnet_edges()

print("number of edges: {}".format(len(wn_edges)))

k = list(wn_edges.keys())[10]
print(k)
print(wn_edges[k])

list(wn_edges.items())[0]

# + [markdown] slideshow={"slide_type": "slide"}
# ### Reproducing the WordNet synonym graph experiment

# + [markdown] slideshow={"slide_type": "-"}
# For our VSM, let's use the 300d file included in this distribution from the GloVe team, as it is close to or identical to the one used in the paper:
#
# http://nlp.stanford.edu/data/glove.6B.zip
#
# If you download this archive, place it in `vsmdata`, and unpack it, then the following will load the file into a dictionary for you:
# -

glove_dict = utils.glove2dict(
    os.path.join(data_home, 'glove.6B', 'glove.6B.300d.txt'))

# This is the initial embedding space $\widehat{Q}$:

X_glove = pd.DataFrame(glove_dict).T

X_glove.T.shape

# + [markdown] slideshow={"slide_type": "slide"}
# Now we just need to replace all of the strings in `edges` with indices into `X_glove`:
# -

def convert_edges_to_indices(edges, Q):
    """Translate a string-keyed edge dict into a row-index-keyed edge dict.

    `edges` maps node name -> set of neighbor names; `Q` is a DataFrame whose
    index holds the node names. Nodes (or neighbors) absent from Q's index are
    dropped, as are nodes left with no in-vocabulary neighbors.
    """
    lookup = dict(zip(Q.index, range(Q.shape[0])))
    index_edges = defaultdict(set)
    for start, finish_nodes in edges.items():
        s = lookup.get(start)
        # BUG FIX: the original used `if s:`, which silently discarded the
        # vocabulary item at row 0 because its index (0) is falsy.
        # Only `None` (a lookup miss) should be excluded.
        if s is not None:
            f = {lookup[n] for n in finish_nodes if n in lookup}
            if f:
                index_edges[s] = f
    return index_edges

wn_index_edges = convert_edges_to_indices(wn_edges, X_glove)

wn_index_edges

# + [markdown] slideshow={"slide_type": "slide"}
# And now we can retrofit:
# -

wn_retro = Retrofitter(verbose=True)
X_retro = wn_retro.fit(X_glove, wn_index_edges)
X_retro

# + [markdown] slideshow={"slide_type": "slide"}
# You can now evaluate
`X_retro` using the homework/bake-off notebook [hw_wordrelatedness.ipynb](hw_wordrelatedness.ipynb)! # + # Optionally write `X_retro` to disk for use elsewhere: X_retro.to_csv( os.path.join(data_home, 'glove6B300d-retrofit-wn.csv.gz'), compression='gzip') # + [markdown] slideshow={"slide_type": "slide"} # ## Other retrofitting models and ideas # # * The retrofitting idea is very close to __graph embedding__, in which one learns distributed representations of nodes based on their position in the graph. See [Hamilton et al. 2017](https://arxiv.org/pdf/1709.05584.pdf) for an overview of these methods. There are numerous parallels with the material we've reviewed here. # # * If you think of the input VSM as a "warm start" for graph embedding algorithms, then you're essentially retrofitting. This connection opens up a number of new opportunities to go beyond the similarity-based semantics that underlies Faruqui et al.'s model. See [Lengerich et al. 2017](https://arxiv.org/pdf/1708.00112.pdf), section 3.2, for more on these connections. # # * [Mrkšić et al. 2016](https://www.aclweb.org/anthology/N16-1018) address the limitation of Faruqui et al's model that it assumes connected nodes in the graph are similar. In a graph with complex, varied edge semantics, this is likely to be false. They address the case of antonymy in particular. # # * [Lengerich et al. 2017](https://arxiv.org/pdf/1708.00112.pdf) present a __functional retrofitting__ framework in which the edge meanings are explicitly modeled, and they evaluate instantiations of the framework with linear and neural edge penalty functions. (The Faruqui et al. model emerges as a specific instantiation of this framework.)
vsm_03_retrofitting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:analysis3-20.10] # language: python # name: conda-env-analysis3-20.10-py # --- # # Ocean surface Eddy Kinetic Energy trends # This Jupyter notebook shows how to compute the EKE trend maps reported in the manuscript "Global changes in oceanic mesoscale currents over the satellite altimetry record". # + # Import libraries import xarray as xr import cmocean as cm import cartopy.crs as ccrs import pylab as plt import numpy as np # Inline plotting # %matplotlib inline # - # #### Import Mann Kendall test: from xarrayMannKendall import Mann_Kendall_test from utils import area,ccrs_land,add_patches import datetime as datetime from dask.distributed import Client c = Client() c dataset_EKE = xr.open_dataset('../datasets/EKE_timeseries.nc',chunks={'lat':10,'lon':10}) rho=1025 #kg/m^3 EKE_density = rho*dataset_EKE.EKE EKE = EKE_density.sortby('time').sel(time=slice('1993','2020')) # Print function used. # ?Mann_Kendall_test # For more information in the Mann-Kendall method, please refer to: https://doi.org/10.1023/B:WARM.0000043140.61082.60 # The reported trends use a modified Mann-Kendall method. A modified Mann-Kendall test is used to assess statistical significance of trends, while properly taking into account the autocorrelation in the time-series. The effective sample size for all the reported trends is always smaller than the actual sample size, due to autocorrelation of the time series. tke_trends = Mann_Kendall_test(EKE,'time',MK_modified=True, method="linregress",alpha=0.05, coords_name = {'time':'time','x':'lon','y':'lat'}) # Note that the following cell will take ~1 hour depending on the number of cores, as the Mann-Kendall method requirest to invert a matrix of shape (len(time),len(time)) for each grid point. 
EKE_trends = tke_trends.compute() # + EKE_trends.attrs['title'] = "Eddy Kinetic Energy" EKE_trends.attrs['Description'] = """Eddy Kinetic Energy computed from AVISO+ altimetry. Then trends were computed using a modified Mann-Kendall test. \n See: https://github.com/josuemtzmo/xarrayMannKendall.""" EKE_trends.attrs['Publication'] = "Dataset created for Martínez-Moreno, J. et. al. 2020: \n 'Mesoscale kinetic energy response to changing oceans'" EKE_trends.attrs['Author'] = "<NAME>" EKE_trends.attrs['Contact'] = "<EMAIL>" EKE_trends.attrs['Created date'] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S") ###################################################### EKE_trends['trend'].attrs['units'] = r"$J m^{-3} day^{-1}$" EKE_trends['trend'].attrs['name'] = 'trend' EKE_trends['trend'].attrs['long_name'] = "Eddy Kinetic Energy trends" EKE_trends['trend'].attrs['missing_value'] = np.nan EKE_trends['trend'].attrs['valid_min'] = np.nanmin(EKE_trends['trend']) EKE_trends['trend'].attrs['valid_max'] = np.nanmax(EKE_trends['trend']) EKE_trends['trend'].attrs['valid_range'] = [np.nanmin(EKE_trends['trend']),np.nanmax(EKE_trends['trend'])] ###################################################### EKE_trends['signif'].attrs['units'] = "" EKE_trends['signif'].attrs['name'] = 'signif' EKE_trends['signif'].attrs['long_name'] = "Eddy Kinetic Energy trends significance" EKE_trends['signif'].attrs['missing_value'] = np.nan EKE_trends['signif'].attrs['valid_min'] = np.nanmin(EKE_trends['signif']) EKE_trends['signif'].attrs['valid_max'] = np.nanmax(EKE_trends['signif']) EKE_trends['signif'].attrs['valid_range'] = [np.nanmin(EKE_trends['signif']),np.nanmax(EKE_trends['signif'])] ###################################################### EKE_trends['p'].attrs['units'] = "" EKE_trends['p'].attrs['name'] = 'p' EKE_trends['p'].attrs['long_name'] = "Eddy Kinetic Energy trends p" EKE_trends['p'].attrs['missing_value'] = np.nan EKE_trends['p'].attrs['valid_min'] = np.nanmin(EKE_trends['p']) 
EKE_trends['p'].attrs['valid_max'] = np.nanmax(EKE_trends['p']) EKE_trends['p'].attrs['valid_range'] = [np.nanmin(EKE_trends['p']),np.nanmax(EKE_trends['p'])] # + comp = dict(zlib=True, complevel=5) encoding = {var: comp for var in EKE_trends.data_vars} EKE_trends.to_netcdf('../datasets/EKE_trends.nc', encoding=encoding) # - EKE_trends trends = EKE_trends.trend*10*365 # Convert to trends per decade # + fig = plt.figure(figsize=(6,2), dpi=200) ax = fig.add_subplot(1, 1, 1, projection = ccrs.Robinson(central_longitude=180)) im = trends.plot(transform=ccrs.PlateCarree(),cbar_kwargs=dict(label="$J\ m^{-3}\ decade^{-1}$"), cmap=cm.cm.balance, vmin=-10, vmax=10,ax=ax) ax.set_extent([0.1,359.99,-60, 60], crs=ccrs.PlateCarree()) ax.add_feature(ccrs_land) add_patches(ax) # - # # EKE decomposition trends dataset_EKE = xr.open_dataset('../datasets/EKE_decomposition_timeseries.nc',chunks={'lat':10,'lon':10}) dataset_EKE # ### Mesoscale EKE trends EKE_m_density = dataset_EKE.EKE_m EKE_m = EKE_m_density.sortby('time').sel(time=slice('1993','2020')) eke_m_trends = Mann_Kendall_test(EKE_m,'time',MK_modified=True, method="linregress",alpha=0.05, coords_name = {'time':'time','x':'lon','y':'lat'}) EKE_m_trends = eke_m_trends.compute() # + EKE_m_trends.attrs['title'] = "Mesoscale Eddy Kinetic Energy trends" EKE_m_trends.attrs['Description'] = """Mesoscale Eddy Kinetic Energy computed from AVISO+ altimetry. Then trends were computed using a modified Mann-Kendall test. \n See: https://github.com/josuemtzmo/xarrayMannKendall.""" EKE_m_trends.attrs['Publication'] = "Dataset created for Martínez-Moreno, J. et. al. 
2020: \n 'Mesoscale kinetic energy response to changing oceans'" EKE_m_trends.attrs['Author'] = "<NAME>" EKE_m_trends.attrs['Contact'] = "<EMAIL>" EKE_m_trends.attrs['Created date'] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S") ###################################################### EKE_m_trends['trend'].attrs['units'] = r"$J m^{-3} day^{-1}$" EKE_m_trends['trend'].attrs['name'] = 'trend' EKE_m_trends['trend'].attrs['long_name'] = "Mesoscale Eddy Kinetic Energy trends" EKE_m_trends['trend'].attrs['missing_value'] = np.nan EKE_m_trends['trend'].attrs['valid_min'] = np.nanmin(EKE_m_trends['trend']) EKE_m_trends['trend'].attrs['valid_max'] = np.nanmax(EKE_m_trends['trend']) EKE_m_trends['trend'].attrs['valid_range'] = [np.nanmin(EKE_m_trends['trend']),np.nanmax(EKE_m_trends['trend'])] ###################################################### EKE_m_trends['signif'].attrs['units'] = "" EKE_m_trends['signif'].attrs['name'] = 'signif' EKE_m_trends['signif'].attrs['long_name'] = "Mesoscale Eddy Kinetic Energy trends significance" EKE_m_trends['signif'].attrs['missing_value'] = np.nan EKE_m_trends['signif'].attrs['valid_min'] = np.nanmin(EKE_m_trends['signif']) EKE_m_trends['signif'].attrs['valid_max'] = np.nanmax(EKE_m_trends['signif']) EKE_m_trends['signif'].attrs['valid_range'] = [np.nanmin(EKE_m_trends['signif']),np.nanmax(EKE_m_trends['signif'])] ###################################################### EKE_m_trends['p'].attrs['units'] = "" EKE_m_trends['p'].attrs['name'] = 'p' EKE_m_trends['p'].attrs['long_name'] = "Mesoscale Eddy Kinetic Energy trends p" EKE_m_trends['p'].attrs['missing_value'] = np.nan EKE_m_trends['p'].attrs['valid_min'] = np.nanmin(EKE_m_trends['p']) EKE_m_trends['p'].attrs['valid_max'] = np.nanmax(EKE_m_trends['p']) EKE_m_trends['p'].attrs['valid_range'] = [np.nanmin(EKE_m_trends['p']),np.nanmax(EKE_m_trends['p'])] # + comp = dict(zlib=True, complevel=5) encoding = {var: comp for var in EKE_m_trends.data_vars} 
EKE_m_trends.to_netcdf('../datasets/mesoscale_EKE_trends.nc', encoding=encoding) # - # ### Large-scale EKE trends EKE_ls_density = dataset_EKE.EKE_ls EKE_ls = EKE_ls_density.sortby('time').sel(time=slice('1993','2020')) eke_ls_trends = Mann_Kendall_test(EKE_ls,'time',MK_modified=True, method="linregress",alpha=0.05, coords_name = {'time':'time','x':'lon','y':'lat'}) EKE_ls_trends = eke_ls_trends.compute() # + EKE_ls_trends.attrs['title'] = "Large-scale Eddy Kinetic Energy trends" EKE_ls_trends.attrs['Description'] = """Mesoscale Eddy Kinetic Energy computed from AVISO+ altimetry. Then trends were computed using a modified Mann-Kendall test. \n See: https://github.com/josuemtzmo/xarrayMannKendall.""" EKE_ls_trends.attrs['Publication'] = "Dataset created for Martínez-Moreno, J. et. al. 2020: \n 'Mesoscale kinetic energy response to changing oceans'" EKE_ls_trends.attrs['Author'] = "<NAME>" EKE_ls_trends.attrs['Contact'] = "<EMAIL>" EKE_ls_trends.attrs['Created date'] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S") ###################################################### EKE_ls_trends['trend'].attrs['units'] = r"$J m^{-3} day^{-1}$" EKE_ls_trends['trend'].attrs['name'] = 'trend' EKE_ls_trends['trend'].attrs['long_name'] = "Large-scale Eddy Kinetic Energy trends" EKE_ls_trends['trend'].attrs['missing_value'] = np.nan EKE_ls_trends['trend'].attrs['valid_min'] = np.nanmin(EKE_ls_trends['trend']) EKE_ls_trends['trend'].attrs['valid_max'] = np.nanmax(EKE_ls_trends['trend']) EKE_ls_trends['trend'].attrs['valid_range'] = [np.nanmin(EKE_ls_trends['trend']),np.nanmax(EKE_ls_trends['trend'])] ###################################################### EKE_ls_trends['signif'].attrs['units'] = "" EKE_ls_trends['signif'].attrs['name'] = 'signif' EKE_ls_trends['signif'].attrs['long_name'] = "Large-scale Eddy Kinetic Energy trends significance" EKE_ls_trends['signif'].attrs['missing_value'] = np.nan EKE_ls_trends['signif'].attrs['valid_min'] = np.nanmin(EKE_ls_trends['signif']) 
EKE_ls_trends['signif'].attrs['valid_max'] = np.nanmax(EKE_ls_trends['signif']) EKE_ls_trends['signif'].attrs['valid_range'] = [np.nanmin(EKE_ls_trends['signif']),np.nanmax(EKE_ls_trends['signif'])] ###################################################### EKE_ls_trends['p'].attrs['units'] = "" EKE_ls_trends['p'].attrs['name'] = 'p' EKE_ls_trends['p'].attrs['long_name'] = "Large-scale Eddy Kinetic Energy trends p" EKE_ls_trends['p'].attrs['missing_value'] = np.nan EKE_ls_trends['p'].attrs['valid_min'] = np.nanmin(EKE_ls_trends['p']) EKE_ls_trends['p'].attrs['valid_max'] = np.nanmax(EKE_ls_trends['p']) EKE_ls_trends['p'].attrs['valid_range'] = [np.nanmin(EKE_ls_trends['p']),np.nanmax(EKE_ls_trends['p'])] # + comp = dict(zlib=True, complevel=5) encoding = {var: comp for var in EKE_ls_trends.data_vars} EKE_ls_trends.to_netcdf('../datasets/largescale_EKE_trends.nc', encoding=encoding) # - # ## Plot trends # + fig = plt.figure(figsize=(6,4), dpi=200) ax = fig.add_subplot(2, 1, 1, projection = ccrs.Robinson(central_longitude=180)) im = (EKE_ls_trends.trend*10*365).plot(transform=ccrs.PlateCarree(),cbar_kwargs=dict(label="$J\ m^{-3}\ decade^{-1}$"), cmap=cm.cm.balance,vmin=-10,vmax=10,ax=ax) ax.set_extent([0.1,359.99,-60, 60], crs=ccrs.PlateCarree()) ax.add_feature(ccrs_land) ax.set_title('Large-scale EKE trends') add_patches(ax) ax = fig.add_subplot(2, 1, 2, projection = ccrs.Robinson(central_longitude=180)) im = (EKE_m_trends.trend*10*365).plot(transform=ccrs.PlateCarree(),cbar_kwargs=dict(label="$J\ m^{-3}\ decade^{-1}$"), cmap=cm.cm.balance,vmin=-10,vmax=10,ax=ax) ax.set_extent([0.1,359.99,-60, 60], crs=ccrs.PlateCarree()) ax.add_feature(ccrs_land) ax.set_title('Mesoscale EKE trends') add_patches(ax)
trends/Compute_EKE_trends.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from sklearn.datasets.samples_generator import make_blobs
import sklearn.mixture as gmm

X, y_true = make_blobs(n_samples=400, centers=4,
                       cluster_std=0.60, random_state=0)

model = gmm.GaussianMixture(n_components=4, warm_start=False, init_params = 'kmeans')
model.fit(X)
# print(model)
y_predict = model.predict(X)
# print(model.score(X, y_true))

# +
# Based on: https://www.eduonix.com/dashboard/complete-guide-to-machine-learning-using-python
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
import sklearn.mixture as gmm

X, y_true = make_blobs(n_samples=400, centers=4, cluster_std=1, random_state=0)

model = gmm.GaussianMixture(n_components=4, warm_start=False, init_params = 'kmeans')
model.fit(X)
# BUG FIX: recompute cluster assignments for THIS cell's data and model.
# The original reused `y_predict` from the previous cell, which was computed
# on different data (cluster_std=0.60), so the right-hand panel plotted
# stale labels that did not correspond to the points being drawn.
y_predict = model.predict(X)

# Left panel: ground-truth blob labels.
plt.subplot(1,2,1)
plt.scatter(X[:,0], X[:,1], c = y_true, s=40, cmap='viridis')
plt.axis('equal')

# Right panel: GMM-predicted labels with component ellipses overlaid.
plt.subplot(1, 2, 2)
ax = plt.gca()
ax.scatter(X[:,0], X[:,1], c = y_predict, s=40, cmap='viridis')
ax.axis('equal')

w_factor = 0.2 / model.weights_.max()
for pos, covar, weights in zip(model.means_, model.covariances_, model.weights_):
    # SVD of the covariance gives the ellipse orientation and axis lengths.
    U, s, vt = np.linalg.svd(covar)
    angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
    width, height = 2 * np.sqrt(s)
    #
    # Draw the Ellipse (three nested contours per component)
    for center in range(1, 4):
        ax.add_patch(Ellipse(pos, center * width, center * height,
                             angle, alpha = weights * w_factor))

plt.show()
# -
10_ Gaussian Mixture Models/make_blobs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np #numpy is a library for making computations
import matplotlib.pyplot as plt #it is a 2D plotting library
import pandas as pd # pandas is mainly used for data analysis
import seaborn as sns # data visualization library

# %matplotlib inline
#magic function to embed all the graphs in the python notebook

#Import the salary dataset
#Reading the csv file using the read_csv function of the pandas module
df=pd.read_csv("E:/YOUTUBE_proj_Techtrends/Salary_Data.csv")
#The read_csv function converts the data into dataframe

#Look how the data looks like
#Lets print the first 5 rows of the dataframe
df.head()

#Storing the feature (YearsExperience) in X and the target (Salary) in y
X=df.iloc[:,:-1].values
# BUG FIX: the original used df.iloc[:,:1], which selects the FIRST column
# (YearsExperience) again, so the model was trained to predict the feature
# from itself — which also explains the "perfect" r2 reported later.
# The regression target is the Salary column (column index 1).
y=df.iloc[:,1].values

sns.distplot(df['YearsExperience'],kde=False,bins=10)
#This plot is used to represent univariate distribution of observations

#Show the counts of observations in each categorical bin using bars
sns.countplot(y='YearsExperience',data=df)

#Plotting a barplot
sns.barplot(x='YearsExperience',y='Salary',data=df)

#Representing the correlation among the columns using a heatmap
sns.heatmap(df.corr())

sns.distplot(df.Salary)

# #### Now we will use the scikit learn package to create the Linear Regression model
#
# #### Split the data into training and testing set

from sklearn.model_selection import train_test_split

#splitting the data using this module and setting the test size as 1/3 . 
Rest 2/3 is used for training the data X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=1/3,random_state=0) # #### Creating the Linear Regression Model and Fitting the training data #importing the linear regression model from sklearn.linear_model import LinearRegression #creating the model lr=LinearRegression() lr.fit(X_train,y_train) #fitting the training data X_train.shape #Counting the number of observations in the training data y_train.shape # #### Predicting the Test Results y_pred=lr.predict(X_test) y_pred #Predicted data # #### Visualizing the training data # # #### Plotting the actual y training values VS the y values predicted by the model using training data plt.scatter(X_train,y_train,color='blue') plt.plot(X_train,lr.predict(X_train),color='red') plt.title('Salary vs Years of Experience (Training Data)') plt.xlabel('Years of Experience') plt.ylabel('Salary of an employee') plt.show() # We see that the data is fitted so well and the predicted and actual data is almost the same # # ### Visualizing the Test Data # # Plotting the y test data vs y predicted data plt.scatter(X_test,y_test,color='blue') plt.plot(X_test,lr.predict(X_test),color='red') plt.title('Salary vs Years of Experience (Test Data)') plt.xlabel('Years of Experience') plt.ylabel('Salary of an employee') plt.show() # We see that the predicted data fits the regression line so well # # #### Calculating the errors so as to check the difference between the actual value and predicted model value... 
There are certain metrics to find these error such as Mean Squared Error, Root Mean Squared Error and Mean Absolute Error** from sklearn import metrics print('Mean Absolute Error of the Model:',metrics.mean_absolute_error(y_test,y_pred)) print('Mean Squared Error of the Model: ',metrics.mean_squared_error(y_test,y_pred)) print('Root Mean Squared Error of the Model: ',np.sqrt(metrics.mean_absolute_error(y_test,y_pred))) # #### Looking at the values we see that the error is very minute and hence we can see our model gives very accurate values from sklearn.metrics import r2_score r2_score(y_test,y_pred) #This shows that our model is completely accurate #R value lies between 0 to 1. Value of 1 represents it is completely accurate
salary_prediction_ml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simple Method from pewpyter.jupyter_forms import JupyterForm, JupyterMethod from tests.jupyter_forms import fake_methods as fm jupyter_method = JupyterMethod(fm.fake_annotated) jupyter_form = JupyterForm(jupyter_method) jupyter_form.display_ui() # # Less Simple Method jupyter_method = JupyterMethod(fm.fake_many_default) jupyter_form = JupyterForm(jupyter_method) jupyter_form.display_ui() # # Class Method fake_obj = fm.FakeClass() jupyter_method = JupyterMethod(fm.FakeClass.fake_instance_method) jupyter_form = JupyterForm(jupyter_method, obj_name='fake_obj') jupyter_form.display_ui()
Jupyter Forms Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Crystallization Database Connector Notebook # This notebook illustrates how to access the postgres database and perform some numberical analysis. # # ### Use # - goto top menu [Cell] --> [Run All] # - The bottom illustrates a continuously updated plot that is receiving data from the crystallization module. # - Note that the plot updates at the frequency that the crystallization module sends new data. Thus, the plot may not appear to be changing - be patient. # # ### Details # - The first cell imports the necessary modules to connect to the crystallization module and imports some auxillary modules to aid in calculating and plotting results. # - The second group of cells demonstrates how to connect to the PostgreSQL database for array data analytics. # - The third group of cells demonstrates how to connect to the InfluxDB database for scalar data analytics. 
# ### Importing the needed modules import numpy as np import json from datetime import datetime from matplotlib import pyplot as plt import psycopg2 from influxdb import InfluxDBClient # ## PostgreSQL # ### Connect to the database try: conn = psycopg2.connect("dbname='crystallization' user='postgres' host='postgres' password='<PASSWORD>'") except: print("failed to connect") # ### Get the data try: cur = conn.cursor() cur.execute("""SELECT * FROM infrared""") rows = cur.fetchall() except: conn.rollback() cur = conn.cursor() cur.execute("""SELECT * FROM infrared""") rows = cur.fetchall() # #### Filter the data for analysis data = np.array([row[3] for row in rows if row[1]=="IR:1"]) # ### Make some calculations and plots # #### Plot one of the spectra SPECTRA_NUMBER = 50 fig, ax = plt.subplots(figsize=(10,6)) ax.plot(data[SPECTRA_NUMBER]) ax.set_xlabel("frequency") ax.set_ylabel("absorption") ax.set_title("IR:1") # #### Plot a single wavelength WAVELENGTH = 200 fig, ax = plt.subplots(figsize=(10,6)) ax.plot(data[:, WAVELENGTH]) ax.set_xlabel("WAVELENGTH=500") ax.set_ylabel("absorption") ax.set_title("IR:1") # ## Influxdb # #### Connect to the database client = InfluxDBClient(host='influx', port=8086) client.switch_database('crystallization') client.get_list_measurements() # #### Get the data results = client.query('SELECT "BATH:TEMP:PV" FROM "crystallization"."autogen"."crystallization" WHERE time > now() - 1m GROUP BY "EQUIPMENT"') # #### Filter and format the data TAG = "TCU:1" points = results.get_points(tags={"EQUIPMENT": TAG}) data = np.array([[p["time"], round(p["BATH:TEMP:PV"], 1)] for p in points]) # #### Plot the data fig, ax = plt.subplots(figsize=(10,6)) ax.plot(data[:, 1]) ax.set_xlabel("time") ax.set_ylabel("temperature") ax.set_title("TCU:1")
jupyter/Database.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="klGNgWREsvQv" # ##### Copyright 2018 The TF-Agents Authors. # + colab={} colab_type="code" id="nQnmcm0oI1Q-" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="pmDI-h7cI0tI" # # Train a Deep Q Network with TF-Agents # + [markdown] colab_type="text" id="lsaQlK8fFQqH" # ### Get Started # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # # + [markdown] colab_type="text" id="cKOCZlhUgXVK" # This example shows how to train a [DQN (Deep Q Networks)](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) agent on the Cartpole environment using the TF-Agents library. 
# # ![Cartpole environment](https://raw.githubusercontent.com/tensorflow/agents/master/tf_agents/colabs/images/cartpole.png) # # It will walk you through all the components in a Reinforcement Learning (RL) pipeline for training, evaluation and data collection. # # # To run this code live, click the 'Run in Google Colab' link above. # # + [markdown] colab_type="text" id="1u9QVVsShC9X" # ## Setup # + colab={} colab_type="code" id="KEHR2Ui-lo8O" # Note: If you haven't installed the following dependencies, run: # !apt-get install xvfb # !pip install 'gym==0.10.11' # !pip install 'imageio==2.4.0' # !pip install PILLOW # !pip install 'pyglet==1.3.2' # !pip install pyvirtualdisplay # !pip install tf-agents-nightly # !pip install tf-nightly # + colab={} colab_type="code" id="sMitx5qSgJk1" from __future__ import absolute_import, division, print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.dqn import dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. 
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() # + colab={} colab_type="code" id="NspmzG4nP3b9" tf.version.VERSION # + [markdown] colab_type="text" id="LmC0NDhdLIKY" # ## Hyperparameters # + colab={} colab_type="code" id="HC1kNrOsLSIZ" num_iterations = 20000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_max_length = 100000 # @param {type:"integer"} batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} log_interval = 200 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} # + [markdown] colab_type="text" id="VMsJC3DEgI0x" # ## Environment # # In Reinforcement Learning (RL), an environment represents the task or problem to be solved. Standard environments can be created in TF-Agents using `tf_agents.environments` suites. TF-Agents has suites for loading environments from sources such as the OpenAI Gym, Atari, and DM Control. # # Load the CartPole environment from the OpenAI Gym suite. # + colab={} colab_type="code" id="pYEz-S9gEv2-" env_name = 'CartPole-v0' env = suite_gym.load(env_name) # + [markdown] colab_type="text" id="IIHYVBkuvPNw" # You can render this environment to see how it looks. A free-swinging pole is attached to a cart. The goal is to move the cart right or left in order to keep the pole pointing up. # + colab={} colab_type="code" id="RlO7WIQHu_7D" #@test {"skip": true} env.reset() PIL.Image.fromarray(env.render()) # + [markdown] colab_type="text" id="B9_lskPOey18" # The `environment.step` method takes an `action` in the environment and returns a `TimeStep` tuple containing the next observation of the environment and the reward for the action. # # The `time_step_spec()` method returns the specification for the `TimeStep` tuple. 
Its `observation` attribute shows the shape of observations, the data types, and the ranges of allowed values. The `reward` attribute shows the same details for the reward. # # # + colab={} colab_type="code" id="exDv57iHfwQV" print('Observation Spec:') print(env.time_step_spec().observation) # + colab={} colab_type="code" id="UxiSyCbBUQPi" print('Reward Spec:') print(env.time_step_spec().reward) # + [markdown] colab_type="text" id="b_lHcIcqUaqB" # The `action_spec()` method returns the shape, data types, and allowed values of valid actions. # + colab={} colab_type="code" id="bttJ4uxZUQBr" print('Action Spec:') print(env.action_spec()) # + [markdown] colab_type="text" id="eJCgJnx3g0yY" # In the Cartpole environment: # # - `observation` is an array of 4 floats: # - the position and velocity of the cart # - the angular position and velocity of the pole # - `reward` is a scalar float value # - `action` is a scalar integer with only two possible values: # - `0` — "move left" # - `1` — "move right" # # + colab={} colab_type="code" id="V2UGR5t_iZX-" time_step = env.reset() print('Time step:') print(time_step) action = 1 next_time_step = env.step(action) print('Next time step:') print(next_time_step) # + [markdown] colab_type="text" id="4JSc9GviWUBK" # Usually two environments are instantiated: one for training and one for evaluation. # + colab={} colab_type="code" id="N7brXNIGWXjC" train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) # + [markdown] colab_type="text" id="zuUqXAVmecTU" # The Cartpole environment, like most environments, is written in pure Python. This is converted to TensorFlow using the `TFPyEnvironment` wrapper. # # The original environment's API uses Numpy arrays. The `TFPyEnvironment` converts these to `Tensors` to make it compatible with Tensorflow agents and policies. 
# # + colab={} colab_type="code" id="Xp-Y4mD6eDhF" train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) # + [markdown] colab_type="text" id="E9lW_OZYFR8A" # ## Agent # # The algorithm used to solve an RL problem is represented by an `Agent`. TF-Agents provides standard implementations of a variety of `Agents`, including: # # - [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) (used in this tutorial) # - [REINFORCE](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) # - [DDPG](https://arxiv.org/pdf/1509.02971.pdf) # - [TD3](https://arxiv.org/pdf/1802.09477.pdf) # - [PPO](https://arxiv.org/abs/1707.06347) # - [SAC](https://arxiv.org/abs/1801.01290). # # The DQN agent can be used in any environment which has a discrete action space. # # At the heart of a DQN Agent is a `QNetwork`, a neural network model that can learn to predict `QValues` (expected returns) for all actions, given an observation from the environment. # # Use `tf_agents.networks.q_network` to create a `QNetwork`, passing in the `observation_spec`, `action_spec`, and a tuple describing the number and size of the model's hidden layers. # # # + colab={} colab_type="code" id="TgkdEPg_muzV" fc_layer_params = (100,) q_net = q_network.QNetwork( train_env.observation_spec(), train_env.action_spec(), fc_layer_params=fc_layer_params) # + [markdown] colab_type="text" id="z62u55hSmviJ" # Now use `tf_agents.agents.dqn.dqn_agent` to instantiate a `DqnAgent`. In addition to the `time_step_spec`, `action_spec` and the QNetwork, the agent constructor also requires an optimizer (in this case, `AdamOptimizer`), a loss function, and an integer step counter. 
# + colab={} colab_type="code" id="jbY4yrjTEyc9" optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.Variable(0) agent = dqn_agent.DqnAgent( train_env.time_step_spec(), train_env.action_spec(), q_network=q_net, optimizer=optimizer, td_errors_loss_fn=common.element_wise_squared_loss, train_step_counter=train_step_counter) agent.initialize() # + [markdown] colab_type="text" id="I0KLrEPwkn5x" # ## Policies # # A policy defines the way an agent acts in an environment. Typically, the goal of reinforcement learning is to train the underlying model until the policy produces the desired outcome. # # In this tutorial: # # - The desired outcome is keeping the pole balanced upright over the cart. # - The policy returns an action (left or right) for each `time_step` observation. # # Agents contain two policies: # # - `agent.policy` — The main policy that is used for evaluation and deployment. # - `agent.collect_policy` — A second policy that is used for data collection. # # # # # # # # # # + colab={} colab_type="code" id="BwY7StuMkuV4" eval_policy = agent.policy collect_policy = agent.collect_policy # + [markdown] colab_type="text" id="2Qs1Fl3dV0ae" # Policies can be created independently of agents. For example, use `tf_agents.policies.random_tf_policy` to create a policy which will randomly select an action for each `time_step`. # + colab={} colab_type="code" id="HE37-UCIrE69" random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) # + [markdown] colab_type="text" id="dOlnlRRsUbxP" # To get an action from a policy, call the `policy.step(time_step)` method. The `time_step` contains the observation from the environment. 
This method returns a `PolicyStep`, which is a named tuple with three components: # # - `action` — the action to be taken (in this case, `0` or `1`) # - `state` — used for stateful (that is, RNN-based) policies # - `info` — auxiliary data, such as log probabilities of actions # + colab={} colab_type="code" id="5gCcpXswVAxk" example_environment = tf_py_environment.TFPyEnvironment( suite_gym.load('CartPole-v0')) # + colab={} colab_type="code" id="D4DHZtq3Ndis" time_step = example_environment.reset() # + colab={} colab_type="code" id="PRFqAUzpNaAW" random_policy.action(time_step) # + [markdown] colab_type="text" id="94rCXQtbUbXv" # ## Metrics and Evaluation # # The most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode. Several episodes are run, creating an average return. # # The following function computes the average return of a policy, given the policy, environment, and a number of episodes. # # + colab={} colab_type="code" id="bitzHo5_UbXy" #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] # See also the metrics module for standard implementations of different metrics. # https://github.com/tensorflow/agents/tree/master/tf_agents/metrics # + [markdown] colab_type="text" id="_snCVvq5Z8lJ" # Running this computation on the `random_policy` shows a baseline performance in the environment. 
# + colab={} colab_type="code" id="9bgU6Q6BZ8Bp" compute_avg_return(eval_env, random_policy, num_eval_episodes) # + [markdown] colab_type="text" id="NLva6g2jdWgr" # ## Replay Buffer # # The replay buffer keeps track of data collected from the environment. This tutorial uses `tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer`, as it is the most common. # # The constructor requires the specs for the data it will be collecting. This is available from the agent using the `collect_data_spec` method. The batch size and maximum buffer length are also required. # # # + colab={} colab_type="code" id="vX2zGUWJGWAl" replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_max_length) # + [markdown] colab_type="text" id="ZGNTDJpZs4NN" # For most agents, `collect_data_spec` is a named tuple called `Trajectory`, containing the specs for observations, actions, rewards, and other items. # + colab={} colab_type="code" id="_IZ-3HcqgE1z" agent.collect_data_spec # + colab={} colab_type="code" id="sy6g1tGcfRlw" agent.collect_data_spec._fields # + [markdown] colab_type="text" id="rVD5nQ9ZGo8_" # ## Data Collection # # Now execute the random policy in the environment for a few steps, recording the data in the replay buffer. # + colab={} colab_type="code" id="wr1KSAEGG4h9" #@test {"skip": true} def collect_step(environment, policy, buffer): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer buffer.add_batch(traj) def collect_data(env, policy, buffer, steps): for _ in range(steps): collect_step(env, policy, buffer) collect_data(train_env, random_policy, replay_buffer, steps=100) # This loop is so common in RL, that we provide standard implementations. 
# For more details see the drivers module. # https://github.com/tensorflow/agents/blob/master/tf_agents/docs/python/tf_agents/drivers.md # + [markdown] colab_type="text" id="84z5pQJdoKxo" # The replay buffer is now a collection of Trajectories. # + colab={} colab_type="code" id="4wZnLu2ViO4E" # For the curious: # Uncomment to peel one of these off and inspect it. # iter(replay_buffer.as_dataset()).next() # + [markdown] colab_type="text" id="TujU-PMUsKjS" # The agent needs access to the replay buffer. This is provided by creating an iterable `tf.data.Dataset` pipeline which will feed data to the agent. # # Each row of the replay buffer only stores a single observation step. But since the DQN Agent needs both the current and next observation to compute the loss, the dataset pipeline will sample two adjacent rows for each item in the batch (`num_steps=2`). # # This dataset is also optimized by running parallel calls and prefetching data. # + colab={} colab_type="code" id="ba7bilizt_qW" # Dataset generates trajectories with shape [Bx2x...] dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2).prefetch(3) dataset # + colab={} colab_type="code" id="K13AST-2ppOq" iterator = iter(dataset) print(iterator) # + colab={} colab_type="code" id="Th5w5Sff0b16" # For the curious: # Uncomment to see what the dataset iterator is feeding to the agent. # Compare this representation of replay data # to the collection of individual trajectories shown earlier. # iterator.next() # + [markdown] colab_type="text" id="hBc9lj9VWWtZ" # ## Training the agent # # Two thing must happen during the training loop: # # - collect data from the environment # - use that data to train the agent's neural network(s) # # This example also periodicially evaluates the policy and prints the current score. # # The following will take ~5 minutes to run. 
# + colab={} colab_type="code" id="0pTbJ3PeyF-u" #@test {"skip": true} # %%time # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy, replay_buffer) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience).loss step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1}'.format(step, avg_return)) returns.append(avg_return) # + [markdown] colab_type="text" id="68jNcA_TiJDq" # ## Visualization # # # + [markdown] colab_type="text" id="aO-LWCdbbOIC" # ### Plots # # Use `matplotlib.pyplot` to chart how the policy improved during training. # # One iteration of `Cartpole-v0` consists of 200 time steps. The environment gives a reward of `+1` for each step the pole stays up, so the maximum return for one episode is 200. The charts shows the return increasing towards that maximum each time it is evaluated during training. (It may be a little unstable and not increase monotonically each time.) 
# + colab={} colab_type="code" id="NxtL1mbOYCVO"
#@test {"skip": true}

iterations = range(0, num_iterations + 1, eval_interval)
plt.plot(iterations, returns)
plt.ylabel('Average Return')
plt.xlabel('Iterations')
plt.ylim(top=250)

# + [markdown] colab_type="text" id="M7-XpPP99Cy7"
# ### Videos

# + [markdown] colab_type="text" id="9pGfGxSH32gn"
# Charts are nice. But more exciting is seeing an agent actually performing a task in an environment.
#
# First, create a function to embed videos in the notebook.

# + colab={} colab_type="code" id="ULaGr8pvOKbl"
def embed_mp4(filename):
  """Embeds an mp4 file in the notebook.

  Reads the file, base64-encodes its bytes, and returns an IPython HTML
  object wrapping an HTML5 <video> tag with the data inlined.
  """
  # Use a context manager so the file handle is released deterministically;
  # the previous `open(filename, 'rb').read()` never closed the file.
  with open(filename, 'rb') as f:
    video = f.read()
  b64 = base64.b64encode(video)
  tag = '''
  <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
  </video>'''.format(b64.decode())
  return IPython.display.HTML(tag)


# + [markdown] colab_type="text" id="9c_PH-pX4Pr5"
# Now iterate through a few episodes of the Cartpole game with the agent. The underlying Python environment (the one "inside" the TensorFlow environment wrapper) provides a `render()` method, which outputs an image of the environment state. These can be collected into a video.

# + colab={} colab_type="code" id="owOVWB158NlF"
def create_policy_eval_video(policy, filename, num_episodes=5, fps=30):
  """Record `policy` acting in eval_env for `num_episodes` and embed the mp4."""
  filename = filename + ".mp4"
  with imageio.get_writer(filename, fps=fps) as video:
    for _ in range(num_episodes):
      time_step = eval_env.reset()
      video.append_data(eval_py_env.render())
      while not time_step.is_last():
        action_step = policy.action(time_step)
        time_step = eval_env.step(action_step.action)
        video.append_data(eval_py_env.render())
  return embed_mp4(filename)

create_policy_eval_video(agent.policy, "trained-agent")

# + [markdown] colab_type="text" id="povaAOcZygLw"
# For fun, compare the trained agent (above) to an agent moving randomly. (It does not do as well.)
# + colab={} colab_type="code" id="pJZIdC37yNH4" create_policy_eval_video(random_policy, "random-agent")
tf_agents/colabs/1_dqn_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # How to Measure the Strength of Ties between Nodes? # # Introduction # This tutorial explores different methods to measure the connectedness and strength of social ties. Often times, we can observe social ties based on if people are friends with each other on popular social or professional networks such as Facebook or LinkedIn. However, people sometimes join the same network out of peer-pressure or reciprocation, so whether they are "friends" on social media may not be the most accurate representation of social ties. In other instances, we may not have access to social media data that help define people’s connections. In these circumstances, we can measure the connections between people based on common events they attended together. # In our previous Graph Library homework, we explored connecting vertices, or nodes, in a graph using adjacency matrix. Each edge, which is the line segment that connects two vertices in the graph, is weighted as one. However, not all ties should be considered equally. For instance, if one node is connected to many other nodes, the strength of its connection should perhaps be weighted less than the strength of connections from a node that are connected to only a few other nodes. The intuition is that in real life, if an individual is friends with a hundred people, the connection may not be as special as the connection with an individual who only has a few cherished friendships. # There are many applications for measuring the strength of social ties. We can understand social cohorts behaviors better, and build recommender systems based on the preferences of other individuals in the group who hold strong social ties. 
Within an organization, we can understand and predict the performance of employees based on the people they have strong connections with. Perhaps strong performers in an organization can bring up the performance of people they frequently interact with, and weak performers bring down the group as a whole. All of these hypotheses can be tested if we can measure the connectedness and strength of social ties. In addition, given two nodes that currently have no association, we may be able to predict the likelihood of a future connection between them. # # Tutorial Content # This tutorial will introduce a few metrics to measure the strength between social ties. Each metric will then be evaluated on whether they meet the conditions of eight axioms. We will first create a method to add edges. Unlike the add_edges() method we implemented in the previous Graph Library homework, the setup for this tutorial will be slightly different. Here is a toy example. In our graph, we will have people and events. The nodes in circles on the left represent people, while the nodes on the right in squares represent events. The line between the circle and square nodes represent the events they people attended. # ![title](graph.PNG) # Given the bipartite graph of person X event, we can use different metrics to infer tie strength. As we go, below are the sections we will cover. At the end, we will also plot a sample network graph of <NAME>'s _the Tempest_ using the algorithms we have constructed. 
# # * [Function Setup and Metric I: Common Neighbors](#setup) # # * [Metric II: Jaccard Index](#jaccard) # # * [Metric III: Delta](#delta) # # * [Metric IV and V: Adamic and Adar, and Linear](#adamic) # # * [Metric VI: Preferential Attachment](#preferential) # # * [Metrics Evaluation using Axioms](#axioms) # # * [Exploring the Network Graph in _the Tempest_](#tempest) # <a id='setup'></a> # # Function Setup and Metric I: Common Neighbors import numpy as np import collections import itertools # + class Graph: def __init__(self): """ Initialize the class. """ self.events = {} # key is event, value is the list of people who attended the event self.edges = {} # this is a dictionary with keys as a pair of nodes and value as number of # mutual events they attended together self.people = [] # a list of all nodes, or people self.ppl_events = {} # key is a node, or a person, value is the number of all the events the person # attended self.edges_events = {} # a dictionary with keys as a pair of nodes and value as a list of common # events the pair of people attended together def add_edges(self, events, people): """ Add edges to the network. 
Args: events: a dictionary where key is event, value is the list of people who attended the event people: a list of all people who are in this graph """ self.events = events self.people = sorted(people) for person in self.people: # initialize the dictionary self.ppl_events[person] = 0 all_combo = list(itertools.combinations(self.people, 2)) # get all unique combinations of a pair of nodes for i in all_combo: self.edges[i] = 0 #initalize the dictionary self.edges_events[i] = [] for event in self.events: list_att = self.events[event] # list of people who attended event for j in list_att: self.ppl_events[j] += 1 #increment the event count for everyone who is in the event for pair in all_combo: if pair[0] in list_att and pair[1] in list_att: self.edges[pair] += 1 #increment the event count for the pair of nodes self.edges_events[pair].append(event) #add the event to the list of events for the pair # test code with our toy example test_events = {'P': ['a', 'b'], 'Q': ['b', 'c', 'd'], 'R': ['c', 'd', 'e']} test_people = ['a', 'b', 'c', 'd', 'e'] G = Graph() G.add_edges(test_events, test_people) print G.ppl_events print G.edges print G.edges_events # - # Now we have created the edges in the graph, let’s start with some of the basic metrics to analyze strength of connections between nodes. # # The first one is Common Neighbors, which is just the number of common events individual u and individual v attended. Formally, Common Neighbors tie strength between individual u and v is defiend as $TS(u, v) = |E(u) \cap E(v)|$, E(u) and E(v) represents the events u attended and v attended respectively (or the neighborhood v and u are in from the Person X Event graph). Our add_edges method satisfies the Common Neighbors already. # <a id='jaccard'></a> # # Metric II: Jaccard Index # The second method is Jaccard Index, which takes into consideration of how “social” individual u and v are. 
# In other words, this metric solves the problem that two nodes could have many common neighbors solely because they have lots of neighbors, not because they have strong ties. The strength of ties is defined as the total number of events u and v both attended (intersection) divided by the union of events u and v attended. Formally, it is $TS(u,v)=\frac{|E(u) \cap E(v)|}{|E(u) \cup E(v)|}$

# +
def jaccard_index(edges, ppl_events):
    """Returns: the dictionary of tie strength calculated based on Jaccard Index.

    Args:
        edges: dict mapping a (u, v) pair to the number of events both
            attended together, i.e. |E(u) & E(v)|.
        ppl_events: dict mapping each person to the total number of events
            that person attended, i.e. |E(u)|.

    The Jaccard index is intersection over UNION.  By inclusion-exclusion
    |E(u) u E(v)| = |E(u)| + |E(v)| - |E(u) & E(v)|; dividing by the plain
    sum |E(u)| + |E(v)| (as the original code did) understates the strength
    whenever the pair shares events, and breaks the baseline axiom: two
    people whose only event is one they both attend must get strength 1,
    not 0.5.
    """
    dict_jaccard = collections.OrderedDict()
    for pair in edges:
        union = ppl_events[pair[0]] + ppl_events[pair[1]] - edges[pair]
        dict_jaccard[pair] = float(edges[pair]) / float(union)
    return dict_jaccard

# test code with our toy example
print(jaccard_index(G.edges, G.ppl_events))
# -

# <a id='delta'></a>
# # Metric III: Delta
# Delta is a measure of tie strength where the strength increases with the number of events. It is formally defined as TS(u,v) = $\sum_{P\in E(u) \cap E(v)} \frac{1}{| P | \choose 2}$, where the number of people that attended event P can be denoted as |P|. If |P| is small, then $| P | \choose 2$ is smaller and the fraction of a smaller number leads to a larger result. This metric matches our intuition that two people are more likely to meet or be introduced at a smaller event.
# +
def delta(edges_events, events):
    """Returns: the dictionary of tie strength calculated based on Delta.

    Delta is TS(u, v) = sum over each common event P of 1 / C(|P|, 2), so
    smaller events contribute more tie strength than larger ones.

    Args:
        edges_events: dict mapping a (u, v) pair to the list of events the
            two people attended together.
        events: dict mapping an event to the list of people who attended it.
    """
    dict_delta = collections.OrderedDict()
    for pair in edges_events:
        # Sizes |P| of every event this pair attended together.
        list_delta = [len(events[event]) for event in edges_events[pair]]
        if len(list_delta) == 0:  # no common events
            dict_delta[pair] = 0.0
        else:
            # C(h, 2) = h * (h - 1) / 2.  The original materialised every
            # pair via itertools.combinations just to count them (O(h^2)
            # time and memory); the closed form gives identical values.
            list_perm = [h * (h - 1) // 2 for h in list_delta]
            dict_delta[pair] = np.sum(1 / np.array(list_perm).astype(float))
    return dict_delta

# test code with our toy example
print(delta(G.edges_events, G.events))
# -

#
# <a id='adamic'></a>
# # Metric IV and V: Adamic and Adar, and Linear
# Adamic and Adar is sometimes also referred to as Frequency-Weighted Common Neighbors. This metric refines the simple counting of neighbors by weighting rare events more heavily. Similar to Delta, this metric is also built upon the intuition that two people are more likely to meet or be introduced at a smaller event. Adamic and Adar is formally defined as $\sum_{P\in E(u) \cap E(v)} \frac{1}{log|P|}$, where |P| again represents the number of people in event P.
#
# Linear is also a slight variation from Delta and Adamic and Adar. Linear is defined as $\sum_{P\in E(u) \cap E(v)} \frac{1}{|P|}$.
# +
def adamic_adar(edges_events, events):
    """Returns: the dictionary of tie strength calculated based on Adamic and Adar.

    TS(u, v) = sum over each common event P of 1 / log(|P|): rare (small)
    events are weighted more heavily than large ones.
    """
    dict_ada = collections.OrderedDict()
    for pair, shared in edges_events.items():
        sizes = [len(events[event]) for event in shared]
        if not sizes:  # no common events
            dict_ada[pair] = 0.0
        else:
            dict_ada[pair] = np.sum(1 / np.log(np.array(sizes)))
    return dict_ada


def linear(edges_events, events):
    """Returns: the dictionary of tie strength calculated based on Linear.

    TS(u, v) = sum over each common event P of 1 / |P|.
    """
    dict_linear = collections.OrderedDict()
    for pair, shared in edges_events.items():
        sizes = [len(events[event]) for event in shared]
        if not sizes:  # no common events
            dict_linear[pair] = 0.0
        else:
            dict_linear[pair] = np.sum(1 / np.array(sizes).astype(float))
    return dict_linear

# test code with our toy example
print(adamic_adar(G.edges_events, G.events))
print(linear(G.edges_events, G.events))
# -

# <a id='preferential'></a>
# # Metric VI: Preferential Attachment
# Preferential Attachment is a popular concept in social networks that people with many connections tend to create more connections in the future as well. A crude way of interpreting this is the popular belief that the rich tend to hang out with the rich and get richer. In our problem setup, we interpret this as the more events a person attend, the more neighborhoods the person is involved in and the stronger the person’s ties are. This is clearly not always the case in real life, which I will go into more details at the next section when we talk about the intuition for the axioms and whether the metrics meet the axioms. For now, let’s formally define Preferential Attachment as TS(u,v) = |E(u)| * |E(v)|.
# +
def preferential(edges, ppl_events):
    """Returns: the dictionary of tie strength calculated based on Preferential Attachment.

    TS(u, v) = |E(u)| * |E(v)| -- the product of the two people's event
    counts, regardless of how many events they actually share.
    """
    dict_pref = collections.OrderedDict()
    for pair in edges:
        u_count = ppl_events[pair[0]]
        v_count = ppl_events[pair[1]]
        dict_pref[pair] = float(u_count) * float(v_count)
    return dict_pref

# test code with our toy example
print(preferential(G.edges, G.ppl_events))
# -

# <a id='axioms'></a>
# # Metrics Evaluation using Axioms
# Now that we have defined the metrics, we will discuss some of the axioms and the intuitions that we hope the tie strength metrics would satisfy.
#
# Axiom 1: Isomorphism.
# Intuition: tie strength between u and v should not depend on the labels of u and v, but only on the link structure.
#
# Axiom 2: baseline.
# Intuition: if there are no events, i.e. the graph is empty, then the tie strength between every pair u and v should be 0. If there are only two people u and v and a single event which u and v attend, then tie strength is 1.
#
# Axiom 3: frequency (more events create stronger ties).
# Intuition: all other things being equal, the more common events u and v attend, the stronger their tie strength.
#
# Axiom 4: intimacy (smaller events create stronger ties).
# Intuition: all else being equal, the fewer attendees there are to an event, the stronger the tie between a pair of attendees to this event.
#
# Axiom 5: popularity (larger events create more ties).
# Intuition: if event P has more attendees than event Q, then the TOTAL tie strength created by P is more than that created by Q.
#
# Axiom 6: conditional independence of vertices.
# Intuition: the tie strength of u to other people does not depend on events that u does not attend; it only depends on the events that u attends.
#
# Axiom 7: conditional independence of events.
# Intuition: the increase in tie strength between two people u and v due to an event P does not depend on other events, but only on existing tie strength between u and v.
#
# Axiom 8: submodularity.
# Intuition: the marginal increase in tie strength of u and v due to an event P is at most the tie strength between u and v if Q was their only event. # # How do the metrics we introduced earlier meet these axioms? # # ![title](table.PNG) # We can see that most of our metrics meet all of the intuitions we hope the measurement of tie strength would satisfy. Some of the metrics, namely Jaccard Index and Preferential Attachment, do not satisfy all of the axioms. It makes sense that Jaccard Index does not satisfy Axiom 6, 7 and 8, as the metric depends on not only the events u and v both attend, but also the events that u and v attended separately. Preferential Attachment failed Axiom 3 because regardless of the number of common events u and v attend, as long as u and v attend sufficient number of events, even if they never cross path, they will have a higher tie strength. However, it does not indicate that Jaccard Index and Preferential Attachment should not be used when evaluating tie strength. Preferential Attachment, for instance, is a popular metric for link predictions in social networks. It follows the assumption that users with many friends tend to create more connections in the future. Therefore, we should choose metrics according to which aspect of the problem we want to address. For one of my other course project, we used Jaccard Index to measure the strength of ties between company employees and use the measured tie strength to test if high performers tend to connect more with other high performers, and if low performers tend to be around other low performers. We decided to use Jaccard Index because it takes into consideration of how “social” people are, or in an organization setting, how many meetings or events people tend to be in due to job requirement. # # Due to the confidentiality of the organization data I was working with, I will not be demonstrating my analysis from the organization data. 
However, in the next section, we will use Shakespeare’s _the Tempest_ to test out our metrics. # # <a id='tempest'></a> # # Exploring the Network Graph in _the Tempest_ # test code with the Tempest # a1s1 stands for Act I Scene I etc. tempest_events = {'a1s1': ['Mariners', 'Alonso', 'King of Naples', 'Sebastian', 'Antonio', 'Gonzalo', 'Boatswain', \ 'Ferdinand', 'Francisco', 'Trinculo', 'Stephano'], \ 'a1s2': ['Prospero', 'Miranda', 'Ariel','Caliban', 'Ferdinand'], \ 'a2s1': ['Alonso', 'Sebastian', 'Antonio', 'Gonzalo', 'Francisco', 'Ariel'],\ 'a2s2': ['Caliban', 'Trinculo', 'Stephano'],\ 'a3s1': ['Ferdinand', 'Miranda', 'Prospero'], \ 'a3s2': ['Caliban', 'Trinculo', 'Stephano', 'Ariel'], \ 'a3s3': ['Alonso', 'Sebastian', 'Antonio', 'Gonzalo', 'Prospero', 'Ariel'], \ 'a4s1': ['Prospero', 'Ferdinand', 'Miranda', 'Ariel', 'Iris', 'Juno', 'Ceres', 'Caliban', 'Trinculo',\ 'Stephano'], \ 'a4s2': ['Prospero', 'Ariel', 'Alonso', 'Antonio', 'Sebastian', 'Gonzalo', 'Ferdinand', 'Miranda', \ 'Boatswain', 'Mariners', 'Caliban', 'Trinculo', 'Stephano']} tempest_people = ['Mariners', 'Alonso', 'King of Naples', 'Sebastian', 'Antonio', 'Gonzalo', 'Boatswain', \ 'Prospero', 'Miranda', 'Ariel', 'Caliban', 'Ferdinand', 'Francisco', 'Trinculo', 'Stephano',\ 'Iris', 'Juno', 'Ceres'] G_t = Graph() G_t.add_edges(tempest_events, tempest_people) print 'ppl_events: ', G_t.ppl_events print 'edges: ', G_t.edges print 'edges_events: ', G_t.edges_events # We will try to visualize the Tempest network using Adamic and Adar, and Jaccard Index. We will start with Jaccard Index. dict_ji = jaccard_index(G_t.edges, G_t.ppl_events) print dict_ji # I decided to normalize the weights of edges then multiply by a constant, in this case I chose 200, and use this as the weight for the networkx plotting function later. The thickness of the edges indicate the strength of the ties. 
list_ji_values = dict_ji.values()
arr_normalized = np.array(list_ji_values)/float(sum(np.array(list_ji_values)))*200.0
print(arr_normalized)

# +
try:
    import matplotlib.pyplot as plt
except:
    raise
import networkx as nx
# %matplotlib inline

plt.figure(figsize=(20, 12))
# BUG FIX: `import matplotlib.pyplot as plt` binds only the name `plt`, so
# the original bare `matplotlib.rcParams[...]` raised a NameError here;
# reach rcParams through plt instead.
plt.rcParams['figure.figsize']

G_nx = nx.Graph()
for pair in dict_ji.keys():
    G_nx.add_edge(pair[0], pair[1])

pos = nx.spring_layout(G_nx, k=0.8)  # positions for all nodes

# nodes
nx.draw_networkx_nodes(G_nx, pos, node_size=700, node_color='yellow')

# edges
# NOTE(review): `dict_ji.keys()[i]` relies on Python 2 returning a list;
# under Python 3 this would need `list(dict_ji.keys())[i]` -- confirm the
# target interpreter before porting.
for i in range(len(dict_ji.keys())):
    test_list = []
    test_list.append(dict_ji.keys()[i])
    nx.draw_networkx_edges(G_nx, pos, edgelist=test_list,
                           width=arr_normalized[i], edge_color='blue')

# labels
nx.draw_networkx_labels(G_nx, pos, font_size=20, font_color='k',
                        font_family='sans-serif')

plt.axis('off')
plt.show()
# -

# Let's see how different the network graph may be for Adamic and Adar.

dict_ad = adamic_adar(G_t.edges_events, G_t.events)
list_ad_values = dict_ad.values()
arr_normalized_ad = np.array(list_ad_values)*3.0
#print arr_normalized_ad

# +
# %matplotlib inline
plt.figure(figsize=(20, 12))
# Same NameError fix as the Jaccard plot above: go through plt.rcParams.
plt.rcParams['figure.figsize']

G_ad = nx.Graph()
for pair in dict_ad.keys():
    G_ad.add_edge(pair[0], pair[1])

pos = nx.spring_layout(G_ad, k=0.8)  # positions for all nodes

# nodes
nx.draw_networkx_nodes(G_ad, pos, node_size=700, node_color='yellow')

# edges
for i in range(len(dict_ad.keys())):
    test_list = []
    test_list.append(dict_ad.keys()[i])
    nx.draw_networkx_edges(G_ad, pos, edgelist=test_list,
                           width=arr_normalized_ad[i], edge_color='blue')

# labels
nx.draw_networkx_labels(G_ad, pos, font_size=20, font_color='k',
                        font_family='sans-serif')

plt.axis('off')
plt.show()
# -

# We can see some noticeable differences in the two graphs. For example, Iris, Juno and Ceres which are three spirits that only occurred in one scene in _the Tempest_ are weighted much heavier in Jaccard Index than Adamic and Adar based on the thickness of the edges.
This is because jaccard index takes into consideration of all many events two individuals attended together out of the union set of events they attend. Iris, Juno and Ceres only occurred once and the only scene they were in they were together, so it makes sense for the three minor characters to have strong ties in Jaccard Index. On the other hand, Adamic and Adar’s tie strength increases with the number of events. Iris, Juno and Ceres only occurred in one scene, so it makes sense for the three of to have low weights. # # by: <NAME> # References: # # <NAME>, <NAME>. Measuring Tie Strength in Implicit Social Networks. Retrieved from http://eliassi.org/papers/gupte-websci12.pdf # # Link Prediction Algorithms. Retrieved from http://be.amazd.com/link-prediction/ # # Weighted Graph. Retrieved from https://networkx.github.io/documentation/networkx-1.10/examples/drawing/weighted_graph.html # # The Tempest Summary. Retrieved from http://www.sparknotes.com/shakespeare/tempest/section10.rhtml #
2016/tutorial_final/95/tutorial_luanj.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Lambda School Data Science # # *Unit 2, Sprint 2, Module 3* # # --- # # Cross-Validation # # # ## Assignment # - [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset. # - [ ] Continue to participate in our Kaggle challenge. # - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV. # - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # # You won't be able to just copy from the lesson notebook to this assignment. # # - Because the lesson was ***regression***, but the assignment is ***classification.*** # - Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification. # # So you will have to adapt the example, which is good real-world practice. # # 1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) # 2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...` # 3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values) # 4. 
If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)) # # # # ## Stretch Goals # # ### Reading # - <NAME>, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation # - <NAME>, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107) # - <NAME>, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation # - <NAME>, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb) # - <NAME>, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) # # ### Doing # - Add your own stretch goals! # - Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details. # - In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives. 
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6: # # > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ... # # The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? # # ### BONUS: Stacking! # # Here's some code you can use to "stack" multiple submissions, which is another form of ensembling: # # ```python # import pandas as pd # # # Filenames of your submissions you want to ensemble # files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv'] # # target = 'status_group' # submissions = (pd.read_csv(file)[[target]] for file in files) # ensemble = pd.concat(submissions, axis='columns') # majority_vote = ensemble.mode(axis='columns')[0] # # sample_submission = pd.read_csv('sample_submission.csv') # submission = sample_submission.copy() # submission[target] = majority_vote # submission.to_csv('my-ultimate-ensemble-submission.csv', index=False) # ``` # + # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/' # !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # + import pandas as pd # Merge train_features.csv & train_labels.csv train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), 
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')) # Read test_features.csv & sample_submission.csv test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv') sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv') # - # make a compy of train in case it gets corrupted train_cp = train.copy() # + import numpy as np def engineerFeatures(df): df["date_recorded_datetime"] = pd.to_datetime(df["date_recorded"]) df["date_recorded_epoch"] = df["date_recorded_datetime"].astype(np.int64) df["date_recorded_month"] = df["date_recorded_datetime"].apply(lambda x: x.month) df["date_recorded_year"] = df["date_recorded_datetime"].apply(lambda x: x.year).astype(np.object) df["construction_year_na"] = (df["construction_year"] == 0).apply(lambda x: 1 if x else 0) df["age"] = df["date_recorded_year"] - df["construction_year"].apply(lambda x: x if x > 0 else 3000) df["age"] = df["age"].apply(lambda x: x if x > 0 else df["age"].median()) df["nan_count"] = df.replace({"unknown": np.NaN}).isnull().sum(axis=1) df["age_brackets"] = pd.cut(df["age"], [0, 1, 4, 16, 999], labels=[str(x) for x in range(4)]).astype(np.object) for col in ["funder", "wpt_name", "region", "management", "source", "installer"]: df["age_brackets_by_"+col] = df["age_brackets"] + "_" + df[col] engineerFeatures(train) engineerFeatures(test) engineered = train.copy() # - engineered.columns # + # re-import everything so we have it in one place import numpy as np from sklearn.model_selection import train_test_split import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.compose import make_column_transformer from sklearn.feature_selection import SelectKBest from matplotlib import 
pyplot as plt from sklearn.metrics import accuracy_score train = engineered.copy() target = "status_group" bad_cols = [ "id", "scheme_name", "date_recorded", "date_recorded_datetime", "construction_year", "management_group", "payment_type", "quantity_group", "source_type", "waterpoint_type_group", "extraction_type_group", "extraction_type_class", "region_code", "district_code", "wpt_name", "management", ] features = train.columns.drop([target] + bad_cols) # + from sklearn.impute import SimpleImputer from sklearn.linear_model import RidgeCV from sklearn.tree import DecisionTreeRegressor class CustomImputer(SimpleImputer): def __init__(self): self.__col_to_reg = {} def fit(self, *args, **kwargs): df = args[0] self.__col_to_reg = {} for col in df.columns: features = df.columns.drop(col) target = col reg = DecisionTreeRegressor() reg.fit(df[features], df[col]) self.__col_to_reg[col] = reg return df def transform(self, *args, **kwargs): df = args[0] df = df.copy() for col in df.columns[df.isna().any()]: features = df.columns.drop(col) reg = self.__col_to_reg[col] df[col] = reg.predict(df[features])*df[col].isnull().astype(np.int64) + df[col]*(~(df[col].isnull()).astype(np.int64)) return df def fit_transform(self, *args, **kwargs): df = args[0] self.fit(df) return self.transform(df) # - def runPredictions(): """ Split the data, make a pipeline, fit the pipeline, make predictions, print the feature importance @returns: pipeline """ global train, features, score, X_val, y_val high_card = [] low_card = [] for col in train[features].select_dtypes(include="object"): if len(train[col].value_counts()) > 50: high_card.append(col) else: low_card.append(col) # split data trn, val = train_test_split(train) X_train = trn[features] y_train = trn[target] X_val = val[features] y_val = val[target] # make the pipeline pipeline = make_pipeline( make_column_transformer( (ce.OrdinalEncoder(), high_card), (ce.OneHotEncoder(), low_card), ), SimpleImputer(), StandardScaler(), 
        RandomForestClassifier(random_state=511, n_jobs=-1, n_estimators=120),
    )
    pipeline.fit(X_train, y_train)
    # make predictions
    y_pred = pipeline.predict(X_val)
    score = accuracy_score(y_pred, y_val)
    print(f"""
    Model accuracy score: {score}
    """)
    # make visualizations
    model = pipeline.named_steps["randomforestclassifier"]
    #encoder = pipeline.named_steps["ordinalencoder"]
    #encoded_cols = encoder.transform(X_val).columns
    #importances = pd.Series(model.feature_importances_, encoded_cols)
    #fig, ax = plt.subplots(figsize=(10, 40))
    #importances.sort_values().plot.barh(color="gray", ax=ax)
    #fig.show()
    return pipeline


pipeline = runPredictions()


# NOTE(review): this second definition shadows the runPredictions defined
# above (the notebook compares the two pipelines by running each cell once);
# only this version -- ordinal encoding + CustomImputer -- is in scope below.
def runPredictions():
    """
    Split the data, make a pipeline (ordinal encoding + CustomImputer),
    fit the pipeline, make predictions, print the accuracy score
    @returns: pipeline
    """
    global train, features, score, X_val, y_val
    # split data
    trn, val = train_test_split(train)
    X_train = trn[features]
    y_train = trn[target]
    X_val = val[features]
    y_val = val[target]
    # make the pipeline
    pipeline = make_pipeline(
        ce.OrdinalEncoder(),
        CustomImputer(),
        StandardScaler(),
        RandomForestClassifier(random_state=511, n_jobs=-1, n_estimators=120),
    )
    pipeline.fit(X_train, y_train)
    # make predictions
    y_pred = pipeline.predict(X_val)
    score = accuracy_score(y_pred, y_val)
    print(f"""
    Model accuracy score: {score}
    """)
    # make visualizations
    model = pipeline.named_steps["randomforestclassifier"]
    #encoder = pipeline.named_steps["ordinalencoder"]
    #encoded_cols = encoder.transform(X_val).columns
    #importances = pd.Series(model.feature_importances_, encoded_cols)
    #fig, ax = plt.subplots(figsize=(10, 40))
    #importances.sort_values().plot.barh(color="gray", ax=ax)
    #fig.show()
    return pipeline


pipeline = runPredictions()

# +
# use RandomSearchCv
from sklearn.model_selection import RandomizedSearchCV
from sklearn.feature_selection import f_regression, f_classif, SelectKBest

trn, val = train_test_split(train)
X_train = trn[features]
# training labels are encoded to ints for the search ...
y_train = trn[target].replace({"functional": 0, "functional needs repair": 1,
                               "non functional": 2})
# ... while y_val stays as strings here; it is only re-encoded at the final
# pipeline.score(...) call at the bottom of the notebook
X_val = val[features]
y_val = val[target]

pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    CustomImputer(),
    StandardScaler(),
    # NOTE(review): f_regression is a regression scorer; for this 3-class
    # target f_classif looks like the intended choice -- confirm
    SelectKBest(f_regression),
    RandomForestClassifier(n_jobs=-1),
)

# search over the number of selected features and the forest size
param_distributions = {
    "selectkbest__k": range(1, len(X_train.columns)),
    "randomforestclassifier__n_estimators": range(80, 500),
}

search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=120,
    cv=5,
    scoring="accuracy",
    verbose=10,
    return_train_score=True,
    n_jobs=-1,
)

search.fit(X_train, y_train)
# -

search.best_params_

search.best_score_

pipeline = search.best_estimator_

pipeline.score(X_val, y_val.replace({"functional": 0, "functional needs repair": 1, "non functional": 2}))
module3/LS_DS_223_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # ========================================================= # Gaussian Processes regression: basic introductory example # ========================================================= # # A simple one-dimensional regression example computed in two different ways: # # 1. A noise-free case # 2. A noisy case with known noise-level per datapoint # # In both cases, the kernel's parameters are estimated using the maximum # likelihood principle. # # The figures illustrate the interpolating property of the Gaussian Process # model as well as its probabilistic nature in the form of a pointwise 95% # confidence interval. # # Note that the parameter ``alpha`` is applied as a Tikhonov # regularization of the assumed covariance between the training points. # # + print(__doc__) # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>>s # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C np.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) # ---------------------------------------------------------------------- # First the noiseless case X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T # Observations y = f(X).ravel() # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instantiate a Gaussian Process model kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2)) gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) 
y_pred, sigma = gp.predict(x, return_std=True) # Plot the function, the prediction and the 95% confidence interval based on # the MSE plt.figure() plt.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$') plt.plot(X, y, 'r.', markersize=10, label='Observations') plt.plot(x, y_pred, 'b-', label='Prediction') plt.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.ylim(-10, 20) plt.legend(loc='upper left') # ---------------------------------------------------------------------- # now the noisy case X = np.linspace(0.1, 9.9, 20) X = np.atleast_2d(X).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise # Instantiate a Gaussian Process model gp = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2, n_restarts_optimizer=10) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, sigma = gp.predict(x, return_std=True) # Plot the function, the prediction and the 95% confidence interval based on # the MSE plt.figure() plt.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$') plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label='Observations') plt.plot(x, y_pred, 'b-', label='Prediction') plt.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.ylim(-10, 20) plt.legend(loc='upper left') plt.show()
sklearn/sklearn learning/demonstration/auto_examples_jupyter/gaussian_process/plot_gpr_noisy_targets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Warehousing and Data Mining # # ## Labs # # ### Prepared by <NAME> # # #### Contact Information # SCIT ext. 3643 # # <EMAIL> # # <EMAIL> # # # #### Recommended Text # # R for Data Science - http://r4ds.had.co.nz/ # # Python for Data Science - https://jakevdp.github.io/PythonDataScienceHandbook/ # # ### Week 1 # # - Introduction # > Language Syntax # # > Data Types # # > Operators # # > Program Control Structures # # > Functions # # > Data Structures # # > Lists, DataFrames # # > Reading and Writing Files # 4**2 8^2
week1/Tutorial 1 - Introduction to Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBwgQEhUQBxESFhEXERASEBAPFhIVGBgdFRUXFhUWFxQYHTQgJBolGxgXITIhJSkrLi4vFyAzODMsNygtLisBCgoKDg0OGxAQGyslICUrLTcwNzc3LTEwMi0rNysvLS03KzgtLS0tLS0tLzYtNS03NSsrLS01LS0tLTcrLSsrNv/AABEIAOEA4QMBIgACEQEDEQH/xAAbAAEAAwEBAQEAAAAAAAAAAAAABAUGAwEHAv/EAEAQAAICAQMDAgIHBQQJBQAAAAABAgMEBRESBhMhMUFRYRQVIlJxgaEHMjORsSNCYsFyc5KTorLR4fAWNENVg//EABoBAQADAQEBAAAAAAAAAAAAAAABAgUEAwb/xAAmEQEAAgICAQIGAwAAAAAAAAAAAQIDEQQFIRJREzEyQVJxFCIz/9oADAMBAAIRAxEAPwD7gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3BF1G6UK5Sj67ePz8FMmSMdJtP2TEbnSRuj3dGXx8XMuTlW9/PvJo7fVWf8AH/iZk17PLaPVXFMw6JwVidTaGi3Q5IzNOBmy34P0bi95P1R+/qrP+K/2mTHZZpjcYZR8Gv5Q0fJDdGZrwMyTag/R7P7T+R0+qtQ+K/2mI7LNMbjDKfg0/OGickEzMRwc1uUYPzHbf7T91uv0PVbn0P7fLb4S8p/mRHbWjzfHMR7n8eJ+m0TLTgi4GZC2O8fD918CUa2PJXJWLVnxLnmJidSAAugAAAAAAAAAAAAAAAAAAAAACBrP8KX5f1RPIGtfwZfl/VHNzP8AC/6lfH9cftx6e/hv/S/yRaMy+FqVlS4wSflvySPr2/7sf1Mnidpx8eCtLT5iHRl497WmYhaab6T/ANbMmGao1a2G/GMfMnL39zt9e3/dj+p64e341aREyrbjZN/JZ4H71n+sf9ETDN06tbFycYr7UuT9fgkdPr2/7sf1GHtuNWupktxskz8lpifxbvxh/wAiJF9UJpxsW6aM/Xq1sZSkoreTTfr7LY6fXt/3Y/qVp2vF9E1tPv8AYnjZd7iHLTZSqv4ezbg/8jTGUxbHO+Mpermn4NWX6a/qx2iPlE+DlRq0b9gAGy5gAAAAAAAAAAAAAAAAAAAAAIOsp9qW3y/qiceSin4fp8DyzY/iY5p7wms6nbGVwlJpQ9X6I7vEf9yUHJesU/P5eC8lpdK5OjdScZJefC3XwK+jDvbhF18eMt5WfE+VnqrY/wCt43toTyfV5hVA0ktHxW23v5bfq/cfUmJ/i/myJ6Pkb8aWjmUZs9Sb9DR/UmJ/i/mz9V6TixalHfdeVuxHR8jfnRPMprwovocvRyhy+5v5/D09SM014l6rwyylhZPmvt+XPfu/5/8AnxLOzScab5Wb7vbfZkx1Vsu/RGphWOT6fq8qPTU+7Db7yNaRMXTset71Lz8X5ZLN3rOHbjY5rf5zLlz5YyW3AADSeAAAAAAAAAAAAAAAAAAAAAAAAAeHoAAAAAAAAAAAAAAAAAAAAAAAAAABgeSkl5l6erf/AHM3k9fdJVy4W5+Py32+zNSW/wAOUfBS9TK/VM/6qrnKGJTVC/
PdbcZWc/4dPJeUmvtfz+CNbgaDpFEO3h41MYbeihHz+La3b/EDrpmraflQ7ml3V2w3250yjNfg2n4fyZNMwujMCrLhmaO3jyTavqoSVd0dnspw/dT32e6Xt+DO/VHVFWE66qap35Vu6oxaduUtvWUm/EYL3kwNADD5HVXVGLHva/pkVjLzZZiXq6da+9Kvit0vfZ+DXYufi21Rvosi6ZQ7kbE1x47b8t/hsBKIGpavhY8qoZkmpXWxppSjKW82m0nsvHhPy/BmaOq9dzG59K4MbMZSajk5drpVmz2brgotuO6/eKXX9fuvytOx9Tx54+VDUaZyqk1OMouMlzqtSSlHfZPwmt/KA+npgz/U/U1eG66seqd+Va32MWppSlt6ylJ+IwXvJlTf1R1Rirva9pkVjrzZZiXq6da95Sr4rdL1bTA12dm41EJW5s4writ5zm9kl6bt/Ao1170l/wDYYv8AvYL9WQv2iZdN2jZV2HJShPF51zj7ptNNE7F0/ppUQlk1YSXag5SnChf3Vu22gL3FyqLYqeJOM4SW8Z1yjKL+aknsdjBfsyjUrc96Tv8AV7yYfQ9t+DfBq517/wB3lt6eCz1rq62GQ8PQcWWVkxip3RjNVV1J7bc7WmlJp7qO27A1QM11F1T9GnXjYNEsjNsjyhjVtLZL1nZY1tGG/jdorL+ruoMTazqfTlXjNpSvxLlf292lvZDinx+aA0+va5p2DV39Wnwr5Rjy2lLzL0SjFb/H+RPqshJKVb3i0mmvdNbpme6t1XTq8J5OZTDJo5Y+0PsSjLu2QjCS5Lb+8n+BM6h1/CwKO/mctt4wrqrXKc5S/drhH3bAtwYW3rXX6k7tQ0e+GKlynZG2E7Ix+9KhR38Ly1vujV1arhSoWXGyPYdXe7r8Ljtycn+QE8GIo6q6lyl3endNjLG/+O3Lu7MrF96NfFtRfs36lt011PHLlOjLpnj5dSTuxrWm0nulOE14lBteqA0IMxr/AFXZRcsTR8aeVl8O5KmEowjCPs7LWmo7+y2f6kfP6m1t3yxdDw67baqqbMt23duEHYm41RlxfKXh+dkv12DXgr9Cz8jIqVmZRZRZvKMqbdm04vZtNesX7PxuiwAAAAGABhMCyOLreTHL8LNox7ceT9JSoj251r/FsuW34m7KnqTp/Bz6+1np+HzrsrfGyuS9JwmvKZQw0LrepcMXVapwX7ssnGTml85Rl5fzYGuuyaYuMbZRUpPjBSaTk9t9op+r29kY7TeH17l/S9u59Bxvou/r2+X9tx//AE47/kS9I6NcLo5mvZM8vLj/AAp2RUK6t/XtUrwn8yb1L0zXluFtFs6MqrfsZVWzlHf1jKL8Sg/H2WBeW8OL7u3HZ8uW223vvv7bHyjA7/8A6by/oPLhvn/R/Xftd6Xp8uPL9TSX9K9S5K7OvapyxmtrK8WiNM7F7xlYpbpP3SXnc1uLg41dUaKIRVUYKEa0vCjtttt8NgI/T7xfo1P1ft2ezV2uO223FbenyM313VVPL0uMEncs/nFL1VcK5Stf+juo/oeU9H61iNx6Vz+zjuTksXIqV8YN+X25tpqO/niWOgdKyoueZquRPJzJR4d6xKEYRfrCqpeIrx592BmsvE1WzXMj6Bk10WLCo7MraVdyrbXcUN5LbaxefxRd2aP1hs+7qmPx2fJSwobbe+/9p6bFj1L01Xluu7HtnRlVcuxlVbNx5fvRlF+JQfjeL+BUX9L9T5K7WuapF478WQxKFTOxe8ZWOT2T99gKjUdNjidOZFVGTDIgqrp1XVJKLU7eW0dpNbJt+5a4X7NOjp1QlZhw5OuEnJTuT3cU291P5suNb6bpuwLNOweNVcqVTXsm1FJr233f8ynr6f65ilGGr0qKSil9Dr8JeF7/AAA4aNj2aZqdenYVs54d2PbbXTbJzdEq354Sfng0/T4skfsvhyqyr7PNlmpZrnL3fCxwit/gktkT+muk5Y1s8vU8ieTmTioSvsSiowXnhX
WntGO/lkvpHQp4NNlVk1Pnk5F/JJrbuzc+O2/tv6gUnS3D621T6T/H3xO3v69pV+OPy323+expeobcSOLfLUduyqLe7y8pxcWmtvn6be5XdSdKxyrIZODdPHzK1xryakpbx9eFkH4lH5Mq59GaxlOK6s1Dv0xkpLGopjRCbXlOxpttf4QMnj3WT6Wrdzbauqhu/hDUFGK/JJL8jXdYRctQ0iEv3e/lWbfOFS4v8uT/AJn5r6GvWlfVnehy73d7qjLj/wC57/Hjvv6fZ9S61fQbL8rDyYzSWNK9yg02592EYrZ7+Ntt/cC4vgnCSl5TjJP815Pkt/dfS8VW2oqShY152rjluMvH3VFefkn6ep9dlFtNfFNFD0306sbBjg5rjbFK6Nj47RkrJyk1xb+EtgK7G0fq3hH6NquP2+EeHHChtx2XHb+09NjhgaDqEdRpyNW1Gi2+ui2PYrpjTOVc/VvaxtxU9nvsdMfpXqPFj2en9SUcdeK6suhXyqX3Y2ck2l7KXwLLpvpeOLOeRmXTyMuxKNuTbsnsvKhCC8Rhv7ICr/Z3Dlfql9vmyWpXUufvwo+zXH8Evb5sla90/qcMiWf0vbCORKEIZGPem6r1Xvw3kvMJpNpSX/Xef0zoM8N5LnYp9/NvylsmuKse/F+fLXxIWq6D1E7p26JqTqhZtypvqjcoNJR3qba29PT4gWPSeuRzqO663XZGyym+mT3cLKpcZx39/Pv8y5KnpjQ6sGhUVSlN8p2W2z25WTm+U5y292y2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/9k=) # # Python Object-Oriented Programming (OOP) # # _<NAME>_ # ## Zaawansowana obiektowość # # ![](https://miro.medium.com/max/1000/1*eSgw4TrT3_5kUU3QFFW4qA.jpeg) # ### Zaawansowane kwestie OOP # #### Interfejsy i introspekcja # # Istnieje możliwość sprawdzania charakterystyk klas i obiektów w trakcie działania programu. # + class A: """<NAME>""" def __init__(self): super().__init__() self.a = "A" def fa(self): print("a:", self.a) class B: """<NAME>""" def __init__(self): super().__init__() self.b = "B" def fb(self): print("b:", self.b) class Pochodna(B, A): """Dziecko""" def __init__(self): super().__init__() # - # > ##### Funkcja wbudowana `issubclass()` # ```python # issubclass(class, classinfo) # ``` # > Zwraca `True`, jeśli *class* jest podklasą (bezpośrednią, pośrednią lub wirtualną) *classinfo*. Klasa jest traktowana jako podklasa sama w sobie. *classinfo* może być krotką obiektów klas, w którym to przypadku każdy wpis w *classinfo* zostanie sprawdzony. 
W każdym innym przypadku zgłaszany jest wyjątek `TypeError`. issubclass(Pochodna, A) issubclass(B, Pochodna) # > ##### Atrybut specjalny `__bases__` # ```python # class.__bases__ # ``` # > Krotka klas bazowych obiektu klasy. Pochodna.__bases__ # > ##### Funkcja wbudowana `isinstance()` # ```python # isinstance(object, classinfo) # ``` # > Zwraca `True`, jeśli argument *object* jest instancją argumentu *classinfo* lub jego (bezpośredniej, pośredniej lub wirtualnej) podklasy. Jeśli *object* nie jest obiektem danego typu, funkcja zawsze zwraca `False`. Jeśli *classinfo* jest krotką obiektów typu (lub rekurencyjnie innymi takimi krotkami), zwraca `True`, jeśli *object* jest instancją dowolnego z typów. Jeśli *classinfo* nie jest typem lub krotką typów i takimi krotkami, zgłaszany jest wyjątek `TypeError`. d = Pochodna() isinstance(d, A) # > ##### Funkcja wbudowana `getattr()` # ```python # getattr(object, name[, default]) # ``` # > Zwraca wartość nazwanego atrybutu *object*. Nazwa musi być ciągiem. Jeśli ciąg jest nazwą jednego z atrybutów obiektu, wynikiem jest wartość tego atrybutu. Na przykład `getattr(x, 'foobar')` jest równoważne z `x.foobar`. Jeśli nazwany atrybut nie istnieje, zwracana jest wartość *default*, jeśli została podana. W przeciwnym razie zostanie zgłoszony `AttributeError`. getattr(d, "a") getattr(d, "fa") d.a d.fa # > ##### Funkcja wbudowana `hasattr()` # ```python # hasattr(object, name) # ``` # > Argumentami są obiekt i łańcuch (ciąg). Wynik to `True`, jeśli ciąg jest nazwą jednego z atrybutów obiektu, lub `False`, jeśli nie. (Jest to realizowane przez wywołanie metody `getattr(object, name)` i sprawdzenie, czy wywołuje ona błąd `AttributeError`, czy nie). hasattr(d, "a") hasattr(d, "fa") # > ##### Emulowanie wywoływalnych obiektów # ```python # object.__call__(self[, args...]) # ``` # > Wywoływana, gdy instancja jest „wywoływana” jako funkcja; jeśli ta metoda jest zdefiniowana, `x(arg1, arg2, ...)` jest skrótem dla `x.__call__(arg1, arg2, ...)`. 
# > ##### Funkcja wbudowana `callable()` # ```python # callable(object) # ``` # > Zwraca `True`, jeśli argument *object* wydaje się być wywoływalny, `False`, jeśli nie. Jeśli zwraca `True`, nadal jest możliwe, że wywołanie się nie powiedzie, ale jeśli jest `False`, wywołanie *object* nigdy się nie powiedzie. Zauważ, że klasy są wywoływalne (wywołanie klasy zwraca nową instancję); instancje są wywoływalne, jeśli ich klasa ma metodę `__call__()`. # > # > Nowość w wersji 3.2: Ta funkcja została najpierw usunięta w Pythonie 3.0, a następnie przywrócona w Pythonie 3.2. callable(getattr(d, "a", None)) callable(getattr(d, "fa", None)) # #### Składowe prywatne klasy # # Wszystkie atrybuty i metody zdefiniowane w klasie są publiczne. Aby ukryć atrybut lub metodę przed dostępem spoza klasy (składowa private) należy jej nazwę poprzedzić dwoma podkreślnikami (np. `__atrybut`). class KontoBankowe: def __init__(self, nazwa, stan=0): self.nazwa = nazwa self.__stan = stan def info(self): print("nazwa:", self.nazwa) print("stan:", self.__stan) def wyplac(self, ilosc): self.__stan -= ilosc def wplac(self, ilosc): self.__stan += ilosc jk = KontoBankowe("Kowalski", 1000) print(jk.stan) # Błąd! print(jk.__stan) # Błąd! jk.info() # OK print(jk._KontoBankowe__stan) # OK print(jk.name) # #### Składowe statyczne # # Składowe statyczne są wspólne dla wszystkich instancji klasy. # > ##### [Zmienna statyczna (pole statyczne)](https://pl.wikipedia.org/wiki/Zmienna_statyczna) # > **Zmienna statyczna** w [programowaniu](https://pl.wikipedia.org/wiki/Programowanie_komputer%C3%B3w) jest to [zmienna](https://pl.wikipedia.org/wiki/Zmienna_(informatyka)), która w danym bloku programu posiada dokładnie jedną instancję i istnieje przez cały czas działania programu. 
# > ##### [Metoda statyczna](https://pl.wikipedia.org/wiki/Metoda_statyczna) # > **Metoda statyczna** albo **metoda klasowa** jest to [metoda](https://pl.wikipedia.org/wiki/Metoda_(programowanie_obiektowe)) [klasy](https://pl.wikipedia.org/wiki/Klasa_(programowanie_obiektowe)), która nie jest wywoływana w kontekście żadnego konkretnego [obiektu](https://pl.wikipedia.org/wiki/Obiekt_(programowanie_obiektowe)) tej klasy. Metody statyczne z reguły służą do obsługi składowych statycznych klas. # > ###### Właściwości # > * W ciele metody statycznej, z racji tego iż nie jest wywoływana na rzecz konkretnego obiektu, nie można odwoływać się do składowych niestatycznych. Nie można więc użyć wskaźnika `self`. # > * Metoda statyczna może wywołać jedynie inne metody statyczne w swojej klasie lub odwoływać się jedynie do [pól (zmiennych) statycznych](https://pl.wikipedia.org/wiki/Zmienna_statyczna) w swojej klasie. Dostęp do pól i metod obiektów przekazywanych jako parametry czy też obiektów i funkcji globalnych następuje tak samo jak w zwykłej funkcji. <!--, jednak w przypadku obiektów własnej klasy ma dostęp do składowych prywatnych.--> # <!-- > * Metoda statyczna nie może być metodą wirtualną. --> # > ##### `@staticmethod` # > Przekształca metodę w metodę statyczną. # > # > Metoda statyczna nie otrzymuje niejawnego pierwszego argumentu. Aby zadeklarować metodę statyczną, użyj tego idiomu: # > # > ```python # > class C: # > @staticmethod # > def f(arg1, arg2, ...): ... # > ``` # > # > Metodę statyczną można wywołać w klasie (na przykład `C.f()`) lub w instancji (na przykład `C().f()`). # > ##### `@classmethod` # > Przekształca metodę w metodę klasy. # > # > Metoda klasy otrzymuje klasę jako niejawny pierwszy argument, tak jak metoda instancji otrzymuje instancję. Aby zadeklarować metodę klasy, użyj tego idiomu: # > ```python # > class C: # > @classmethod # > def f(cls, arg1, arg2, ...): ... 
# > ``` # > # > Metodę klasy można wywołać w klasie (na przykład `C.f()`) lub w instancji (na przykład `C().f()`). Instancja jest ignorowana z wyjątkiem swojej klasy. Jeśli metoda klasy jest wywoływana dla klasy pochodnej, obiekt klasy pochodnej jest przekazywany jako niejawny pierwszy argument. class CountedObject: __count = 0 # Statyczna skladowa def __init__(self): CountedObject.__count += 1 @staticmethod def staticGetCount(): return CountedObject.__count @classmethod def classGetCount(cls): print("classGetCount wywoływana dla instancji", cls) return cls.__count # Działanie: print("Liczba obiektów:", CountedObject.staticGetCount()) print("Tworzenie obiektów...") c1 = CountedObject() print("Liczba obiektów:", CountedObject.staticGetCount()) c2 = CountedObject() print("Liczba obiektów:", CountedObject.staticGetCount()) cs = [CountedObject(), CountedObject()] print("Liczba obiektów:", CountedObject.staticGetCount()) print("Liczba obiektów:", CountedObject.classGetCount()) # #### Właściwości # > ##### Hermetyzacja (enkapsulacja) # > **Hermetyzacja** (kalk. „enkapsulacja”, w starszych pozycjach „kapsułkowanie”, od [ang](https://pl.wikipedia.org/wiki/J%C4%99zyk_angielski). *encapsulation*) – jedno z założeń [programowania obiektowego](https://pl.wikipedia.org/wiki/Programowanie_obiektowe). Hermetyzacja polega na ukrywaniu pewnych danych składowych lub [metod](https://pl.wikipedia.org/wiki/Metoda_(programowanie_obiektowe)) obiektów danej [klasy](https://pl.wikipedia.org/wiki/Klasa_(programowanie_obiektowe)) tak, aby były one dostępne tylko wybranym metodom/funkcjom, np.: metodom wewnętrznym danej klasy. # > ##### Właściwość # > **Właściwość klasy** (ang. *class property*) – specjalny składnik [klas](https://pl.wikipedia.org/wiki/Klasa_(programowanie_obiektowe)), posiadający cechy [pola (atrybutu)](https://pl.wikipedia.org/wiki/Pole_(informatyka)) i [metody](https://pl.wikipedia.org/wiki/Metoda_(programowanie_obiektowe)). 
Właściwości są odczytywane i zapisywane tak jak pola, ale ich odczytywanie i zapisywanie zazwyczaj przebiega przez wywołanie metod. Łatwiej jest czytać i zapisywać pola, niż wywoływać metody, jednak wstawienie poprzez wywołanie metody pozwala na sprawdzanie poprawności danych, aktywowanie kodu aktualizacji (np. wyglądu GUI). Oznacza to, że właściwości są pośrednie między kodem (metody) a danymi ([pole/atrybut](https://pl.wikipedia.org/wiki/Pole_(informatyka))) klasy i zapewniają wyższy poziom [hermetyzacji (enkapsulacji)](https://pl.wikipedia.org/wiki/Hermetyzacja_(informatyka)) niż publiczne pola. # Właściwości (nie mylić z atrybutami!), zwane również jako gettery i settery, umożliwiają enkapsulację obiektu. Są odpowiednikiem metod dostępowych z innych języków programowania. # > ##### `class property(fget=None, fset=None, fdel=None, doc=None)` # > Zwraca atrybut właściwości. # > # > *fget* to funkcja do pobierania wartości atrybutu. *fset* to funkcja do ustawiania wartości atrybutu. *fdel* to funkcja służąca do usuwania wartości atrybutu. *doc* tworzy docstring dla atrybutu. # > # > Typowym zastosowaniem jest zdefiniowanie zarządzanego atrybutu `x`: # > # > ```python # > class C: # > def __init__(self): # > self._x = None # > # > def getx(self): # > return self._x # > # > def setx(self, value): # > self._x = value # > # > def delx(self): # > del self._x # > # > x = property(getx, setx, delx, "Jestem właściwością „x”.") # > ``` # > # > Jeśli *c* jest instancją *C*, `c.x` wywoła metodę pobierającą, `c.x = value` - ustawiającą, a `del c.x` - usuwającą. class Rectangle: def __init__(self): self.width = 0 self.height = 0 def setSize(self, size): self.width, self.height = size def getSize(self): return self.width, self.height size = property(getSize, setSize) # Przykład użycia: r = Rectangle() r.width = 10 r.height = 20 r.size r.size = 150, 100 r.height # Istnieje możliwość definiowania właściwości typu "read-only". 
Poniżej przykład konta bankowego, w którym można odczytać stan konta, ale nie można go zmienić. class BankAccount: counter = 0 def __init__(self, owner, balance=0): self.owner = owner self.__balance = balance BankAccount.counter += 1 def __getBalance(self): return self.__balance balance = property(__getBalance) # Użycie: ba = BankAccount("jk", 100) ba.balance ba.balance = 100 # Błąd! # #### Atrybuty specjalne # # Instancje klas posiadają specjalne atrybuty, które opisują obiekty: ba = BankAccount("Kowalski", 1000) ba.__dict__ # Słownik zdefiniowanych przez użytkownika atrybutów ba.__class__.__name__ # Nazwa klasy # <!-- ba.withdraw.__name__ # Nazwa metody --> dir(ba) [attrib for attrib in dir(ba) if not attrib.startswith('_')] # Interfejs
Python/.ipynb_checkpoints/04 Python OOP - zaawansowana obiektowość-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline # #%matplotlib notebook import matplotlib.pyplot as plt import numpy as np import scipy.constants as const import time import csv from itertools import chain, product import os from qutip import * np.set_printoptions(threshold=np.inf) # + ''' Define all of our relevant basis. ''' i=np.complex(0,1) ''' Rotation from Lab -> Atom: R_AL: {|+>,|->} -> {|H>,|V>} R_AL.|+>_A = |+>_L --> R_AL.(1 0)^tr = (1 i)^tr / sqrt(2) R_AL.|->_A = |->_L --> R_AL.(0 1)^tr = (1 -i)^tr / sqrt(2) ''' R_AL = np.sqrt(1/2) * np.matrix([[1, i], [i, 1]]) ''' Rotation from Lab -> Cavity: R_LA: {|H>,|V>} -> {|X>,|Y>} ''' # Option 1: Measured parameters for our cavity. #alpha_CL, phi1_CL, phi2_CL = 0.459, -64.9*np.pi/180, -40.1*np.pi/180 alpha_CL, phi1_CL, phi2_CL = 0.888, 115.1*np.pi/180, -40.1*np.pi/180 # Old choice # alpha_CL, phi1_CL, phi2_CL = 0.879, -97*np.pi/180, 117*np.pi/180 # theta,phi = 160,25 #alpha_CL, phi1_CL, phi2_CL = 0.449, -112.4*np.pi/180, -140.2*np.pi/180 #alpha_CL, phi1_CL, phi2_CL = 0.893, 67.6*np.pi/180, -140.2*np.pi/180 # theta,phi = 70,25 #alpha_CL, phi1_CL, phi2_CL = 0.449, 67.6*np.pi/180, 39.8*np.pi/180 #alpha_CL, phi1_CL, phi2_CL = 0.893, 247.6*np.pi/180, 39.8*np.pi/180 ## Option 2: Circular cavity eigenmodes. ## Note phi2=90 <--> |X>=|+>, phi2=-90 <--> |X>=|-> #alpha_CL, phi1_CL, phi2_CL = 1/np.sqrt(2), 0*np.pi/180, 90*np.pi/180 ## Option 3: Linear cavity eigenmodes. 
#alpha_CL, phi1_CL, phi2_CL = 1, 0*np.pi/180, 0*np.pi/180 beta_CL = np.sqrt(1-alpha_CL**2) R_CL = np.matrix([[alpha_CL* np.exp(i*phi1_CL), -beta_CL * np.exp(-i*phi2_CL)], [beta_CL * np.exp(i*phi2_CL), alpha_CL* np.exp(-i*phi1_CL)]]) R_LC = R_CL.getH() ''' Rotation from Atom -> Cavity: R_LA: {|+>,|->} -> {|X>,|Y>} ''' R_AC = R_LC*R_AL R_CA = R_AC.getH() ''' alpha, phi for writing the atomic basis in terms of the cavity basis ''' alpha = np.abs(R_AC[0, 0]) beta = np.sqrt(1-alpha**2) phi1, phi2 = np.angle(R_AC[0, 0]), np.angle(R_AC[1, 0]) args_system = dict([('alpha',alpha), ('beta',beta), ('phi1',phi1), ('phi2',phi2)]) # + ''' Import coupling factors and energy level splittings from file. The notation used is to denote the ground/excited 'F' levels with g'F'/x'F' The magnetic 'mF' sublevels -3,-2,-1,0,1,2,3 are denoted MMM,MM,M,[nothing],P,PP,PPP. Examples: the F'=2,mF'=3 sublevel is denoted x2PPP, the F=1,mF'=-1 sublevel is denoted g1M. ''' imports = [] with open("./params/exp_params_13MHz.csv") as file: reader = csv.reader(file) for row in reader: imports.append(row) imports = dict(map(lambda x: (str(x[0]), float(x[1])), imports)) deltaZ,deltaEx3,deltaEx1,deltaEx0,\ deltaZx3MMM,\ deltaZx3MM,deltaZx2MM,\ deltaZx3M,deltaZx2M,deltaZx1M,\ deltaZx3,deltaZx2,deltaZx1,deltaZx0,\ deltaZx3P,deltaZx2P,deltaZx1P,\ deltaZx3PP,deltaZx2PP,\ deltaZx3PPP = \ [imports[delta]*2*np.pi for delta in ["deltaZ", "deltaEx3","deltaEx1","deltaEx0", "deltaZx3MMM", "deltaZx3MM","deltaZx2MM", "deltaZx3M","deltaZx2M", "deltaZx1M", "deltaZx3","deltaZx2", "deltaZx1", "deltaZx0", "deltaZx3P","deltaZx2P", "deltaZx1P", "deltaZx3PP","deltaZx2PP", "deltaZx3PPP"] ] ''' Shift the global energy offsets to a new level. By default the parameter file has the energy splitting given wrt the F'=2 level. 
''' E0shift = deltaEx1 deltaEx3,deltaEx2,deltaEx1,deltaEx0 = [deltaE - E0shift for deltaE in [deltaEx3,0,deltaEx1,deltaEx0]] CGg1Mx3MM, CGg1Mx2MM, \ CGg1x3M, CGg1x2M, CGg1x1M, CGg1Mx3M, CGg1Mx2M, CGg1Mx1M, \ CGg1Px3, CGg1Px2, CGg1Px1, CGg1Px0, CGg1x3, CGg1x2, CGg1x1, CGg1x0, CGg1Mx3, CGg1Mx2, CGg1Mx1, CGg1Mx0, \ CGg1Px3P, CGg1Px2P, CGg1Px1P, CGg1x3P, CGg1x2P, CGg1x1P, \ CGg1Px3PP,CGg1Px2PP = [imports[CG] for CG in [ "CGg1Mx3MM", "CGg1Mx2MM", "CGg1x3M", "CGg1x2M", "CGg1x1M", "CGg1Mx3M", "CGg1Mx2M", "CGg1Mx1M", "CGg1Px3", "CGg1Px2", "CGg1Px1", "CGg1Px0", "CGg1x3", "CGg1x2", "CGg1x1", "CGg1x0", "CGg1Mx3", "CGg1Mx2", "CGg1Mx1", "CGg1Mx0", "CGg1Px3P", "CGg1Px2P", "CGg1Px1P", "CGg1x3P", "CGg1x2P", "CGg1x1P", "CGg1Px3PP","CGg1Px2PP"]] CGg2MMx3MMM, \ CGg2Mx3MM, CGg2Mx2MM, CGg2MMx3MM, CGg2MMx2MM, \ CGg2x3M, CGg2x2M, CGg2x1M, CGg2Mx3M, CGg2Mx2M, CGg2Mx1M, CGg2MMx3M, CGg2MMx2M, CGg2MMx1M, \ CGg2Px3, CGg2Px2, CGg2Px1, CGg2Px0, CGg2x3, CGg2x2, CGg2x1, CGg2x0, CGg2Mx3, CGg2Mx2, CGg2Mx1, CGg2Mx0, \ CGg2PPx3P, CGg2PPx2P, CGg2PPx1P, CGg2Px3P, CGg2Px2P, CGg2Px1P, CGg2x3P, CGg2x2P, CGg2x1P, \ CGg2PPx3PP, CGg2PPx2PP, CGg2Px3PP, CGg2Px2PP, \ CGg2PPx3PPP = [imports[CG] for CG in [ "CGg2MMx3MMM", "CGg2Mx3MM","CGg2Mx2MM", "CGg2MMx3MM", "CGg2MMx2MM", "CGg2x3M", "CGg2x2M", "CGg2x1M", "CGg2Mx3M", "CGg2Mx2M", "CGg2Mx1M", "CGg2MMx3M", "CGg2MMx2M", "CGg2MMx1M", "CGg2Px3", "CGg2Px2", "CGg2Px1", "CGg2Px0", "CGg2x3", "CGg2x2", "CGg2x1", "CGg2x0", "CGg2Mx3", "CGg2Mx2", "CGg2Mx1", "CGg2Mx0", "CGg2PPx3P", "CGg2PPx2P", "CGg2PPx1P", "CGg2Px3P", "CGg2Px2P", "CGg2Px1P", "CGg2x3P", "CGg2x2P", "CGg2x1P", "CGg2PPx3PP", "CGg2PPx2PP", "CGg2Px3PP", "CGg2Px2PP", "CGg2PPx3PPP"]] # Convert all the coupling to F=2 into an overall 'dark-state' coupling. 
def getCGdDecayRate(CGlist): return np.sqrt ( sum( [CG**2 for CG in CGlist ] ) ) CGdx0 = getCGdDecayRate([CGg2Mx0, CGg2x0, CGg2Px0]) CGdx1M = getCGdDecayRate([CGg2MMx1M, CGg2Mx1M, CGg2x1M]) CGdx1 = getCGdDecayRate([CGg2Mx1, CGg2x1, CGg2Px1]) CGdx1P = getCGdDecayRate([CGg2x1P, CGg2Px1P, CGg2PPx1P]) CGdx2MM = getCGdDecayRate([CGg2MMx2MM, CGg2Mx2MM]) CGdx2M = getCGdDecayRate([CGg2MMx2M, CGg2Mx2M, CGg2x2M]) CGdx2 = getCGdDecayRate([CGg2Mx2, CGg2x2, CGg2Px2]) CGdx2P = getCGdDecayRate([CGg2x2P, CGg2Px2P, CGg2PPx2P]) CGdx2PP = getCGdDecayRate([CGg2Px2PP, CGg2PPx2PP]) CGdx3MMM= getCGdDecayRate([CGg2MMx3MMM]) CGdx3MM = getCGdDecayRate([CGg2MMx3MM, CGg2Mx3MM]) CGdx3M = getCGdDecayRate([CGg2MMx3M, CGg2Mx3M, CGg2x3M]) CGdx3 = getCGdDecayRate([CGg2Mx3, CGg2x3, CGg2Px3]) CGdx3P = getCGdDecayRate([CGg2x3P, CGg2Px3P, CGg2PPx3P]) CGdx3PP = getCGdDecayRate([CGg2Px3PP, CGg2PPx3PP]) CGdx3PPP= getCGdDecayRate([CGg2PPx3PPP]) ''' Physical system parameters ''' # List the excited levels to include in the simulation. xlvls = [ 'x0', #'x1' 'x1M','x1','x1P', #'x2MM','x2M','x2','x2P','x2PP', #'x3MMM', 'x3MM','x3M','x3','x3P','x3PP', 'x3PPP' ] # List the coupling rates of the system. # kappa: Decay of the electric field out of the cavity. # gamma: Decay of the atomic amplitude. # deltaP: Splitting of the cavity polarisation eigenmodes. kappa = 3.543 * 2.*np.pi /2 gamma = 3 * 2.*np.pi deltaP = 3.471 * 2.*np.pi # State the detuning of the cavity and the laser w.r.t. the energy zero # defined above (with the E0shift parameter). deltaOffsetVStirap = 7.5 * 2.*np.pi deltaL = -2*deltaZ + deltaOffsetVStirap # |F,mF>=|1,-1> <--> |F',mF'>=|1,0> #deltaL = 2*deltaZ + deltaOffsetVStirap # |F,mF>=|1,+1> <--> |F',mF'>=|1,0> deltaC = deltaOffsetVStirap ''' Cavity coupling. Note g, the atom-cavity coupling rate, is given by d * A * coupling_factor where, d: dipole moment of transition (D2 line for us). A: Angular dependence of the considered transition. e.g. 
A=np.sqrt(5/24) for F=1,mF=+/-1 <--> F=1,mF=0 in zero magnetic field. ''' d = 3.584*10**(-29) # D2-line dipole moment lambda_cav = 780.233 * 10**-9 # Resonance frequency of cavity omega_cav = 2*np.pi * const.c / lambda_cav # Angular resonance frequency of cavity L_cav = 339 * 10**-6 # Cavity length R = 0.05 # Radius of curvature of cavity mirrors. waist_cav = np.sqrt( (lambda_cav/np.pi)*np.sqrt( L_cav/2*( R - (L_cav/2) ))) # Cavity waist V_mode = (np.pi * L_cav * waist_cav**2)/2 # Cavity mode volume. coupling_factor = np.sqrt(omega_cav / (2*const.hbar*const.epsilon_0*V_mode)) coupling_factor /= (10**6) #Convert here so that the final coupling comes out in MHz. ''' The variation in coupling of atoms in free-flight through the cavity mode can be corrected for with an reduced averaged coupling. Do so here if wanted. ''' #coupling_factor *= 0.7 print('Atom-cavity coupling for F=1,mF=+/-1 <--> F=1,mF=1 ' 'in zero magnetic field is: g={0}MHz'.format( np.round(d * np.sqrt(5/24) * coupling_factor / (2*np.pi),3))) # + ''' If you want to overide the atom-cavity coupling calculated from the cavity parameters (above) do so here. e.g. to set g=2MHz on F=1,mF=+/-1 <--> F=1,mF=1 tranistion in zero magnetic field use coupling_factor = 10*2*np.pi / (d*CGg1Mx1) ''' # coupling_factor = 10*2*np.pi / (d*CGg1Mx1) print('Atom-cavity coupling for F=1,mF=+/-1 <--> F=1,mF=0 ' 'in zero magnetic field is: g={0}MHz'.format( np.round(d * np.sqrt(5/24) * coupling_factor / (2*np.pi),3))) # + ''' Here we perform the set up required before we can define and simulate the Hamiltonian of the system. This entails: 1. Defining the size of our Hilbert space and how the states with it are labelled. 2. Creating shorthand functions for the 'ket'/'bras' of the state vectors. 3. Creating the operators who's expectation values we will wish to track through the simulations. 
These are the number operators and for the population of each atomic state and each cavity mode in various polarisation bases (the cavity basis, {|H>,|V>} modes, {|+>,|->} modes etc.). 4. Creating the collapse operators to account for spontaneous decay in our system. ''' # Set where to truncate Fock states 0,1,2..,N-1 for cavity modes. N = 2 cavStates = list(range(N)) # A dictionary of the atomic states. atomStates = { "g1M":0, "g1":1, "g1P":2, # Corresponding to F=1,mF=-1,0,+1 respectively "d":3 # The 'dark-state' for all the F=2 ground levels where spont. emm. will be dumped. } # Add the excited-states already configured. for k,v in zip(xlvls, range(len(atomStates), len(atomStates)+len(xlvls))): atomStates[k]=v M = len(atomStates) ''' State definitions - pre-calculated for speed. We create string-indexed dictionaries for the kets (|k>), bras (<b|), and ketbras (|k><b|). ''' def ket(atom, cavH, cavV): return tensor(basis(M, atomStates[atom]), basis(N, cavH), basis(N, cavV)) def bra(atom, cavH, cavV): return ket(atom, cavH, cavV).dag() kets,bras = {},{} ketbras = {} s=[ list(atomStates) + xlvls, range(N), range(N)] states = list(map(list, list(product(*s)))) for state in states: kets[str(state)] = ket(*state) bras[str(state)] = bra(*state) for x in list(map(list, list(product(*[states,states])))): ketbras[str(x)] = ket(*x[0])*bra(*x[1]) ''' Shorthand definitions for coding Hamiltonians. ''' i=np.complex(0,1) def ePit(x,t): return np.exp(i*t*x) def ePmit(x,t): return np.exp(-i*t*x) def kb(x,y): return ketbras[str([x,y])] ''' Create the operators that give the population of the atomic states. 
''' aDict = {} def createStateOp(s): try: aOp = kb([s,0,0],[s,0,0]) + kb([s,1,0],[s,1,0]) + kb([s,0,1],[s,0,1])+ kb([s,1,1],[s,1,1]) aDict[s]=aOp except KeyError: aOp = None return aOp [ ad, ag1M, ag1, ag1P, ax0, ax1M, ax1, ax1P, ax2MM, ax2M, ax2, ax2P, ax2PP, ax3MMM, ax3MM, ax3M, ax3, ax3P, ax3PP, ax3PPP ] = [createStateOp(s) for s in ["d", "g1M", "g1", "g1P", "x0", "x1M", "x1", "x1P", "x2MM", "x2M", "x2", "x2P", "x2PP", "x3MMM", "x3MM", "x3M", "x3", "x3P", "x3PP", "x3PPP"]] ''' Create the photon number operators. anX, anY - photon number operator for cavity modes X, Y. anRotP_fast, anRotM_fast - photon number operators for different polarisation bases. They basis they correspond to is defined by the parameters of the rotation matrix from the new basis to the cavity basis: R = [[alpha* np.exp(i*phi1), -beta * np.exp(-i*phi2)], [beta * np.exp(i*phi2), alpha* np.exp(-i*phi1)]]. '_fast' denotes that these functions return the appropriate operators at a given time, t, with all time-independent calculations already performed to maximise the speed. ''' aX = tensor(qeye(M), destroy(N), qeye(N)) aY = tensor(qeye(M), qeye(N), destroy(N)) anX = aX.dag()*aX anY = aY.dag()*aY # Pre-compute time independent terms to speed up calculations. 
allAtomicStates = list(atomStates) + xlvls an_fast_1 = sum(map(lambda s: kb([s,1,0],[s,1,0]) + kb([s,1,1],[s,1,1]), allAtomicStates)) an_fast_2 = sum(map(lambda s: kb([s,0,1],[s,0,1]) + kb([s,1,1],[s,1,1]), allAtomicStates)) an_fast_3 = sum(map(lambda s: kb([s,0,1],[s,1,0]), allAtomicStates)) an_fast_4 = sum(map(lambda s: kb([s,1,0],[s,0,1]), allAtomicStates)) def anRotP_fast(t, alpha=alpha, phi1=phi1, phi2=phi2): beta = np.sqrt(1-alpha**2) delta_phi = phi2 - phi1 return \ (alpha**2 * an_fast_1 + beta**2 * an_fast_2) + \ alpha*beta * ( np.exp(-i*deltaP*t) * np.exp(i*delta_phi) * an_fast_3 + \ np.exp(i*deltaP*t) * np.exp(-i*delta_phi) * an_fast_4 ) def anRotM_fast(t, alpha=alpha, phi1=phi1, phi2=phi2): beta = np.sqrt(1-alpha**2) delta_phi = phi2 - phi1 return \ (alpha**2 * an_fast_2 + beta**2 * an_fast_1) - \ alpha*beta * ( np.exp(-i*deltaP*t) * np.exp(i*delta_phi) * an_fast_3 + \ np.exp(i*deltaP*t) * np.exp(-i*delta_phi) * an_fast_4 ) ''' Takes a series of times, t_series = [t0,t1,...], and returns [ [anRotP_fast[t0],anRotP_fast[t1],...], [anRotM_fast[t0],anRotM_fast[t1],...] ] with the fewest possible calculations. It is approx. twice as fast as calling anRotP_fast and andRotM_fast independently for every time in t_series. ''' def anRot_series(t_series, alpha=alpha, phi1=phi1, phi2=phi2): beta = np.sqrt(1-alpha**2) delta_phi = phi2 - phi1 an0P = (alpha**2 * an_fast_1 + beta**2 * an_fast_2) an0M = (alpha**2 * an_fast_2 + beta**2 * an_fast_1) an1s = [alpha*beta * ( np.exp(-i*deltaP*t) * np.exp(i*delta_phi) * an_fast_3 + \ np.exp(i*deltaP*t) * np.exp(-i*delta_phi) * an_fast_4 ) for t in t_series] anRots = [[an0P+an1,an0M-an1] for an1 in an1s] return [list(i) for i in zip(*anRots)] ''' The c_op_list is the collapse operators of the system. 
Namely - the rate of photon decay from the cavity - spontaneous decay of the excited atomic states ''' # Define collapse operators c_op_list = [] # Cavity decay rate c_op_list.append(np.sqrt(2*kappa) * aX) c_op_list.append(np.sqrt(2*kappa) * aY) spontEmmChannels = [ # |F',mF'> --> |F=1,mF=-1> ('g1M','x0',CGg1Mx0), ('g1M','x1M',CGg1Mx1M),('g1M','x1',CGg1Mx1), ('g1M','x2MM',CGg1Mx2MM),('g1M','x2M',CGg1Mx2M),('g1M','x2',CGg1Mx2), ('g1M','x3MM',CGg1Mx3MM),('g1M','x3M',CGg1Mx3M),('g1M','x3',CGg1Mx3), # |F',mF'> --> |F=1,mF=0> ('g1','x0',CGg1x0), ('g1','x1M',CGg1x1M),('g1','x1',CGg1x1),('g1','x1P',CGg1x1P), ('g1','x2M',CGg1x2M),('g1','x2',CGg1x2),('g1','x2P',CGg1x2P), ('g1','x3M',CGg1x3M),('g1','x3',CGg1x3),('g1','x3P',CGg1x3P), # |F',mF'> --> |F=1,mF=+1> ('g1P','x0',CGg1Px0), ('g1P','x1',CGg1Px1),('g1P','x1P',CGg1Px1P), ('g1P','x2',CGg1Px2),('g1P','x2P',CGg1Px2P),('g1P','x2PP',CGg1Px2PP), ('g1P','x3',CGg1Px3),('g1P','x3P',CGg1Px3P),('g1P','x3PP',CGg1Px3PP), # |F',mF'> --> |F=2,mF> ('d','x0',CGdx0), ('d','x1M',CGdx1M),('d','x1',CGdx1),('d','x1P',CGdx1P), ('d','x2MM',CGdx2MM), ('d','x2M',CGdx2M),('d','x2',CGdx2),('d','x2P',CGdx2P), ('d','x2PP',CGdx2PP), ('d','x3MmM',CGdx3MMM), ('d','x3MM',CGdx3MM), ('d','x2M',CGdx3M), ('d','x2',CGdx3),('d','x2P',CGdx3P), ('d','x2PP',CGdx3PP), ('d','x2PPP',CGdx3PPP) ] spontDecayOps = [] # np.sqrt(2) in font of trans strength is because sum of strengths # is 1/2 for D2 but splitting ratios need to sum to 1 for x in spontEmmChannels: try: spontDecayOps.append(np.sqrt(2) * x[2] * np.sqrt(2*gamma) * tensor( basis(M, atomStates[x[0]]) * basis(M, atomStates[x[1]]).dag(), qeye(N), qeye(N))) except KeyError: pass c_op_list += spontDecayOps sigma_spontDecayOp = sum([x.dag()*x for x in spontDecayOps]) # + ''' Define the pump pulse parameters. The pump pulse by default has a sin**2 amplitude profile. 
Parameters: lengthStirap - length of the pump pulse wStirap - pi / lengthStirap lengthSim - length of the simulation (as the photon can continue to decay out of the cavity after the pump pulse is over, we typically use lengthSim > lengthStirap). OmegaStirap - the peak Rabi frequency of the pump pulse. Note that, again, this does not include the angualr dependence of the transition so the Rabi frequency on a given transition is A * OmegaStirap where, A: Angular dependence of the considered transition. e.g. A=np.sqrt(5/24) for F=1,mF=+/-1 <--> F=1,mF=1 in zero magnetic field. ''' lengthStirap=0.33 lengthSim =0.4 OmegaStirap = 10 * 2 *np.pi print('Peak Rabi frequency of pump pulse for F=1,mF=+/-1 <--> F=1,mF=0 ' 'in zero magnetic field is: Omega={0}MHz'.format( np.round(np.sqrt(5/24) * OmegaStirap/ (2*np.pi),3))) wStirap = np.pi / lengthStirap t,tStep = np.linspace(0, lengthSim, 251, retstep=True) Omega = lambda t, A=OmegaStirap, w=wStirap: np.piecewise(t, [t<lengthStirap], [A*np.sin(w*t)**2,0]) args_stirap = dict([('wStirap',wStirap), ('lengthStirap',lengthStirap), ('lengthSim',lengthSim)]) f, a1 = plt.subplots(figsize=(3, 2)) a1.plot(t, [Omega(x)/(2*np.pi) for x in t], 'b') f.patch.set_facecolor('white') # + ''' Create couplings for the Hamiltonian ''' ''' Create a laser coupling. Parameters: Omega - The peak rabi frequency of the pump pulse. g - The ground atomic atomic level. x - The excited atomic level. omegaL - The detuning of the pump laser. deltaM - The angular momentum change from g --> x. This is ignored but included for consistancy with the cavityCoupling function. args_list - A dictionary of arguments for the qutip simulation. pulseShape - The shape of the pump pulse. 
Returns: (List of cython-ready Hamiltonian terms, args_list with relevant parameters added) ''' def laserCoupling(Omega,g,x,omegaL,deltaM,args_list,pulseShape='np.sin(w*t)**2'): omegaL_lab = 'omegaL_{0}{1}'.format(g,x) args_list[omegaL_lab] = omegaL return ( [ [ -(Omega/2)*( ( kb([g,0,0],[x,0,0]) + kb([g,0,1],[x,0,1]) + kb([g,1,0],[x,1,0]) + kb([g,1,1],[x,1,1]) ) + ( kb([x,0,0],[g,0,0]) + kb([x,0,1],[g,0,1]) + kb([x,1,0],[g,1,0]) + kb([x,1,1],[g,1,1]) ) ),'{0} * cos({1}*t)'.format(pulseShape,omegaL_lab)], [ -i*(Omega/2)*( ( kb([g,0,0],[x,0,0]) + kb([g,0,1],[x,0,1]) + kb([g,1,0],[x,1,0]) + kb([g,1,1],[x,1,1]) ) - ( kb([x,0,0],[g,0,0]) + kb([x,0,1],[g,0,1]) + kb([x,1,0],[g,1,0]) + kb([x,1,1],[g,1,1]) ) ),'{0} * sin({1}*t)'.format(pulseShape,omegaL_lab)] ], args_list ) ''' Create a cavity coupling. Parameters: g0 - The atom-cavity coupling rate. g - The ground atomic atomic level. x - The excited atomic level. omegaC - The detuning of the cavity resonance. deltaM - The angular momentum change from g --> x. args_list - A dictionary of arguments for the qutip simulation. 
Returns: (List of cython-ready Hamiltonian terms, args_list with relevant parameters added) ''' def cavityCoupling(g0,g,x,omegaC,deltaM,args_list): omegaC_X = omegaC + deltaP/2 omegaC_Y = omegaC - deltaP/2 omegaC_X_lab = 'omegaC_X_{0}{1}'.format(g,x) omegaC_Y_lab = 'omegaC_Y_{0}{1}'.format(g,x) args_list[omegaC_X_lab] = omegaC_X args_list[omegaC_Y_lab] = omegaC_Y if deltaM==1: H_coupling = ( [ [ -g0*alpha*( kb([g,1,0],[x,0,0])+ kb([g,1,1],[x,0,1]) + kb([x,0,0],[g,1,0]) + kb([x,0,1],[g,1,1]) ),'cos({0}*t + phi1)'.format(omegaC_X_lab)], [ -i*g0*alpha*( kb([g,1,0],[x,0,0])+ kb([g,1,1],[x,0,1]) - kb([x,0,0],[g,1,0]) - kb([x,0,1],[g,1,1]) ),'sin({0}*t + phi1)'.format(omegaC_X_lab)], [ -g0*beta*( kb([g,0,1],[x,0,0]) + kb([g,1,1],[x,1,0]) + kb([x,0,0],[g,0,1]) + kb([x,1,0],[g,1,1]) ),'cos({0}*t + phi2)'.format(omegaC_Y_lab)], [ -i*g0*beta*( kb([g,0,1],[x,0,0]) + kb([g,1,1],[x,1,0]) - kb([x,0,0],[g,0,1]) - kb([x,1,0],[g,1,1]) ),'sin({0}*t + phi2)'.format(omegaC_Y_lab)] ], args_list ) elif deltaM==-1: H_coupling = ( [ [ -g0*alpha*( kb([g,0,1],[x,0,0])+ kb([g,1,1],[x,1,0]) + kb([x,0,0],[g,0,1]) + kb([x,1,0],[g,1,1]) ),'cos({0}*t - phi1)'.format(omegaC_Y_lab)], [ -i*g0*alpha*( kb([g,0,1],[x,0,0])+ kb([g,1,1],[x,1,0]) - kb([x,0,0],[g,0,1]) - kb([x,1,0],[g,1,1]) ),'sin({0}*t - phi1)'.format(omegaC_Y_lab)], [ g0*beta*( kb([g,1,0],[x,0,0]) + kb([g,1,1],[x,0,1]) + kb([x,0,0],[g,1,0]) + kb([x,0,1],[g,1,1]) ),'cos({0}*t - phi2)'.format(omegaC_X_lab)], [ i*g0*beta*( kb([g,1,0],[x,0,0]) + kb([g,1,1],[x,0,1]) - kb([x,0,0],[g,1,0]) - kb([x,0,1],[g,1,1]) ),'sin({0}*t - phi2)'.format(omegaC_X_lab)] ], args_list ) else: raise Exception("deltaM must be +/-1") return H_coupling ''' Get the laser and cavity couplings for between all configured levels with angular momentum changes of +/-1. Parameters: delta - The detuning of the coupling field. 
''' def getCouplings_SigmaPlus(delta): return [ # For |F,mF>=|1,mF> <--> |F',mF'>=|3,mF+1> (CGg1Mx3, 'g1M', 'x3', delta + deltaZ - deltaZx3 - deltaEx3, 1), (CGg1x3P, 'g1', 'x3P', delta - deltaZx3P - deltaEx3, 1), (CGg1Px3PP, 'g1P', 'x3PP', delta - deltaZ - deltaZx3PP - deltaEx3, 1), # For |F,mF>=|1,mF> <--> |F',mF'>=|2,mF+1> (CGg1Mx2, 'g1M', 'x2', delta + deltaZ - deltaZx2 - deltaEx2, 1), (CGg1x2P, 'g1', 'x2P', delta - deltaZx2P - deltaEx2, 1), (CGg1Px2PP, 'g1P', 'x2PP', delta - deltaZ - deltaZx2PP - deltaEx2, 1), # For |F,mF>=|1,mF> <--> |F',mF'>=|1,mF+1> (CGg1Mx1, 'g1M', 'x1', delta + deltaZ - deltaZx1 - deltaEx1, 1), (CGg1x1P, 'g1', 'x1P', delta - deltaZx1P - deltaEx1, 1), # For |F,mF>=|1,mF> <--> |F',mF'>=|0,mF+1> (CGg1Mx0, 'g1M', 'x0', delta + deltaZ - deltaZx0 - deltaEx0, 1), ] def getCouplings_SigmaMinus(delta): return [ # For |F,mF>=|1,mF> <--> |F',mF'>=|3,mF-1> (CGg1Mx3MM, 'g1M', 'x3MM', delta + deltaZ - deltaZx3MM - deltaEx3, -1), (CGg1x3M, 'g1', 'x3M', delta - deltaZx3M - deltaEx3, -1), (CGg1Px3, 'g1P', 'x3', delta - deltaZ - deltaZx3 - deltaEx3, -1), # For |F,mF>=|1,mF> <--> |F',mF'>=|2,mF-1> (CGg1Mx2MM, 'g1M', 'x2MM', delta + deltaZ - deltaZx2MM - deltaEx2, -1), (CGg1x2M, 'g1', 'x2M', delta - deltaZx2M - deltaEx2, -1), (CGg1Px2, 'g1P', 'x2', delta - deltaZ - deltaZx2 - deltaEx2, -1), # For |F,mF>=|1,mF> <--> |F',mF'>=|1,mF-1> (CGg1x1M, 'g1', 'x1M', delta - deltaZx1M - deltaEx1, -1), (CGg1Px1, 'g1P', 'x1', delta - deltaZ - deltaZx1 - deltaEx1, -1), # For |F,mF>=|1,mF> <--> |F',mF'>=|0,mF-1> (CGg1Px0, 'g1P', 'x0', delta - deltaZ - deltaZx0 - deltaEx0, -1), ] ''' Define the couplings used here. ''' cavityCouplings = getCouplings_SigmaMinus(deltaC) + getCouplings_SigmaPlus(deltaC) laserCouplings = getCouplings_SigmaMinus(deltaL) + getCouplings_SigmaPlus(deltaL) ''' Convert a list of couplings as returned by getCouplings...() to a list of time-dependent Hamiltonian terms as required by qutip.mesolve(...). 
Needs: couplings: the list of couplings as returned by getCouplings...() rabiFreq: the intensity of the couplings (with angular dependence factored out) pulseShape: the shape of the pulse (in the string format for mesolve compilation into Cython). If None the coupling is presumed constant and a cavityCoupling(...) is set, otherwise laserCoupling(...) is used. Returns: hams: the list of Hamiltonian terms args_hams: the dictionary of args needed by qutip.mesolve ''' def couplingsToHamiltonians(couplings, rabiFreq, pulseShape=None): hams, args_hams = [], dict() for x in couplings: # Check if this is a coupling between configured states. if x[1] in atomStates and x[2] in atomStates: if pulseShape != None: ham, args_ham = laserCoupling(rabiFreq*x[0], x[1], x[2], x[3], x[4], args_hams, pulseShape) else: ham, args_ham = cavityCoupling(rabiFreq*x[0], x[1], x[2], x[3], x[4], args_hams) hams.append(ham) return list(chain(*hams)), args_hams ''' Create Hamiltonian terms for configured couplings. ''' hams_cavity, args_hams_cavity = couplingsToHamiltonians(cavityCouplings, d*coupling_factor) hams_laser, args_hams_laser = couplingsToHamiltonians(laserCouplings, OmegaStirap, 'np.piecewise(t, [t<lengthStirap], [np.sin(wStirap*t)**2,0])') ''' The full list of Hamiltonian terms and arguments. ''' H_Stirap = list(chain(*[hams_laser,hams_cavity])) args_hams_Stirap = {**args_stirap,**args_system, **args_hams_cavity,**args_hams_laser} # + ''' When repeatedly simulating a system where only the time-dependent variables, or initial state change, it is possible to reuse the Hamiltonian data stored in QuTiP and there by avoid spending time needlessly preparing the Hamiltonian and collapse terms for simulation. To turn on the the reuse features, we must pass a qutip.Options object with the rhs_reuse flag turned on, or we can pre-compile the Hamiltonian and pass it explicitly to the solver. 
''' # A list of Hamiltonians to pre-compile in the form: # (Cythonised Hamiltonian, arguement dictionary, compiled file name) H_list = [ (H_Stirap, args_hams_Stirap, 'H_Stirap') ] H_rhs_compiled=[] for H, args, label in H_list: t_start = time.time() rhs_generate(H, c_op_list, args=args, name=label, cleanup=False) H_rhs_compiled.append(H) print('Hamiltonian \'{0}\': states pre-computed in in {1} seconds'.format(label, time.time()-t_start)) # + ''' Perform simulation. ''' t,tStep = np.linspace(0, lengthSim, 201, retstep=True) psi0 = ket(*['g1M',0,0]) # Initial state of the system opts = Options(rhs_reuse=False,rhs_filename='H_Stirap') t_start = time.time() output = mesolve(H_Stirap, psi0, t, c_op_list, [], options=opts, args=args_hams_Stirap, progress_bar=None) print('Simulation with {0} timesteps completed in {1} seconds'.format( t.size,np.round(time.time()-t_start,3) )) # + ''' Handle simulation results. Here we produce plots showing the photon emission in three polarisation bases: - circular {|+>,|->}, - cavity {|X>,|Y>}, - linear {|H>,|V>}, and the evolution of the atomic state. ''' output_states = output.states ''' Photon number operators for calculating the population in the circular and linear bases. ''' # anP_t = [anRotP_fast(time, alpha=alpha, phi1=phi1, phi2=phi2) for time in t] # anM_t = [anRotM_fast(time, alpha=alpha, phi1=phi1, phi2=phi2) for time in t] anP_t,anM_t = anRot_series(t, alpha=alpha, phi1=phi1, phi2=phi2) alpha_LC = np.clip(np.abs(R_LC[0, 0]),0,1) phi1_LC, phi2_LC = np.angle(R_LC[0, 0]), np.angle(R_LC[1, 0]) # anH_t = [anRotP_fast(time, alpha=alpha_LC, phi1=phi1_LC, phi2=phi2_LC) for time in t] # anV_t = [anRotM_fast(time, alpha=alpha_LC, phi1=phi1_LC, phi2=phi2_LC) for time in t] anH_t,anV_t = anRot_series(t, alpha=alpha_LC, phi1=phi1_LC, phi2=phi2_LC) ''' Calculate expectation values of photon number operators. 
''' exp_anP = np.abs(np.array([(x[0]*x[1]).tr() for x in zip(output_states, anP_t)])) exp_anM = np.abs(np.array([(x[0]*x[1]).tr() for x in zip(output_states, anM_t)])) exp_anH = np.abs(np.array([(x[0]*x[1]).tr() for x in zip(output_states, anH_t)])) exp_anV = np.abs(np.array([(x[0]*x[1]).tr() for x in zip(output_states, anV_t)])) exp_anX = np.abs(np.array([(x*anX).tr() for x in output_states])) exp_anY = np.abs(np.array([(x*anY).tr() for x in output_states])) exp_spontDecay = np.abs( np.array([(x*sigma_spontDecayOp).tr() for x in output_states]) ) # Total photonic population of each mode. n_ph = np.trapz(2*kappa*(exp_anP+exp_anM), dx=tStep) n_P = np.trapz(2*kappa*(exp_anP), dx=tStep) n_M = np.trapz(2*kappa*(exp_anM), dx=tStep) n_X = np.trapz(2*kappa*(exp_anX), dx=tStep) n_Y = np.trapz(2*kappa*(exp_anY), dx=tStep) n_H = np.trapz(2*kappa*(exp_anH), dx=tStep) n_V = np.trapz(2*kappa*(exp_anV), dx=tStep) # Total spontaneous emission. n_spont = np.trapz(exp_spontDecay, dx=tStep) # Atomic populations exp_ag1M = np.abs(np.array([(x*ag1M).tr() for x in output_states]) ) exp_ag1P = np.abs( np.array([(x*ag1P).tr() for x in output_states]) ) exp_ax1 = np.abs( np.array([(x*ax1).tr() for x in output_states]) ) [exp_ag1, exp_ad] = [ np.real( np.array([(x*a).tr() for x in output_states]) ) for a in [ag1,ad] ] if 'x0' in xlvls: exp_ax0 = np.real( np.array([(x*ax0).tr() for x in output_states]) ) ''' Summaries ''' plt.rcParams['text.usetex'] = True print('Photon emission from cavity:', np.round(n_ph,3)) print('Total spontaneous emission:', np.round(n_spont,3)) print('Photon + / Photon - / frac_+: {0} / {1} = {2}'.format(*[np.round(n,3) for n in [n_P,n_M,n_P/n_ph]])) print('Photon X / Photon Y / frac_X: {0} / {1} = {2}'.format(*[np.round(n,3) for n in [n_X,n_Y,n_X/n_ph]])) print('Photon H / Photon V / frac_H: {0} / {1} = {2}'.format(*[np.round(n,3) for n in [n_H,n_V,n_H/n_ph]])) print('Final pops: |1,-1>,|1,1>:', np.round(exp_ag1M[-1],3),np.round(exp_ag1P[-1],3)) # Plot the results 
f, (a1, a2, a3, a4) = plt.subplots(4, 1, sharex=True, figsize=(12, 11/.7)) a1.plot(t, 2*kappa * exp_anP, 'b', label='$+ (\sigma^{+})$') a1.plot(t, 2*kappa * exp_anM, 'g', label='$- (\sigma^{-})$') a1.set_ylabel('Cavity emission rate, $1/\mu s$') a1.legend(loc=2) a2.plot(t, 2*kappa * exp_anX, 'b', label='$X$') a2.plot(t, 2*kappa * exp_anY, 'g', label='$Y$') a2.set_ylabel('Cavity emission rate, $1/\mu s$') a2.legend(loc=2) a3.plot(t, 2*kappa * exp_anH, 'b', label='$H$') a3.plot(t, 2*kappa * exp_anV, 'g', label='$V$') a3.set_ylabel('Cavity emission rate, $1/\mu s$') a3.legend(loc=2) for a in [a1,a2,a3]: aB = a.twinx() aB.plot(t, [Omega(x)/(2*np.pi) for x in t], '--k', label='$\Omega(t)$') aB.legend(loc=0) a4.plot(t, exp_ag1M, 'b', label='$g1M:|F,mF>=|1,-1>$') a4.plot(t, exp_ag1P, 'g', label='$g1P: |F,mF>=|1,1>$') a4.plot(t, exp_ax1, '--r', label='$x1: |F\',mF\'>=|1,0>$') a4.plot(t, exp_ad, '--y', label='$d: |F\',mF\'>=|2,...>$') a4.legend(loc=2) # + ''' Jones matrices for waveplates. theta and phi are the angle of the fast axis w.r.t. the horizontal ''' def HWP(theta): return np.matrix([[np.cos(2*theta), np.sin(2*theta)], [np.sin(2*theta), -1*np.cos(2*theta)]]) def QWP(phi): return np.matrix([[np.cos(phi)**2 + i*np.sin(phi)**2, (1-i)*np.sin(phi)*np.cos(phi)], [(1-i)*np.sin(phi)*np.cos(phi), np.sin(phi)**2 + i*np.cos(phi)**2]]) # + ''' Routing into arbitrary basis with HWP and QWP ''' QWPfast = 41. * np.pi/180 # fast axis of QWP #QWPfast = (41. 
- 90) * np.pi/180 # fast axis of QWP thetaHWP = 0 * np.pi/180 # angle of HWP ''' R_CR for: (cavity) --> QWP --> (measure in lab <--> PBS) ''' phiQWP = (86 * np.pi/180) - QWPfast #phiQWP = (108.5 * np.pi/180) - QWPfast #phiQWP = (131 * np.pi/180) - QWPfast R_RC = R_LC * QWP(phiQWP).getH() # R_RC = R_LC * QWP(phiQWP) ''' R_CR for: (cavity) --> QWP --> HWP --> (measure in lab <--> PBS) ''' #R_RC = R_LC * (QWP(phiQWP) * HWP(thetaHWP)).getH() alpha_RC = np.abs(R_RC[0, 0]) phi1_RC, phi2_RC = np.angle(R_RC[0, 0]), np.angle(R_RC[1, 0]) print('For R_CR: alpha_RC={0}, phi1_RC={1}, phi2_RC={2}'.format(*[np.round(x,2) for x in [alpha_RC, phi1_RC * 180/np.pi, phi2_RC * 180/np.pi]])) # Calculate anRotP_t = [anRotP_fast(t=time, alpha=alpha_RC, phi1=phi1_RC, phi2=phi2_RC) for time in t] anRotM_t = [anRotM_fast(t=time, alpha=alpha_RC, phi1=phi1_RC, phi2=phi2_RC) for time in t] exp_anRotP = np.real(np.array([(x[0]*x[1]).tr() for x in zip(output.states, anRotP_t)])) exp_anRotM = np.real(np.array([(x[0]*x[1]).tr() for x in zip(output.states, anRotM_t)])) # Plot the results f, (ax) = plt.subplots(1, 1, sharex=True, figsize=(12, 11/3.)) ax.plot(t, 2*kappa * exp_anRotP, 'b', label='$\mathrm{+\ rot.\ from\ routing:\ }|H>$') ax.plot(t, 2*kappa * exp_anRotM, 'r', label='$\mathrm{-\ rot.\ from\ routing:\ }|V>$') ax.set_ylabel('Cavity emission rate, $1/\mu s$') ax.legend(loc=2) axB = ax.twinx() axB.plot(t, [Omega(x)/(2*np.pi) for x in t], '--k', label='$\Omega(t)$') axB.legend(loc=0) f.patch.set_facecolor('white') n_ph = np.trapz(2*kappa*(exp_anRotP+exp_anRotM), dx=tStep) n_RotP = np.trapz(2*kappa*(exp_anRotP), dx=tStep) n_RotM = np.trapz(2*kappa*(exp_anRotM), dx=tStep) print('Photon:', np.round(n_ph,3)) print('Photon |+> / Photon |->: {0} / {1}'.format(*[np.round(n,3) for n in [n_RotP,n_RotM]])) print('t_peak1 |+> / tpeak |->: {0}, {1}'.format(*[t[np.argmax(x)] for x in [exp_anRotP, exp_anRotM]])) # + ''' Export photon shapes for outside use ''' export_targets = [ 
(list(map(float,exp_anRotP)), "alp{0}_qwp{2}_exp_anRotP".format(*[int(np.round(alpha_CL*1000))] + [int(np.round(x * 180/np.pi)%360) for x in [thetaHWP, phiQWP]])), (list(map(float,exp_anRotM)), "alp{0}_qwp{2}_exp_anRotM".format(*[int(np.round(alpha_CL*1000))] + [int(np.round(x * 180/np.pi)%360) for x in [thetaHWP, phiQWP]])) ] if psi0 == ket(*['g1M',0,0]): driving_dir = 'gM_gP' elif psi0 == ket(*['g1P',0,0]): driving_dir = 'gP_gM' else: driving_dir = 'nonstandard_driving' export_path='/data/18-10-08/Omega{0}gBar{1}deltaZ{2}/{3}/wavepackets/'.format(*[int(np.round(x/(2.*np.pi))) for x in [OmegaStirap,d*coupling_factor,deltaZ]] + [driving_dir]) export_dir_local = '..' + export_path export_dir_aldaq = '/Volumes/KuhnGroup/Tom/Python/STIRAP modelling_Zeeman Scheme with Birefringence' + export_path #export_dir = '/Volumes/KuhnGroup/Tom/Python/STIRAP modelling_Zeeman Scheme with Birefringence\ # /data/18-06-13/Omega{0}gBar{1}deltaZ{2}deltaP{3}/gM_gP'.format(*[int(np.round(x/(2.*np.pi))) for x in # [OmegaStirap,d*coupling_factor,deltaZ,deltaP] ]) for export_dir in [export_dir_local,export_dir_aldaq]: if not os.path.exists(os.path.dirname(export_dir)): try: os.makedirs(os.path.dirname(export_dir)) print('created dir: ', export_dir) except OSError as exc: # Guard against race condition if exc.errno != exc.errno.EEXIST: raise except Exception: pass for data, fname in export_targets: with open(os.path.join(export_dir, fname + '.csv'), 'w+') as file: wr = csv.writer(file) wr.writerow(data) print (os.path.join(export_dir, fname + '.csv')) # + phis = [x*np.pi/180 for x in np.linspace(0, 360, 180)] args = [] for phi in phis: R_RC = R_LC * QWP(phi).getH() #R_RC = R_LC * QWP(phi) alpha_RC = np.abs(R_RC[0, 0]) phi1_RC, phi2_RC = np.angle(R_RC[0, 0]), np.angle(R_RC[1, 0]) args.append((alpha_RC, phi1_RC, phi2_RC)) anRotP_t_list, anRotM_t_list = [], [] for alpha, phi1, phi2 in args: anRotP_t_list.append([anRotP_fast(t=time, alpha=alpha, phi1=phi1, phi2=phi2) for time in t]) 
anRotM_t_list.append([anRotM_fast(t=time, alpha=alpha, phi1=phi1, phi2=phi2) for time in t]) # + '''Plot expected splitting of polarisation peaks for different routing angles''' # phis = [(x-67.5)*np.pi/180 for x in np.linspace(0, 225, 11)] detH_n, detV_n = [],[] for anRotP_t, anRotM_t in zip(anRotP_t_list, anRotM_t_list): exp_anRotP = np.real(np.array([(x[0]*x[1]).tr() for x in zip(output.states, anRotP_t)])) exp_anRotM = np.real(np.array([(x[0]*x[1]).tr() for x in zip(output.states, anRotM_t)])) detH_n.append(np.trapz(2*kappa*(exp_anRotP), dx=tStep)) detV_n.append(np.trapz(2*kappa*(exp_anRotM), dx=tStep)) # + f, (a1) = plt.subplots(1, 1, sharex=True, figsize=(10, 2)) wpAngles = phis pltAngles = [(180*(x))/np.pi for x in wpAngles] a1.plot(pltAngles, detH_n, 'b', label='det1 nPh: $|H>$') a1.plot(pltAngles, detV_n, 'r', label='det2 nPh: $|V>$') a1.set_ylabel('nPh vs QWP') a1.legend(loc=5) # + export_targets = [(list(map(float,detH_n)), "alp{0}_detHn_vs_qwp".format(int(np.round(alpha_CL*1000)))), (list(map(float,detV_n)), "alp{0}_detVn_vs_qwp".format(int(np.round(alpha_CL*1000)))) ] if psi0 == ket(*['g1M',0,0]): driving_dir = 'gM_gP' elif psi0 == ket(*['g1P',0,0]): driving_dir = 'gP_gM' else: driving_dir = 'nonstandard_driving' export_path='/data/18-10-08/Omega{0}gBar{1}deltaZ{2}/{3}/routingVsQWP/'.format(*[int(np.round(x/(2.*np.pi))) for x in [OmegaStirap,d*coupling_factor,deltaZ]] + [driving_dir]) export_dir_local = '..' 
+ export_path
export_dir_aldaq = '/Volumes/KuhnGroup/Tom/Python/STIRAP modelling_Zeeman Scheme with Birefringence' + export_path

for export_dir in [export_dir_local,export_dir_aldaq]:
    if not os.path.exists(os.path.dirname(export_dir)):
        # BUGFIX: the original guard read `if exc.errno != errno.EEXIST`,
        # but no `import errno` is visible in this notebook, so any OSError
        # would have escalated to a NameError inside the handler -- TODO
        # confirm against the (unseen) import cell.
        # os.makedirs(..., exist_ok=True) (Python 3.2+) absorbs the
        # already-exists / concurrent-creation race without needing errno.
        os.makedirs(os.path.dirname(export_dir), exist_ok=True)
        print('created dir: ', export_dir)

    # Write each angle-sweep trace as a single CSV row.
    for data, fname in export_targets:
        with open(os.path.join(export_dir, fname + '.csv'), 'w+') as file:
            wr = csv.writer(file)
            wr.writerow(data)
            print (os.path.join(export_dir, fname + '.csv'))
# -

# %load_ext cython

import numpy as np

# Cython benchmark cell: '_np' variants call numpy, '_raw' variants use
# plain C control flow.
# BUGFIX: pulse_c_raw previously returned 1 for t >= t_len, which is
# inconsistent with pulse_c_np / pulse_py (both return 0 after the pulse).
# + language="cython"
# import numpy as np
# cimport numpy as np
# cimport cython
# from libc.math cimport sin
#
# def sign_c_np(float x):
#     return np.piecewise(x, [x<0], [-1,1])
#
# def sign_c_raw(float x):
#     if x<0: return -1
#     else: return 1
#
# def pulse_c_np(float t, float t_len):
#     return np.piecewise(t, [t<t_len], [sin(t),0])
#
# def pulse_c_raw(float t, float t_len):
#     if t<t_len: return sin(t)
#     else: return 0

# +
def sign_py(x):
    """Sign of x with the convention sign(0) == 1 (matches sign_c_*)."""
    return np.piecewise(x, [x<0], [-1,1])

def pulse_py(t,t_len):
    """sin(t) for t < t_len, 0 afterwards (pure-Python benchmark reference)."""
    return np.piecewise(t, [t<t_len], [np.sin(t),0])

# +
# %timeit -n 10 sign_c_raw(5)
# %timeit -n 10 sign_c_np(5)
# %timeit -n 10 sign_py(5)

# %timeit -n 10 pulse_c_raw(0.5,1)
# %timeit -n 10 pulse_c_np(0.5,1)
# %timeit -n 10 pulse_py(0.5,1)
# -
jupyter_notebooks/notebooks-single/py_pulses/vStirap_zeeman_scheme.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Principal Component Analysis walkthrough:
#   1. power iteration on a 2x2 covariance matrix,
#   2. manual PCA on a tiny gene-expression table,
#   3. PCA + logistic regression on the Breast-Cancer data set,
#   4. a greedy slope search for the first principal direction.

import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import random
#import seaborn as sns
#sns.set(style = 'ticks' , palette = 'Set2')

# Load the data; '?' marks missing values in this CSV.
df = pd.read_csv('Breast-Cancer.csv', na_values=['?'])
means = df.mean().to_dict()
# Fixed: positional axis argument (df.drop(['id'], 1, ...)) was deprecated and
# is removed in pandas >= 2.0; use the keyword, consistent with the later call.
df.drop(['id'], axis=1, inplace=True)
header = list(df)
df.fillna(df.mean(), inplace=True)
full_data = df.astype(float).values.tolist()

X = np.array(full_data)[:, :-1]
mat_cov = np.cov(X, rowvar=False)
np.linalg.eig(mat_cov)

# Power-iteration demo: repeatedly applying the covariance matrix rotates an
# arbitrary vector toward the dominant eigenvector.
x = np.array([[1, 1], [2, 2], [3, 3], [4, 5], [5, 4],
              [-1, -1], [-2, -2], [-3, -3], [-4, -5], [-5, -4]])
cov_mat = np.cov(x, rowvar=False)
cov_mat = np.array([[2, 0.8], [0.8, 0.6]])
cov_mat

v = [-1, 1]
m = []
for i in range(15):
    m.append(np.arctan(v[1] / v[0]))  # angle of the current direction
    v = np.dot(cov_mat, v)
    print(v, v[1] / v[0])
    plt.plot([0, v[0]], [0, v[1]])
plt.show()

import numpy as np
import pandas as pd

# Tiny gene-expression example: standardize, then project onto eigenvectors.
gene_data = np.array([[10, 6], [11, 4], [8, 5], [3, 5], [2, 2.8], [1, 1]])
gene_labels = np.array([[1], [4], [3], [2], [7], [9]])
mean = np.mean(gene_data, axis=0)  # per-feature mean
std = np.std(gene_data, axis=0)    # per-feature standard deviation
std
mean
X = (gene_data - mean) / std  # NORMALIZATION
X.shape
X.T.shape
cov_X = np.dot(X.T, X)
cov_X
eig_val, eig_vec = np.linalg.eig(cov_X)  # returns eig_val and eig_vec
np.linalg.eig(cov_X)
eig_val
eig_vec
eig_vec.shape
X.shape
X
PC = np.dot(X, eig_vec)  # (6,2) x (2,2) --> (6,2)
PC1 = PC[:, 0]
gene_labels
PC1

# PCA + logistic regression on the Breast-Cancer data.
df = pd.read_csv('Breast-Cancer.csv', na_values=['?'])
df.dropna(inplace=True)
df.drop(['id'], axis=1, inplace=True)
full_data = np.array(df.astype(float).values.tolist())
df.head()
features = full_data[:, :-1]
labels = full_data[:, -1].reshape(-1, 1)
mean = np.mean(features, 0)
std = np.std(features, 0)
mean
std
X = (features - mean) / std  # normalization
X.shape
cov_X = np.dot(X.T, X)
cov_X.shape
eig_val, eig_vec = np.linalg.eig(cov_X)
eig_val
PC = np.dot(X, eig_vec)
PC16 = PC[:, 0]  # PC16 means PC1 to PC6
PC16.shape
PC16_train = PC[:583]
label_train = labels[:583]
PC16_test = PC[583:]
label_test = labels[583:]

from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
model.fit(PC16_train, label_train)
y_hat_test = model.predict(PC16_test)
y_test = label_test.reshape(1, -1)
# Counts misclassifications: labels appear to be 2/4, so each error
# contributes |2-4| = 2 and dividing by 2 gives the error count
# (assumes the 2/4 label coding -- TODO confirm against the CSV).
(np.sum(np.abs(y_test[0] - y_hat_test))) / 2

# +
m = 0
alpha = 0.1
prev_SSE = 0
curr_SSE = 999999


def reg_line(m, X, prev_SSE, curr_SSE, alpha):
    """Greedy search for the slope m of the direction maximizing ||X w||,
    w = (cos(arctan m), sin(arctan m)), i.e. the first principal direction.

    Increases m in steps of `alpha` while the projected norm keeps growing;
    on the first decrease it steps back, shrinks alpha by 10x, and retries.
    Terminates (returning m) once alpha < 1e-5.
    """
    if alpha < 0.00001:
        return m
    theta = np.arctan(m)
    w = np.array([math.cos(theta), math.sin(theta)])
    curr_SSE = np.linalg.norm(np.dot(X, w))
    print(curr_SSE)
    # curr_SSE is a numpy scalar; .all() collapses the scalar comparison.
    if (curr_SSE >= prev_SSE).all():
        prev_SSE = curr_SSE
        m += alpha
        return reg_line(m, X, prev_SSE, curr_SSE, alpha)
    else:
        # Overshot: step back, reset the baseline, refine with a smaller step.
        m -= alpha
        prev_SSE = 999999
        alpha *= 0.1
        m += alpha
        return reg_line(m, X, prev_SSE, curr_SSE, alpha)


m_PC1 = reg_line(m, X, prev_SSE, curr_SSE, alpha)
# -

m_PC1
m_PC2 = -1 / m_PC1  # PC2 is orthogonal to PC1
plt.plot(X[:, 0], m_PC2 * X[:, 0], 'y')
plt.plot(X[:, 0], m_PC1 * X[:, 0], 'g')
plt.scatter(X[:, 0], X[:, 1], c='r')
plt.axis([-15, 15, -15, 15])


def get_vector(m_PC1):
    """Return the unit vector (cos t, sin t) for the line with slope m_PC1."""
    theta_PC1 = np.arctan(m_PC1)
    v_PC1 = (math.cos(theta_PC1), math.sin(theta_PC1))
    return v_PC1


v_PC1 = get_vector(m_PC1)
v_PC2 = get_vector(m_PC2)
var_PC1 = np.linalg.norm(np.dot(X, v_PC1)) ** 2 / 5
var_PC2 = np.linalg.norm(np.dot(X, v_PC2)) ** 2 / 5
var_PC2 / (var_PC1 + var_PC2)
PC1_X = np.dot(X, v_PC1)
PC2_X = np.dot(X, v_PC2)
plt.scatter(PC1_X, PC2_X, c='g')
plt.xlabel('PC1')
plt.ylabel('PC2')
# Fixed: sns.despine() raised NameError because the seaborn import at the top
# is commented out; make the cosmetic call best-effort instead.
try:
    import seaborn as sns
    sns.despine()
except ImportError:
    pass
2 Unsupervised Learning/Dimensionality Reduction/Principle Component Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Word splitting
#
# We split words in parts that also occur separately in the corpus.
#
# The result is in a new feature, `letterx`, where the wordsplit positions are marked with `┼`.

from itertools import chain
import collections

from tf.app import use
from splitting import splitupx

# Load the text-fabric corpus; `use(...)` with hoist=globals() injects the
# corpus API objects (notably F) into the module namespace.
A = use("CLARIAH/wp6-daghregisters:clone", checkout="clone", hoist=globals())

# All word forms in slot order (F comes from the hoist above).
words = [F.letters.v(w) for w in range(1, F.otype.maxSlot + 1)]


def splitup(r, words):
    """One splitting round: find frequent chunk pairs and apply the splits.

    r      : round number (used only for progress messages)
    words  : list of words; earlier split points are marked with "┼"
    Returns (number of splits found, new word list with splits applied).
    """
    # Flatten all words into their current chunks.
    chunks = list(chain.from_iterable(word.split("┼") for word in words))
    # Candidate chunks: skip digits and chunks already marked with "⁼".
    allChunks = sorted(
        {chunk for chunk in chunks if not (chunk.startswith("⁼") or chunk.isdigit())}
    )
    freqs = collections.Counter()
    for chunk in chunks:
        freqs[chunk] += 1
    chunkSplits = collections.defaultdict(dict)
    nHapaxes = sum(1 for (c, f) in freqs.items() if f == 1)
    print(f"Round {r} start: {len(allChunks)} distinct word-like chunks")
    print(f"{nHapaxes} hapaxes")
    for chunk in allChunks:
        cn = len(chunk)
        # Only chunks of length >= 6 are considered for splitting.
        if cn < 6:
            continue
        maxQ = None
        (chunk1Best, chunk2Best) = (None, None)
        # Try every split position leaving at least 2 chars on each side.
        for s in range(2, cn - 1):
            chunk1 = chunk[0:s]
            chunk2 = chunk[s:]
            chunk1L = len(chunk1)
            chunk2L = len(chunk2)
            chunk1C = freqs[chunk1]
            chunk2C = freqs[chunk2]
            # Reject rare halves, unless one half is long (>= 15 chars).
            if (chunk1C < 3 or chunk2C < 3) and (chunk1L < 15 and chunk2L < 15):
                continue
            # Score: product of (frequency - 1) of both halves.
            q = (chunk1C - 1) * (chunk2C - 1)
            if q:
                if maxQ is None or maxQ < q:
                    maxQ = q
                    (chunk1Best, chunk2Best) = (chunk1, chunk2)
        if maxQ:
            chunkSplits[chunk] = (chunk1Best, chunk2Best)
    print(f"Round {r}: {len(chunkSplits)} splits")
    # Apply the splits found this round to every word.
    newWords = []
    allChunks = set()
    for word in words:
        chunks = []
        for chunk in word.split("┼"):
            if chunk.startswith("⁼"):
                chunks.append(chunk)  # already frozen in an earlier round
            elif chunk.isdigit():
                chunks.append(f"⁼{chunk}")  # freeze digit chunks
            elif chunk in chunkSplits:
                chunks.extend(chunkSplits[chunk])
            else:
                chunks.append(chunk)
        newWords.append("┼".join(chunks))
        allChunks |= set(chunks)
    print(f"Round {r} end: {len(allChunks)} distinct word-like chunks")
    return (len(chunkSplits), newWords)


# NOTE(review): this local definition shadows the `splitupx` imported from
# `splitting` above — presumably intentional (notebook supersedes the module),
# but worth confirming.
def splitupx(words):
    """Repeat `splitup` rounds until a round produces no new splits."""
    newWords = words
    r = 0
    while True:
        r += 1
        (nSplits, newWords) = splitup(r, newWords)
        if nSplits < 1:
            break
    return newWords


newWords = splitupx(words)

# Persist the result as a new node feature `letterx`.
A.TF.save(
    nodeFeatures=dict(letterx={i + 1: newWords[i] for i in range(F.otype.maxSlot)}),
    metaData=dict(letterx=dict(valueType="str", description="word with split points marked by ┼")),
)
programs/wordsplit.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.3.1 # language: julia # name: julia-1.3 # --- # # MA3J8 Approximation Theory and Applications # # ## 05 - Least Squares Methods # # In this notebooks we will explore in a little more detail how to fit functions using least squares fitting instead of interpolation. This is closer to the Hilbert-space setting, i.e., best approximation by projection. # # First we implement some auxiliary functions: evaluating the chebyshev basis functions as well as a polynomial represented in terms of those basis functions. using Plots, Polynomials, QuadGK, SoftGlobalScope, LinearAlgebra, LaTeXStrings, Plots, Printf gr() # + """ A convenience wrapper to store a polynomial in terms of its coefficients and its basis """ struct Poly c::Vector evalb end (p::Poly)(x) = dot(p.c, p.evalb(x, length(p.c)-1)) """ linear lsq fit using the QR factorisation f : function to be fitted X : sample points N : polynomial degree evalbasis : a function to evaluate the basis at a point x """ function pfit(f, X, N, evalbasis) Y = f.(X) A = lsqsys(X, N, evalbasis) c = qr(A) \ Y return Poly(c, evalbasis) end function lsqsys(X, N, evalbasis) M = length(X) A = zeros(M, N+1) for m = 1:M A[m, :] = evalbasis(X[m], N) end return A end """ evaluate the monomial basis at a point """ monobasis(x, N) = [x^n for n = 0:N] # - # ### 05 - 1 Motivation / Fit via QR Factorisation # # We begin by recalling a naive result from our polynomial approximation tests, where we experimented using least squares in place of (equi-spaced) polynomial interpolation. But this time, let's use our own code and also push the polynomial degree a bit higher! 
# + f(x) = 1 / (1 + 25 * x^2) NN = 10:10:100 X = range(-1, 1, length=1000) P = plot(X, f.(X), lw=2, label = "f") ylims!(P, (-0.05, 0.2)) err = [] for N in NN p = pfit(f, X, N, monobasis) push!(err, norm(f.(X) - p.(X), Inf)) # TODO: add N = 100 to the plot! if N in [10, 20, 40, 100]; plot!(P, X, p.(X), lw=2, label = "p$N"); end end plot(P, plot(NN, err, lw=2, m=:o, yaxis = (:log,), label = ""), layout = (1,2)) # - # We observe that around $N = 40$ even $M = 400$ datapoints are not enough to produce a stable / accurate fit? Or does this have to do with the conditioning of the basis? println(" degree | cond(A) | norm(c)") println("--------|------------|----------") for N in NN[1:end-1] p = pfit(f, X, N, monobasis) cnd = cond(lsqsys(X, N, monobasis)) @printf(" %3d | %.2e | %.2e \n", N, cnd, norm(p.c)) end # Let us attempt the same using the Chebyshev basis. We know this is a much better behaved basis. The results now look much more promising! """ evaluate the chebyshev basis at a point x """ function chebbasis(x, N) @assert N >= 2 T = zeros(typeof(x), N+1) T[1] = 1 T[2] = x for n = 2:N T[n+1] = 2*x*T[n] - T[n-1] end return T end f(x) = 1 / (1 + 100 * x^2) NN = 10:30:300 xp = range(-1, 1, length=1000) errmono = [] errcheb = [] for N in NN pm = pfit(f, xp, N, monobasis) push!(errmono, norm(f.(xp) - pm.(xp), Inf)) pc = pfit(f, xp, N, chebbasis) push!(errcheb, norm(f.(xp) - pc.(xp), Inf)) end plot(NN, errmono, lw=2, m=:o, yaxis = (:log,), label = "monofit") plot!(NN, errcheb, lw=2, m=:o, label = "chebfit") println(" degree | cond(A) | norm(c)") println("--------|------------|----------") for N in NN[1:end-1] pc = pfit(f, xp, N, chebbasis) cnd = cond(lsqsys(xp, N, chebbasis)) @printf(" %3d | %.2e | %.2e \n", N, cnd, norm(pc.c)) end # But we have made a very naive mistake ... we are estimating the error on the same points on which we are training! 
Studying this issue in detail goes far beyond this module, but we can at least test it numerically by estimating the error on a much finer grid. f(x) = 1 / (1 + 100 * x^2) NN = 10:30:300 x_train = range(-1, 1, length=1000) x_test = range(-1, 1, length=4*1000) errmono1 = [] errcheb1 = [] for N in NN pm = pfit(f, x_train, N, monobasis) push!(errmono1, norm(f.(x_test) - pm.(x_test), Inf)) pc = pfit(f, x_train, N, chebbasis) push!(errcheb1, norm(f.(x_test) - pc.(x_test), Inf)) end plot(NN, errmono, lw=2, m=:o, yaxis = (:log,), label = "polyfit-train") plot!(NN, errcheb, lw=2, m=:o, label = "chebfit-train") plot!(NN, errmono1, lw=2, m=:o, yaxis = (:log,), label = "polyfit-test") plot!(NN, errcheb1, lw=2, m=:o, label = "chebfit-test") ylims!((1e-7, 1.0)) # This can be related to conditioning of the normal equations! See the table above! We can see that this is a form of overfitting related to the Runge phenomenon # + P1 = plot(x_train, f.(x_train), lw=2, label = "f") for N in [100, 200, 400] pc = pfit(f, x_train, N, chebbasis) plot!(x_test, pc.(x_test), lw=1, label = "cheb($N)") end ylims!(-0.1, 0.3) x_plot = range(0.99, 0.999999, length=1_000) P2 = plot(x_plot, f.(x_plot), lw=2, label = "") pc = nothing for N in [100, 200, 400] pc = pfit(f, x_train, N, chebbasis) plot!(x_plot, pc.(x_plot), lw=2, label = "") end plot!(x_train, pc.(x_train), lw=0, c=4, m=:o, label ="") xlims!(P2, (0.99, 1.005)) ylims!(P2, (-0.12, 0.12)) plot(P1, P2) # - # ### 05-2 Fitting with the right distribution # # In the previous section we did something that is actually quite odd. We used a Chebyshev basis to fit polynomials with datapoints $x_m$ uniformaly distributed in $[-1,1]$ whereas Chebyshev polynomials are orthogonal w.r.t the measure $(1-x^2)^{-1/2} dx$. It therefore seems natural to (1) either incorporate the Chebyshev weights # $$ # w_m = (1 - x_m^2)^{-1/2} # $$ # into the fitting process; or (2) to fit on Chebyshev-distributed data points. 
# $$ # x_m = \cos\big(\pi m/M\big) # $$ # We will next explore the consequences of this modification. # # NOTE: This can be done much more effectively using Gauss type quadrature rules, but this is not the point of these experiments which are gearing up towards the next section below! # + function wchebfit(f, N, M, data=:unif, weights=:unif) if data == :unif X = range(-1, 1, length=M) elseif data == :cheb X = cos.(range(0, pi, length=M)) else error("Unknown `data`") end if weights == :unif W = ones(length(X)) elseif weights == :cheb # W<-√W W = (1+1e-10 .- X.^2).^(-0.25) else error("Unknown `weights`") end # weighted lsq system Y = W .* f.(X) A = Diagonal(W) * lsqsys(X, N, chebbasis) return Poly(qr(A) \ Y, chebbasis) end # + # easy parameters # C, Mfit, NN = 25, 400, 19:20:399 # medium parameters C, Mfit, NN = 100, 400, 20:20:380 # harder parameters # C, Mfit, NN = 400, 1_000, 20:40:400 f(x) = 1 / (1 + C * x^2) x_test = cos.(range(0, pi, length=4000)) err_uu, err_uc, err_cu = [], [], [] get_err = (_N, _x, _w) -> norm(f.(x_train) - wchebfit(f, _N, Mfit, _x, _w).(x_train), Inf) for N in NN push!(err_uu, get_err(N, :unif, :unif)) push!(err_uc, get_err(N, :unif, :cheb)) push!(err_cu, get_err(N, :cheb, :unif)) end plot(; yaxis = (:log, ), xaxis = ("polynomial degree",), title = "Max-Error") plot!(NN, err_uu, lw=2, m=:o, label = "unif, unif") plot!(NN, err_uc, lw=2, m=:o, label = "unif, cheb") plot!(NN, err_cu, lw=2, m=:o, label = "cheb, unif") plot!(NN, 0.1*(1+1/sqrt(C)).^(- NN), lw=2, c=:black, ls=:dash, label="predicted") P_maxe = hline!([1e-16], label = "eps") plot(P_maxe, legend=:bottomleft) # - println("Conditioning of Chebyshev LSQ System") println(" degree | cond(A) | norm(c)") println("--------|------------|----------") for N in 40:40:400 X = cos.(range(0, pi, length=1_000)) pc = pfit(f, X, N, chebbasis) cnd = cond(lsqsys(X, N, chebbasis)) @printf(" %3d | %.2e | %.2e \n", N, cnd, norm(pc.c)) end # ### 05-3 Fit at Random Points # # The most realistic real-world 
context is that we cannot choose the data-points but are given - likely random - datapoints. The first step should then be to construct an orthogonal basis adapted to the distribution of the points. Since we work mostly with Chebyshev here we assume that the distribution is the Chebyshev distribution. function rndchebfit(f, N, M, data) if data == :unif X = range(-1, 1, length=M) elseif data == :cheb X = cos.(range(0, pi, length=M)) elseif data == :rand X = 2*(rand(M) .- 0.5) elseif data == :randcheb X = cos.(pi * rand(M)) else error("Unknown `data`") end # weighted lsq system p = pfit(f, X, N, chebbasis) # errors X_test = cos.(range(0, pi, length=5*M)) return p, norm(f.(X_test) - p.(X_test), Inf) end # + # easy parameters C, Mfit, NN = 25, 400, 19:20:399 # medium parameters # C, Mfit, NN = 100, 400, 20:20:380 # harder parameters # C, Mfit, NN = 400, 981, 20:60:980 f(x) = 1 / (1 + C * x^2) err_u, err_ru, err_c, err_rc = [], [], [], [] for N in NN push!(err_u, rndchebfit(f, N, Mfit, :unif)[2]) push!(err_ru, rndchebfit(f, N, Mfit, :rand)[2]) push!(err_c, rndchebfit(f, N, Mfit, :cheb)[2]) push!(err_rc, rndchebfit(f, N, Mfit, :randcheb)[2]) end plot(; yaxis = (:log,), xaxis = ("polynomial degree",), title="RMSE-TEST") plot!(NN, err_u, lw=2, m=:o, label = "unif") plot!(NN, err_ru, lw=2, ls=:dash, m=:o, label = "unif, rnd") plot!(NN, err_c, lw=2, m=:o, label = "cheb") plot!(NN, err_rc, lw=2, ls=:dash, m=:o, label = "cheb, rnd") P_maxe = hline!([1e-16], label = "") plot(P_maxe) # - # The analysis of [Cohen, Davenport, Leviatan, 2012] suggests that we should turn the problem around: given a number of data-points $M$ we should then choose an appropriate degree $N$. In practise it seems best to combine theory with experimentation. # # Here, we learn that we need $N$ slightly smaller than $M$ (a log-factor is suggested in the paper) to ensure that the LSQ system is stable (moderate condition number) with high probability. 
The additional error that arises is # $$ # M^{-r} # $$ # where $M$ is the number of data points and $r$ is given by a complicated relation, but for the Chebyshev basis with random points $x_m$ distributed according to the Chebyshev distribtion we have $r \approx C M / N$. To balance the errors we want $r \log M \approx \alpha N$, i.e., $C M/N \approx C \log M$, or, $N \approx C M/\log M$. The $C$ is a bit difficult to determine analytically, so we do it experimentally. # + C, MM = 25, 50:50:1000 f(x) = 1 / (1 + C * x^2) Nfun(M, Nsugg) = floor(Int, min(M-1, Nsugg)) N1(M) = Nfun(M, M / log(M)) N2(M) = Nfun(M, 3*M/log(M)) N3(M) = Nfun(M, 5*M/log(M)) N4(M) = Nfun(M, 7*M/log(M)) err, err1, err2, err3, err4 = [], [], [], [], [] for M in MM push!(err, rndchebfit(f, M-1, M, :cheb)[2]) push!(err1, rndchebfit(f, N1(M), M, :randcheb)[2]) push!(err2, rndchebfit(f, N2(M), M, :randcheb)[2]) push!(err3, rndchebfit(f, N3(M), M, :randcheb)[2]) push!(err4, rndchebfit(f, N4(M), M, :randcheb)[2]) end plot(; yaxis = (:log,), xaxis = ("polynomial degree",), title="max-error") plot!(MM.-1, err, lw=2, m=:o, label = "chebyshev grid") plot!(N1.(MM), err1, lw=2, m=:o, label = "N= M/log(M)") plot!(N2.(MM), err2, lw=2, m=:o, label = "N=3M/log(M)") plot!(N3.(MM), err3, lw=2, m=:o, label = "N=5M/log(M)") plot!(N4.(MM), err4, lw=2, m=:o, label = "N=7M/log(M)") PN = hline!([1e-16], label = "eps") plot(; yaxis = (:log,), xaxis = ("# sample points",), title="max-error") plot!(MM, err, lw=2, m=:o, label = "") plot!(MM, err1, lw=2, m=:o, label = "") plot!(MM, err2, lw=2, m=:o, label = "") plot!(MM, err3, lw=2, m=:o, label = "") plot!(MM, err4, lw=2, m=:o, label = "") PM = hline!([1e-16], label = "") plot(PN, PM) # -
jl/05-Least-Squares.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Mission-to-Mars scraper: collects the latest NASA Mars news headline, the
# JPL featured image, the space-facts.com fact table, and the four USGS
# hemisphere images.  All results depend on the live page markup (CSS class
# names), so selector changes on those sites will break the scrape.

# +
# Import of libraries
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
from webdriver_manager.chrome import ChromeDriverManager
import pymongo
# -

# Launch a visible (non-headless) Chrome instance driven by splinter.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)

# # NASA Mars News : Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text

# +
# Visit the NASA Mars News Site
news_url = "https://mars.nasa.gov/news/"
browser.visit(news_url)

# +
# Results HTML with BeautifulSoup
html = browser.html
soup = BeautifulSoup(html, "html.parser")

# +
# Scrape the latest News Title and latest Paragraph Text
# (first "list_text" div is assumed to be the most recent article)
article = soup.find("div", class_='list_text')
news_title = article.find("div", class_="content_title").text
news_p = article.find("div", class_ ="article_teaser_body").text

# Printing the news and paragraph
print(news_title)
print(news_p)
# -

# # JPL Mars Space Images - Featured Image

# +
# Visit the NASA JPL (Jet Propulsion Laboratory) Site
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)

# Parse Results HTML with BeautifulSoup
html = browser.html
soup = BeautifulSoup(html, "html.parser")
# -

# The thumbnail src is site-relative, so prefix the JPL host.
image = soup.find("img", class_="thumb")["src"]
featured_image_url = "https://www.jpl.nasa.gov" + image
print(featured_image_url)

# # Mars Facts

# +
# Visit the Space Facts website
facts_url = "https://space-facts.com/mars/"
browser.visit(facts_url)

# Using Pandas to read the URL (first table on the page is the fact table)
mars_data = pd.read_html(facts_url)
mars_data = pd.DataFrame(mars_data[0])
mars_facts = mars_data.to_html(header = False, index = False)
print(mars_facts)
# -

# # Mars Hemispheres

# +
# Visit the USGS Astrogeology Science Center Site
hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemispheres_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
mars_hemisphere = []
products = soup.find("div", class_ = "result-list" )
hemispheres = products.find_all("div", class_="item")

# +
# Iterate through the List of All Hemispheres: follow each item's link and
# grab the full-resolution download URL from the "downloads" panel.
for hemisphere in hemispheres:
    title = hemisphere.find("h3").text
    title = title.replace("Enhanced", "")
    end_link = hemisphere.find("a")["href"]
    image_link = "https://astrogeology.usgs.gov/" + end_link
    browser.visit(image_link)
    html = browser.html
    soup=BeautifulSoup(html, "html.parser")
    downloads = soup.find("div", class_="downloads")
    image_url = downloads.find("a")["href"]
    mars_hemisphere.append({"title": title, "img_url": image_url})

mars_hemisphere
# -
Missions_to_Mars/.ipynb_checkpoints/Mission to Mars-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from keras.models import Sequential from keras.layers import Conv2D from keras.layers import MaxPooling2D from keras.layers import Flatten from keras.layers import Dense # Initialising the CNN classifier = Sequential() # Step 1 - Convolution classifier.add(Conv2D(32, (3, 3), input_shape = (120, 120, 3), activation = 'relu')) classifier.add(Conv2D(64, (3, 3), activation = 'relu')) # Step 2 - Pooling classifier.add(MaxPooling2D(pool_size = (2, 2))) classifier.add(Conv2D(128, (3, 3), activation = 'relu')) classifier.add(Conv2D(128, (3, 3), activation = 'relu')) # Adding a second convolutional layer classifier.add(MaxPooling2D(pool_size = (2, 2))) # Step 3 - Flattening classifier.add(Flatten()) # Step 4 - Full connection classifier.add(Dense(units = 512, activation = 'relu')) classifier.add(Dense(units = 218, activation = 'relu')) classifier.add(Dense(units = 4, activation = 'softmax')) # Compiling the CNN classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy']) # Part 2 - Fitting the CNN to the images from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255) test_datagen = ImageDataGenerator(rescale = 1./255) training_set = train_datagen.flow_from_directory('G:/dl project/fruit', target_size = (120, 120), batch_size = 32, class_mode = 'categorical') test_set = test_datagen.flow_from_directory('G:/dl project/fruit', target_size = (120, 120), batch_size = 32, class_mode = 'categorical') model = classifier.fit_generator(training_set, steps_per_epoch = 5, epochs = 8, validation_data = test_set, validation_steps = 4) classifier.save("modelmulticlassi_fruit.h5") print("Saved model to disk") # Part 3 - Making new predictions # + import matplotlib.pyplot as plt 
acc = model.history['accuracy'] val_acc = model.history['val_accuracy'] loss = model.history['loss'] val_loss = model.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.figure() plt.plot(epochs, loss, 'bo', label='Training Loss') plt.plot(epochs, val_loss, 'b', label='Validation Loss') plt.title('Training and validation loss') plt.legend() plt.show() # + from keras.models import load_model import numpy as np from keras.preprocessing import image import numpy as np from keras.preprocessing import image # load model model = load_model('modelmulticlassi_fruit.h5') test_image = image.load_img('G:/dl project/fruit/mango/mango11.jpg', target_size = (120, 120)) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) result = model.predict(test_image) training_set.class_indices if result[0][0] == 1: prediction = 'apple' print(prediction) elif result[0][1] == 1: prediction = 'carrot' print(prediction) elif result[0][2] == 1: prediction = 'husky' print(prediction) else: result[0][3] == 1 prediction = 'mango' print(prediction) # -
Deployment cloud/multiclassifier_ipynb_ad.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Transformer chatbot on the Cornell movie-dialogs corpus: data loading and
# cleaning, subword tokenization, tf.data pipeline, and the attention /
# encoder building blocks.

# +
# # !pip install tensorflow-datasets --use-feature=2020-resolver
# # !pip install graphviz pydot
# # !pip install pydotplus

# +
import tensorflow as tf
tf.random.set_seed(1234)

import tensorflow_datasets as tfds

import os
import re
import numpy as np
import matplotlib.pyplot as plt
# -

# # loading dataset

# +
# path_to_zip = tf.keras.utils.get_file(
#     'cornel_movie_dialogs.zip',
#     origin='http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip',
#     extract=True
# )

# +
# path_to_dataset = os.path.join(
#     os.path.dirname(path_to_zip), "cornell movie-dialogs corpus")
# -

path_to_dataset = './extra/cornell movie-dialogs corpus'

# +
# path_to_dataset = r'D:\projects\chatbot_vis\extra\cornell movie-dialogs corpus'  # for further running without
# -

path_to_movie_lines = os.path.join(path_to_dataset, "movie_lines.txt")
path_to_movie_conversations = os.path.join(path_to_dataset, "movie_conversations.txt")
# path_to_movie_conversations = './extra/cornell movie-dialogs corpus/movie_conversations.txt'

# # Preprocess dataset

sentence = 'He is a boy. !123'
print(sentence)


def preprocess_sentence(sentence):
    """Lower-case, strip punctuation/digits and collapse whitespace."""
    sentence = sentence.lower().strip()
    sentence = re.sub(r"([.!])", r" ", sentence)  # changed here
    sentence = re.sub(r"[ ]+", r" ", sentence)
    sentence = re.sub(r"[^A-Za-z?.!,]", r" ", sentence)
    sentence = sentence.strip()
    return sentence


sentence = preprocess_sentence(sentence)
sentence

# +
# path_to_movie_conversations = './extra/movie_conversations.txt'
# -

path_to_movie_conversations
path_to_movie_lines

MAX_SAMPLES = 50000


# +
# load conversation
def load_conversations():
    """Return (inputs, outputs): consecutive utterance pairs, preprocessed,
    capped at MAX_SAMPLES pairs."""
    # movie_lines.txt: lineID +++$+++ ... +++$+++ text  -> id2line map
    id2line = {}
    with open(path_to_movie_lines, errors='ignore') as f:
        lines = f.readlines()
    for line in lines:
        line = line.replace("\n", "")
        parts = line.split(" +++$+++ ")
        id2line[parts[0]] = parts[4]

    inputs, outputs = [], []
    with open(path_to_movie_conversations, 'r') as file:
        lines = file.readlines()
    for line in lines:
        line = line.replace("\n", "")
        parts = line.split(" +++$+++ ")
        # parts[3] is a stringified list of line IDs, e.g. "['L1', 'L2']"
        conversation = [conv_id[1:-1] for conv_id in parts[3][1:-1].split(", ")]
        for i in range(len(conversation) - 1):
            inputs.append(preprocess_sentence(id2line[conversation[i]]))
            outputs.append(preprocess_sentence(id2line[conversation[i + 1]]))
            if len(inputs) >= MAX_SAMPLES:
                # remove this constrain for doing training on full dataset
                return inputs, outputs
    return inputs, outputs
# -

questions, answers = load_conversations()
len(questions)
len(answers)

# sample question and answer
print(f"sample question: {questions[20]}")
print(f"sample answer: {answers[20]}")

# building tokenizer for both questions and answers
# NOTE(review): tfds.features.text.SubwordTextEncoder was moved to
# tfds.deprecated.text in newer tensorflow-datasets releases — confirm the
# pinned version before upgrading.
tokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    questions + answers, target_vocab_size=2**13
)
tokenizer

# Reserve two extra ids for the start/end-of-sequence tokens.
START_TOKEN, END_TOKEN = [tokenizer.vocab_size], [tokenizer.vocab_size + 1]
VOCAB_SIZE = tokenizer.vocab_size + 2
VOCAB_SIZE

# tokenizing sample question
print(f"tokenize sample question: {tokenizer.encode(questions[20])}")

MAX_LENGTH = 40


# tokenize, filter and pad sentence
def tokenize_and_filter(inputs, outputs):
    """Encode each pair with START/END tokens, drop pairs longer than
    MAX_LENGTH, and post-pad both sides to MAX_LENGTH."""
    tokenized_inputs, tokenized_outputs = [], []
    for sentence1, sentence2 in zip(inputs, outputs):
        sentence1 = START_TOKEN + tokenizer.encode(sentence1) + END_TOKEN
        sentence2 = START_TOKEN + tokenizer.encode(sentence2) + END_TOKEN
        # check tokenized sentence max_length
        # remove below constraint for improving performance
        if len(sentence1) <= MAX_LENGTH and len(sentence2) <= MAX_LENGTH:
            tokenized_inputs.append(sentence1)
            tokenized_outputs.append(sentence2)
    # pad tokenized sentences
    tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(
        tokenized_inputs, maxlen=MAX_LENGTH, padding='post'
    )
    tokenized_outputs = tf.keras.preprocessing.sequence.pad_sequences(
        tokenized_outputs, maxlen=MAX_LENGTH, padding='post'
    )
    return tokenized_inputs, tokenized_outputs


questions, answers = tokenize_and_filter(questions, answers)
print(f"number of samples is {len(questions)}")
print(f"vocab size: {VOCAB_SIZE}")

# +
# creating DataSet
# -

answers[0]
answers.shape
questions.shape

# Teacher forcing: decoder inputs drop the last token, targets drop the first.
dataset = tf.data.Dataset.from_tensor_slices((
    {
        'inputs': questions,
        'dec_inputs': answers[:, :-1]
    },
    {
        'outputs': answers[:, 1:]
    }
))
dataset

BATCH_SIZE = 64
BUFFER_SIZE = 20000

dataset = dataset.cache()
dataset = dataset.shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
print(dataset)


def scaled_dot_product_attention(query, key, value, mask):
    """softmax(QK^T / sqrt(d_k) + mask) V; mask positions get -1e9 logits."""
    matmul_qk = tf.matmul(query, key, transpose_b=True)
    depth = tf.cast(tf.shape(key)[-1], tf.float32)
    logits = matmul_qk / tf.math.sqrt(depth)
    if mask is not None:
        logits += (mask * -1e9)
    attention_weights = tf.nn.softmax(logits, axis=-1)
    output = tf.matmul(attention_weights, value)
    return output


class MultiHeadAttention(tf.keras.layers.Layer):
    """Standard multi-head attention: project Q/K/V, split into heads,
    attend, merge heads, and apply a final linear projection."""

    def __init__(self, d_model, num_heads, name="multi_head_attention"):
        super(MultiHeadAttention, self).__init__(name=name)
        self.num_heads = num_heads
        self.d_model = d_model
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads
        self.query_dense = tf.keras.layers.Dense(units=d_model)
        self.key_dense = tf.keras.layers.Dense(units=d_model)
        self.value_dense = tf.keras.layers.Dense(units=d_model)
        self.dense = tf.keras.layers.Dense(units=d_model)

    def split_heads(self, inputs, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        inputs = tf.reshape(
            inputs, shape=(batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(inputs, perm=[0, 2, 1, 3])

    def call(self, inputs):
        query, key, value, mask = inputs['query'], inputs['key'], inputs['value'], inputs['mask']
        batch_size = tf.shape(query)[0]
        # linear heads
        query = self.query_dense(query)
        key = self.key_dense(key)
        value = self.value_dense(value)
        # split heads
        # BUG FIX: key and value were both split from `query`, so the
        # projected key/value tensors were silently discarded.
        query = self.split_heads(query, batch_size)
        key = self.split_heads(key, batch_size)
        value = self.split_heads(value, batch_size)
        # scaled dot product attention
        scaled_attention = scaled_dot_product_attention(query, key, value, mask)
        # merge heads back: (batch, seq, d_model)
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
        concate_attention = tf.reshape(scaled_attention,
                                       (batch_size, -1, self.d_model))
        outputs = self.dense(concate_attention)
        return outputs


a = tf.constant([[1, 2, 0, 3, 0], [0, 0, 0, 4, 5]])


def create_padding_mask(x):
    """1.0 where x == 0 (padding), broadcastable to (batch, 1, 1, seq)."""
    mask = tf.cast(tf.math.equal(x, 0), tf.float32)
    return mask[:, tf.newaxis, tf.newaxis, :]


print(create_padding_mask(a))


def create_look_ahead_mask(x):
    """Combine an upper-triangular causal mask with the padding mask."""
    seq_len = tf.shape(x)[1]
    look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
    padding_mask = create_padding_mask(x)
    return tf.maximum(look_ahead_mask, padding_mask)


create_look_ahead_mask(tf.constant([[1, 2, 0, 4, 5]]))
tf.maximum([1, 2, 3], [3, 2, 1])


# (An earlier commented-out draft of PositionalEncoding, which indexed the
# angle table without the even/odd column split and referenced a misspelled
# `self.pos_encodings`, has been removed; the working version is below.)
class PositionalEncoding(tf.keras.layers.Layer):
    """Adds the fixed sin/cos positional encoding of "Attention Is All You
    Need" to its input embeddings."""

    def __init__(self, position, d_model):
        super(PositionalEncoding, self).__init__()
        self.pos_encoding = self.positional_encoding(position, d_model)

    def get_angles(self, position, i, d_model):
        angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
        return position * angles

    def positional_encoding(self, position, d_model):
        angle_rads = self.get_angles(
            position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
            i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
            d_model=d_model)
        # apply sin to even index in the array
        sines = tf.math.sin(angle_rads[:, 0::2])
        # apply cos to odd index in the array
        cosines = tf.math.cos(angle_rads[:, 1::2])
        pos_encoding = tf.concat([sines, cosines], axis=-1)
        pos_encoding = pos_encoding[tf.newaxis, ...]
        return tf.cast(pos_encoding, tf.float32)

    def call(self, inputs):
        return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]


sample_pos_encoding = PositionalEncoding(50, 512)

plt.pcolormesh(sample_pos_encoding.pos_encoding.numpy()[0], cmap="RdBu")
plt.xlabel('Depth')
plt.xlim([0, 512])
plt.ylabel('Position')
plt.colorbar()
plt.show()


# +
# encoder layer
# -

def encoder_layer(units, d_model, num_heads, dropout, name='encoder_layer'):
    """One transformer encoder layer: self-attention + residual/norm,
    then a position-wise feed-forward block + residual/norm."""
    inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
    padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")

    attention = MultiHeadAttention(d_model, num_heads, name="attention")({
        'query': inputs,
        'key': inputs,
        'value': inputs,
        'mask': padding_mask
    })
    attention = tf.keras.layers.Dropout(rate=dropout)(attention)
    attention = tf.keras.layers.LayerNormalization(
        epsilon=1e-6)(inputs + attention)

    outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)
    outputs = tf.keras.layers.Dense(units=d_model)(outputs)
    outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
    # BUG FIX: this result was assigned to `attention` (dead code), so the
    # returned model skipped the second residual connection + layer norm.
    outputs = tf.keras.layers.LayerNormalization(
        epsilon=1e-6)(attention + outputs)

    return tf.keras.Model(
        inputs=[inputs, padding_mask], outputs=outputs, name=name)


# +
sample_encoder_layer = encoder_layer(
    units=512,
    d_model=128,
    num_heads=4,
    dropout=0.3,
    name="sample_encoder_layer"
)

# need to solve below error
tf.keras.utils.plot_model(sample_encoder_layer, to_file='encoder_layer.png', show_shapes=True)

# +
# # !pip install graphvis
# # !pip3 install graphviz
# # !pip3 install pydot
# # !choco install graphviz
# -


def encoder(vocab_size, num_layers, units, d_model, num_heads, dropout, name="encoder"):
    """Full encoder: embedding (scaled by sqrt(d_model)) + positional
    encoding + dropout, followed by `num_layers` encoder layers."""
    inputs = tf.keras.Input(shape=(None,), name="inputs")
    padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")

    embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
    embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
    embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)

    outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
    for i in range(num_layers):
        outputs = encoder_layer(
            units=units,
            d_model=d_model,
            # BUG FIX: was hard-coded to 4, ignoring the num_heads parameter.
            num_heads=num_heads,
            dropout=dropout,
            name='encoder_layer_{}'.format(i),
        )([outputs, padding_mask])

    return tf.keras.Model(inputs=[inputs, padding_mask], outputs=outputs, name=name)


sample_encoder = encoder(
    vocab_size=8192,
    num_layers=2,
    units=512,
    d_model=128,
    num_heads=4,
    dropout=0.3,
    name="sample_encoder"
)

tf.keras.utils.plot_model(
    sample_encoder, to_file="encoder.png", show_shapes=True
)
chatbot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/amitkp57/colab/blob/main/MLiB_L4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="hwtceSK4E0gY" # ## Install Libraries # + id="mjIToFedE8-7" colab={"base_uri": "https://localhost:8080/"} outputId="1a617b12-477c-406f-bae0-4e051667a628" # !pip install tokenizer path tokenizers # + [markdown] id="mh9kQrikCiPA" # ## Data # # # # # + colab={"base_uri": "https://localhost:8080/"} id="Oph4L3CBBqsV" outputId="e14d0689-4324-402b-fd9d-74244da666e9" # import from google drive # run this code piece, enter the autorization code # For mount instructions: https://colab.research.google.com/notebooks/io.ipynb#scrollTo=XDg9OBaYqRMd from google.colab import drive drive.mount("/content/drive") # or import from environment PROJECT_DIR = "/content/drive/My Drive/MLBiology/MLiB-Lab4" # + colab={"base_uri": "https://localhost:8080/"} id="RlcWVsE2fXNM" outputId="e80b0190-c633-45ca-9bdd-84d88a52c1de" # %cd '$PROJECT_DIR' # !unzip '$PROJECT_DIR/MLiB-Lab4.zip' -d /content/GPT-J # + colab={"base_uri": "https://localhost:8080/"} id="Dfy5Khj1gMLW" outputId="d6310934-b339-48a0-e24f-45887656f92b" # %cd /content/GPT-J # !bash install_gpt-j.sh # + [markdown] id="mAQzTbqjV1yF" # Parse the output prompt and response files. Sample 10 data point from each of the files and store them. 
# + id="PLZled-Ld5GI"
def parse_prompt_reponse(f):
    """Parse a GPT-J run log (open file object `f`) into a list of
    (prompt, response) string pairs.

    A record starts with a line 'Enter prompt or quit: Prompt: ...' and is
    followed by 'Response: ...' and plain continuation lines until the next
    'Enter prompt or quit:' sentinel (or end of file).
    """
    prompt_response = []
    while True:
        next_line = f.readline()
        if next_line == '':
            # readline() returns '' only at EOF
            break
        elif next_line.startswith('Enter prompt or quit:'):
            next_line = next_line[len('Enter prompt or quit:'):].strip(' ')
        if next_line.startswith('Prompt:'):
            prompt = next_line[len('Prompt:'):].strip()
            next_line = f.readline()
            response = []
            # BUG FIX: also stop at EOF. readline() yields '' forever at end
            # of file and '' never starts with the sentinel, so a log whose
            # last record had no trailing 'Enter prompt or quit:' line made
            # this loop spin (and grow `response`) indefinitely.
            while next_line and not next_line.startswith('Enter prompt or quit:'):
                if next_line.startswith('Response:'):
                    response.append(next_line[len('Response:'):].strip())
                else:
                    response.append(next_line)
                next_line = f.readline()
            # NOTE(review): the sentinel line that ends the inner loop is
            # discarded; the outer loop reads a fresh line next iteration.
            # Confirm the log format repeats the sentinel, otherwise every
            # other record would be skipped.
            prompt_response.append((prompt, ' '.join(response).strip()))
    return prompt_response


# + colab={"base_uri": "https://localhost:8080/"} id="iou3kr6SaP5k" outputId="bf43db26-43c0-4db7-b64f-324b61905479"
import numpy as np
import os

# sample 10 data point from each of the files
for f_name in os.listdir('/content/GPT-J/GPT-J-Run-Output'):
    file_path = f'/content/GPT-J/GPT-J-Run-Output/{f_name}'
    print(file_path)
    f = open(file_path, 'r')
    prompt_response = parse_prompt_reponse(f)
    f.close()
    prompt_response = np.array(prompt_response)
    samples = prompt_response[np.random.choice(range(len(prompt_response)), 10)]  # sample 10 items
    with open(f'{PROJECT_DIR}/results/{f_name}', 'w') as f:
        for sample in samples:
            prompt, response = sample
            f.write('prompt: {}\n'.format(prompt))
            f.write('response: {}\n'.format(response))

# + [markdown] id="C1Fx3deWWDa7"
# Sample 100 drugs and for each drug combine and save drug inhibition, mechanism and targets.
# + colab={"base_uri": "https://localhost:8080/"} id="50gK2YLw0Rl_" outputId="01c4501a-34f6-4a4c-99d2-f33d7b313dbd"
import numpy as np

# take 100 points each from 'drug_inhibits_output.txt',
# 'drug_mechanism_of_action_output.txt', 'drug_targets_output.txt'
# select 100 drugs
sample_indices = np.random.choice(range(2835), 100)
file_path = f'/content/GPT-J/GPT-J-Run-Output/drug_inhibits_output.txt'
f = open(file_path, 'r')
prompt_response = parse_prompt_reponse(f)
f.close()
prompt_response = {prompt: reponse for (prompt, reponse) in prompt_response}
drugs = prompt_response.keys()
# prompts look like '<drug>is a drug that inhibits'; keep only the drug name
drugs = map(lambda x: x[:-len('is a drug that inhibits')], drugs)
drugs = np.array(list(drugs))[sample_indices]
print(drugs)

# + id="CW9iSezniwrs"
from collections import defaultdict

# per-file extractors that pull the drug identifier out of a prompt string
drug_fn_map = {
    'drug_inhibits_output.txt': lambda x: x.strip().split()[0][:-2],  # S4819is a drug that inhibits
    'drug_mechanism_of_action_output.txt': lambda x: x.strip().split()[-2],  # The mechanism of action of S4819 is
    'drug_targets_output.txt': lambda x: x.strip().split()[0]  # S4637 is a drug that targets
}

drug_data = defaultdict(list)
for f_name in ['drug_inhibits_output.txt', 'drug_mechanism_of_action_output.txt', 'drug_targets_output.txt']:
    file_path = f'/content/GPT-J/GPT-J-Run-Output/{f_name}'
    f = open(file_path, 'r')
    prompt_response = parse_prompt_reponse(f)
    f.close()
    prompt_response = {prompt: reponse for (prompt, reponse) in prompt_response}
    # BUG FIX: drug_data was never populated, so the unpacking below always
    # iterated an empty dict and 'drug_100.txt' came out empty (and
    # drug_fn_map was dead code). Record each file's response under the drug
    # id extracted by that file's drug_fn_map entry.
    for prompt, reponse in prompt_response.items():
        drug_data[drug_fn_map[f_name](prompt)].append(reponse)

with open(f'{PROJECT_DIR}/results/drug_100.txt', 'w') as f:
    for drug in drug_data.keys():
        # NOTE(review): assumes each drug appears exactly once in all three
        # files, in inhibits/mechanism/targets order -- confirm against data.
        inhibit, mechanism, target = drug_data[drug]
        f.write(f'{drug}: \n')
        f.write(f'inhibits: {inhibit}\n')
        f.write(f'mechanism: {mechanism}\n')
        f.write(f'target: {target}\n')

# + [markdown] id="-duUmuJaX-ev"
# Create a bag of words model from the text in the 7 files. We will use a BPE tokenizer with 30,000 tokens as vocabulary.
# + id="OFlERTrLYRb6"
from path import Path
from sklearn import preprocessing
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer


# + id="_doMEgLvYjnF"
def save_tokenizer(corpus_path, tokenizer_path):
    """Train a BPE tokenizer (30,000-token vocab, whitespace pre-tokenized)
    on every *.txt file under `corpus_path` and save it to `tokenizer_path`."""
    tokenizer = Tokenizer(BPE())
    tokenizer.pre_tokenizer = Whitespace()
    trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"], vocab_size=30000)
    tokenizer.train(files=list(Path(corpus_path).walkfiles('*.txt')), trainer=trainer)
    tokenizer.save(tokenizer_path)
    return


def load_tokenizer(tokenizer_path):
    """Load a tokenizer previously written by save_tokenizer()."""
    return Tokenizer.from_file(tokenizer_path)


save_tokenizer('/content/GPT-J/GPT-J-Run-Output', '{}/tokenizer.txt'.format(PROJECT_DIR))
tokenizer = load_tokenizer('{}/tokenizer.txt'.format(PROJECT_DIR))


# + id="7syQE4OuboXy"
def get_bow_vector(tokenizer, input):
    """Return a (1, 30000) binary bag-of-words vector for `input`: 1 at each
    token id emitted by `tokenizer`, 0 elsewhere. 30000 matches the
    vocab_size used in save_tokenizer above."""
    ids = tokenizer.encode(input).ids
    output = np.zeros((1, 30000))
    for id in ids:
        output[0][id] = 1
    return output


# + id="Pdgb2Q48aS9m"
tokenizer = load_tokenizer('{}/tokenizer.txt'.format(PROJECT_DIR))

# gene products: keep (gene name, response); the gene is the 4th word of the prompt
file_path = f'/content/GPT-J/GPT-J-Run-Output/gene_product_output.txt'
f = open(file_path, 'r')
gene_product = parse_prompt_reponse(f)
f.close()
gene_product = list(map(lambda x: (x[0].split()[3], x[1]), gene_product))
gene_product_tokens = list(map(lambda x: (x[0], get_bow_vector(tokenizer, x[1])), gene_product))

# drug targets: keep (drug id, response); the drug id is the 1st word of the prompt
file_path = f'/content/GPT-J/GPT-J-Run-Output/drug_targets_output.txt'
f = open(file_path, 'r')
drug_target = parse_prompt_reponse(f)
f.close()
drug_target = list(map(lambda x: (x[0].split()[0], x[1]), drug_target))
drug_target_tokens = list(map(lambda x: (x[0], get_bow_vector(tokenizer, x[1])), drug_target))

# + id="_gmexb1Zvx5X"
# patents: keep (drug id, response) and the BoW vector of each response
file_path = f'/content/GPT-J/GPT-J-Run-Output/names_patents_output.txt'
f = open(file_path, 'r')
drug_patents = parse_prompt_reponse(f)
f.close()
drug_patents = list(map(lambda x: (x[0].split()[0], x[1]), drug_patents))
drug_patents_tokens = list(map(lambda x: (x[0], get_bow_vector(tokenizer, x[1])), drug_patents))

# + id="_Y_gZjYPggeC"
from scipy import spatial

# For each of the first 100 gene products, find the index of the most similar
# drug-target response by cosine similarity of the bag-of-words vectors.
# NOTE(review): get_bow_vector returns shape (1, 30000) while scipy's cosine
# distance expects 1-D input -- confirm this runs on the scipy version used.
gene_drug = []
for i in range(min(100, len(gene_product))):
    max_score, max_idx = 0, 0
    for j in range(len(drug_target)):
        similarity = 1 - spatial.distance.cosine(gene_product_tokens[i][1], drug_target_tokens[j][1])
        if similarity > max_score:
            max_score = similarity
            max_idx = j
    gene_drug.append((i,max_idx))

# + id="vdADJcXUwSy_"
from scipy import spatial

# Same nearest-neighbour pairing as above, but drug targets against patents.
drug_patent = []
for i in range(min(100, len(drug_target))):
    max_score, max_idx = 0, 0
    for j in range(len(drug_patents)):
        similarity = 1 - spatial.distance.cosine(drug_target_tokens[i][1], drug_patents_tokens[j][1])
        if similarity > max_score:
            max_score = similarity
            max_idx = j
    drug_patent.append((i,max_idx))

# + id="3YN5JCCtlHnP"
import re

def get_gene_product(response):
    """Return the product phrase from the first 'The gene product of ...'
    sentence in `response` (drops the 7-word prefix and trailing ' .')."""
    pat = re.compile(r'(The gene product of[^\.!?]*[\.!?])', re.M)
    return pat.findall(response)[0].split(maxsplit=7)[-1].strip(' .')

# def get_drug_product(response):
#     print(response)
#     idx = response.find('is a drug that targets')
#     return pat.findall(response)[:idx]

# Build (gene, product, drug) rows from the gene->drug pairing above;
# records whose response does not parse are skipped.
gene_drug_prod = []
for i in range(len(gene_drug)):
    try:
        drug = drug_target[gene_drug[i][1]][0]
        gene = gene_product[gene_drug[i][0]][0]
        prod = get_gene_product(gene_product[gene_drug[i][0]][1])
    except Exception as e:
        print(e)
        continue
    gene_drug_prod.append((gene, prod, drug))

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="Yf9_0P8-p0vy" outputId="3afdec39-769a-4f51-cfa6-ff101fcc7089"
import pandas as pd
pd.DataFrame(gene_drug_prod, columns=['gene', 'product', 'drug'])

# + id="FSNebmOduk7T"
import re

def get_gene_product(response):
    # duplicate of the cell above, re-run here to add the patent column
    pat = re.compile(r'(The gene product of[^\.!?]*[\.!?])', re.M)
    return pat.findall(response)[0].split(maxsplit=7)[-1].strip(' .')

# def get_drug_product(response):
#     print(response)
#     idx = response.find('is a drug that targets')
#     return pat.findall(response)[:idx]

gene_drug_prod = []
for i in range(len(gene_drug)):
    try:
        drug = drug_target[gene_drug[i][1]][0]
        gene = gene_product[gene_drug[i][0]][0]
        prod = get_gene_product(gene_product[gene_drug[i][0]][1])
        # NOTE(review): indexes the raw patents list with a drug_target
        # index -- should this use the drug_patent pairing computed above?
        patent = drug_patents[gene_drug[i][1]]
    except Exception as e:
        print(e)
        continue
    gene_drug_prod.append((gene, prod, drug, patent))

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="gPgT5H6mfgrA" outputId="d8ad79f2-a687-4834-afc8-0672860d3332"
import pandas as pd
pd.DataFrame(gene_drug_prod, columns=['gene', 'product', 'drug', 'patent'])

# + [markdown] id="QFO3SyiZS7ur"
# # Writeup
#
# I sampled 10 prompts and responses from each of the 7 files. I tried to verify the truth of the responses. I could not verify most of the responses. It looked like each response was completely made up using similar-meaning words from different references.
#
#
MLiB_L4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## CSC8631 : Data Management and Explore Data Analysis---------Online Learning # + [markdown] slideshow={"slide_type": "slide"} # - Before using, please read the README.md to success easily # - This analysis project based on the CRISP-DM model method, the first three stages of this analysis will be: # <center><img src="https://github.com/haoranD/CRISP-DM-PYTHON/blob/master/notebooks/myloop.png?raw=true" width="350" height="300" align="center"/></center> # + [markdown] slideshow={"slide_type": "skip"} # ## <center><font size=5>Start at Business understanding </font></center> # + [markdown] slideshow={"slide_type": "slide"} # ###### With the development of society and science,data analysing becoming more and more sophisticated and widely use. This project focus on using EDA(Exploratory Data Analysis) to find something from Cyber Security Online Course in Newcastle University by CRISP-DM model. So that the improvement will make the course, this type of education better in the future. And for the Online Course in Newcastle University : Cyber Security, it will be well-known and everyone can get good experiences, also obtain what they want after this course. 
# # ###### All the information comes from the Newcastle University Online Course, so first of all, some background on Online Learning at Newcastle:
# #
# # <center><img src="https://github.com/haoranD/CRISP-DM-PYTHON/blob/master/notebooks/bgs.png?raw=true" width="1000" height="100" align="center"/></center>
# -

# #### Also, online course learning is an opportunity to increase the income of the university. In line with recent developments in education, online learning is becoming more and more popular; many people choose online courses because of their flexible scheduling and the high-quality education they offer from top universities.
# + [markdown] slideshow={"slide_type": "subslide"} # ##### Tools : # * Ubuntu 16.04 system # * Git and Github for the Version Control # * Cookiecutter for the reproducable ProjectTemplate # * Python for EDA : Numpy, Pandas, Matplotlib, Plotly, Google API # * Jupyter notebook -- 'Literate Programming' # * Pytest framework for Sorftware Testing # # #### Resource : # * Newcastle Cyber Security Online learning course Data # - # #### Note : Data comes from last 7 terms of this course and some specific information of students will not be included, and the deadline is 23th Nov 2018, also during and after this analysising, information will be protected. # #### For this period, this project just focus on the first three parts of analysis, and for the later stage, it demands some good PC to analysis or will cost plenty of time. And the results can be implement timely in recently next term. # + [markdown] slideshow={"slide_type": "slide"} # <center><img src="https://github.com/haoranD/CRISP-DM-PYTHON/blob/master/notebooks/plan.png?raw=true" width="1000" height="100" align="center"/></center> # + slideshow={"slide_type": "subslide"} #Use for hide all the code from IPython.display import display_html display_html(""" <div style="text-align:center; margin: 20px 0;"> <p style="color:#FA5882;text-align:center; margin: 10px 0 20px 0;"> If you don't want to see all code click here<br/> Just focus on analysizing </p> <button onclick="$('.input, .prompt, .output_stderr, .output_error').toggle();">hide code</button> <hr/> </div> """, raw=True) # + [markdown] slideshow={"slide_type": "slide"} # # <center><font size=5>Data Understanding</font></center> # + [markdown] slideshow={"slide_type": "subslide"} # ### Our data was collected from a online course in Newcastle University named :Cyber Security. And the data has the both type of ‘static data’ and ‘fluid data’. 
The ‘static data’ is the data collected traditionally by some institution, they can be the all kinds of records in university. The ‘fluid data’ is collected from daily activity like swiping the card, login to the virtual online learning classroom. There are seven Terms of this course data we have and was ordered as cyber-security-1 to cyber-security-7, each of term means a new loop of this course. And many necessary meaningful data were recorded(We will check them later). # - # #### Negative Data Quality : # - The learning ID is too mess and we won't use learner ID in this project, they will be processed later. # - There too many Unknown, NaN or the null values. # - Some variables should be labeled for modelling later # + [markdown] slideshow={"slide_type": "subslide"} # #### Randomly choose some data file, and check them in excel,there are many 'Unknown' and 'NaN', they will influence our data analysis, so the unusual value will be processed as : # # | Type | Solution | # |-|-| # |NaN for not used data|Ignore| # |Unknown in Enrolment | Ignore and Delete| # |Unknown in question-response |Ignore| # |Unknown in leaving-response |Delete after merged| # # * Note: The categorical data from the table can be processed, the Unknown continuouse values will be replaced by median values of this column because of the robust poperty.And why some unusual value were just ignore because we won't use them. 
# + [markdown] slideshow={"slide_type": "skip"} # #### It is necessary to import some analysis package # + slideshow={"slide_type": "skip"} #This is for importing necessary analysis dependence import numpy as np import squarify from geopy.geocoders import Nominatim import matplotlib.pyplot as plt import plotly plotly.tools.set_credentials_file(username='haoran88', api_key='o8i3aa8qgpoIpCOBQgt8') import pandas as pd import sklearn from os import listdir import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import csv from IPython.display import display import plotly.plotly as py import plotly.graph_objs as go import gmaps import gmaps.datasets from time import time # %matplotlib inline # + slideshow={"slide_type": "skip"} #Using the Relative Path is easy to reproduce the project Main_Data_Path = '../Data/raw/Engagement of Cyber Serurity/dataset201819/' data_file_name = sorted(listdir(Main_Data_Path)) # + [markdown] slideshow={"slide_type": "skip"} # Find the location of our codes # + slideshow={"slide_type": "skip"} # cd ../src # + slideshow={"slide_type": "fragment"} #Import the data understanding code import data.Data_Check as read print('Data Type Table:') #Load all data file and check what they are read.All_Kinds_Data(data_file_name) # - # #### In my opinion, the first and second terms course can't be used for analyse, because there are many uncertainty and additional matters in the period of start of the course. Also, as the table shows the first and second terms lack some informations. So, in this project, we focus on using the data from terms 3-7. # #### All the data were recorded as a '.csv' which is a comma separated values file which allows data to be saved in a table structured format. It is convient for us to use tools to load and use directly without any changes of type of file. 
# + [markdown] slideshow={"slide_type": "slide"} # # ----->OB 1 # - # #### For the first Objective, it can be infer that we need to use the detail of students, so as we can konw in nomal life, students will give thier personal information when they first enrolled into this course.So start from the data named 'Cyber-Security-(3-7)_enrolment.csv' . # ### Check the last table above, the first thing is to check the basic details and backgrounds of students, so we check the one enrolments randomly choose term 7. # + slideshow={"slide_type": "subslide"} #set the parameters about the data we want tNum = 7 type_file = 'enrolments' #Load the data we want, only single one single_enrlm = read.Load_single_file(tNum, type_file) print(single_enrlm.shape) single_enrlm.head() # + [markdown] slideshow={"slide_type": "subslide"} # ##### In this table,some column can't show us the detail of students, so we just ignore it and choose the 'age_range' ,'detected_country' for OB 1. # + slideshow={"slide_type": "skip"} import visualization.visualize as vis # + slideshow={"slide_type": "fragment"} #Draw the pie plot labels,values = vis.gender_pie(single_enrlm) trace = go.Pie(labels=labels, values=values) py.iplot([trace], filename='basic_pie_chart') # + [markdown] slideshow={"slide_type": "subslide"} # ### If the number of international students enrolled in this course more than one third in all the students, it is necessary to make some additional design for them, such as the Multi-language in recap # + slideshow={"slide_type": "subslide"} #Draw the line plot re_name,re_vale = vis.country_bar(single_enrlm) print('Chosing tweenty countries(Number of students in other countries are too small to represent) to check the Population ') trace0 = go.Bar( x=re_name, y=re_vale, marker=dict( color='rgb(158,202,225)', line=dict( color='rgb(8,48,107)', width=1.5, ) ), opacity=0.6 ) data = [trace0] layout = go.Layout( title='Different Countries vs Number of students in Term 7', ) fig = 
go.Figure(data=data, layout=layout) py.iplot(fig, filename='text-hover-bar') # + [markdown] slideshow={"slide_type": "subslide"} # #### It shows that most of students come from The United Kingdom of Great Britain and Northern Ireland. Let's now move on to check the percentage of them. # + slideshow={"slide_type": "subslide"} #Set the parameters about the data we want tNum = 6 type_file = 'enrolments' #Load the data we want, single data single_enrlm2 = read.Load_single_file(tNum, type_file) re_name6,re_value6,re_name7,re_value7 = vis.country_pie(single_enrlm2, single_enrlm) #Draw the pie plot fig = { "data": [ { "values": re_value6, "labels": re_name6, "domain": {"x": [0, .48]}, "name": "Term 6", "hoverinfo":"label+percent+name", "hole": .4, "type": "pie" }, { "values": re_value7, "labels": re_name7, "textposition":"inside", "domain": {"x": [.52, 1]}, "name": "Term 7", "hoverinfo":"label+percent+name", "hole": .4, "type": "pie" }], "layout": { "title":"Percentage of students come from", "annotations": [ { "font": { "size": 20 }, "showarrow": False, "text": "Term 6", "x": 0.20, "y": 0.5 }, { "font": { "size": 20 }, "showarrow": False, "text": "Term 7", "x": 0.8, "y": 0.5 } ] } } py.iplot(fig, filename='donut') # + [markdown] slideshow={"slide_type": "subslide"} # #### As the pie plots show that, international students is more than 50%, so it is necessary to keep the design of teaching balance, to make all students to adapt it. # + [markdown] slideshow={"slide_type": "subslide"} # #### It is worth to use the location data to check where students come from,and design different type of teaching type. What kinds of the recap should be designed. So for the objective 1, we can use the enrollments data to analysis with later work in next Stage(Data preparations): # - Merge enrollment data in term3 to term7 # - Clean and process the unusual data # - Generate the certain location(lattitude,longitude)for each unduplicated location in our data. 
# # #### Move to the Data preparetions and re-loop # + [markdown] slideshow={"slide_type": "slide"} # # ----->OB 2 # - # ###### For the objective of the level of difficulty, accessing coursework is the one of the most important method, check what the proportion of students who can correctly answer the questions in or out of the online lecture. If more than half of students can't make it, the level of the course or some parts of the course are inappropriate for many of students. # + [markdown] slideshow={"slide_type": "subslide"} # #### Chek the Data Type Table, there are some files named 'question response',randomly chooose the term 6: # + slideshow={"slide_type": "subslide"} tNum = 6 type_file = 'question-response' single_question = read.Load_single_file(tNum, type_file) print(single_question.shape) single_question.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### There is a column named 'correct ',it can show if students can answer the questions correctly, so that if the performance is good, it can be assumed that the difficulty of the course is appropriate,but if the number of worng answer is obviouse big in any question, for example almost more than half students failed to answer, the director should consider the level of difficulty or if teachers presented an appropriate lectures. 
# + slideshow={"slide_type": "subslide"} question, WrongAnswer = vis.answer_line(single_question) # Create traces trace0 = go.Scatter( x = question, y = WrongAnswer, mode = 'lines+markers', name = 'lines+markers' ) data = [trace0] layout = dict(title = 'Each part of Question vs Percentage Wrong answers', xaxis = dict(title = 'Question'), yaxis = dict(title = 'Percentage Wrong answers'), ) fig = dict(data=data, layout=layout) py.iplot(fig, filename='line-mode') # + [markdown] slideshow={"slide_type": "subslide"} # #### As the line chart shows that, at the last stage(chapter 3), the performance of students are very good, very small number of people make wrong answers, but at the '1.8.6', there are large number of people make wrong answers, with checking the official websites of the course, we can infer that because the issue of home is common, so everyone has the experince. so they can answer correctly. But different countries have different cultures, so it is a bit abstract concept., teacher may need to give more examples here. # + [markdown] slideshow={"slide_type": "subslide"} # #### For the later work, data can be processed in next Stage(Data preparations) as: # - Extract the column we need as a new '.csv' file # - Transform necessary fields to numeric values # # #### Move to the Data preparetions and re-loop # + [markdown] slideshow={"slide_type": "slide"} # # ----->OB 3 # - # #### The 'csv' data file named leaving-survey-responses recorded the time and reason that students left.It is very useful if more than one third students left because of the same reason,or if a reason that has a big proportion,so the director of this course should focus on improving it in this kind of aspect. 
# + slideshow={"slide_type": "subslide"} tNum = 6 type_file = 'leaving-survey-responses' single_leaving = read.Load_single_file(tNum, type_file) single_leaving.head() # - # #### I think the director will be interested in why the students want to leave the course, it is the very important feedback from student to improve the course in the future.Then we check the column named 'leaving-reason' : reasons, re_va = vis.Treemap(single_leaving) # + slideshow={"slide_type": "subslide"} x = 0. y = 0. width = 100. height = 100. values = re_va normed = squarify.normalize_sizes(values, width, height) rects = squarify.squarify(normed, x, y, width, height) # Choose colors from http://colorbrewer2.org/ under "Export" color_brewer = ['rgb(166,206,227)','rgb(31,120,180)','rgb(178,223,138)', 'rgb(51,160,44)','rgb(251,154,153)','rgb(227,26,28)'] shapes = [] annotations = [] counter = 0 for r in rects: shapes.append( dict( type = 'rect', x0 = r['x'], y0 = r['y'], x1 = r['x']+r['dx'], y1 = r['y']+r['dy'], line = dict( width = 2 ), fillcolor = color_brewer[counter] ) ) annotations.append( dict( x = r['x']+(r['dx']/2), y = r['y']+(r['dy']/2), text = reasons[counter], showarrow = False ) ) counter = counter + 1 if counter >= len(color_brewer): counter = 0 # For hover text trace0 = go.Scatter( x = [ r['x']+(r['dx']/2) for r in rects ], y = [ r['y']+(r['dy']/2) for r in rects ], text = [ str(v) for v in reasons ], mode = 'text', ) layout = dict( height=500, width=950, xaxis=dict(showgrid=False,zeroline=False), yaxis=dict(showgrid=False,zeroline=False), shapes=shapes, annotations=annotations, hovermode='closest' ) figure = dict(data=[trace0], layout=layout) py.iplot(figure, filename='squarify-treemap') # - # #### For the 'Other' reason it can be just ignored,because of lots of uncertainty. And check the big percentage reasons, we can know the main reson is that students has different identities. Most of them maybe the part time,so they don't have enough time to watch it. 
# So I think teachers can not only make some recap video, but also make some brief notes and highlight the key contents. For the pink area, we can make some questionnaire or the video meeting to understand students.

# #### After this, it is necessary to check that if there any step of this course is obviously inappropriate which made students leave. For the later work, data can be processed in next Stage (Data preparations) as:
# - For the single file, the 'last_completed_step' column has many NaN missing values, so combine term 3 to term 7 'leaving-response' data files to get enough data to check later
#
# #### Move to the Data preparations and re-loop

# + [markdown] slideshow={"slide_type": "slide"}
# # <center><font size=5>Data Preparations</font></center>

# + [markdown] slideshow={"slide_type": "skip"}
# # ----->OB 1

# + [markdown] slideshow={"slide_type": "skip"}
# - Merge all the 'enrolment' data (TODO: rename 'merge' to 'combine' or 'concat')

# + slideshow={"slide_type": "skip"}
import data.Data_Preparation as pp

# + slideshow={"slide_type": "subslide"}
startNum = 3
endNum = 7
type_file = 'enrolments'
# concatenate the term-3..7 enrolment files into one frame (also written to disk)
mergedData = pp.Merge_Data(startNum, endNum, type_file)
mergedData.info()
# -

# - Clean the unknown data

merged_enrolment = pd.read_csv('../Data/processed/Merged3-7_enrolments.csv' )
cty = merged_enrolment['detected_country']
# unique country codes, one row each; '--' marks an undetected country, drop it
cty = cty.unique()
cty = pd.DataFrame(cty)
cty = cty[cty[0] != '--']
cty.columns=['country']
outputPath = '../Data/processed/' +'Detected_Country_' + str(len(cty)) + '.csv'
cty.to_csv(outputPath)

# - Generate new country file for finding location and latitude

# + [markdown] slideshow={"slide_type": "skip"}
# ### *****Be careful with this step: it will cost lots of time to get the real latitude and longitude from Google. I already saved them in the external folder.
# - # Be careful, this will cost 5 hours pp.Get_Loc() # + [markdown] slideshow={"slide_type": "skip"} # #### Then we can get the certain lattitude and longitude for each students # + slideshow={"slide_type": "skip"} locations = pp.all_loc() # + [markdown] slideshow={"slide_type": "skip"} # #### Then we can use the heatmap to check the new data # + [markdown] slideshow={"slide_type": "skip"} # - Check the map in jupyter book and can play it # + slideshow={"slide_type": "subslide"} #jupyter note book only #Generates the heatmap in google map gmaps.configure(api_key="<KEY>") # Your Google API key print('Alomost half of students come from European,and many of others are from Asia and South America') print('Also, the proportions of the not english speaking country is big enough, and for the students better experience, it is necessary to give some subtitles for these major area of students ') fig = gmaps.figure() fig.add_layer(gmaps.heatmap_layer(locations,max_intensity=20, point_radius=9.0)) fig # + [markdown] slideshow={"slide_type": "skip"} # # ----->OB 2 # + [markdown] slideshow={"slide_type": "skip"} # - Extract the data we use and generate new data file # + slideshow={"slide_type": "subslide"} data_type = 'question-response' extract_name = ['quiz_question', 'correct'] term = 7 ifMerge = False extracted_new = pp.extract_data(data_type, extract_name, term, ifMerge) extracted_new.head() # + [markdown] slideshow={"slide_type": "skip"} # - Labelling # + slideshow={"slide_type": "subslide"} #Change the text categorical to numerical categorical, for later modelling label_line = pd.Categorical(extracted_new['correct']).codes extracted_new['correct_label'] = label_line.tolist() extracted_new.to_csv('../Data/processed/extracted-' + str(term) + '-' + data_type + '.csv') extracted_new.head() # + [markdown] slideshow={"slide_type": "skip"} # # ----->OB 3 # + [markdown] slideshow={"slide_type": "skip"} # - Combine all the data # + slideshow={"slide_type": "skip"} startNum = 3 
endNum = 7 type_file = 'leaving-survey-responses' mergedData = pp.Merge_Data(startNum, endNum, type_file) print(mergedData.shape) mergedData.head() # + [markdown] slideshow={"slide_type": "skip"} # - Clean the NaN # + slideshow={"slide_type": "subslide"} mergedData = mergedData[mergedData['last_completed_step'].notnull()] mergedData.head() # - # ## Future Work # # - Do more understanding cross different single data file and extract them, for example different type of students(enrolment detail) will make wrong answer. # + [markdown] slideshow={"slide_type": "slide"} # #### This is a checkpoint of the first three stages of this project. Use some unittest and give some report. # # Next begaining can start from here # + [markdown] slideshow={"slide_type": "skip"} # ## Modelling................................................ # + [markdown] slideshow={"slide_type": "skip"} # ## Evaluation................................................ # + [markdown] slideshow={"slide_type": "skip"} # ## Deploy................................................
notebooks/CRISP-DM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Aggregate transcripts
#
# Depending on how you want to analyse them, it can be useful to group the transcripts by prime minister.
#
# This notebook aggregates the transcripts in two ways: by extracting the text content of each XML file and combining them into one big text file, and by zipping up the original XML files.

# +
from operator import itemgetter, attrgetter
from bs4 import BeautifulSoup
import os
import re
import pandas as pd
import zipfile

INDEX = "index.csv"
# -

# ## Combine into one big text file for each PM

# +
def combine_pm(pm, release_type=None):
    '''
    Extract text from the XML files for the specified PM and combine into one big text file.
    Can be filtered by 'release_type'.

    Writes pms/<pm-slug>[-<release_type>].txt with one blank-line-separated
    transcript per entry, in date order.
    '''
    os.makedirs('pms', exist_ok=True)
    # Fix: removed the unused `transcripts = []` local — nothing was ever
    # appended to it.
    df = pd.read_csv(INDEX, keep_default_na=False)
    if release_type:
        rows = df.loc[(df['pm'] == pm) & (df['release_type'] == release_type)]
    else:
        rows = df.loc[(df['pm'] == pm)]
    transcript_ids = rows.sort_values(by='date')['id'].to_list()
    filename = pm.lower().replace(', ', '-')
    if release_type:
        filename = '{}-{}'.format(filename, release_type.lower())
    # NOTE(review): text file is opened with the platform-default encoding;
    # consider encoding='utf-8' if transcripts contain non-ASCII characters.
    with open(os.path.join('pms', filename + '.txt'), 'w') as pm_file:
        for t_id in transcript_ids:
            with open(os.path.join('transcripts', 'transcript-{}.xml'.format(t_id)), 'rb') as xml_file:
                soup = BeautifulSoup(xml_file, 'xml')
                # Unwrap the CDATA payload, then strip any residual markup.
                content = soup.find('content').string.replace('<![CDATA[', '').replace(']]>', '')
                clean_content = re.sub('<[^<]+?>', '', content)
                pm_file.write(clean_content + '\n\n')

def combine_all_pms(type=None):
    '''Combine transcripts for every PM in the index, optionally filtered
    by release type (e.g. 'Speech').

    NOTE(review): the parameter name `type` shadows the builtin; kept for
    backward compatibility with existing callers.
    '''
    df = pd.read_csv(INDEX, keep_default_na=False)
    pms = [pm for pm in pd.unique(df['pm']) if pm != '']
    for pm in pms:
        combine_pm(pm, type)
# -

combine_all_pms()

# Just get the speeches
combine_all_pms('Speech')

# ## Zip up the transcripts for each PM
# +
def zip_pm(pm):
    '''Zip up the original transcript XML files for the specified PM.

    Creates pms/<pm-slug>.zip containing transcripts/transcript-<id>.xml
    for every transcript attributed to that PM in the index.
    '''
    os.makedirs('pms', exist_ok=True)
    filename = os.path.join('pms', '{}.zip'.format(pm.lower().replace(', ', '-')))
    # Bug fix: `df` was referenced here but was only defined locally inside
    # zip_all_pms, so calling zip_pm raised NameError. Load the index here,
    # matching the pattern combine_pm uses.
    df = pd.read_csv(INDEX, keep_default_na=False)
    transcript_ids = df.loc[(df['pm'] == pm)]['id'].to_list()
    # Context manager guarantees the archive is finalised and closed even if
    # a write fails part-way through.
    with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
        for t_id in transcript_ids:
            t_file = 'transcript-{}.xml'.format(t_id)
            t_path = os.path.join('transcripts', t_file)
            zf.write(t_path, t_file)

def zip_all_pms():
    '''Create one zip archive per PM listed in the index.'''
    df = pd.read_csv(INDEX, keep_default_na=False)
    pms = [pm for pm in pd.unique(df['pm']) if pm != '']
    for pm in pms:
        zip_pm(pm)
# -

zip_all_pms()
aggregate_transcripts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- p=int(input("Enter Principle Amount")) n=int(input("Enter the time period")) if n <= 6: s=p+(p*0.08*n) print(s) else: s=p+(p*0.1*n) print(s) if p > 50000: s=p+(p*0.10*n) print(s)
SI condition.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fit a plain linear regression to the QSAR fish-toxicity dataset and
# report R^2 on a held-out validation split.

# + colab={} colab_type="code" id="tH3lvFAXfjGz"
# import libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# + colab={} colab_type="code" id="O0ob0P0RftvC"
# column headers
_headers = ['CIC0', 'SM1', 'GATS1i', 'NdsCH', 'Ndssc', 'MLOGP', 'response']
# read in data
# The raw CSV has no header row and is semicolon-separated, hence names=/sep=.
df = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter06/Dataset/qsar_fish_toxicity.csv', names=_headers, sep=';')

# + colab={"base_uri": "https://localhost:8080/", "height": 203} colab_type="code" id="RbFPJeOKfvFx" outputId="6aca4240-e9a8-4fb0-880e-c6de22b27c45"
df.head()

# + colab={} colab_type="code" id="wS62XTnDf1VC"
# Let's split our data
features = df.drop('response', axis=1).values
labels = df[['response']].values

# 80% train; the remaining 20% is split again (default 75/25) into
# validation and test sets.
X_train, X_eval, y_train, y_eval = train_test_split(features, labels, test_size=0.2, random_state=0)
X_val, X_test, y_val, y_test = train_test_split(X_eval, y_eval, random_state=0)

# + colab={} colab_type="code" id="h2BBicmsf5Gi"
model = LinearRegression()

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="Sbmn6Wfif6sa" outputId="a9333572-30c9-4742-f719-fd363cc95957"
model.fit(X_train, y_train)

# + colab={} colab_type="code" id="j52nxGrLf_Y6"
y_pred = model.predict(X_val)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="dboYPiDhgAzS" outputId="b026bce3-c27d-4fde-d701-8bded8c5219a"
# score() on a regressor returns the coefficient of determination (R^2).
r2 = model.score(X_val, y_val)
print('R^2 score: {}'.format(r2))

# + colab={"base_uri": "https://localhost:8080/", "height": 203} colab_type="code" id="KWhxdfXmgZPk" outputId="17f6c7dd-a6fb-44c6-94ac-8ca0b6d0011e"
# Side-by-side comparison of actual vs predicted responses.
_ys = pd.DataFrame(dict(actuals=y_val.reshape(-1), predicted=y_pred.reshape(-1)))
_ys.head()
Chapter06/Exercise6.02/Exercise6_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # # <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a> # ___ # <center><em>Copyright by Pierian Data Inc.</em></center> # <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center> # # Dealing with Outliers # # In statistics, an outlier is a data point that differs significantly from other observations. An outlier may be due to variability in the measurement or it may indicate experimental error; the latter are sometimes excluded from the data set. An outlier can cause serious problems in statistical analyses. # # Remember that even if a data point is an outlier, it's still a data point! Carefully consider your data, its sources, and your goals whenever deciding to remove an outlier. Each case is different!
# # ## Lecture Goals # * Understand different mathmatical definitions of outliers # * Use Python tools to recognize outliers and remove them # # ### Useful Links # # * [Wikipedia Article](https://en.wikipedia.org/wiki/Outlier) # * [NIST Outlier Links](https://www.itl.nist.gov/div898/handbook/prc/section1/prc16.htm) # # ------------- # # Imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # ## Generating Data # + # Choose a mean,standard deviation, and number of samples def create_ages(mu=50,sigma=13,num_samples=100,seed=42): # Set a random seed in the same cell as the random call to get the same values as us # We set seed to 42 (42 is an arbitrary choice from Hitchhiker's Guide to the Galaxy) np.random.seed(seed) sample_ages = np.random.normal(loc=mu,scale=sigma,size=num_samples) sample_ages = np.round(sample_ages,decimals=0) return sample_ages # - sample = create_ages() sample # ## Visualize and Describe the Data sns.distplot(sample,bins=10,kde=False) sns.boxplot(sample) ser = pd.Series(sample) ser.describe() # ## Trimming or Fixing Based Off Domain Knowledge # # If we know we're dealing with a dataset pertaining to voting age (18 years old in the USA), then it makes sense to either drop anything less than that OR fix values lower than 18 and push them up to 18. 
ser[ser > 18] # It dropped one person len(ser[ser > 18]) def fix_values(age): if age < 18: return 18 else: return age # "Fixes" one person's age ser.apply(fix_values) len(ser.apply(fix_values)) # -------- # There are many ways to identify and remove outliers: # * Trimming based off a provided value # * Capping based off IQR or STD # * https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba # * https://towardsdatascience.com/5-ways-to-detect-outliers-that-every-data-scientist-should-know-python-code-70a54335a623 # ## Ames Data Set # # Let's explore any extreme outliers in our Ames Housing Data Set df = pd.read_csv("../DATA/Ames_Housing_Data.csv") df.head() sns.heatmap(df.corr()) df.corr()['SalePrice'].sort_values() sns.distplot(df["SalePrice"]) sns.scatterplot(x='Overall Qual',y='SalePrice',data=df) df[(df['Overall Qual']>8) & (df['SalePrice']<200000)] sns.scatterplot(x='Gr Liv Area',y='SalePrice',data=df) df[(df['Gr Liv Area']>4000) & (df['SalePrice']<400000)] df[(df['Gr Liv Area']>4000) & (df['SalePrice']<400000)].index ind_drop = df[(df['Gr Liv Area']>4000) & (df['SalePrice']<400000)].index df = df.drop(ind_drop,axis=0) sns.scatterplot(x='Gr Liv Area',y='SalePrice',data=df) sns.scatterplot(x='Overall Qual',y='SalePrice',data=df) df.to_csv("../DATA/Ames_outliers_removed.csv",index=False) # ----
Data Science Resources/Jose portila - ML/09-Feature-Engineering/00-Dealing-with-Outliers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# PetFinder adoption-speed pipeline: LightGBM regression over pre-built
# feature tables, cross-validated with folds grouped by rescuer, then the
# continuous predictions are thresholded into the 5 classes by maximising
# quadratic weighted kappa.

# +
import feather
import scipy as sp
import numpy as np
import pandas as pd
import lightgbm as lgb
from collections import Counter
from functools import partial
from math import sqrt
from scipy.stats import rankdata
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.metrics import confusion_matrix as sk_cmatrix
from sklearn.model_selection import StratifiedKFold, GroupKFold
import matplotlib.pyplot as plt
import seaborn as sns


def get_score(y_true, y_pred):
    # Competition metric: quadratic weighted kappa.
    return cohen_kappa_score(y_true, y_pred, weights='quadratic')


def get_y():
    # Target column straight from the raw competition CSV (uses global `target`).
    return pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv', usecols=[target]).values.flatten()


def run_model(X_train, y_train, X_valid, y_valid, categorical_features,
              numerical_features, predictors, maxvalue_dict, fold_id):
    # Train one LightGBM fold and return (validation predictions, importances).
    # Relies on module-level MODEL_PARAMS / FIT_PARAMS.
    train = lgb.Dataset(X_train, y_train, categorical_feature=categorical_features, feature_name=predictors)
    valid = lgb.Dataset(X_valid, y_valid, categorical_feature=categorical_features, feature_name=predictors)
    evals_result = {}
    model = lgb.train(
        MODEL_PARAMS,
        train,
        valid_sets=[valid],
        valid_names=['valid'],
        evals_result=evals_result,
        **FIT_PARAMS
    )

    # validation score
    y_pred_valid = model.predict(X_valid)

    # feature importances
    importances = pd.DataFrame()
    importances['feature'] = predictors
    importances['gain'] = model.feature_importance(importance_type='gain')
    importances['split'] = model.feature_importance(importance_type='split')
    importances['fold'] = fold_id

    return y_pred_valid, importances


def plot_mean_feature_importances(feature_importances, max_num=50, importance_type='gain', path=None):
    # Adds a 'mean_<importance_type>' column (mean over folds) in place and,
    # if `path` is given, saves a bar plot of the top `max_num` features.
    mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean()
    feature_importances['mean_' + importance_type] = feature_importances['feature'].map(mean_gain[importance_type])
    if path is not None:
        data = feature_importances.sort_values('mean_'+importance_type, ascending=False).iloc[:max_num, :]
        plt.clf()
        plt.figure(figsize=(16, 8))
        sns.barplot(x=importance_type, y='feature', data=data)
        plt.tight_layout()
        plt.savefig(path)
    return feature_importances


def to_bins(x, borders):
    # Map a continuous prediction to the index of the first border it does
    # not exceed (right-closed bins); len(borders) for anything above them all.
    for i in range(len(borders)):
        if x <= borders[i]:
            return i
    return len(borders)


class OptimizedRounder_(object):
    # Golden-section search for the 4 class thresholds that maximise
    # quadratic kappa; this variant searches in raw-prediction space.
    def __init__(self):
        self.coef_ = 0

    def _loss(self, coef, X, y, idx):
        # Negative kappa of predictions binned with the candidate thresholds.
        X_p = np.array([to_bins(pred, coef) for pred in X])
        ll = -get_score(y, X_p)
        return ll

    def fit(self, X, y):
        coef = [1.5, 2.0, 2.5, 3.0]
        golden1 = 0.618
        golden2 = 1 - golden1
        ab_start = [(1, 2), (1.5, 2.5), (2, 3), (2.5, 3.5)]
        for it1 in range(10):
            for idx in range(4):
                # golden section search
                a, b = ab_start[idx]
                # calc losses
                coef[idx] = a
                la = self._loss(coef, X, y, idx)
                coef[idx] = b
                lb = self._loss(coef, X, y, idx)
                for it in range(20):
                    # choose value
                    if la > lb:
                        a = b - (b - a) * golden1
                        coef[idx] = a
                        la = self._loss(coef, X, y, idx)
                    else:
                        b = b - (b - a) * golden2
                        coef[idx] = b
                        lb = self._loss(coef, X, y, idx)
        self.coef_ = {'x': coef}

    def predict(self, X, coef):
        X_p = np.array([to_bins(pred, coef) for pred in X])
        return X_p

    def coefficients(self):
        return self.coef_['x']


class OptimizedRounder(object):
    # Same golden-section threshold search, but tuned for rank-normalised
    # predictions in [0, 1] (the CV cells rank-transform fold outputs).
    def __init__(self):
        self.coef_ = 0

    def _loss(self, coef, X, y, idx):
        X_p = np.array([to_bins(pred, coef) for pred in X])
        ll = -get_score(y, X_p)
        return ll

    def fit(self, X, y):
        coef = [0.2, 0.4, 0.6, 0.8]
        golden1 = 0.618
        golden2 = 1 - golden1
        ab_start = [(0.01, 0.3), (0.15, 0.56), (0.35, 0.75), (0.6, 0.9)]
        for it1 in range(10):
            for idx in range(4):
                # golden section search
                a, b = ab_start[idx]
                # calc losses
                coef[idx] = a
                la = self._loss(coef, X, y, idx)
                coef[idx] = b
                lb = self._loss(coef, X, y, idx)
                for it in range(20):
                    # choose value
                    if la > lb:
                        a = b - (b - a) * golden1
                        coef[idx] = a
                        la = self._loss(coef, X, y, idx)
                    else:
                        b = b - (b - a) * golden2
                        coef[idx] = b
                        lb = self._loss(coef, X, y, idx)
        self.coef_ = {'x': coef}

    def predict(self, X, coef):
        X_p = np.array([to_bins(pred, coef) for pred in X])
        return X_p

    def coefficients(self):
        return self.coef_['x']


class StratifiedGroupKFold():
    # Group K-fold (no group spans two folds) that is additionally balanced
    # on the rounded per-group mean target.
    def __init__(self, n_splits=5):
        self.n_splits = n_splits

    def split(self, X, y=None, groups=None):
        fold = pd.DataFrame([X, y, groups]).T
        fold.columns = ['X', 'y', 'groups']
        fold['y'] = fold['y'].astype(int)
        # Per-group mean target, rounded, used as the stratification key.
        g = fold.groupby('groups')['y'].agg('mean').reset_index()
        fold = fold.merge(g, how='left', on='groups', suffixes=('', '_mean'))
        fold['y_mean'] = fold['y_mean'].apply(np.round)
        fold['fold_id'] = 0
        for unique_y in fold['y_mean'].unique():
            mask = fold.y_mean==unique_y
            selected = fold[mask].reset_index(drop=True)
            # NOTE(review): uses the module-level `n_splits`, not
            # self.n_splits — they happen to match in this notebook.
            cv = GroupKFold(n_splits=n_splits)
            for i, (train_index, valid_index) in enumerate(cv.split(range(len(selected)), y=None, groups=selected['groups'])):
                selected.loc[valid_index, 'fold_id'] = i
            fold.loc[mask, 'fold_id'] = selected['fold_id'].values
        for i in range(self.n_splits):
            indices = np.arange(len(fold))
            train_index = indices[fold['fold_id'] != i]
            valid_index = indices[fold['fold_id'] == i]
            yield train_index, valid_index


def merge(train, test, path, add_cols):
    # Column-concatenate an extra feather feature table onto train/test and
    # record the added column names; relies on the global `len_train` split point.
    df_ = feather.read_dataframe(path)
    add_cols += list(df_.columns)
    train = pd.concat((train, df_[:len_train]), axis=1)
    test = pd.concat((test, df_[len_train:].reset_index(drop=True)), axis=1)
    return train, test, add_cols


# +
target = 'AdoptionSpeed'
len_train = 14993
len_test = 3948

# ===============
# Params
# ===============
seed = 777
n_splits = 5
np.random.seed(seed)

# feature engineering
n_components = 5
img_size = 256
batch_size = 256

# model
MODEL_PARAMS = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'rmse',
    'learning_rate': 0.01,
    'num_leaves': 63,
    'subsample': 0.9,
    'subsample_freq': 1,
    #'colsample_bytree': 0.6,
    'max_depth': 9,
    'max_bin': 127,
    'reg_alpha': 0.11,
    'reg_lambda': 0.01,
    'min_child_weight': 0.2,
    'min_child_samples': 20,
    'min_gain_to_split': 0.02,
    'min_data_in_bin': 3,
    'bin_construct_sample_cnt': 5000,
    'cat_l2': 10,
    'verbose': -1,
    'nthread': 16,
    'seed': 777,
}
FIT_PARAMS = {
    'num_boost_round': 5000,
    'early_stopping_rounds': 100,
    'verbose_eval': 10000,
}

# define
maxvalue_dict = {}
categorical_features = [
    'Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'Dewormed', 'FurLength',
    'Gender', 'Health', 'MaturitySize', 'State', 'Sterilized', 'Type',
    'Vaccinated', 'Type_main_breed', 'BreedName_main_breed',
    'Type_second_breed', 'BreedName_second_breed',
]
numerical_features = []
text_features = ['Name', 'Description']
remove = ['index', 'seq_text', 'PetID', 'Name', 'Description', 'RescuerID',
          'StateName', 'annots_top_desc','sentiment_text', 'Description_Emb',
          'Description_bow', 'annots_top_desc_pick', "BreedName_y", "BreedName_x"]
# -

# Feature table was built in a separate kernel; first len_train rows are train.
train = feather.read_dataframe('from_kernel/all_datav17.feather')
#df_ = feather.read_dataframe("from_kernel/all_data.feather")[['magnitude_mean', 'document_sum_magnitude_mean', 'score_mean', 'document_sum_score_mean']]
#train = pd.concat((train, df_), axis=1)
test = train[len_train:]
train = train[:len_train]
add_cols = []

# +
# %%time
# First CV pass over the full predictor set to measure feature importance.
categorical_features = list(set(categorical_features) - set(remove))
predictors = list(set(list(np.load("from_kernel/common_colsv17.npy"))+list(np.load("from_kernel/t_colsv17.npy"))) - set([target] + remove))
predictors.remove("contain_dog")
predictors = predictors + ["desc_contain_dog"]
#predictors = [c for c in predictors if "dense" not in c]
#predictors = predictors + add_cols
categorical_features = [c for c in categorical_features if c in predictors]
print(len(predictors))
#predictors = [c for c in predictors if "densenet121" not in c and "inception" not in c and "gnvec" not in c and "glove" not in c]
train = train.loc[:, ~train.columns.duplicated()]

X = train.loc[:, predictors]
y = feather.read_dataframe('../input/X_train.feather')["AdoptionSpeed"].values
rescuer_id = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv').loc[:, 'RescuerID'].iloc[:len_train]

feature_importances = pd.DataFrame()
y_pred = np.empty(len_train,)
y_test = []

#cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
#for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
#cv = GroupKFold(n_splits=n_splits)
#for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=None, groups=rescuer_id)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
    X_train = X.loc[train_index, :]
    X_valid = X.loc[valid_index, :]
    y_train = y[train_index]
    y_valid = y[valid_index]

    y_pred_valid, importances = run_model(X_train, y_train, X_valid, y_valid,
                                          categorical_features, numerical_features,
                                          predictors, maxvalue_dict, fold_id)
    # Rank-normalise each fold's predictions to [0, 1] before thresholding.
    y_pred_valid = rankdata(y_pred_valid)/len(y_pred_valid)
    y_pred[valid_index] = y_pred_valid.ravel()
    feature_importances = pd.concat([feature_importances, importances], axis=0, sort=False)

# plot
feature_importances = plot_mean_feature_importances(
    feature_importances, max_num=50, importance_type='gain',
    path='gain_feature_importances.png')

# Threshold out-of-fold predictions into classes by maximising kappa.
optR = OptimizedRounder()
optR.fit(y_pred, y)
coefficients = optR.coefficients()
y_pred_opt = optR.predict(y_pred, coefficients)
score = get_score(y, y_pred_opt)
print(score)
# -

print(score)

# Scratch notes: kappa scores recorded from earlier runs.
0.44891696100537537

0.4478579489570108

from sklearn.metrics import confusion_matrix
pd.DataFrame(confusion_matrix(y, y_pred_opt))

0.489964545495568

# Aggregate per-feature mean gain across folds and persist it for selection.
importance_type="gain"
mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean().reset_index()
data = mean_gain.sort_values(importance_type, ascending=False)
print(len(data))
data.head()

data.to_csv("importance10.csv", index=False)

data["gain"] = data["gain"] / data["gain"].sum()
data.head()

list(data.feature[:150].values)

# +
# %%time
# Second CV pass restricted to features with normalised gain > 0.0002.
#n_feats =2024
#predictors = list(data.feature[:n_feats])
use_cols = pd.read_csv("importance10.csv")
use_cols["gain"] = use_cols["gain"] / use_cols["gain"].sum()
predictors = list(use_cols[use_cols.gain>0.0002].feature)
categorical_features = [c for c in categorical_features if c in predictors]
numerical_features = list(set(predictors) - set(categorical_features + [target] + remove))
#predictors = categorical_features + numerical_features

X = train.loc[:, predictors]
y = feather.read_dataframe('../input/X_train.feather')["AdoptionSpeed"].values
rescuer_id = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv').loc[:, 'RescuerID'].iloc[:len_train]

feature_importances = pd.DataFrame()
y_pred = np.empty(len_train,)
y_test = []

#cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
#for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
#cv = GroupKFold(n_splits=n_splits)
#for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=None, groups=rescuer_id)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
    X_train = X.loc[train_index, :]
    X_valid = X.loc[valid_index, :]
    y_train = y[train_index]
    y_valid = y[valid_index]

    y_pred_valid, importances = run_model(X_train, y_train, X_valid, y_valid,
                                          categorical_features, numerical_features,
                                          predictors, maxvalue_dict, fold_id)
    y_pred_valid = rankdata(y_pred_valid)/len(y_pred_valid)
    y_pred[valid_index] = y_pred_valid.ravel()
    feature_importances = pd.concat([feature_importances, importances], axis=0, sort=False)

# plot
feature_importances = plot_mean_feature_importances(
    feature_importances, max_num=50, importance_type='gain',
    path='gain_feature_importances.png')

optR = OptimizedRounder()
optR.fit(y_pred, y)
coefficients = optR.coefficients()
y_pred_opt = optR.predict(y_pred, coefficients)
score = get_score(y, y_pred_opt)
print(score)
# -

# Scratch notes: feature-count vs kappa from earlier selection experiments.
100-0.4289945476630629
150-0.42939314866795686
200-0.4283107506878675
importance>0-0.42532940215583626
importance>0.0002- X_train.shape  # NOTE(review): scratch jotting; the result for this run was never filled in

a = pd.DataFrame({"ID": rescuer_id, "target": y})
a.groupby("ID")["target"].agg(["mean", "count"])

# local(16core)
# n_features-kappa(time)
# ---
# 100-0.4605709670182728(31s)
# 150-0.4630525352163998(38s)
# 200-0.4627631126511653(46s)
# 300-0.4614585304106906(59s)
# 500-0.4609961498746312(1min40s)
# 1000-0.4498377072144968
# gain>0(1985)-0.4467023561915181(4min13s)
# all(3784)-0.449078003109212(5min46s)
#
# the times above include the OptimizedRounder step

len(data[data.gain>0.0002]), len(data[data.gain>0]), len(data)

list(data[data.gain>0.0002].feature.values)

data[data.feature=="nan_count"]

# Hand-tuning log: the trailing "#no ..." comments record values that were
# tried and rejected.
MODEL_PARAMS = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'rmse',
    'learning_rate': 0.01,
    'num_leaves': 31, #no 15
    'subsample': 0.9,
    'subsample_freq': 1,
    #'colsample_bytree': 0.6,
    'max_depth': 9, #no 7
    'max_bin': 127, #no63, 255
    'reg_alpha': 0.5, #no 1.0
    'reg_lambda': 0.01, #no0.5
    'min_child_weight': 0.2, #no impact
    'min_child_samples': 10, #no 5
    'min_gain_to_split': 0.02,#no0.1, 0.01
    'min_data_in_bin': 3, #no10
    'bin_construct_sample_cnt': 5000, #no 3000, 7000
    'cat_l2': 10, #no
    'verbose': -1,
    'nthread': 16,
    'seed': 777,
}
code/notebook/train_new.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 第11讲 整除和余数(二) # ### Problem 问题描述 # There are some basic laws when checking whether an integer is divisable by some other small integers: # 在判断一个数能否被另一些简单的数整除时,有一些基本的规律: # # 1. An integer is even if its smallest(far right) bit is one of 0, 2, 4, 6, 8. All even numbers are divisable by 2. For example, 12 is an even, it is divisable by 2(12/2=6...0). # 一个数字末位以0,2,4,6,8结尾,则为偶数。偶数都能被2整除。举例:12是一个偶数,它能被2整除。事实上,12/2=6...0; # 2. If the sum of numbers on an integer's all bits is divisable by 3, so is this integer. For example, the numbers on all bits of integer 183 are 1, 8, 3; the sum of 1+8+3 is 12, which is divisable by 3; therefore, Therefore, 183 is also divisable by 3. # 如果一个数各个位置上的数字加起来得到的和如果能被3整除,那么这个数也能被3整除。举例:183是一个三位数,把它的百、十、个位数字加起来为1+8+3=12,12能被3整除,那么183这个数也能被3整除。事实上,183/3=61...0; # 3. If an even is divisable by 2, and the quotien of the even divided by 2 is still an even, this previous even is therefore divisable by 4. For example, 12 is an even, the quotien of 12 divided by 2 is 6 with remainder 0; therefore, 12 is divisable by 4(12/4=3...0). # 如果一个偶数除以2得到的还是偶数,那么前一个偶数能够被4整除。举例:12是一个偶数,它除以2得到的结果6还是一个偶数,那么12就能被4整除。事实上,12/4=3...0; # 4. if the smallest(far right) bit of an integer is either 0 or 5, this integer is divisable by 5. For example, the smallest bit of 65 is 5, then 65 can be completely divided by 5 (65/5=13...0). Integer 80 ends with 0 as its smallest bit, therefore, 80 is divisable by 5 as well (80/5=16...0). # 末位数字是0或5的数能被5整除。举例:65的末位数字是5,它能被5整除,事实上,65/5=13...0。80的末位数字是0,它也能被5整除。事实上,80/5=16...0; # 5. If an integer is divisable by both 2 and 3, it is therefore also divisable by the multiplication of 2 and 3, which is 6. For example, 24 is divisable by both 2 and 3, then 24 is also divisable by 6 (24/6=4...0). 
# 能同时被2和3整除的数也能被6整除。举例:24能同时被2和3整除,24也能被6整除。事实上,24/6=4...0; # 6. If the sum of the numbers on all bits in an integer is divisable by 9, then this integer is also divisable by 9. For example, the numbers of all bits of 288 are 2, 8, and 8, whose sum is 18. 18 is divisable by 9 (18/9=2...0), which means 288 is also divisable by 9(288/9=32...0). # 如果一个数各个位置上的数字加起来得到的和如果能被9整除,那么这个数也能被9整除。举例:288是一个三位数,把它的百、十、个位数字加起来为2+8+8=18,18能被9整除,那么288这个数也能被9整除。事实上,288/9=32...0; # 7. ... # # There are still many other laws addressing the division. # 类似的规律还有很多。 # # Based on the above laws of division, write python program and check whether each of the following numbers chosen from 1 to 100 is divisable by both 3, 5, and 9: # 结合上面的规律,编写程序判断下面从100以内选出的这串数字能否被3,5,和9整除: # # $$12, 34, 6, 8, 5, 45, 72, 96, 22, 48, 15, 36, 81, 19, 37, 9$$ # traditional apporach. num1, num2, num3, num4 = 12, 45, 34, 6, if num1 % 3 == 0 and num1 % 5 == 0 and num1 % 9 == 0: print("{} is divisable by 3, 5, 9".format(num1)) if num2 % 3 == 0 and num2 % 5 == 0 and num2 % 9 == 0: print("{} is divisable by 3, 5, 9".format(num2)) # new approach: a list, index 索引, length = 16 (numbers) nums = [12, 34, 6, 8, 5, 45, 72, 96, 22, \ 48, 15, 36, 81, 19, 37, 9] print(nums[0]) print(nums[-2]) # error when you set the number >= 16 126 % 3 2879694315984 % 8 # ### Math Background 数学背景 # # 1. An integer can have 2, 3, ... bits. The far left bit is the highest bit. For example, the name of the three bits, from far left(highest) to far right(lowest), of an integer with three bits are hundred, ten, and unit bits, respectively. Specifically, the hundred, ten, and unit bits of integer 123 are 1, 2, and 3, respectively. # 整数根据其拥有位数的多少可以分为两位数,三位数,...,其中最左侧的数字是最高位的数。例如:三位数从左到右依次是百位、十位和各位。具体说来,123这个三位数,百位上的数字是最左侧的数字1,十位上的数字是2,个位上的数字是3。 # 2. The number on the far left bit of an integer is just the quotien of this integer divided by an smallest integer who owns that highest bits. 
For example, 873 is an integer which has 3 bits, the number in the highest bit is 8, which can be considered as the quotien of this integer divided by 100, the smallest integer who owns three bits. And the remainder of this division is an integer composed by the other lower bits, which in this case is 73. Similarly, 73 is a two-bit integer, where 7 is the quotien of 73 divided by 10, where 10 is the smallest integer who owns two bits. Again, 3 is an integer with only one bit, and 3 is also the quotien of 3 divided by 1, where 1 is the smallest (positive) integer who has only one bit. # 一个多位数最高位上的数字就是这个多位数除以一个除数的商,而这个除数选用具有这个最高位最小的数。例如873这个三位数,最高位百位上的数字是8,这个8其实就是873/100这个除法算式的商,100是最小的的百位数。这个除法算式的余数则这个数字去掉百位数字剩下的数,也就是73。类似的,73是一个十位数,其中7是73/10的商,10是最小的十位数。同样,3是一个个位数,3也是3/1的商,其中1是最小的个位数。 # + [markdown] heading_collapsed=true # ### Prerequisites 预备知识 # - # #### 1. Access to the numbers on all bits of an Integer 获取一个多位数各个位置上的数字 # # Write an program to find the numbers of all bits(from highest to lowest) of 873, assign these numbers to the variable `n_hundred`,`n_ten`, and `n_one` respectively. # 编程找出873这个三位数的每一位(从高到低)上的数字8,7,3,分别赋给变量:`n_hundred`,`n_ten`,和`n_one`。 num = 873 n_hundred = num // 100 n_ten = (num - n_hundred * 100) // 10 n_one = (num - n_hundred * 100 - n_ten * 10) #// 1 print("{}的百位数字是{}".format(num, n_hundred)) print("{}的十位数字是{}".format(num, n_ten)) print("{}的个位数字是{}".format(num, n_one)) # __Exercise__: Write program to assign numbers of all bits(from highest to lowest) of 2021 to 4 variables, and print out the value of these varaibles in order: # __练习__: 编程将数字2021的四个位置(千、百、十、个位)的数分别赋值给4个变量,并输出相关信息 # #### 2. A new Data(Variable) Type 新的数据(变量)类型`list` # # When storeing and processing a list of values is required, it's not really necessary to assign every different variable to each of these values. Instead, a `list` variable can be used to handle this scenario. This special data type has the similar role with the `string` and `integer` type. 
For example, we can declare a variable `ages` to store the ages of all four students, Yunzi, Tony, Sophie, and Jasson, in this class: # 当有一串数字需要存储、处理时,我们可以不必为这串数字中的每一个都声明一个变量,而是可以用一个“列表”类型的变量来声明这一串数字,这个特殊的数据类型与字符串和整数类型地位类似。例如,我们可以使用下面这样的一行代码声明一个变量(ages)来存储班级里所有四个小朋友:Yunzi, Tony, Sophie和Jasson的年龄: ages = [9, 10, 10, 11] print(type(ages)) Sophie_name = "Sophie" print(type(Sophie_name)) # It is still an assignment expression, the left side of the assignment operator is a variable with the name `ages`, while at its right side, a list of age values in order are enclosed in square brackets. Every value in this list is one of its elements. we can operate the values of an element in the list by using the combination of the list variable name and the index of the element. The index of first element is 0; the index of second is 1, and so on. If an list has `n` elements, the index of the last(far right) element is `n-1`. # 这是一条赋值语句,赋值符号的左侧是变量名ages,右侧是用中括号括起来用逗号依次隔开来的多个数值。列表中的每一个数值成为这个列表变量的一个元素,可以用变量名联合数值在列表中的次序(也称为索引)来获取和操作这个数据,第1个元素的索引值是0,第2个元素的索引值是1,以此类推。如果一个列表变量一共有`n`个元素,那么最后一个元素的索引值为`n-1`。 # # Actually, we can not tell to which student each age(value of the element) refers; in other word, we can only konw the first element in this `ages` list is 8, but we can not tell this 8 is the age of Tony, nor can we tell that it is the age of Sophie or other students. # 从上面的列表型变量的声明语句中,我们并没有说明那一个年龄数据是哪一个小朋友的。也就是说,我们只知道这个列表中的第一个年龄数值是8,但是没有任何信息显示数字8是Tony的还是Sophie或者是其他小朋友的。 print(ages[0]) # get the first value in ages, which is 8 # 获取ages变量里的第一个数值,8 print(ages[1]) # get the second value in ages, which is 9 # 获取ages变量里的第二个数值, 9 print(ages[3]) # get the fourth value in ages, which is 10 # 获取ages变量里的第四个数值,10 # In addition, the index of an element in the list can also be defined by a variable with Integer type. # 也可以用一个整数类型的变量来表示列表变量中的索引: index = 2 print(ages[index]) # A method `len` can be called to get the total number(length) of the elements in a list. 
For exmale, the following codes read the length of the `ages` list and print it out. # 可以用方法`len`来获取一个列表性变量的总的元素的个数。例如下面的代码讲得到ages变量所有的年龄个数,也就是小朋友的人数:4。 ages = [9, 10, 10, 11] total_length = len(ages) print("There are {} students' ages in ages list.".format(total_length)) # The values of the elements in a list can be updated. For example, the following codes change the value of the third element(with index 2) in the `ages` list to 20, and then print all the elements in the list. We can see the new value of the third element. # 也可以修改ages变量某一个(些)元素的值。例如,下面的代码将ages列表变量的第3个(索引值为2)元素的数据修改为20,并打印出ages变量: ages = [9, 10, 10, 11] ages[2] = 20 print(ages) # __Exercise__: declare a variable with the type of `list`; in this variable, store the following values that describe you best: the number of toy cars you have, the number of you toy doll, the number of your favorate books, and the times that you spent weekend outside with your family so far this year, etc. Print out this variable and explain to your classmates the meaning of each element in order. # __练习__: 用一个列表型变量来记录你所拥有的玩具汽车、玩具娃娃、喜欢的课外书的数量、今年周末除去玩的次数等等。打印这个列表型变量,并向其它小朋友解释变量中每一个元素值的意义。 ___ = [12, 43, 4, 9] # ### Solution Codes 编程求解 numbers = [12, 34, 6, 8, 5, 45, 72, 96, 22, 48, 15, 36, 81, 19, 37, 9] length = len(numbers) index = 0 while index < length: num = numbers[index] n_ten = num // 10 n_one = num - (n_ten * 10) if (n_ten + n_one) % 3 == 0: print("{}能被3整除,因为它的个位和十位数字的和能被3整除".format(num)) if (n_ten + n_one) % 9 == 0: print(" {}能被9整除,因为它的个位和十位数字的和能被9整除".format(num)) if n_one % 5 == 0: print("{}能被5整除,因为它的个位数字能被5整除".format(num)) index += 1 # ### Summary 知识点小结 # - 理解如何通过对一个多位数除以10,100,... 的取整和取余来得到这个多位数的各个位置上的数字; # - 学习新的变量类型:列表型变量 list; # - 学习用索引值获取列表型变量内某一个元素的值,修改列表型变量中某一个元素的值。 # - 新方法`len`来获取一个列表性变量所有的元素个数; # - 复习整除和余数操作符`//`,`%`. # + hidden=true # + hidden=true # - # ### 计算机小知识 # 暂缺 # + [markdown] heading_collapsed=true # ### Assignments 作业 # - # 1. 
if an integer is divisable by 2, and the quotien of this division is still divisable by 2, again the next quotien is still divisable by 2, then the original integer is divisalbe by 8. For example, 16 is divisable by 2 with the quotien 8 and remainder 0; 8 is also divisable by 2 with quotien 4; again, 4 is still divisable by 2. Therefore, 16 is divisable by 8, which is 2 times 2 times 2. In fact, 16/8=2...0. Complete the following codes based on the law described above so that the whole program can select out integers that are divisable by 8 from the list given. The given codes verify that the selected integers is indeed divisable by 8 by directly using the `%` operator to get the remainder of the integer divided by 8. # 如果一个数能被2整除,所得的商仍能被2整除,再次除以2得到的商仍然能被2整除,那么这个数就能被8整除。例如数字16,它能被2整除且商为8;8也能被2整除且商为4,4仍然能被2整除,那么16就能被8整除。实际上,16除以8商为2余数为0。按照这个规律补全下面的程序,使之能够寻找给定的一串数字中哪些能被8整除。所给的代码同时使用了取余运算符直接验证这个数确实能被8整除。 # + # a list data with variable name: numbers # 一个列表数据变量,变量名为numbers numbers = [176, 437, 236, 121, 155, 440, 363, 413, 223, 462, 330, 116, 459, 281, 111, 89, 49, 284, 29, 208, 397, 386, 464, 150, 411, 211, 143, 51, 18, 230] index = 0 # start from index = 0 从索引值为0开始 while index < len(numbers): num = numbers[index] # get a number from list with certain index # 根据索引从列表中获取一个数准备分析 if num % 2 == 0: # first time completely divided by 2 # 第一次能被2整除 # TODO: assign correct expression to quot1 by replaceing num # 替换None,给quot1一个正确的赋值表达式 quot1 = num if quot1 % 2 == 0: # second time completely divided by 2 # 第二次能被2整除 # TODO: assign correct expression to quot2 by replaceing num # 替换None,给quot2一个正确的赋值表达式 quot2 = num if quot2 % 2 == 0: # third time completely divided by 2 # 第三次能被2整除 # TODO: assign correct expression to quot3 by replacing num, # pay attention to the use of quot3 in below codes # 替换num,给quot3一个正确的表达式,注意下面的代码对quot3变量的使用 quot3 = num print("{}能够被8整除,3次除以2所得到的商分别为{},{},{}".\ format(num, quot1, quot2, quot3)) # verify by direclty // 8 直接对8取余验证 if num % 8 == 0: 
print("直接将{}对8取余数,余数是0, 验证了{}能被8整除。".\ format(num, num)) else: print("验证没通过,一定是哪里出了问题。需要检查代码。") index += 1 # - # 2. If an integer A is divisable by two adjacent(neighbor) integers, D1 and D2, who are both greater than 1, this integer A is divisable by the mutiplication of D1 and D2. For example, 112 is divisable by 7 and 8: # 如果一个整数A能够同时被大于1的另两个相邻的整数D1和D2整除,那么把D1和D2的乘积当作除数仍然能够整除A。例如112既能被7整除,也能被8整除: # $$112 \div 7 = 16 \cdots 0$$ # $$112 \div 8 = 13 \cdots 0$$ # # where 7 and 8 are two neighbor integers, and: # 由于7和8是相邻的两个整数,且:$$7\times 8=56$$ # # then, 112 is divisable by 56, which is multiplication of 7 and 8: # 那么112就能被56(=7*8)整除: # $$112 \div 56 = 2 \cdots 0$$ # # Given the fact that integer 40320 is divisable by **multiple** groups of two neighbor integers (greater than 1). # 已知40320可以被**多组**100以内大于1的两个连续的整数整除。 # # Please write program to: # 请自己编写完整的程序: # 1. look for those neighbor integers who are smaller than 100 and greater than 1. # 查找这个整数可以被哪些组的100以内的两个相邻的整数整除. # 2. verify that 40320 is indeed divisable by multiplication of neighbor integers in each group by using operator `//` or `%`. # 直接使用运算符`//`和`%`来验证40320确实能够被这些两个相邻的整数的乘积整除。 # 3. There are also some laws talking about the division by integer 11. # 能够被11整除的数也有一些规律。 # 1. Write program to select out those integers divisable by 11 by directly using the operator `%` or `//`; # 编写程序,从下面列表中给出的数中,直接使用`//`或`%`运算符挑选出能被11整除的数; # 2. Observe those integers selected out, try to find some features(or laws) among them, write down the features(or laws) you find below indicated; # 观察所有能被11整除的数都有什么规律,并把你找到的规律以文字的形式填写在下面提示的地方。 # 3. (hard, optional) Can you write another program to verify the features(or laws) you declared? # (难,可选做)你能够编写程序验证你发现的规律吗? 
# # **[Write your findings here 在这里写下你观察到的规律]** numbers = [1, 11, 12, 121, 131, 132, 133, 12321, 1234321, 253, 254, 2631, 2651, 3641, 3631, 2652, 2552, 1574, 1473, 1673, 1563] #TODO: write codes to check which element(number) in the list can be completely divided # by 11 with remainder 0. # 编写代码检查列表中的哪些元素(数值)能够被11整除。 numbers = [1, 11, 12, 121, 131, 132, 133, 12321, 1234321, 253, 254, 2631, 2651, 3641, 3631, 2652, 2552, 1574, 1473, 1673, 1563] #TODO: (Optional) write your code to verify your observation(guess) # (可选做) 编写程序验证你的观察(猜测):能被11整除的数的规律。 # + [markdown] hidden=true # 4. In this lecture, we demonstrated how to get all bit numbers of an integer by dividing this integer by 10, 100, .... The reason that we use 10, 100, ..., to divide this integer is, the integer is organized by a dcimal counting system, which means the number in second bit (counted from far right lowest bit) actually represents ten times of the number, and the number in third bit represents hundred times of that number. For example, number in second(counted from far right lowest) bit of the integer 359 is 5. Here 5 is not just 5, but 10 times 5, which is 50, because it is located in the second bit. Similally, the number 3 locatd in hundred bit represents 300 actually. In whole: # 在本节的例题中,我们演示了如何通过除以10,100, ... 编程获取一个多位数每一位上的数字。我们之所以选择10,100,...,这些数作为除数,是因为十位数上的数字实际表示的是这个数字乘以10的结果,百位上的数字是这个数字乘以100的结果。例如359这个数字十位上的数是5,这里的5表示的是50,百位上的数字3表示的是300。也就是说: # $$359=3\times100\times10+5=3\times 10\times10+5\times10+9$$ # # This counting system is called "Decimal Counting System". In such system, we only have number of 0-9 in each bit. Actually, this system is just one of the many count systems. In normal life, we also use a counting system based on 60. For example, when expressing minutes and seconds of time, this 60 based counting system is used. 
For example, 8 hours 3 minutes and 40 seconds can be represented by the format: # 这种计数的方式我们称之为“十进制”计数。在十进制计数法中,我们在每一位上用到的数字只有从0-9。实际上,十进制计数法只是许多计数法中的一种,生活中常见的计数方法还有60进制。例如在表示分钟和秒时,我们就用了60进制。例如8点3分40秒,我们可以用下面的写法来表示: # # $$08:03:40$$ # # In fact, when time is at $08:03:40$ of a day, it means it has passed the following seconds of the day: # 事实上,如果时间处于一天中的 $08:03:40$, 意味着哪一天已经过去了如下所示的秒数: # # $$29020=8\times3600+3\times60+40=8\times60\times60 + 3\times60 + 40$$ # # 125 seconds don't means the time is 1 hour 2 minutes 5 seconds, but 0 hour 2 munites 5 seconds since: # 而125秒并不表示1小时2分钟5秒,而是表示2分钟5秒,因为: # # $$120=2\times60+5=0\times60\times60+2\times60+5$$ # # Please **modify only the fifth line code in the following cell**, so that the program can correctly convert a time represented by seconds to the time with the format "hours:minutes:seconds", and print it out the following result: # 请**仅修改下面代码单元格中的第5行代码**,使之能正确地将用“秒”表示的时间换算成“小时:分钟:秒"的格式,并打印输出如下的结果: # ```code # 125 秒(seconds) == 000:02:05(HHH:MM:SS) # 60 秒(seconds) == 000:01:00(HHH:MM:SS) # 612 秒(seconds) == 000:10:12(HHH:MM:SS) # 3600 秒(seconds) == 001:00:00(HHH:MM:SS) # 3661 秒(seconds) == 001:01:01(HHH:MM:SS) # 43200 秒(seconds) == 012:00:00(HHH:MM:SS) # 86400 秒(seconds) == 024:00:00(HHH:MM:SS) # ``` # - times = [125, 60, 612, 3600, 3661, 43200, 86400] # times represented by seconds # 用秒为单位表示的时间 # TODO: change the value of variabls: num_secret1 and num_secret2 # 修改变量num_secret1和secret2的值 num_secret1, num_secret2 = 100, 10 index = 0 while index < len(times): time = times[index] hour, minute, second = None, None, None hour = time // num_secret1 minute = (time - hour * num_secret1) // num_secret2 second = time - hour * num_secret1 - minute * num_secret2 print("{:>6} 秒(seconds) == {:>03}:{:>02}:{:02}(HHH:MM:SS)".\ format(time, hour, minute, second)) index += 1 # 5. Sophie's class in primary school has 25 classmates. Their teacher organized a math exam recently. 
Here are the scores of all 25 students: # Sophie所在的小学班级有25个学生,他们的老师最近组织了一次数学考试,25个学生的考试成绩如下: # # $$95, 74, 72, 64, 85, 79, 96, 65, 91, 92, 81, 71, 55, 78, 51, 69, 55, 85, 82, 57, 98, 57, 96, 62, 96$$ # # Please write program to find out: # 请编写程序找出: # 1. the highest and lowest scores and indices(pl. of index) among those 25 scores; # 这25个学生的成绩中的最高分和最低分,以及最高最低分对应的索引值; # 2. How many socres are below 60, what are the indices(pl. of index) for these scores? # 有几个成绩低于60分,对应的索引分别是什么? # + hidden=true # a list ariable records the mathematical score of each student in the class # 一个列表变量记录了班级里每一个同学的数学考试成绩 scores = [95, 74, 72, 64, 85, 79, 96, 65, 91, 92, 81, 71, 55, 78, 51, 69, 55, 85, 82, 57, 98, 57, 96, 62, 96] # TODO: write your own codes here to answer the questions. # 编写你自己的代码来回答本题
source/2021/100Beginner/content/011_divide_remainder_2.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.0.0
#     language: julia
#     name: julia-1.0
# ---

# # Plotting
#
# ## Basics

# There are a few different ways to plot in Julia (including calling PyPlot). <br>
#
# Here we'll show you how to use `Plots.jl`. If it's not installed yet, you need to use the package manager to install it, and Julia will precompile it for you the first time you use it:

# using Pkg
# Pkg.add("Plots")
using Plots

# One of the advantages to `Plots.jl` is that it allows you to seamlessly change backends. In this notebook, we'll try out the `gr()` and `plotlyjs()` backends.<br>
#
# In the name of scientific inquiry, let's use this notebook to examine the relationship between the global temperature and the number of pirates between roughly 1860 and 2000.

# Data series: x values are pirate counts, y values are global temperatures (C).
globaltemperatures = [14.4, 14.5, 14.8, 15.2, 15.5, 15.8]
numpirates = [45000, 20000, 15000, 5000, 400, 17];

# Plots supports multiple backends — that is, libraries that actually do the drawing — all with the same API. To start out, let's try the GR backend. You choose it with a call to `gr()`:

gr()

# and now we can use commands like `plot` and `scatter` to generate plots.

plot(numpirates, globaltemperatures, label="line")
scatter!(numpirates, globaltemperatures, label="points")

# The `!` at the end of the `scatter!` function name makes `scatter!` a mutating function, indicating that the scattered points will be added onto the pre-existing plot.
#
# In contrast, see what happens when you replace `scatter!` in the above with the non-mutating function `scatter`.
#
# Next, let's update this plot with the `xlabel!`, `ylabel!`, and `title!` commands to add more information to our plot.

xlabel!("Number of Pirates [Approximate]")
ylabel!("Global Temperature (C)")
title!("Influence of pirate population on global warming")

# This still doesn't look quite right. The number of pirates has decreased since 1860, so reading the plot from left to right is like looking backwards in time rather than forwards. Let's flip the x axis to better see how pirate populations have caused global temperatures to change over time!

xflip!()

# And there we have it!
#
# Note: We've had some confusion about this exercise. :) This is a joke about how people often conflate correlation and causation.
#
# **Without changing syntax, we can create this plot with the UnicodePlots backend**

# NOTE(review): `Pkg.add` assumes `using Pkg` has been run; above it only appears
# commented-out — confirm Pkg is loaded before executing this cell.
Pkg.add("UnicodePlots")
unicodeplots()

# Same plotting calls as before — only the backend changed.
plot(numpirates, globaltemperatures, label="line")
scatter!(numpirates, globaltemperatures, label="points")
xlabel!("Number of Pirates [Approximate]")
ylabel!("Global Temperature (C)")
title!("Influence of pirate population on global warming")

# And notice how this second plot differs from the first!  Using text like this is a little silly in a Jupyter notebook where we have fancy drawing capabilities, but it can be very useful for quick and dirty visualization in a terminal.

# ### Exercises
#
# #### 8.1
# Given
# ```julia
# x = -10:10
# ```
# plot y vs. x for $y = x^2$.  You may want to change backends back again.

# #### 8.2
# Execute the following code

# NOTE(review): `x` is not defined by any code cell in this notebook — it comes
# from completing exercise 8.1 above (`x = -10:10`); this cell errors otherwise.
p1 = plot(x, x)
p2 = plot(x, x.^2)
p3 = plot(x, x.^3)
p4 = plot(x, x.^4)
plot(p1, p2, p3, p4, layout = (2, 2), legend = false)

# and then create a $4x1$ plot that uses `p1`, `p2`, `p3`, and `p4` as subplots.
introductory-tutorials/intro-to-julia/08. Plotting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: RevData39 # language: python # name: rd39 # --- # ---- # <img src="../../../files/refinitiv.png" width="20%" style="vertical-align: top;"> # # # Data Library for Python # # ---- # ## Delivery - OMM Stream - Market By Price data via callback # # This notebook demonstrates how to use the OMM Stream interface to request streaming Full Depth Orderbook data # ## Set the location of the configuration file # For ease of use, you can set various initialization parameters of the RD Library in the **_refinitiv-data.config.json_** configuration file - as described in the Quick Start -> Sessions example. # # ### One config file for the tutorials # As these tutorial Notebooks are categorised into sub-folders and to avoid the need for multiple config files, we will use the _RD_LIB_CONFIG_PATH_ environment variable to point to a single instance of the config file in the top-level ***Configuration*** folder. # # Before proceeding, please **ensure you have entered your credentials** into the config file in the ***Configuration*** folder. 
import os os.environ["RD_LIB_CONFIG_PATH"] = "../../../Configuration" from refinitiv.data.delivery import omm_stream import refinitiv.data as rd import datetime import json # ## Open the default session # # To open the default session ensure you have a '*refinitiv-data.config.json*' in the ***Configuration*** directory, populated with your credentials and specified a 'default' session in the config file # # + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} rd.open_session() # - # # Example # ## Define a function to display events # Callback function to display data or status events def display_event(eventType, event): currentTime = datetime.datetime.now().time() print("----------------------------------------------------------") print(">>> {} event received at {}".format(eventType, currentTime)) # Print only 1000 characters to limit the output print(json.dumps(event, indent=2)[0:1000]) return # ## Create an OMM Stream and register the event callbacks # + stream = omm_stream.Definition(name="VOD.L", domain='MarketByPrice').get_stream() # Refresh - the first full imaage we get back from the server stream.on_refresh(lambda stream, event : display_event("Refresh", event)) # Update - as and when field values change, we receive updates from the server stream.on_update(lambda stream, event : display_event("Update", event)) # Status - if data goes stale or item closes, we get a status message stream.on_status(lambda stream, event : display_event("Status", event)) # Other errors stream.on_error(lambda stream, event : display_event("Error", event)) # - # ## Open the Item Stream # The **open()** call to open the OMM Stream is a synchronous one. This means the first event (either via on_refresh(), on_status() or on_error()) can occur before the open() method returns. # However, if we use the **open_async()** asynchronous method instead, the first event callback will be after open_async() returns. 
# + tags=[] # Library will request OrderBook from server stream.open() # We should intially receive the full orderbook, # after which we will receive updates for specific order (Add, Update, Delete) # Note from the above display_event() function that I am dumping just the first 1000 characters to minimise output # You should remove this limit to see the full response. # - # You may notice that there are multiple Refresh events - this is often the case for the more actively traded instruments with large orders books. # Once all Refresh events have been received, you can then expect to receive Update events with just the Order changes i.e. Add, Update or Delete orders. # **NOTE:** I am truncating the output to the 1st 1000 character of each response payload - for ease of viewing. # # ## Close Stream stream.close() # ### Close Session rd.close_session()
Tutorials/3.Delivery/3.1-Streaming/TUT_3.1.02-OMMStream-MarketByPrice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Python3-powered_hello-world.svg/1000px-Python3-powered_hello-world.svg.png" width="300px" height="100px" /> # # # ## <font color= #8A0829> Simulación de procesos financieros.</font> # #### <font color= #2E9AFE> `Martes - Jueves de 18:00 a 20:00 hrs`</font> # - <Strong> <NAME> </Strong> # - <Strong> Año </Strong>: 2021 # - <Strong> Copyright: </Strong> Public Domain como en [CC](https://creativecommons.org/licenses/by/2.0/) (Exepto donde se indique lo contrario) # # - <Strong> Email: </Strong> <font color="blue"> `<EMAIL>, <EMAIL>` </font> # ___ # ### Presentación mia # ___ # ### **Presentación de ustedes** # ___ # ### **Algunas reglas de juego** # * No comidas ni bebidas en las clases. # * <font color= red> EL CELULAR. </font> # * La regla más importante de todas (regla de oro): todas las entregas se realizan a través de canvas con los plazos asignados. No se aceptan trabajos fuera de la plataforma ni fuera de plazo. No insistan en este punto, no hay negocio. # ___ # ### Horario de asesoría # <font color = red> Martes 17:00 - 18:00 # ### `Descripción de la asignatura` # *Este es un curso básico de simulación utilizando python, por ende vamos a iniciar elaborando programas simples y conforme avancemos el **nivel de exigencia aumentará**, hasta donde el tiempo nos permita.* # # - Se analiza el comportamiento de procesos financieros mediante diseñar, formular, estructurar, solucionar y dar recomendaciones sobre procesos de simulación financiera para la toma de decisiones, estructurando estrategias dinámicas y políticas apropiadas al sistema. 
# # - La asignatura está diseñada para que logres dichos propósitos e inicies un proceso que te permita apropiarte de desempeños profesionales muy útiles en tu formación profesional y en tu futuro, al incorporarte a la industria u organizaciones que te demandarán resolver e implementar la simulación de escenarios bajo diferentes situaciones a través de la sistematización de la solución al problema planteado. # #### `OBJETIVO GENERAL ` # > <p style='text-align: justify;'> Apropiarse de competencias de análisis de sistemas financieros mediante la evaluación del comportamiento en el tiempo obtenido a través de la simulación dinámica de los procesos que lo integran, para la toma de decisiones financieras corporativas sobre valores, créditos, opciones, intereses, monedas, y precios bajo incertidumbre.</p> # `TEMA 1`: **Introducción a la Simulación** # > En este primer módulo se presentan los contenidos del curso, junto con las herramientas computacionales necesarias para utlizar durante todo el semestre. También, aprenderás a realizar una optimización de los códigos desarrollados usando programación vectorizada y funcional. Finalmente, se estudiarán algunos ejemplos de simulación, para ilustrar las métodologías estudiadas. # # 1. Gestión de proyectos (git, GitHub, GitKraken) I. # - Introducción e instalación de software # 2. Gestión de proyectos (git, GitHub, GitKraken) II. # - Tarea 1. # 3. Introducción a la simulación. # 5. Optimización de código **(Programación vectorizada y funcional)**. # - Quiz próxima clase. # - Tarea 2. # 6. Continuación optimización de código # 6. Generación de Números Pseudoaleatorios # - Quiz próxima clase. # - Tarea 3. # 7. Metodología de un estudio de simulación usando ejemplos simples de aplicación y números pseudoaleatorios. # 8. Método Montecarlo crudo. # 9. **Evaluación 1**. # # # `TEMA 2.` **Simulación Montecarlo** # > En este Tema se recordará formalmente ciertas distribuciones de probabilidad y su respectiva aplicación usando python. 
También, se estudiarán sus aplicaciones en el mundo ingenieril para modelar diferentes tipos de problemas. Por otro lado, se usarán ciertas herramientas matemáticas, las cuales permiten reducir la varianza cuando necesitamos generar variables aleatorias con cierta distribución de probabilidad. # # 1. Generación de observaciones aleatorias a partir de una distribución de probabilidad. # - Método de la transformada inversa. # - Quiz próxima clase. # - Tarea 4. # - Método de aceptación rechazo. # - Tarea 5. # - **Definición del proyecto**. # 2. Distribuciones de probabilidad. # - Distribución uniforme general # - Distribución triangular # - Distribución normal # - Distribución exponencial # - Distribución de Erlang # - Distribución Binomial # - Distribución de Poisson # - Tarea 6. # 3. Aplicaciones de la simulación. # 4. Técnicas de reducción de varianza. # - Muestreo estratificado. # - Método de números aleatorios complementarios. # - Quiz próxima clase. # - Tarea 7. # 5. Prueba de bondad de ajuste para ajuste de distribuciones de probabilidad. # 6. **Evaluación 2**. # 7. **Presentación del proyecto**. # # `TEMA 3.` **Valuación de Opciones usando Simulación Monte Carlo** # > En este Tema final, usando las herramientas aprendidas en los dos Temas previos, se pretende estudiar la valuación de cuatro tipos de opciones, vainilla, Asiática, Americana y Barrera. También, se pretende solucionar ciertos problemas prácticos donde es necesario la valuación de opciones. # # 1. Opciones Plan Vainilla: opción de compra y opción de venta europea # - Tarea 8. # 2. Opciones Asiáticas # - Tarea 9. # - Quiz próxima clase. # 4. Opciones de barrera # - Quiz al final de la clase. # 3. Opciones americanas # - Tarea 10. # - Quiz próxima clase. # 5. Evaluación 3. 
# ### `Evaluación` # - **10 Tareas 25%** # - La evaluación de cada tarea se divide en dos partes # - Primera entrega 60% # - Segunda entrega 40% # # - **3 Exámenes 45%** # - Examen 1 -> 15% # - Examen 2 -> 15% # - Examen 3 -> 15% # # - **7 Quices 10%** # # - <strong> Proyecto (trabajo en equipo) 20%</strong> # - La evaluación de cada proyecto se divide en dos partes # - Reporte 10% # - Exposición 10% # - <font color="blue">Equipos de 2 integrantes mínimo y 3 máximo</font>. # - Si durante algún proyecto las cosas no funcionan entre los integrantes, para el siguiente proyecto se pueden formar equipos nuevos. # ### `Bibliografía ` # > ``` # - Simulation techniques in financial risk management by <NAME> and <NAME>, year 2015 # - Monte Carlo methods in finalcial engineering by <NAME>, year 2010 # - Handbook in Monte Carlo simulation applications in financial engineering, risk management, and economics by <NAME>, year 2014``` # Estos y muchos mas libros los pueden encontrar en la Biblioteca. # <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. Modified by <NAME>. # </footer>
TEMA-1/Clase0_GuiaSimulacionPF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # grex feed

# Configure and run this to submit new work to the grex tasks Celery machinery

# %load_ext autoreload
# %autoreload 2

import neo4j
import lib.grexutils as gu
from grex_tasks import add, run_epp_code_strings
import random

# Connect to the graph database holding experiments/procedures/parameters.
# NOTE(review): hard-coded local-dev credentials — confirm before reuse elsewhere.
driver = neo4j.GraphDatabase.driver("neo4j://neo4j:7687", auth=("neo4j", "test"))
driver.verify_connectivity()

# Resolve the experiment to feed by its display name.
ex_key = gu.get_experiment_key_from_name(driver, name='t4')
ex_key

# Map procedure display names -> unikeys for this experiment.
procedures = dict(gu.get_procedure_names_keys_from_experiment_key(driver, key=ex_key))
procedures

proc_key = procedures['Train Pauls ADCs']
proc_key

# Parameters nodes of the procedure that have no started run yet.
param_keys = gu.get_unstarted_parameters_of_procedure(driver, procedure_unikey=proc_key)
param_keys[:3]

# Shuffle in place so workers sample the parameter space in random order.
random.shuffle(param_keys)

len(param_keys)

# Enqueue one Celery task per unstarted parameter set; print progress every 100.
cnt = 0
for param_key in param_keys:
    #print(ex_key, proc_key, param_key)
    run_epp_code_strings.delay(experiment_unikey=ex_key, procedure_unikey=proc_key, parameters_unikey=param_key)
    cnt += 1
    if cnt % 100 == 0:
        print(cnt, ex_key, proc_key, param_key)

# Deliberate guard: stops "Run All" here so the scratch cells below never
# execute unattended.
assert False, "stop here"

# ---
# # Debugging scratch space

gu.get_code_strings_of_experiment_procedure_parameters(driver, experiment_unikey='7R42cwhPZvhpmEUd51W5xQ', procedure_unikey='<KEY>', parameters_unikey='<KEY>')

import tools.neotools as nj

# Scratch variant 1: the full query — typed relationships, returns the three
# code-string lists concatenated in prepend/body/append order.
def get_code_strings_of_experiment_procedure_parameters(driver, **kwargs):
    """Fetch and concatenate the code strings for one (experiment, procedure, parameters) triple."""
    q = """
    MATCH (e:Experiment {unikey: $experiment_unikey})
          -[:INCLUDES]-> (proc:Procedure {unikey: $procedure_unikey})
          -[:INCORPORATES]-> (par:Parameters {unikey: $parameters_unikey})
    RETURN par.prepend_code_strings, proc.code_strings, par.append_code_strings
    """
    records = nj.query_read_return_list(driver, q, **kwargs)
    # Exactly one match is expected; anything else indicates bad keys or data.
    if len(records) < 1:
        raise KeyError(f'No experiment,procedure,parameters found to match "{kwargs}"')
    if len(records) > 1:
        raise KeyError(f'Found {len(records)} experiment,procedure,parameters "{kwargs}"')
    r = records[0]
    #print(f"r['proc.code_strings'] = {r['proc.code_strings']}")
    #print(f"r['par.code_strings'] = {r['par.code_strings']}")
    return r['par.prepend_code_strings'] + \
           r['proc.code_strings'] + \
           r['par.append_code_strings']

import tools.neotools as nj

# NOTE(review): scratch variant 2 shadows variant 1 — same query but with
# untyped relationships (-->), presumably to test whether the relationship
# types were the reason a lookup failed.
def get_code_strings_of_experiment_procedure_parameters(driver, **kwargs):
    """Debug variant: same lookup with untyped relationship patterns."""
    q = """
    MATCH (e:Experiment {unikey: $experiment_unikey})
          --> (proc:Procedure {unikey: $procedure_unikey})
          --> (par:Parameters {unikey: $parameters_unikey})
    RETURN par.prepend_code_strings, proc.code_strings, par.append_code_strings
    """
    records = nj.query_read_return_list(driver, q, **kwargs)
    if len(records) < 1:
        raise KeyError(f'No experiment,procedure,parameters found to match "{kwargs}"')
    if len(records) > 1:
        raise KeyError(f'Found {len(records)} experiment,procedure,parameters "{kwargs}"')
    r = records[0]
    #print(f"r['proc.code_strings'] = {r['proc.code_strings']}")
    #print(f"r['par.code_strings'] = {r['par.code_strings']}")
    return r['par.prepend_code_strings'] + \
           r['proc.code_strings'] + \
           r['par.append_code_strings']

import tools.neotools as nj

# NOTE(review): scratch variant 3 shadows variant 2 — narrows the query to the
# Experiment->Procedure hop only and returns the raw record (not a
# concatenation), to isolate which hop of the pattern matches.
def get_code_strings_of_experiment_procedure_parameters(driver, **kwargs):
    """Debug variant: Experiment->Procedure hop only; returns the raw record."""
    q = """
    MATCH (e:Experiment {unikey: $experiment_unikey})
          --> (proc:Procedure {unikey: $procedure_unikey})
    RETURN proc.code_strings
    """
    records = nj.query_read_return_list(driver, q, **kwargs)
    if len(records) < 1:
        raise KeyError(f'No experiment,procedure,parameters found to match "{kwargs}"')
    if len(records) > 1:
        raise KeyError(f'Found {len(records)} experiment,procedure,parameters "{kwargs}"')
    r = records[0]
    #print(f"r['proc.code_strings'] = {r['proc.code_strings']}")
    #print(f"r['par.code_strings'] = {r['par.code_strings']}")
    return r

import tools.neotools as nj

# NOTE(review): scratch variant 4 shadows variant 3 — matches the Parameters
# node alone; this is the definition in effect for the call below.
def get_code_strings_of_experiment_procedure_parameters(driver, **kwargs):
    """Debug variant: Parameters node only; returns the raw record."""
    q = """
    MATCH (par:Parameters {unikey: $parameters_unikey})
    RETURN par.prepend_code_strings
    """
    records = nj.query_read_return_list(driver, q, **kwargs)
    if len(records) < 1:
        raise KeyError(f'No experiment,procedure,parameters found to match "{kwargs}"')
    if len(records) > 1:
        raise KeyError(f'Found {len(records)} experiment,procedure,parameters "{kwargs}"')
    r = records[0]
    #print(f"r['proc.code_strings'] = {r['proc.code_strings']}")
    #print(f"r['par.code_strings'] = {r['par.code_strings']}")
    return r

get_code_strings_of_experiment_procedure_parameters(driver, experiment_unikey='<KEY>', procedure_unikey='<KEY>', parameters_unikey='<KEY>')

# Smoke-test the Celery broker: 100 trivial add tasks with staggered countdowns.
for i in range(10):
    for j in range(10):
        add.apply_async((i,j), countdown=3*j)

from tasks import div

div.delay(1, 7)

div.delay(22, 7)

# Deliberate divide-by-zero to observe worker-side error handling.
div.delay(1,0)
nbs/grex feed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow_p27 # language: python # name: conda_tensorflow_p27 # --- # # ResNet CIFAR-10 with tensorboard # # This notebook shows how to use TensorBoard, and how the training job writes checkpoints to a external bucket. # The model used for this notebook is a ResNet model, trained with the CIFAR-10 dataset. # See the following papers for more background: # # [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf) by <NAME>, <NAME>, <NAME>, and <NAME>, Dec 2015. # # [Identity Mappings in Deep Residual Networks](https://arxiv.org/pdf/1603.05027.pdf) by <NAME>, <NAME>, <NAME>, and <NAME>, Jul 2016. # ### Set up the environment # + import os import sagemaker from sagemaker import get_execution_role sagemaker_session = sagemaker.Session() role = get_execution_role() # - # ### Download the CIFAR-10 dataset # Downloading the test and training data will take around 5 minutes. # + import utils utils.cifar10_download() # - # ### Upload the data to a S3 bucket inputs = sagemaker_session.upload_data(path='/tmp/cifar10_data', key_prefix='data/DEMO-cifar10') # **sagemaker_session.upload_data** will upload the CIFAR-10 dataset from your machine to a bucket named **sagemaker-{region}-{*your aws account number*}**, if you don't have this bucket yet, sagemaker_session will create it for you. 
# ### Complete source code # - [source_dir/resnet_model.py](source_dir/resnet_model.py): ResNet model # - [source_dir/resnet_cifar_10.py](source_dir/resnet_cifar_10.py): main script used for training and hosting # ## Create a training job using the sagemaker.TensorFlow estimator # + from sagemaker.tensorflow import TensorFlow source_dir = os.path.join(os.getcwd(), 'source_dir') estimator = TensorFlow(entry_point='resnet_cifar_10.py', source_dir=source_dir, role=role, framework_version='1.8', hyperparameters={'throttle_secs': 30}, training_steps=1000, evaluation_steps=100, train_instance_count=2, train_instance_type='ml.c4.xlarge', base_job_name='tensorboard-example') estimator.fit(inputs, run_tensorboard_locally=True) # - # The **```fit```** method will create a training job named **```tensorboard-example-{unique identifier}```** in two **ml.c4.xlarge** instances. These instances will write checkpoints to the s3 bucket **```sagemaker-{your aws account number}```**. # # If you don't have this bucket yet, **```sagemaker_session```** will create it for you. These checkpoints can be used for restoring the training job, and to analyze training job metrics using **TensorBoard**. # # The parameter **```run_tensorboard_locally=True```** will run **TensorBoard** in the machine that this notebook is running. Everytime a new checkpoint is created by the training job in the S3 bucket, **```fit```** will download the checkpoint to the temp folder that **TensorBoard** is pointing to. # # When the **```fit```** method starts the training, it will log the port that **TensorBoard** is using to display the metrics. The default port is **6006**, but another port can be choosen depending on its availability. The port number will increase until finds an available port. After that the port number will printed in stdout. # # It takes a few minutes to provision containers and start the training job.**TensorBoard** will start to display metrics shortly after that. 
# # You can access **TensorBoard** locally at [http://localhost:6006](http://localhost:6006) or using your SageMaker notebook instance [proxy/6006/](/proxy/6006/)(TensorBoard will not work if forget to put the slash, '/', in end of the url). If TensorBoard started on a different port, adjust these URLs to match.This example uses the optional hyperparameter **```throttle_secs```** to generate training evaluations more often, allowing to visualize **TensorBoard** scalar data faster. You can find the available optional hyperparameters [here](https://github.com/aws/sagemaker-python-sdk#optional-hyperparameters). # # Deploy the trained model to prepare for predictions # # The deploy() method creates an endpoint which serves prediction requests in real-time. predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') # # Make a prediction with fake data to verify the endpoint is up # # Prediction is not the focus of this notebook, so to verify the endpoint's functionality, we'll simply generate random data in the correct shape and make a prediction. # + import numpy as np random_image_data = np.random.rand(32, 32, 3) predictor.predict(random_image_data) # - # # Cleaning up # To avoid incurring charges to your AWS account for the resources used in this tutorial you need to delete the **SageMaker Endpoint:** sagemaker.Session().delete_endpoint(predictor.endpoint)
sagemaker-python-sdk/tensorflow_resnet_cifar10_with_tensorboard/tensorflow_resnet_cifar10_with_tensorboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Heroes Of Pymoli Data Analysis # *This game is largely popular for teenagers from age 15-24 compromising of 63% of the users. Assuming majority of the players made purchase, this is the age group the game should aim to attract. Researches to find the interest of the gamers in the age group should be completed to increase the players. # # *Assuming the game is gaining a lot of popularity, the count of users who made the purchase is low. Only 576 players made purchase. With ease of access to the game, the count should be much higher for the game to be profitable. The low count presents couple improvements to make. First, there should be efforts to increase the number of users who would make in-game purchases. Second, understand that the purchase analysis will be general overview. The analysis with low population does not accurately depict the current market status, however, it does show what this game could thrive in. # # *Among the top 5 items that are sold, 3 items are sold at high 4 dollars range. It shows that users are more than willing to pay high 4 dollars, which gives a good point of pricing for items. If the customers are willing to pay 4.61 dollars for an item, company should not be posting items that are 2 dollars. Specialized and unique items could be much higher in cost. # # *Looking at the age group analysis, people in their 30's are buying high priced items. It is shown through lower number of counts of users making purchases yet high in purchase costs per person. This age group could be a good target to increase the profit, for they are more willing to spend money. 
# + 
# Dependencies and Setup
import pandas as pd

# File to Load (Remember to Change These)
filepath = "./Resources/purchase_data.csv"

# Read Purchasing File and store into a Pandas data frame
purchase_data = pd.read_csv(filepath)
purchase_data.head()
# -

# ## Player Count
#
# * Display the total number of players

# Each screen name (SN) identifies one player; de-duplicate before counting.
tot_players_df = pd.DataFrame({"Total Players": [purchase_data["SN"].nunique()]})
tot_players_df

# ## Purchasing Analysis (Total)
#
# * Run basic calculations to obtain number of unique items, average price, etc.
# * Create a summary data frame to hold the results
# * Display the summary data frame

# +
# Summary table of purchasing activity across the whole dataset.
summary_df = pd.DataFrame([{
    "Number of Unique Items": purchase_data["Item Name"].nunique(),
    "Average Price": purchase_data["Price"].mean(),
    "Number of Purchase": purchase_data["Price"].count(),
    "Total Revenue": purchase_data["Price"].sum(),
}])

# Currency formatting for display.
summary_df["Average Price"] = summary_df["Average Price"].map("${:.2f}".format)
summary_df["Total Revenue"] = summary_df["Total Revenue"].map("${:.2f}".format)
summary_df
# -

# ## Gender Demographics
# * Percentage and Count of Male Players
# * Percentage and Count of Female Players
# * Percentage and Count of Other / Non-Disclosed

# +
# Count unique *players* per gender. NOTE(review): the original counted purchase
# rows, which over-weights repeat buyers; the section asks for player counts,
# and the Age Demographics section already de-duplicates by SN — made consistent.
gender_players = purchase_data.groupby("Gender")["SN"].nunique()
gender_order = ["Male", "Female", "Other / Non-Disclosed"]

Demo_df = pd.DataFrame({
    "Total Count": gender_players.reindex(gender_order),
    "Percentage of Players": gender_players.reindex(gender_order) / gender_players.sum() * 100,
})
Demo_df["Percentage of Players"] = Demo_df["Percentage of Players"].map("{:.2f}%".format)
Demo_df.index.name = ""
Demo_df
# -

# ## Purchasing Analysis (Gender)
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
# * Create a summary data frame to hold the results
# * Display the summary data frame

# +
pivot_gender = purchase_data.groupby(by="Gender")

Gender_Analysis_df = pd.DataFrame({
    "Purchase Count": pivot_gender["Item Name"].count(),
    "Average Purchase Price": pivot_gender["Price"].mean(),
    "Total Purchase Value": pivot_gender["Price"].sum(),
})
# Per-person spend divides by unique players per gender, not by purchase rows.
Gender_Analysis_df["Avg Total Purchase per Person"] = (
    Gender_Analysis_df["Total Purchase Value"] / pivot_gender["SN"].nunique()
)

for col in ["Average Purchase Price", "Total Purchase Value", "Avg Total Purchase per Person"]:
    Gender_Analysis_df[col] = Gender_Analysis_df[col].map("${:.2f}".format)
Gender_Analysis_df
# -

# ## Age Demographics
#
# * Establish bins for ages
# * Categorize the existing players using the age bins
# * Calculate the numbers and percentages by age group
# * Create a summary data frame to hold the results
# * Display Age Demographics Table

# +
# Bin every purchase row by the buyer's age.
bin_data_df = purchase_data.copy()
bin_group = [0, 9, 14, 19, 24, 29, 34, 39, 150]
bin_label = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
bin_data_df["Age Groups"] = pd.cut(x=bin_data_df["Age"], bins=bin_group, labels=bin_label)

pivot_age_groups = bin_data_df.groupby(by="Age Groups")

# Unique players per age bracket (nunique replaces the original
# unique()+len-per-group dance and the dead commented-out block).
Age_Demo = pd.DataFrame({"Total Count": pivot_age_groups["SN"].nunique()})
Age_Demo.index.name = "Age"
Age_Demo["Percentage of Players"] = (
    Age_Demo["Total Count"] / Age_Demo["Total Count"].sum() * 100
).map("{:.2f}%".format)
Age_Demo
# -

# ## Purchasing Analysis (Age)
# * Bin the purchase_data data frame by age
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
# * Create a summary data frame to hold the results
# * Display the summary data frame

# +
Age_Analysis_df = pd.DataFrame({
    "Purchase Count": pivot_age_groups["Item Name"].count(),
    "Average Purchase Price": pivot_age_groups["Price"].mean(),
    "Total Purchase Value": pivot_age_groups["Price"].sum(),
})
# Per-person spend divides by unique players per age group.
Age_Analysis_df["Avg Total Purchase per Person"] = (
    Age_Analysis_df["Total Purchase Value"] / pivot_age_groups["SN"].nunique()
)

for col in ["Average Purchase Price", "Total Purchase Value", "Avg Total Purchase per Person"]:
    Age_Analysis_df[col] = Age_Analysis_df[col].map("${:.2f}".format)
Age_Analysis_df
# -

# ## Top Spenders
#
# * Run basic calculations to obtain the results in the table below
# * Create a summary data frame to hold the results
# * Sort the total purchase value column in descending order
# * Display a preview of the summary data frame

# +
pivot_spenders_df = bin_data_df.groupby(by="SN")

spender_stats = pd.DataFrame({
    "Purchase Count": pivot_spenders_df["Item Name"].count(),
    "Average Purchase Price": pivot_spenders_df["Price"].mean(),
    "Total Purchase Value": pivot_spenders_df["Price"].sum(),
})

# Sort on the numeric total purchase value, as the spec asks (the original
# sorted by purchase count, and a rename-dict typo "Itenm Name" left the
# count column unrenamed).
Spender_Analysis_df = spender_stats.sort_values("Total Purchase Value", ascending=False).head()

Spender_Analysis_df = Spender_Analysis_df.copy()
for col in ["Average Purchase Price", "Total Purchase Value"]:
    Spender_Analysis_df[col] = Spender_Analysis_df[col].map("${:.2f}".format)
Spender_Analysis_df
# -

# ## Most Popular Items
#
# * Retrieve the Item ID, Item Name, and Item Price columns
# * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value
# * Create a summary data frame to hold the results
# * Sort the purchase count column in descending order
# * Display a preview of the summary data frame

# +
# Group by both Item ID and Item Name as the spec asks (the original grouped
# by name only and merged the ID back in afterwards).
pivot_item_df = bin_data_df.groupby(by=["Item ID", "Item Name"])

# Keep the numeric table around; the profitability section sorts it below.
item_stats = pd.DataFrame({
    "Purchase Count": pivot_item_df["SN"].count(),
    "Item Price": pivot_item_df["Price"].mean(),
    "Total Purchase Value": pivot_item_df["Price"].sum(),
})

Popular_Items_Analysis_df = item_stats.sort_values("Purchase Count", ascending=False).head().copy()
for col in ["Item Price", "Total Purchase Value"]:
    Popular_Items_Analysis_df[col] = Popular_Items_Analysis_df[col].map("${:.2f}".format)
Popular_Items_Analysis_df
# -

# ## Most Profitable Items
#
# * Sort the above table by total purchase value in descending order
# * Display a preview of the data frame

# +
# Sort the *unformatted* numbers: the original sorted the "$x.yz" strings,
# which orders lexicographically (e.g. "$9.99" above "$50.76") — wrong.
table = item_stats.sort_values("Total Purchase Value", ascending=False).head().copy()
for col in ["Item Price", "Total Purchase Value"]:
    table[col] = table[col].map("${:.2f}".format)
table
# -
HeroesofPymoli.ipynb