code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="Xg9UqqN0LPBo" colab_type="code" colab={} from google.colab import drive # + id="3HjI9FgQLq1h" colab_type="code" outputId="4ef399dd-af58-40e5-d932-0eb25dfa0bd2" executionInfo={"status": "ok", "timestamp": 1581768619266, "user_tz": -60, "elapsed": 451, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} drive.mount("/content/drive") # + id="u7ts2sDpMHoT" colab_type="code" outputId="0734bc9e-3c9c-4fde-b1ef-8dea53ee938c" executionInfo={"status": "error", "timestamp": 1581768593580, "user_tz": -60, "elapsed": 456, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 797} # ls # #cd "/content/drive/My Drive/Colab Notebooks" # + id="AbNq1OAxM_iN" colab_type="code" outputId="65246dc3-685a-47b9-d219-0b1784450db6" executionInfo={"status": "error", "timestamp": 1581768602630, "user_tz": -60, "elapsed": 577, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 962} # cd # + id="fyczHh4KNxOH" colab_type="code" colab={} # + id="UH6Vec_OSK5C" colab_type="code" colab={} # + id="oFDw8yhNSNqe" colab_type="code" colab={} # + id="snuSquEUW6fe" colab_type="code" outputId="3f2857b1-1e53-4755-c7bd-5dee39ae36c6" executionInfo={"status": "ok", "timestamp": 1581694495627, "user_tz": -60, "elapsed": 1607, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 51} # ls # + id="6Z_8pbcMW99a" colab_type="code" outputId="dac03cb3-fcbf-4514-cae9-95e6e8fe765c" executionInfo={"status": "ok", 
"timestamp": 1581767647270, "user_tz": -60, "elapsed": 8579, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 313} # !pip install eli5 # + id="YdLANPNlYiMk" colab_type="code" outputId="58b7bb03-9f65-4017-b324-532d3817c8e2" executionInfo={"status": "ok", "timestamp": 1581767657665, "user_tz": -60, "elapsed": 3250, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 177} import pandas as pd import numpy as np from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import cross_val_score import eli5 from eli5.sklearn import PermutationImportance from ast import literal_eval from tqdm import tqdm_notebook # + id="b1aCyqRHZuo6" colab_type="code" outputId="aaa43da9-4099-422c-f4a9-44590ee204cd" executionInfo={"status": "ok", "timestamp": 1581768145294, "user_tz": -60, "elapsed": 489, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} # cd "/content/drive/My Drive/Colab Notebooks/dw_matrix" # + id="gO_p_E4Dakue" colab_type="code" colab={} # + id="TFqNf_8QamjN" colab_type="code" colab={} df = pd.read_csv('data/men_shoes.csv', low_memory=False) # + id="i3oTlmSda3e5" colab_type="code" colab={} def run_model(feats, model = DecisionTreeRegressor(max_depth=5)): x = df[feats].values y = df['prices_amountmin'].values scores = cross_val_score(model, x, y, scoring='neg_mean_absolute_error') return np.mean(scores), np.std(scores) # + id="blV_uDl4bIxU" colab_type="code" colab={} df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0] # + id="yuOu2PoLbxCC" colab_type="code" outputId="24c24061-1fd6-4d7d-d99d-a6ecad41ba3e" executionInfo={"status": 
"ok", "timestamp": 1581767673537, "user_tz": -60, "elapsed": 521, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} run_model(['brand_cat']) # + id="xDk_9fjib4om" colab_type="code" outputId="9afbc5d5-3119-4300-d8df-b4eb3752dfaa" executionInfo={"status": "ok", "timestamp": 1581767677698, "user_tz": -60, "elapsed": 3421, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) run_model(['brand_cat'], model) # + id="Upr40iFnchf9" colab_type="code" outputId="5db02c8b-bc09-4420-fd37-0ab38ecac66d" executionInfo={"status": "ok", "timestamp": 1581767678959, "user_tz": -60, "elapsed": 475, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 147} df.features.head().values # + id="9zhJ0vbYmni6" colab_type="code" outputId="c9035abc-1dd9-4aa3-91a7-fb9bf7127495" executionInfo={"status": "ok", "timestamp": 1581767680260, "user_tz": -60, "elapsed": 512, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} test = { 'key': 'value'} test['key'] # + id="WsF7ye6BnuHG" colab_type="code" outputId="b7c8a0df-cb06-41ba-a3c0-0fd268d0d2f6" executionInfo={"status": "ok", "timestamp": 1581767681630, "user_tz": -60, "elapsed": 475, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} str_dict ='[{"key":"Gender","value":["Men"]},{"key":"Shoe Size","value":["M"]},{"key":"Shoe Category","value":["Men\'s Shoes"]},{"key":"Color","value":["Multicolor"]},{"key":"Manufacturer Part 
Number","value":["8190-W-NAVY-7.5"]},{"key":"Brand","value":["Josmo"]}]' literal_eval(str_dict)[1]['key'] # + id="iEvsDbBBoiZn" colab_type="code" colab={} def parse_features(x): output_dict = {} if str(x) == 'nan' : return output_dict features = literal_eval(x.replace('\\"','"')) for item in features: key = item['key'].lower().strip() value = item['value'][0].lower().strip() output_dict[key] = value return output_dict # + id="IVQDOGkwpiWI" colab_type="code" outputId="ef82e654-70df-4a9e-c5b7-62e3b9380894" executionInfo={"status": "ok", "timestamp": 1581767686381, "user_tz": -60, "elapsed": 1992, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 147} df['features_parsed'] = df['features'].map(parse_features) df['features_parsed'].head().values # + id="sHbxQPsHrBuN" colab_type="code" outputId="86e31932-a2ec-4ee1-f3ac-477ec8ac6e8b" executionInfo={"status": "ok", "timestamp": 1581767687530, "user_tz": -60, "elapsed": 472, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} keys = set() df['features_parsed'].map( lambda x: keys.update(x.keys()) ) len(keys) # + id="-os79oRhrIVq" colab_type="code" outputId="35c29a33-8a4e-4dc6-a08b-4a5ef61f2d11" executionInfo={"status": "ok", "timestamp": 1581767692749, "user_tz": -60, "elapsed": 4336, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": ["5e1ef9b059fc4d20876ae5bf05a766d5", "eeeed125f5df4856b4d2edc74af203ff", "4ec7f1cf44a3444c875f35d36961e10c", "bf0dca00576f4f19a74a581a0c263702", "ff9526e73915461baac83b5465d29bc0", "fdec4bf5e64e44cb91ae3e2c73385434", "<KEY>", "<KEY>"]} def get_name_feat(keys): return 'feat_' + key for key in tqdm_notebook(keys): df[get_name_feat(key)] = 
df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan) # + id="ybBQEj6E935Z" colab_type="code" colab={} keys_stat = {} for key in keys: keys_stat[key] = df[ False == df[get_name_feat(key)].isnull() ].shape[0] / df.shape[0] * 100 # + id="uMakc2UAATMd" colab_type="code" outputId="8f025abf-be59-41f2-ed6d-8f61dac5f91a" executionInfo={"status": "ok", "timestamp": 1581767698329, "user_tz": -60, "elapsed": 467, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 109} {k:v for k,v in keys_stat.items() if v > 30} # + id="Y64OPZJPAZrZ" colab_type="code" colab={} df['feat_brand_cat'] = df['feat_brand'].factorize()[0] df['feat_color_cat'] = df['feat_color'].factorize()[0] df['feat_gender_cat'] = df['feat_gender'].factorize()[0] df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0] df['feat_material_cat'] = df['feat_material'].factorize()[0] df['feat_sport_cat'] = df['feat_sport'].factorize()[0] df['feat_style_cat'] = df['feat_style'].factorize()[0] for key in keys: df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0] # + id="QISs-iM_Bdui" colab_type="code" colab={} df['brand'] = df['brand'].map(lambda x : str(x).lower()) # + id="LvqIbKxoFsdC" colab_type="code" outputId="715f6eb0-282f-403c-8874-4e084c7320eb" executionInfo={"status": "ok", "timestamp": 1581767706426, "user_tz": -60, "elapsed": 3938, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} model = RandomForestRegressor(max_depth=5, n_estimators=100) run_model( ['brand_cat'], model) feats_cats =[x for x in df.columns if 'cat' in x] feats_cats # + id="nuQi3MjhGH7p" colab_type="code" colab={} feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_style_cat', 'feat_shape_cat', 'feat_metal type_cat', 
'feat_adjustable_cat', 'feat_weight_cat', 'feat_fabric content_cat'] #feats += feats_cats #feats = list(set(feats)) model = RandomForestRegressor(max_depth=5, n_estimators=100) result = run_model( feats, model) # + id="L4Enql7gGioi" colab_type="code" outputId="f827f226-37d9-41e2-e849-67470fc850b2" executionInfo={"status": "ok", "timestamp": 1581767718154, "user_tz": -60, "elapsed": 4760, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 226} X= df[ feats ].values y=df['prices_amountmin'].values m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0) m.fit(X,y) print(result) perm= PermutationImportance(m, random_state=1).fit(X,y); eli5.show_weights(perm, feature_names=feats) # + id="IK6IsMSkHTG5" colab_type="code" outputId="9c8ba977-3f3a-44f4-8d86-afe724e8e4d2" executionInfo={"status": "ok", "timestamp": 1581767719398, "user_tz": -60, "elapsed": 530, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 237} df['brand'].value_counts(normalize=True) # + id="5Q_F1ap7IBar" colab_type="code" outputId="83e86002-d251-4516-b5d4-ed1c96b10490" executionInfo={"status": "ok", "timestamp": 1581767720846, "user_tz": -60, "elapsed": 504, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 147} df[ df['brand'] == 'nike'].features_parsed.sample(5).values # + id="3Og1h9PcIbdi" colab_type="code" outputId="c46f0f83-3538-4480-8858-5fd921735e5a" executionInfo={"status": "error", "timestamp": 1581768159551, "user_tz": -60, "elapsed": 478, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 962} # cd "/content/drive/My Drive/Colab Notebooks" # + id="h4pVCVl5Nzkw" 
colab_type="code" outputId="1c09b21c-6e27-47bb-dcaf-311cec0f2a0c" executionInfo={"status": "ok", "timestamp": 1581767724268, "user_tz": -60, "elapsed": 442, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} # cd "/content/drive/My Drive/Colab Notebooks/dw_matrix" # + id="b2bbeGk_N1ke" colab_type="code" colab={} # ls matrix_one # + id="iTcQ99a-N8R7" colab_type="code" colab={} # + id="-bK4ttedN_Q2" colab_type="code" colab={} # !git add matrix_one/Day5.ipynb # + id="6_BiJwb-ONYz" colab_type="code" outputId="dc9b7f38-9984-4f50-a802-c5f8fb334e1e" executionInfo={"status": "ok", "timestamp": 1581767738912, "user_tz": -60, "elapsed": 3404, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 72} # !git commit -m "Druga część modelu" # + id="gF3tnsSLQ49y" colab_type="code" colab={} # !git config --global user.email "<EMAIL>" # !git config --global user.name "Weronika" # + id="UFL7dKSPRA3f" colab_type="code" outputId="934fe30f-74db-449b-8892-6b35315da947" executionInfo={"status": "ok", "timestamp": 1581767748767, "user_tz": -60, "elapsed": 1972, "user": {"displayName": "Weronika \u0141ach", "photoUrl": "", "userId": "14428004015318063088"}} colab={"base_uri": "https://localhost:8080/", "height": 35} # !git push # + id="4yOtxQWpRxX0" colab_type="code" colab={}
Day5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import os myDirectory='E:\OneDrive\Python\PythonMegaCourse\Jupyter\DataAnalysisPandas' os.listdir(myDirectory) import pandas dfcsv=pandas.read_csv('supermarkets.csv') dfcsv dfjson=pandas.read_json('supermarkets.json') dfjson dir(dfjson)
DataAnalysisPandas/.ipynb_checkpoints/DataAnalysis-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import math import numpy as np import os import nemo from nemo import logging from nemo.utils.lr_policies import WarmupAnnealing import nemo.collections.nlp as nemo_nlp from nemo.collections.nlp.data import NemoBertTokenizer from nemo.collections.nlp.nm.trainables import PunctCapitTokenClassifier from nemo.backends.pytorch.common.losses import CrossEntropyLossNM, LossAggregatorNM from nemo.collections.nlp.callbacks.punctuation_capitalization_callback import eval_iter_callback, eval_epochs_done_callback from nemo.collections.nlp.data.datasets.datasets_utils import calc_class_weights DATA_DIR = "PATH_TO_WHERE_THE_DATA_IS" WORK_DIR = "PATH_TO_WHERE_TO_STORE_CHECKPOINTS_AND_LOGS" # See the list of available pre-trained models by calling # the nemo_nlp.nm.trainables.get_bert_models_list() PRETRAINED_BERT_MODEL = "bert-base-uncased" # model parameters BATCHES_PER_STEP = 1 BATCH_SIZE = 128 CLASSIFICATION_DROPOUT = 0.1 MAX_SEQ_LENGTH = 64 NUM_EPOCHS = 10 LEARNING_RATE = 0.00002 LR_WARMUP_PROPORTION = 0.1 OPTIMIZER = "adam" STEP_FREQ = 200 # determines how often loss will be printed and checkpoint saved PUNCT_NUM_FC_LAYERS = 3 NUM_SAMPLES = 100000 # - # # Download and preprocess the data # In this notebook we're going to use a subset of English examples from the [Tatoeba collection of sentences](https://tatoeba.org/eng), set NUM_SAMPLES=-1 and consider including other datasets to improve the performance of the model. Use [NeMo/examples/nlp/token_classification/get_tatoeba_data.py](https://github.com/NVIDIA/NeMo/blob/master/examples/nlp/token_classification/get_tatoeba_data.py) to download and preprocess the Tatoeba data. # + # This should take about a minute since the data is already downloaded in the previous step # ! 
python get_tatoeba_data.py --data_dir $DATA_DIR --num_sample $NUM_SAMPLES # - # After the previous step, you should have a `DATA_DIR` folder with the following files: # - labels_train.txt # - labels_dev.txt # - text_train.txt # - text_dev.txt # # The format of the data described in NeMo docs. # # Define Neural Modules # Instantiate neural factory with supported backend nf = nemo.core.NeuralModuleFactory( # If you're training with multiple GPUs, you should handle this value with # something like argparse. See examples/nlp/token_classification.py for an example. local_rank=None, # If you're training with mixed precision, this should be set to mxprO1 or mxprO2. # See https://nvidia.github.io/apex/amp.html#opt-levels for more details. optimization_level="O1", # Define path to the directory you want to store your results log_dir=WORK_DIR, # If you're training with multiple GPUs, this should be set to # nemo.core.DeviceType.AllGpu placement=nemo.core.DeviceType.GPU) # + # If you're using a standard BERT model, you should do it like this. 
To see the full # list of MegatronBERT/BERT/ALBERT/RoBERTa model names, call nemo_nlp.nm.trainables.get_pretrained_lm_models_list() bert_model = nemo_nlp.nm.trainables.get_pretrained_lm_model( pretrained_model_name=PRETRAINED_BERT_MODEL) tokenizer = nemo.collections.nlp.data.tokenizers.get_tokenizer( tokenizer_name="nemobert", pretrained_model_name=PRETRAINED_BERT_MODEL) # - # # Describe training DAG # + train_data_layer = nemo_nlp.nm.data_layers.PunctuationCapitalizationDataLayer( tokenizer=tokenizer, text_file=os.path.join(DATA_DIR, 'text_train.txt'), label_file=os.path.join(DATA_DIR, 'labels_train.txt'), max_seq_length=MAX_SEQ_LENGTH, batch_size=BATCH_SIZE) punct_label_ids = train_data_layer.dataset.punct_label_ids capit_label_ids = train_data_layer.dataset.capit_label_ids hidden_size = bert_model.hidden_size # Define classifier for Punctuation and Capitalization tasks classifier = PunctCapitTokenClassifier( hidden_size=hidden_size, punct_num_classes=len(punct_label_ids), capit_num_classes=len(capit_label_ids), dropout=0.1, punct_num_layers=3, capit_num_layers=2, ) # If you don't want to use weighted loss for Punctuation task, use class_weights=None punct_label_freqs = train_data_layer.dataset.punct_label_frequencies class_weights = calc_class_weights(punct_label_freqs) # define loss punct_loss = CrossEntropyLossNM(logits_ndim=3, weight=class_weights) capit_loss = CrossEntropyLossNM(logits_ndim=3) task_loss = LossAggregatorNM(num_inputs=2) # + input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, punct_labels, capit_labels = train_data_layer() hidden_states = bert_model( input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask) punct_logits, capit_logits = classifier(hidden_states=hidden_states) punct_loss = punct_loss( logits=punct_logits, labels=punct_labels, loss_mask=loss_mask) capit_loss = capit_loss( logits=capit_logits, labels=capit_labels, loss_mask=loss_mask) task_loss = task_loss( loss_1=punct_loss, loss_2=capit_loss) # 
- # # Describe evaluation DAG # + # Note that you need to specify punct_label_ids and capit_label_ids - mapping form labels to label_ids generated # during creation of the train_data_layer to make sure that the mapping is correct in case some of the labels from # the train set are missing in the dev set. eval_data_layer = nemo_nlp.nm.data_layers.PunctuationCapitalizationDataLayer( tokenizer=tokenizer, text_file=os.path.join(DATA_DIR, 'text_dev.txt'), label_file=os.path.join(DATA_DIR, 'labels_dev.txt'), max_seq_length=MAX_SEQ_LENGTH, batch_size=BATCH_SIZE, punct_label_ids=punct_label_ids, capit_label_ids=capit_label_ids) eval_input_ids, eval_input_type_ids, eval_input_mask, _, eval_subtokens_mask, eval_punct_labels, eval_capit_labels\ = eval_data_layer() hidden_states = bert_model( input_ids=eval_input_ids, token_type_ids=eval_input_type_ids, attention_mask=eval_input_mask) eval_punct_logits, eval_capit_logits = classifier(hidden_states=hidden_states) # - # # Create callbacks # + callback_train = nemo.core.SimpleLossLoggerCallback( tensors=[task_loss, punct_loss, capit_loss, punct_logits, capit_logits], print_func=lambda x: logging.info("Loss: {:.3f}".format(x[0].item())), step_freq=STEP_FREQ) train_data_size = len(train_data_layer) # If you're training on multiple GPUs, this should be # train_data_size / (batch_size * batches_per_step * num_gpus) steps_per_epoch = int(train_data_size / (BATCHES_PER_STEP * BATCH_SIZE)) print ('Number of steps per epoch: ', steps_per_epoch) # Callback to evaluate the model callback_eval = nemo.core.EvaluatorCallback( eval_tensors=[eval_punct_logits, eval_capit_logits, eval_punct_labels, eval_capit_labels, eval_subtokens_mask], user_iter_callback=lambda x, y: eval_iter_callback(x, y), user_epochs_done_callback=lambda x: eval_epochs_done_callback(x, punct_label_ids, capit_label_ids), eval_step=steps_per_epoch) # Callback to store checkpoints ckpt_callback = nemo.core.CheckpointCallback( folder=nf.checkpoint_dir, step_freq=STEP_FREQ) # 
- # # Training # + lr_policy = WarmupAnnealing(NUM_EPOCHS * steps_per_epoch, warmup_ratio=LR_WARMUP_PROPORTION) nf.train(tensors_to_optimize=[task_loss], callbacks=[callback_train, callback_eval, ckpt_callback], lr_policy=lr_policy, batches_per_step=BATCHES_PER_STEP, optimizer=OPTIMIZER, optimization_params={"num_epochs": NUM_EPOCHS, "lr": LEARNING_RATE}) # - # 10 epochs of training on the subset of data, should take about 20 minutes on a single V100 GPU. # The model performance should be similar to the following: # # precision recall f1-score support # O 1.00 0.99 0.99 137268 # , 0.58 0.95 0.72 2347 # . 0.99 1.00 1.00 19078 # ? 0.98 0.99 0.99 1151 # # accuracy 0.99 159844 # macro avg 0.89 0.98 0.92 159844 # weighted avg 0.99 0.99 0.99 159844 # # precision recall f1-score support # O 1.00 1.00 1.00 136244 # U 1.00 0.99 0.99 23600 # # accuracy 1.00 159844 # macro avg 1.00 1.00 1.00 159844 # weighted avg 1.00 1.00 1.00 159844 # # Inference # Define the list of queiries for inference queries = ['can i help you', 'yes please', 'we bought four shirts from the nvidia gear store in santa clara', 'we bought four shirts one mug and ten thousand titan rtx graphics cards', 'the more you buy the more you save'] # + infer_data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer( queries=queries, tokenizer=tokenizer, max_seq_length=MAX_SEQ_LENGTH, batch_size=1) input_ids, input_type_ids, input_mask, _, subtokens_mask = infer_data_layer() hidden_states = bert_model( input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask) punct_logits, capit_logits = classifier(hidden_states=hidden_states) evaluated_tensors = nf.infer(tensors=[punct_logits, capit_logits, subtokens_mask], checkpoint_dir=WORK_DIR + '/checkpoints') # + # helper functions def concatenate(lists): return np.concatenate([t.cpu() for t in lists]) punct_ids_to_labels = {punct_label_ids[k]: k for k in punct_label_ids} capit_ids_to_labels = {capit_label_ids[k]: k for k in 
capit_label_ids} punct_logits, capit_logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors] punct_preds = np.argmax(punct_logits, axis=2) capit_preds = np.argmax(capit_logits, axis=2) for i, query in enumerate(queries): print(f'Query: {query}') punct_pred = punct_preds[i][subtokens_mask[i] > 0.5] capit_pred = capit_preds[i][subtokens_mask[i] > 0.5] words = query.strip().split() if len(punct_pred) != len(words) or len(capit_pred) != len(words): raise ValueError('Pred and words must be of the same length') output = '' for j, w in enumerate(words): punct_label = punct_ids_to_labels[punct_pred[j]] capit_label = capit_ids_to_labels[capit_pred[j]] if capit_label != 'O': w = w.capitalize() output += w if punct_label != 'O': output += punct_label output += ' ' print(f'Combined: {output.strip()}\n') # - # The inference output should look something like this:<br> # # Query: can i help you<br> # Combined: Can I help you?<br> # # Query: yes please<br> # Combined: Yes, please.<br> # # Query: we bought four shirts from the nvidia gear store in santa clara<br> # Combined: We bought four shirts from the Nvidia gear store in Santa Clara.<br> # # Query: we bought four shirts one mug and ten thousand titan rtx graphics cards<br> # Combined: We bought four shirts, one mug, and ten thousand Titan Rtx graphics cards.<br> # # Query: the more you buy the more you save<br> # Combined: The more you buy, the more you save.<br> # **Set NUM_SAMPLES=-1 and consider including other datasets to improve the performance of the model.**
examples/nlp/token_classification/PunctuationWithBERT.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # Nesse arquivo foi usada a seguinte estratégia: # # Utilizei um recurso chamado [DTW](dtw.r-forge.r-project.org) (Dynamic Time Warping) onde se calcula a dissimilaridades entre as séries temporais. # Nesse caso entre cada movimento ou path realizado pelo usuário. # Dessa forma podemos contabilizar o quão similar as series são entre si. # # Com essa estratégia podemos calcular uma **`distancia`** e assim realizar procedimentos como *clusterizações* e até *classificações*. Nesse caso utilizei um simples **KNN**(k-nearest neighbors) para realizar a classificação binária. # # Importante dizer que esse método (**DTW**) é robusto para séries com múltiplas variaveis (multivariate). # # Obs. Podemos melhorar essa abordagem adicionando um filtro **Kernel** na contrução de um modelo **KNN** mais robusto. # + library(dtw) library(KODAMA) library(caret) library(pROC) groups <- read.csv(file="./MovementAAL/groups/MovementAAL_DatasetGroup.csv",head=TRUE,sep=",") targetAll <- read.csv(file="./MovementAAL/dataset/MovementAAL_target.csv",head=TRUE,sep=",") # + #Group 1 allDataGroup1<-list() allDataGroup1Target<-list() groups1 = groups[groups$dataset_ID==1, ] index<-1 for (id in groups1$X.sequence_ID){ caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="") allDataGroup1[[index]]<-read.csv(file=caminho,head=TRUE,sep=",") allDataGroup1Target[index]<-targetAll[[2]][id] index<-index+1 } kdist <- dist(allDataGroup1, method="DTW",diag = TRUE,upper = TRUE) kdistM<- as.matrix(kdist) # - target <- unlist(allDataGroup1Target) allDataGroup1TargetFrame = as.data.frame(target) train <- createDataPartition(allDataGroup1TargetFrame$target, p = .7, list = TRUE,times=10) y <- allDataGroup1TargetFrame$target # #### Média e Desvio padrão respectivamente. 
# ##### Group 1, com Cross Validation tipo 10-fold # + allAccuracyGroup1 <- c() for( i in 1:length(train)){ test <- (1:nrow(allDataGroup1TargetFrame))[-train[[i]]] preds <- knn.predict(train[[i]], test,y, kdistM, k=3,agg.meth="majority") matrix <- confusionMatrix(preds, y[test]) #accuracy allAccuracyGroup1 <- c(allAccuracyGroup1,matrix[3]$overall[[1]]) } mean(allAccuracyGroup1) sd(allAccuracyGroup1) # + #Group 2 allDataGroup2<-list() groups2 = groups[groups$dataset_ID==2, ] allDataGroup2Target<-list() index<-1 for (id in groups2$X.sequence_ID){ caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="") allDataGroup2[[index]]<-read.csv(file=caminho,head=TRUE,sep=",") allDataGroup2Target[index]<-targetAll[[2]][id] index<-index+1 } kdist <- dist(allDataGroup2, method="DTW",diag = TRUE,upper = TRUE) kdistM<- as.matrix(kdist) # - target <- unlist(allDataGroup2Target) allDataGroup2TargetFrame = as.data.frame(target) train <- createDataPartition(allDataGroup2TargetFrame$target, p = .7, list = TRUE,times=10) y <- allDataGroup2TargetFrame$target # #### Média e Desvio padrão respectivamente. 
# ##### Group 2, com Cross Validation tipo 10-fold # + allAccuracyGroup2 <- c() for( i in 1:length(train)){ test <- (1:nrow(allDataGroup2TargetFrame))[-train[[i]]] preds <- knn.predict(train[[i]], test,y, kdistM, k=3,agg.meth="majority") matrix <- confusionMatrix(preds, y[test]) #accuracy allAccuracyGroup2 <- c(allAccuracyGroup2,matrix[3]$overall[[1]]) } mean(allAccuracyGroup2) sd(allAccuracyGroup2) # + #Group 3 allDataGroup3<-list() groups3 = groups[groups$dataset_ID==3, ] allDataGroup3Target<-list() index<-1 for (id in groups3$X.sequence_ID){ caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="") allDataGroup3[[index]]<-read.csv(file=caminho,head=TRUE,sep=",") allDataGroup3Target[index]<-targetAll[[2]][id] index<-index+1 } kdist <- dist(allDataGroup3, method="DTW",diag = TRUE,upper = TRUE) kdistM<- as.matrix(kdist) # - target <- unlist(allDataGroup3Target) allDataGroup3TargetFrame = as.data.frame(target) train <- createDataPartition(allDataGroup3TargetFrame$target, p = .7, list = TRUE,times=10) y <- allDataGroup3TargetFrame$target # #### Média e Desvio padrão respectivamente. 
# ##### Group 3, com Cross Validation tipo 10-fold # + allAccuracyGroup3 <- c() for( i in 1:length(train)){ test <- (1:nrow(allDataGroup3TargetFrame))[-train[[i]]] preds <- knn.predict(train[[i]], test,y, kdistM, k=3,agg.meth="majority") matrix <- confusionMatrix(preds, y[test]) #accuracy allAccuracyGroup3 <- c(allAccuracyGroup3,matrix[3]$overall[[1]]) } mean(allAccuracyGroup3) sd(allAccuracyGroup3) # + #All DataBases targetAll <- read.csv(file="./MovementAAL/dataset/MovementAAL_target.csv",head=TRUE,sep=",") allData<-list() allDataTarget<-list() index<-1 for (id in targetAll$X.sequence_ID){ caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="") allData[[index]]<-read.csv(file=caminho,head=TRUE,sep=",") allDataTarget[index]<-targetAll[[2]][id] index<-index+1 } kdist <- dist(allData, method="DTW",diag = TRUE,upper = TRUE) kdistM<- as.matrix(kdist) # - target <- unlist(allDataTarget) allDataTargetFrame = as.data.frame(target) train <- createDataPartition(allDataTargetFrame$target, p = .7, list = TRUE,times=10) y <- allDataTargetFrame$target # #### Média e Desvio padrão respectivamente. 
# ##### Todos os Groups em uma base apenas, com Cross Validation tipo 10-fold # + allAccuracy <- c() for( i in 1:length(train)){ test <- (1:nrow(allDataTargetFrame))[-train[[i]]] preds <- knn.predict(train[[i]], test,y, kdistM, k=3,agg.meth="majority") matrix <- confusionMatrix(preds, y[test]) #accuracy allAccuracy <- c(allAccuracyGroup3,matrix[3]$overall[[1]]) } mean(allAccuracy) sd(allAccuracy) # - # #### Matrix de confusão # #### Todos os Groups em uma base apenas #All groups datasets Confusion Matrix target <- unlist(allDataTarget) allDataTargetFrame = as.data.frame(target) train <- createDataPartition(allDataTargetFrame$target, p = .7, list = TRUE,times=1) y <- allDataTargetFrame$target y <- factor(unlist(y),labels=c("No", "Yes")) test <- (1:nrow(allDataTargetFrame))[-train[[1]]] preds <- knn.predict(train[[1]], test,y, kdistM, k=3,agg.meth="majority") classes <- factor(unlist( y[test]),labels=c("No", "Yes")) matrix <- confusionMatrix(preds,classes) matrix # #### Curva ROC e AUC # #### Todos os Groups em uma base apenas #ROC CURVE AND AUC predsProb <- t(knn.probability(train[[1]], test,y, kdistM, k=3)) outcome<- predsProb[,1] rocobj <- roc(classes, outcome,levels=c("No", "Yes")) plot(rocobj)
With DTW and KNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python pmp_nightly_20180622 # language: python # name: myenv # --- # # Reusable User-Frendly Function for Portrait Plots # # Notebook written by *<NAME>* (Feb. 2019) # # *** # # - **OBJECTIVE**: Provide user-frendly and reusable function to generate portrait plot from given 2D array in type of CDMS Transisent Variable. # - **ADVENTAGE**: Users can adjust image size, colormap, font size, margin, etc., without knowing details of portrait plot generation. # - **USAGE**: Feed CDMS MV2 2-D array with customizing options to the [function](#function) to generate a portrait plot. See [below](#options) for details. from __future__ import print_function # ## Prepare the Notebook # Below is for embedded VCS image plotting in the Jupyter Notebook (Courtesy of *<NAME>*). # # Prepare the Notebook # import tempfile import base64 import pcmdi_metrics.graphics.portraits class PortraitNotebook(pcmdi_metrics.graphics.portraits.Portrait): def __init__(self,x,*args, **kargs): super(PortraitNotebook, self).__init__(*args, **kargs) self.x = x def _repr_png_(self): tmp = tempfile.mktemp() + ".png" self.x.png(tmp) f = open(tmp, "rb") st = f.read() return st # ## User-friendly reuseable function to generate a portrait plot as static image<a id="function"></a> # # User provides cdms MV2 2D array to the function with custom options if needed. Below code is basically a wrapper of existing portrait plot generation, but help users easily customize their plots, using various options listed below. Users can adjust image size, colormap, font size, margin, etc., without learning details of portrait plot generation. # # ### Input<a id="options"></a> # # - <span style="color:steelblue">**stat_xy**</span>: *cdms2 MV2 2D array* with proper axes decorated, values to visualize. 
# - <span style="color:steelblue">**imgName**</span>: *string*, file name for PNG image file (e.g., `'YOUR_PLOT.png'` or `'YOUR_PLOT'`. If `.png` is not included, it will be added, so no worry).
# - <span style="color:steelblue">**plotTitle**</span>: *string*, text to show above plot as its title (optional)
# - <span style="color:steelblue">**img_length**</span>: *integer*, pixels for image file length. default=800.
# - <span style="color:steelblue">**img_height**</span>: *integer*, pixels for image file height. default=600.
# - <span style="color:steelblue">**colormap**</span>: *string* or actual *VCS colormap*. Default is 'viridis' that is default in VCS.
# - <span style="color:steelblue">**clevels**</span>: *list* of numbers (int or float). Colorbar levels. If not given, automatically generated.
# - <span style="color:steelblue">**ccolors**</span>: *list* of colors. If not given, automatically generated.
# - <span style="color:steelblue">**xtic_textsize**</span>: *int*, size of text for x-axis tic. If not given, automatically generated.
# - <span style="color:steelblue">**ytic_textsize**</span>: *int*, size of text for y-axis tic. If not given, automatically generated.
# - <span style="color:steelblue">**parea**</span>: *list* or *tuple* of float numbers between 0 to 1. Plotting area: (x1, x2, y1, y2). If not given, automatically placed.
# - <span style="color:steelblue">**missing_color**</span>: *string* or *color code* (tuple or list of R, G, B, alpha). Color for missing data box. Default is 'black'.
# - <span style="color:steelblue">**Annotate**</span>: *bool*, default=False. If True, show numbers in individual boxes.
# - <span style="color:steelblue">**stat_xy_annotate**</span>: *cdms2 MV2 2D array* with proper axes decorated. Only needed when the number to annotate differs from the value driving the colormap (ignored when Annotate=False). For example, color boxes by values normalized by the median, while annotating the actual metric values.
# - <span style="color:steelblue">**num_box_partitioning**</span>: *integer*. It defines how many partitioning in a box. e.g., 4 indicates 4 triangles in each box. Default=1, should be less equal than 4.
# - <span style="color:steelblue">**stat_xy_2**</span>: *cdms2 MV2 2D array*. Stat for 2nd triangle in box. Default=None
# - <span style="color:steelblue">**stat_xy_3**</span>: *cdms2 MV2 2D array*. Stat for 3rd triangle in box. Default=None
# - <span style="color:steelblue">**stat_xy_4**</span>: *cdms2 MV2 2D array*. Stat for 4th triangle in box. Default=None
# - <span style="color:steelblue">**logo**</span>: *bool*, default=True. If False, CDAT logo turned off
# - <span style="color:steelblue">**GridMeshLine**</span>: *bool*, default=True. If False, no lines for boundary of individual boxes
#
# ### Output
#
# - **PNG image file**

# +
import pcmdi_metrics.graphics.portraits
import vcs
import sys


def plot_portrait(
        stat_xy,  # array to visualize
        imgName='portrait_plot',  # file name
        plotTitle=None,  # title string on top
        img_length=800, img_height=600,  # image size in pixel
        colormap='viridis', clevels=None, ccolors=None,  # colormap and levels
        xtic_textsize=None, ytic_textsize=None,  # font size for axes tic labels
        parea=None,  # plotting area in ratio, in purpose of margin control
        missing_color='black',  # color for missing data box
        Annotate=False, stat_xy_annotate=None,  # annotation (showing value number in each box)
        num_box_partitioning=1, stat_xy_2=None, stat_xy_3=None, stat_xy_4=None,  # additional triangle
        logo=True, GridMeshLine=True,  # miscellaneous
        ):
    """
    Generate a portrait plot PNG from a 2D statistics array.

    Input
    - stat_xy: cdms2 MV2 2D array with proper axes decorated, values to visualize.
    - imgName: string, file name for PNG image file (e.g., 'YOUR_PLOT.png' or 'YOUR_PLOT'. If .png is not included, it will be added, so no worry).
    - plotTitle: string, text to show above plot as its title (optional)
    - img_length: integer, pixels for image file length. default=800.
    - img_height: integer, pixels for image file height. default=600.
    - colormap: string or actual VCS colormap. Default is 'viridis' that is default in VCS.
    - clevels: list of numbers (int or float). Colorbar levels. If not given automatically generated.
    - ccolors: list of colors. If not given automatically generate.
    - xtic_textsize: int, size of text for x-axis tic. If not given automatically generated.
    - ytic_textsize: int, size of text for y-axis tic. If not given automatically generated.
    - parea: list or tuple of float numbers between 0 to 1. Plotting area: (x1, x2, y1, y2). If not given automatically placed.
    - missing_color: string or color code (tuple or list of R, G, B, alpha). Color for missing data box. Default is 'black'
    - Annotate: bool, default=False. If Annotate, show numbers in individual boxes.
    - stat_xy_annotate: cdms2 MV2 2D array with proper axes decorated. Only needed when number to show as value annotated is not corresponding to the colormap. Ignored when Annotate=False. For example, color for values normalized by median, while annotating the actual metric value.
    - num_box_partitioning: integer. How many partitioning in a box? e.g., 4: 4 triangles in each box. Default=1, should be less equal than 4.
    - stat_xy_2: cdms2 MV2 2D array. Stat for 2nd triangle in box. Default=None
    - stat_xy_3: cdms2 MV2 2D array. Stat for 3rd triangle in box. Default=None
    - stat_xy_4: cdms2 MV2 2D array. Stat for 4th triangle in box. Default=None
    - logo: bool, default=True. If False, CDAT logo turned off
    - GridMeshLine: bool, default=True. If False, no lines for boundary of individual boxes
    Output
    - PNG image file
    """
    # VCS Canvas (bg=True renders off-screen at the requested pixel size)
    x = vcs.init(bg=True, geometry=(img_length, img_height))
    # CDAT logo control
    if not logo:
        x.drawlogooff()
    # Set up Portrait Plot
    """
    If you are NOT using JUPYTER NOTEBOOK, it is okay to DEACTIVATE below line and ACTIVATE second below line, and skip the "Prepare the Notebook" part above.
    """
    P = PortraitNotebook(x)
    #P = pcmdi_metrics.graphics.portraits.Portrait()
    #
    # Preprocessing step to "decorate" the axis
    #
    # Trailing spaces pad the tic labels away from the plot frame.
    axes = stat_xy.getAxisList()
    xax = [t+' ' for t in list(axes[1][:])]
    yax = [t+' ' for t in list(axes[0][:])]
    P.decorate(stat_xy, yax, xax)
    #
    # Customize
    #
    SET = P.PLOT_SETTINGS
    # Viewport on the Canvas
    if parea is not None:
        SET.x1, SET.x2, SET.y1, SET.y2 = parea
    # Both X (horizontal) and y (VERTICAL) ticks
    # Text table
    SET.tictable = vcs.createtexttable()
    SET.tictable.color = "black"
    # X (bottom) ticks
    # Text Orientation
    SET.xticorientation = vcs.createtextorientation()
    SET.xticorientation.angle = -90
    SET.xticorientation.halign = "right"
    if xtic_textsize:
        SET.xticorientation.height = xtic_textsize
    # Y (vertical) ticks
    SET.yticorientation = vcs.createtextorientation()
    SET.yticorientation.angle = 0
    SET.yticorientation.halign = "right"
    if ytic_textsize:
        SET.yticorientation.height = ytic_textsize
    # We can turn off the "grid" if needed
    if GridMeshLine:
        SET.draw_mesh = "y"
    else:
        SET.draw_mesh = "n"
    # Color for missing data
    SET.missing_color = missing_color
    # Timestamp
    SET.time_stamp = None
    # Colormap
    SET.colormap = colormap
    if clevels:
        SET.levels = clevels
    if ccolors:
        SET.fillareacolors = ccolors
    # Annotated Plot (i.e. show value number in boxes)
    if Annotate:
        SET.values.show = True
        if stat_xy_annotate is None:
            SET.values.array = stat_xy
        else:
            SET.values.array = stat_xy_annotate
    # Check before plotting
    if num_box_partitioning > 4:
        sys.exit('ERROR: num_box_partitioning should be less equal than 4')
    #
    # Plot
    #
    P.plot(stat_xy, multiple=pp_multiple(1, num_box_partitioning), x=x)
    # Add triangles if needed
    # Decorate additional arrays with empty string axes to avoid overwriting same information (if not, font will look ugly)
    xax_empty = [' ' for t in stat_xy.getAxis(1)[:]]
    yax_empty = [' ' for t in stat_xy.getAxis(0)[:]]
    if stat_xy_2 is not None:
        P.decorate(stat_xy_2, yax_empty, xax_empty)
        P.plot(stat_xy_2, x=x, multiple=pp_multiple(2, num_box_partitioning))
    if stat_xy_3 is not None:
        P.decorate(stat_xy_3, yax_empty, xax_empty)
        P.plot(stat_xy_3, x=x, multiple=pp_multiple(3, num_box_partitioning))
    if stat_xy_4 is not None:
        P.decorate(stat_xy_4, yax_empty, xax_empty)
        P.plot(stat_xy_4, x=x, multiple=pp_multiple(4, num_box_partitioning))
    # Plot title
    if plotTitle:
        plot_title = vcs.createtext()
        plot_title.x = .5
        # Centered horizontally, halfway between plot top and canvas top.
        plot_title.y = (SET.y2 + 1) / 2.
        plot_title.height = 30
        plot_title.halign = 'center'
        plot_title.valign = 'half'
        plot_title.color = 'black'
        plot_title.string = plotTitle
        x.plot(plot_title)
    # Save
    if imgName.split('.')[-1] not in ['PNG', 'png']:
        imgName = imgName+'.png'
    x.png(imgName)
    # Preserve original axes (decorate() replaced them above)
    stat_xy.setAxisList(axes)
    return P


def pp_multiple(a, b):
    """
    Combine a, b into the float a.b used as VCS 'multiple' code.
    Input
    - a, b: integer
    Output
    - c: float, a.b
    """
    c = float(str(a)+'.'+str(b))
    return c
# -

# ***
# ## Let's testing it with dummy data
# Below we create a dummy array to visualize.

import cdms2
import genutil
import MV2
import numpy as np


def normalize_by_median(stat_xy):
    """
    NOTE:
    Input
    - stat_xy: cdms2 MV2 2D array with proper axes decorated, values to visualize.
    Output
    - stat_xy: stat_xy after normalized by median of each row
    """
    # Get median
    median = genutil.statistics.median(stat_xy, axis=1)[0]
    # Match shapes
    stat_xy, median = genutil.grower(stat_xy, median)
    # Normalize by median value
    median = np.array(median)
    stat_xy_normalized = MV2.divide(MV2.subtract(stat_xy, median), median)
    # Decorate axes
    stat_xy_normalized.setAxisList(stat_xy.getAxisList())
    stat_xy_normalized.id = stat_xy.id
    stat_xy = stat_xy_normalized
    return stat_xy


# ## Dummy data

# +
# Prepare dummy data -- create random array for testing
random_array = np.random.rand(10, 30)
X = cdms2.createAxis(['model_ '+str(r) for r in list(range(0, 30))])
Y = cdms2.createAxis(['metric_ '+str(r) for r in list(range(0, 10))])
stat_xy = MV2.array(random_array, axes=(Y, X), id='statistics')

# Plant missing value
stat_xy[5][5] = -1.e20
stat_xy = MV2.masked_where(MV2.equal(stat_xy, -1.e20), stat_xy)

# Normalize rows by its median
Normalize = True
if Normalize:
    # Normalize by median value
    stat_xy = normalize_by_median(stat_xy)

# Additional dummy data for annotate test
stat_xy_annotate = MV2.multiply(stat_xy, 2)

# Additional dummy data for additional triangles
stat_xy_2 = normalize_by_median(MV2.add(stat_xy, 2))
stat_xy_3 = normalize_by_median(MV2.add(stat_xy, 3))
stat_xy_4 = normalize_by_median(MV2.add(stat_xy, 4))

axes = stat_xy.getAxisList()
stat_xy_2.setAxisList(axes)
stat_xy_3.setAxisList(axes)
stat_xy_4.setAxisList(axes)
# -

# ## Portrait plot generation
# ### Example 1

# Colormap to be used
clevels = [-1.e20, -.5, -.4, -.3, -.2, -.1, 0, .1, .2, .3, .4, .5, 1.e20]
ccolors = vcs.getcolors(clevels, split=0, colors=range(16, 240))

# Generate plot
plot_portrait(stat_xy, imgName='pp_example1.png', plotTitle='Example 1',
              clevels=clevels, ccolors=ccolors
              )

# ### Example 2
# - Add title
# - Adjust margin
# - Change missing box color to white
# - Add number annotation

plot_portrait(stat_xy, imgName='pp_example2.png', plotTitle='Example 2',
              clevels=clevels, ccolors=ccolors,
              xtic_textsize=15, ytic_textsize=15,
              parea=(.05, .88, .25, .9),
              missing_color='white',
              Annotate=True, GridMeshLine=True,
              stat_xy_annotate=stat_xy_annotate)

# ### Example 3
# - Add triangles

plot_portrait(stat_xy, imgName='pp_example3.png', plotTitle='Example 3',
              clevels=clevels, ccolors=ccolors,
              num_box_partitioning=2, stat_xy_2=stat_xy_2,
              GridMeshLine=True)

# ### Example 4
# - Add more triangles
# - Change (increase, in this case) image size
# - Change font size for axes tic labels
# - Hide grid lines

plot_portrait(
    stat_xy, imgName='pp_example4.png', plotTitle='Example 4',
    img_length=2400, img_height=1200,
    clevels=clevels, ccolors=ccolors,
    xtic_textsize=10, ytic_textsize=10,
    num_box_partitioning=4, stat_xy_2=stat_xy_2, stat_xy_3=stat_xy_3, stat_xy_4=stat_xy_4,
    GridMeshLine=False)

# ### Example 5
# - Change colormap
# - Missing box as grey
# - Turn off CDAT logo

# +
colormap = 'bl_to_darkred'

plot_portrait(stat_xy, imgName='pp_example5.png', plotTitle='Example 5',
              colormap=colormap, clevels=clevels, ccolors=ccolors,
              missing_color='grey',
              logo=False,
              num_box_partitioning=4, stat_xy_2=stat_xy_2, stat_xy_3=stat_xy_3, stat_xy_4=stat_xy_4,
              GridMeshLine=False)
doc/jupyter/ReusablePortraitPlot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import matplotlib as mpl
import matplotlib.pyplot as plt
import random
import numpy as np
import beadpy
import pandas as pd

# %matplotlib notebook


def trajectory_simulator(pre_duration = 250, #Mean event start time
                         pre_sigma = 50, #Sigma of event start time distribution
                         post_duration = 250, #The bead stays on for this long at the end of the trajectory
                         mean_duration = 100, #Mean event duration
                         min_duration = 10, #Minimum event duration
                         mean_rate = 500, #Mean rate (distance units/timestep)
                         rate_sigma = 50, #Sigma of the rate distribution
                         noise_sigma = 500, #Mean sigma for the bead movement
                         noise_sigma_sigma = 100, #Sigma of the noise sigma distribution
                         pause_prob = 0.001, #Probability of entering a pause in a given timestep
                         pause_duration_prob = 0.2, #Probability of remaining paused in a given timestep once a pause has begun.
                         rate_change_prob = 0.1, #Probablity that the rate will change in a given timestep
                         DNA_length = 15000, #Length of the DNA - a hard limit on the event length
                         trajectory_number = 0):
    """Simulate one noisy single-molecule bead trajectory.

    The trajectory has three phases: a flat 'pre' baseline, an event
    during which the position advances at a stochastically changing rate
    (with pauses), and a flat 'post' tail. Gaussian observation noise is
    added to every time point.

    Returns a pandas DataFrame with columns 'time', 'nucleotides' (noisy
    observed position), 'rate', 'position' (true position) and
    'trajectory' (the trajectory_number label).
    """
    # Event length is drawn from an exponential distribution, redrawn
    # until it reaches the minimum duration.
    length = int(np.random.exponential(mean_duration))
    while length < min_duration:
        length = int(np.random.exponential(mean_duration))
    pre = int(np.random.normal(loc=pre_duration, scale=pre_sigma))
    post = post_duration
    rate = 0
    ratesequence = [0]*pre
    # NOTE(review): a draw far below the mean could make this sigma
    # negative, which np.random.normal rejects — confirm parameters keep
    # noise_sigma comfortably above noise_sigma_sigma.
    noise_sigmaval = int(np.random.normal(loc=noise_sigma, scale=noise_sigma_sigma))
    position = [0]*pre
    nucleotides = []
    current_position = 0
    # Pre-event baseline: position 0 plus observation noise.
    for i in range(0, pre):
        nucleotides.append(float(position[i] + np.random.normal(loc=0.0, scale=noise_sigmaval)))
    # The event itself.
    for i in range(0, length):
        randomnumber = random.random()  # generate a random float between 0 and 1
        if i == 0:  # Start the event
            rate = np.random.normal(loc=mean_rate, scale=rate_sigma)
        elif not rate == 0:  # When during an event/no pause.
            if (randomnumber <= pause_prob):  # Start a pause.
                rate = 0
            elif (randomnumber > pause_prob) & (randomnumber <= (pause_prob + rate_change_prob)):
                # Change the rate
                rate = np.random.normal(loc=mean_rate, scale=rate_sigma)
            else:  # No rate change
                rate = rate  # just FYI!
        elif (rate == 0) & (not i == 0):  # When in a pause.
            if (randomnumber < (1 - pause_duration_prob)):  # End the pause.
                rate = np.random.normal(loc=mean_rate, scale=rate_sigma)
            else:
                rate = 0  # Continue the pause.
        ratesequence.append(rate)
        current_position = current_position + rate
        position.append(current_position)
        nucleotides.append(float(current_position + np.random.normal(loc=0.0, scale=noise_sigmaval)))
        # The DNA length is a hard stop for the event.
        if current_position > DNA_length:
            length = i
            break
    # Post-event tail: the bead stays at its final position.
    for i in range(0, post):
        ratesequence.append(0)
        position.append(current_position)
        nucleotides.append(float(current_position + np.random.normal(loc=0.0, scale=noise_sigmaval)))
    time = range(0, len(nucleotides))
    results = pd.DataFrame({'time': time, 'nucleotides': nucleotides, 'rate': ratesequence, 'position': position})
    results['trajectory'] = trajectory_number
    return results


test = trajectory_simulator(pre_duration=300,
                            pre_sigma=5,
                            post_duration=250,
                            mean_duration=100,
                            min_duration=10,
                            mean_rate=80,
                            rate_sigma=30,
                            noise_sigma=100,
                            noise_sigma_sigma=20,
                            pause_prob=0.1,
                            pause_duration_prob=0.9,
                            rate_change_prob=0.01,
                            DNA_length=15000,
                            trajectory_number=0)

exampletrajseg = beadpy.trajectory_plotter(test, 0, method=('auto', 'whole'), sigma_start=10, sigma_end=250, eventregion=(200, 500), segmenttable=0)
exampletrajseg

# Compare the table of actual changepoints below (ignoring rate changes of less than 10) with the above table of the discovered changepoints above.

changepoints = []
changepoint_values = []
duration = []
for i in test.time[1:]:
    if abs(test.rate[i] - test.rate[i-1]) > 10:
        changepoints.append(i)
        changepoint_values.append(test.rate[i])
for i in range(0, (len(changepoints) - 1)):
    duration.append(changepoints[i+1] - changepoints[i])
duration.append(0)  # last changepoint has no successor; pad so columns align
cpts = pd.DataFrame({'changepoint': changepoints, 'rate': changepoint_values, 'duration': duration})
cpts

# ## Simulation of a combined phi29 + E. coli experiment
# Generate a large results table.

# Collect per-trajectory frames in a list and concatenate once:
# DataFrame.append was removed in pandas 2.0, and repeated appends are
# quadratic anyway; a single pd.concat also works on older pandas.
phi29_frames = []
for j in range(0, 1000):
    temp = trajectory_simulator(pre_duration=300,
                                pre_sigma=20,
                                post_duration=250,
                                mean_duration=100,
                                min_duration=10,
                                mean_rate=80,
                                rate_sigma=40,
                                noise_sigma=100,
                                noise_sigma_sigma=20,
                                pause_prob=0.1,
                                pause_duration_prob=0.5,
                                rate_change_prob=0.01,
                                DNA_length=15000,
                                trajectory_number=j)
    phi29_frames.append(temp)
phi29results = pd.concat(phi29_frames)

# NOTE(review): range(1001, 2000) skips trajectory 1000 and yields 999
# trajectories; presumably range(1000, 2000) was intended — confirm.
coli_frames = []
for j in range(1001, 2000):
    temp = trajectory_simulator(pre_duration=300,
                                pre_sigma=20,
                                post_duration=250,
                                mean_duration=150,
                                min_duration=10,
                                mean_rate=442,
                                rate_sigma=198,
                                noise_sigma=100,
                                noise_sigma_sigma=20,
                                pause_prob=0.1,
                                pause_duration_prob=0.7,
                                rate_change_prob=0.05,
                                DNA_length=15000,
                                trajectory_number=j)
    coli_frames.append(temp)
coliresults = pd.concat(coli_frames)

results = pd.concat([phi29results, coliresults])

combosegments = beadpy.segment_finder(results, method='auto', sigma_start=10, sigma_end=200)

beadpy.segmentplotter(combosegments, 1000, -2000, 15000, 2, 10)

beadpy.ratehist(combosegments, 0, 1100, 40, 'displacement')
Trajectory_Simulator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false # # Welcome to LipidFinder 2.0 # # LipidFinder is an open-source Python workflow designed to facilitate further targeted lipidomics analysis. LipidFinder categorises and removes noise and artifacts from liquid chromatography/mass spectrometry (LC/MS) datasets, searches the outcome in different databases to obtain putative identification of lipids, and assigns them to a class based on the [LIPID MAPS](http://www.lipidmaps.org) classification system. The software quickly distinguishes and quantifies lipid-like features from contaminants, adducts and noise in LC/MS datasets that have been pre-processed using XCMS. Although we advise users to use XCMS, LipidFinder accepts input files from other pre-processing software tools (e.g. SIEVE™ from ThermoFisher). LipidFinder 1.0: [O'Connor et al., 2017](https://insight.jci.org/articles/view/91634). LipidFinder on LIPIDMAPS: [Fahy et al., 2018](https://doi.org/10.1093/bioinformatics/bty679). # # This Jupyter notebook will explain how to get your computer ready to use LipidFinder and will guide you through LipidFinder's workflow to process your data. Before continuing, note that your data needs to be high resolution MS (e.g. at least 60000ppm) and with long chromatography to separate isobaric lipids. The approach is not suitable for shotgun lipidomics, MS/MS or low resolution datasets. The demo data provided in the **tests** folder was generated with an Orbitrap Elite Mass Spectrometer, but the software is MS-platform independent. It is composed by 12 LC/MS runs of macrophage-like cells: first 6 samples from RAW cells (264.7) and last 6 samples from mouse wildtype (C57BL/6). 
Afterwards, the data was pre-processed with [XCMS Online](https://xcmsonline.scripps.edu/) using the set of parameters Orbitrap II. # # LipidFinder can be downloaded from [GitHub](https://github.com/ODonnell-Lipidomics/LipidFinder) or accessed online via LIPID MAPS: http://www.lipidmaps.org/resources/tools/lipidfinder. For questions or suggestions regarding LipidFinder please contact us at [<EMAIL>](mailto:<EMAIL>). # # LipidFinder is distributed under the **MIT license** (see *LICENSE* file for more information). # # ### Read before using LipidFinder for the first time # # LipidFinder has been designed for extensive clean-up of LC/MS datasets where a high degree of artifact removal is desired (e.g. discovery lipidomics). ESI-high resolution MS experiments contain many spurious signals that can arise from diverse sources, including common contaminants, adducts, in-source fragments, etc. LipidFinder is devised to work primarily as an add-on to XCMS, focusing on the clean-up of MS data files which have already been pre-processed for peak alignment and integration. Removal of these artifacts results in significantly cleaner datasets that perform better in downstream statistical analysis pipelines. # # #### Key points for users: # * LipidFinder shares some functionalities with XCMS (e.g. isotope removal or retention time correction), however these use different algorithms and perform differently. Thus, running these functionalities again in LipidFinder significantly improves the quality of XCMS datasets. # * LipidFinder includes extra functionalities specifically designed to improve artifact removal that are not in XCMS, including: contaminant, adduct and stack removal, solvent removal, mass reassignment and outlier correction. # * Extensive LC chromatography is essential with the LipidFinder approach to separate isobaric lipids which are a major complicating issue in lipidomics MS. This method is not suitable for “shotgun” applications. 
# * Qualitative and semi-quantitative comparison of lipids using high resolution MS is a powerful approach for screening all lipids, both unknown and known in an unbiased manner so long as it is used appropriately and its benefits and limitations are appreciated and acknowledged in full by the user. When correctly applied, it is an MS approach that is extremely powerful for lipid discovery and comparative profiling of biological samples. # * Nowadays, targeted MS/MS based methods can measure up to 500 or more known lipids, however, we know that lipidomes from mammalian cells and plasma can contain thousands, perhaps up to 5,000 or more per sample, with approximately 50% of these not appearing in any database (true unknowns). Thus there is huge potential for discovery of new lipids using untargeted approaches, something that cannot be accomplished using MS/MS. The LipidFinder approach is a hypothesis-generating screening tool designed specifically to clean up MS datasets that initially present with around 60K datapoints of which we estimate around 10% to be real lipids [(Slatter et al., 2016)](https://www.sciencedirect.com/science/article/pii/S1550413116301218?via%3Dihub). All observations of interest obtained using this method require rigorous validation using **(i)** manual examination of chromatographic data where significant differences are detected to ensure peak quality, followed by **(ii)** gold standard MS/MS methods, where the differences seen are for known lipids that can be purchased as standards. # * Database matches provided using LipidFinder are putative, and ions are assigned to putative lipid classes, not to actual molecular species because the isobaric nature of lipids makes this impossible. # * The LipidFinder approach is analogous to the older Affymetrix array methods in genomics, which also require strict validation using qPCR, etc. The approach is semi-quantitative, and reports on relative changes between datasets. 
# Internal standards can be used if desired to calculate A/IS.
# * Statistical analysis post-LipidFinder, which can be found on its online version, can be used to identify significantly-different lipids that then need to be followed up using targeted methods and fully validated, etc.

# + [markdown] deletable=false
# ## 1. Configuring your computer
#
# LipidFinder has been tested for Python 2.7.9 and Python 3.6.3. This doesn't mean it won't work in earlier versions, but you might get errors or significant differences in the results. Some computers' operating systems come bundled with Python, but it can also be downloaded and installed from the [Python Software Foundation](https://www.python.org/downloads/). The first step is to download LipidFinder's package file (Wheel format) from GitHub:
# * *Python 2.7:* [LipidFinder-2.0-py2-none-any.whl](https://github.com/ODonnell-Lipidomics/LipidFinder/archive/LipidFinder-2.0-py2-none-any.whl)
# * *Python 3.6:* [LipidFinder-2.0-py3-none-any.whl](https://github.com/ODonnell-Lipidomics/LipidFinder/archive/LipidFinder-2.0-py3-none-any.whl)
#
# ### Default installation
#
# LipidFinder's package includes all the instructions to install all the dependencies required. The easiest way to install LipidFinder is to open a command prompt/terminal, go to the folder where the downloaded Wheel file is located, and run one of the following commands:
#
# ```shell
# pip install LipidFinder-2.0-py2-none-any.whl  # for Python 2.7
#
# pip install LipidFinder-2.0-py3-none-any.whl  # for Python 3.6
# ```
#
# ### Alternative option: Anaconda
#
# Many users prefer to use [Anaconda](https://www.anaconda.com/download/), an open-source distribution aimed to do Python data science and machine learning in Windows, Linux, and MacOS.
To install LipidFinder, open an Anaconda prompt/terminal and run one of the following commands: # # ```shell # pip install LipidFinder-2.0-py2-none-any.whl # for Python 2.7 # # pip install LipidFinder-2.0-py3-none-any.whl # for Python 3.6 # ``` # # *Note:* We suggest to download and install Anaconda for **Python 3.6**. All the scripts include the `.py` extension that needs to be removed in Windows systems. # + [markdown] deletable=false # ## 2. Pre-processing the input files # # We have included a thorough manual on how to pre-process your input files to leave them ready for LipidFinder in the **docs** folder, in a PDF document named *Input_preparation_manual.pdf*. # + [markdown] deletable=false # ## 3. Setting up your parameters # # There are two different ways to set the parameters of each module of LipidFinder's workflow. The first one is to run the **`config_params`** script. This script requires as argument the module you want to configure: # + deletable=false config_params.py -m peakfilter # + [markdown] deletable=false # Additionally, if you already have a parameters JSON file, you can load its values instead of LipidFinder's defaults (see example below). Once launched, the process will guide you through a question-answering system to configure each parameter. At the end, the program will ask for the path and file name in which you want to save the new set of parameters: # + deletable=false config_params.py -m peakfilter -p my_parameters.json # + [markdown] deletable=false # The second option is through a Jupyter notebook (like this one). The *Configuration* module includes a graphical user interface (GUI) class to set up each parameter of the selected module interactively based on Jupyter's widgets. 
The following code shows an example of how to launch the GUI to set *Amalgamator*'s parameters based on default values: # + deletable=false from LipidFinder.Configuration.LFParametersGUI import LFParametersGUI LFParametersGUI(module='amalgamator'); # + [markdown] deletable=false # To use an existing parameters JSON file instead of the default values, you need to add the argument `src=x`, where `x` is the path to the JSON file, to the `LFParametersGUI()` call. # # **Hint:** once you have configured *PeakFilter*'s parameters, you can use that JSON file as template for the other modules so you do not need to type in again the value of the parameters they all share (e.g. *m/z* column name). *Warning:* parameter `firstSampleIndex` needs to be changed when using PeakFilter's summary output file as input. # # We have included a **help** option to display the description, usage and other information of each Python script included in LipidFinder. For instance, for the previous script, the command to run would be the following: # # ```shell # config_params.py -h # ``` # # ### 3.1. Backwards compatibility # # A user that has used LipidFinder 1.0 might be interested in repeating their experiments with the new version or run new ones under a similar parameter configuration. Thus, we have developed a script to transform the old parameters CSV file for *PeakFilter* and *Amalgamator* to the new parameters JSON files for the same modules. To run it you will also need the old adducts CSV file to update the lists of adduct pairs. We have included an example of these two files in the **`tests`** folder (available on GitHub) to illustrate how to use the script: # # ```shell # update_params.py -p tests/LipidFinder-1.0/old_parameters.csv -a test/LipidFinder-1.0/old_adducts.csv -o results # ``` # The script will generate two files: `peakfilter.json` and `amalgamator.json`. 
Be aware that these new parameters JSON files are incomplete (some new parameters have been introduced in LipidFinder 2.0) and will raise an error when used for their corresponding module. They should be handled first by `config_params.py` (`-p` argument) to fill in the missing parameters and generate a complete version. # + [markdown] deletable=false # ## 4. LipidFinder's workflow # # LipidFinder's complete workflow is composed by three modules: PeakFilter, Amalgamator and MSSearch. We have developed one script for each one to ease their usage. Each module will create a `log` file (named after the module) that will save any information that might be useful for the user, e.g. which frames that have been removed by which stages during *PeakFilter*. A new run will append the new information at the end of the `log` file if it already exists, so no information is lost. # # A standard LipidFinder workflow would first process the pre-aligned data with *PeakFilter* (once for negative and once for positive ion polarity), afterwards *Amalgamator* would merge both files' information based on matching *m/z* values, and finally, *MSSearch* would identify and classify lipid-like features with the selected LIPID MAPS database. Alternatively, LipidFinder can also process a single file with *PeakFilter* and run *MSSearch* afterwards. # # The following examples are all based on the demo data pre-processed with XCMS, but we also provide an alternative to show LipidFinder's flexibility with SIEVE™ pre-processed files (just replace *XCMS* by *SIEVE* in each command). # # ### 4.1. PeakFilter # # *PeakFilter* is intended to clean-up the data from contaminants, adducts, stacks and other artifacts like in-source ion fragments and salt clusters. Among its parameters, *PeakFilter* has several "switches" for determined filtering functionalities that should be configured based on the experimental set-up that generated the input dataset. 
# # In most scenarios, an experiment involving LC/MS will generate two sets of data with different ion polarity: one negative and one positive. After they have been pre-processed separately with XCMS, we need to process each file individually with PeakFilter. Using our demo data available on GitHub under the `tests` folder, we show first how to process the negative polarity CSV file: # + deletable=false run_peakfilter.py -i tests/XCMS/negative.csv -o results -p tests/XCMS/params_peakfilter_negative.json # + [markdown] deletable=false # And then the positive one: # + deletable=false run_peakfilter.py -i tests/XCMS/positive.csv -o results -p tests/XCMS/params_peakfilter_positive.json # + [markdown] deletable=false # By default, *PeakFilter* will generate the complete filtered file and a **summary** output CSV file with the relevant information of each remaining frame. # # The output file names will always contain ion polarity, so running *PeakFilter* once for each polarity will not be a problem when choosing the same output folder (e.g. `results` in the previous examples). However, if we change the parameters and run *PeakFilter* again with the same output folder, we will overwrite any previous output file for the same polarity. # + [markdown] deletable=false # ### 4.2. Amalgamator # # *Amalgamator* merges the output files for both negative and positive ion polarities generated with *PeakFilter*. By default, it will keep every frame that exists in only one of the input files, and for those with a match in both files, *Amalgamator* will retain the information of the one with the highest mean intensity for all samples tagging the selected source in the output file's **Polarity** column. 
# + deletable=false run_amalgamator.py -neg results/peakfilter_negative_summary.csv \ -pos results/peakfilter_positive_summary.csv \ -p tests/XCMS/params_amalgamator.json -o results # + [markdown] deletable=false # Duplicates are identified by comparing the negative file with the positive file within a small retention time tolerance and a corrected *m/z* tolerance (negative *m/z* + 2H<sup>+</sup>, followed by negative *m/z* + H<sup>+</sup> + CH3<sup>+</sup> for phosphatidylcholine and sphingomyelins with phosphocholine head group). Any hits are classed as a match. # # Alternatively, you can use the complete output files generated by *PeakFilter* as input files if you want to keep every column of your source data file. # + [markdown] deletable=false # ### 4.3. MSSearch # # *MSSearch* has been designed to identify and classify lipid-like features from either *PeakFilter* or *Amalgamator* output file, using the knowledge available in LIPID MAPS. The output file will include all the matches for each *m/z* value in the input file (within the indicated tolerance in the parameters JSON file). The output file will also include every frame not found in the selected database, and they will be classified as *unknown*. Finally, *MSSearch* will create a lipid-category scatter plot of the results by *m/z* and retention time in a PDF file (by default). # + deletable=false run_mssearch.py -i results/amalgamated.csv -o results \ -p tests/XCMS/params_mssearch.json
docs/LipidFinder_2.0_manual.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Prepare data input for Amazon Forecast # # > *This notebook should work with either the `Python 3 (Data Science)` kernel in SageMaker Studio, or `conda_python3` in classic SageMaker Notebook Instances* # # Forecasting is used in a variety of applications and business use cases: For example, retailers need to forecast the sales of their products to decide how much stock they need by location, Manufacturers need to estimate the number of parts required at their factories to optimize their supply chain, Businesses need to estimate their flexible workforce needs, Utilities need to forecast electricity consumption needs in order to attain an efficient energy network, and enterprises need to estimate their cloud infrastructure needs. # <img src="https://amazon-forecast-samples.s3-us-west-2.amazonaws.com/common/images/forecast_overview.png" width="98%"> # # Notebook Overview # # This notebook provides a template of typical steps required to prepare data input for Amazon Forecast. So, this is part of "Upload your data to Amazon Forecast" in the diagram above. Your data may need additional Prepare steps and/or you may not need all the steps in this notebook.<br> # # You will need lots of customization in the RTS section. Otherwise, look for following comment string to indicate places you will need to customize this notebook for your data: # + #### # EDIT THIS FOR YOUR DATA #### # - # <b>Step 1 Read data</b>. Modify this section to read your data. # # <b>Step 2 Make forecast choices</b>. You need to make some decisions. How many time units in future do you want to make forecasts? E.g. if time unit is Hour, then if you want to forecast out 1 week that would be 24*7 = 168 hours. You'll also be asked if you want integer or float target_values. 
And if 0's really mean 0 or are they missing data? # # <b>Step 10 Aggregate based on the time granularity of your data.</b> Possible aggregations are minute, hour, day, week, month. See <a href="https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html#howitworks-data-alignment" target="_blank">documentation. </a> # # <b>Step 12 Visualize time series and validate the time granularity you chose makes sense.</b> Too low-granularity means the time series looks like white noise and the data could benefit by aggregation at higher-time-level. For example, you might start to notice time series cyclical patterns when aggregating historical sales by-hour instead of by-minute. Alternate between Step 12 Visualization and Step 10 Aggregation until you are happy with the chosen time-granularity for your forecasts. # # <b>Steps 10 (Aggregate) and 13 (Split train) include a set of common checks on your data for forecasting.</b> One common mistake is customers think they should "try Amazon Forecast with a small sample of data". If the majority of your time series have fewer than 300 data points, Amazon Forecast will fail, because Amazon Forecast is designed for deep-learning AI forecasting algorithms, which require many data points (typically 1000+) per time series. Small data is better off using traditional, open-source forecast methods such as ETS (available in Excel), ARIMA, or Prophet. Small data is not a good fit for Amazon Forecast. If you really want to try Amazon Forecast, but your data fails the Error Checks consider: # # <ul> # <li>Can you get more data such that each time series will have longer history with more data points? If so, return to Step 1.</li> # <li>Can you combine items into fewer item_ids such that each time series will have longer history with more data points? If so, return to Step 1.</li> # <li>Can you reduce forecast dimensions, such as use item_id only and drop location_id?
If so, return to Step 5.</li> # <li>Can you drop to a lower time-frequency without your data turning into white noise? Check your data again using Step 10 Aggregate and Step 12 Visualize. </li> # </ul> # # It may be that you find your data is not a good fit for Amazon Forecast. In that case, it's better that you discover this early. # # <b>In steps 14-20, we will save headerless Target Time Series (TTS), Item metadata (IM), Related Time Series (RTS) to S3</b> so we can trigger Amazon Forecast automation, see # <a href="https://github.com/aws-samples/amazon-forecast-samples/blob/master/workshops/pre_POC_workshop/install-forecast-solution.md" target="_blank">Workshop Instructions Install Automation and Demo</a> # TODO: bucket steps so list doesn't look so long! <br> # TODO: Add data normalization step # # # Table of Contents # * Step 0: [Set up and install libraries](#setup) # * Step 1: [Read data](#read) # * Step 2: [Correct dtypes](#fix_dtypes) # * Step 3: [Make forecast choices](#choices) # * Step 4: [Drop null item_ids](#drop_null_items) # * Step 5: [Drop null timestamps](#drop_null_times) # * Step 7: [Inspect and treat extremes](#treat_extremes) # * Step 8: [Optional - Round negative targets up to 0](#round_negatives) # * Step 9: [Optional - Convert negative targets to_nan](#negatives_to_nan) # * Step 10: [Aggregate at chosen frequency](#groupby_frequency) # * Step 11: [Typical Retail scenario: Find top-moving items](#top_moving_items) # * Step 12: [Visualize time series](#visualize) # * Step 13: [Split train/test data](#split_train_test) # * Step 14: [Prepare and save Target Time Series](#TTS) # * Step 15: [Remove time series with no target values at all](#TTS_remove_all0) # * Step 16: [Remove time series with end of life](#TTS_remove_end_of_life) # * Step 17: [Remove time series with fewer than 5 data points](#TTS_remove_too_few_data_points) # * Step 18: [Optional - Assemble and save TTS_sparse, TTS_dense](#TTS-dense_sparse) # * Step 19: [Optional - Assemble 
and save TTS_top, TTS_slow](#TTS_top) # * Step 20: [Assemble and save RTS (if any)](#RTS) # * Step 21: [Classify time series](#Classify) # * Step 22: [Optional - Assemble and save TTS_smooth, TTS_erratic, TTS_intermittent, TTS_lumpy](#TTS_classes) # * Step 23: [Assemble and save metadata (if any)](#IM) # ## Data used in these notebooks: NYC Taxi trips open data # # Given hourly historical taxi trips data for NYC, your task is to predict #pickups in next 7 days, per hour and per pickup zone. <br> # # <ul> # <li>Original data source: <a href="https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page" target="_blank"> https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page</a> </li> # <li>AWS-hosted public source: <a href="https://registry.opendata.aws/nyc-tlc-trip-records-pds/" target="_blank">https://registry.opendata.aws/nyc-tlc-trip-records-pds/ </a> </li> # <li>AWS managed weather data ingestion as a service that is bundled with Amazon Forecast, aggregated by location and by hour. Initially only for USA and Europe, but depending on demand, possibly in the future for other global regions. </li> # <li>Data used: Yellow taxis dates: 2018-12 through 2020-02 to avoid COVID effects </li> # </ul> # # # ### Features and cleaning # Note: ~5GB Raw Data has already been cleaned and joined using AWS Glue (tutorials to be created in future). # <ul> # <li>Join shape files Latitude, Longitude</li> # <li>Add Trip duration in minutes</li> # <li>Drop negative trip distances, 0 fares, 0 passengers, less than 1min trip durations </li> # <li>Drop 2 unknown zones ['264', '265'] # </ul> # ## Step 0: Set up and install libraries <a class="anchor" id="setup"></a> # # This notebook will use a range of Python's built-in modules, some open source libraries from AWS and third parties, and some local 'utility' modules where we've packaged some common functions to help keep the notebook concise. 
# # If you'd like to dive deeper, you'll be able to find the source code for all referenced `util` and `local_util` functions in this code repository. # Optionally install dask for faster joins when df is large # !pip install "dask[dataframe]" # + # %load_ext autoreload # %autoreload 2 # Python standard built-ins: import datetime import os import random import sys import time # Open-source libraries: import boto3 # The AWS SDK for Python from dateutil.relativedelta import relativedelta import matplotlib as mpl # Graph plotting import matplotlib.pyplot as plt # %matplotlib inline import numpy as np # Numerical processing import pandas as pd # Dataframe (tabular data) processing import seaborn as sns # Graph plotting print('matplotlib: {}'.format(mpl.__version__)) print('numpy: {}'.format(np.__version__)) print('pandas: {}'.format(pd.__version__)) print('seaborn: {}'.format(sns.__version__)) # Local code: import local_util # From the local_util/ folder sys.path.insert( 0, os.path.abspath("../../notebooks/common") ) import util # From this repository's /notebooks/common folder # - # Here we'll perform some initial display configuration and choose a color palette, for later visualizations: # + np.random.seed(42) # Seed random number generator for consistent results local_util.plotting.configure_pandas_display() color_pal = sns.color_palette("colorblind", 6).as_hex() colorblind6 = ','.join(color_pal).split(",") # - # <b>Create a new S3 bucket for this lesson</b> # - The cell below will create a new S3 bucket with name ending in "forecast-demo-taxi" # + # create unique S3 bucket for saving your own data #### # EDIT THIS FOR YOUR DATA #### region = boto3.Session("s3").region_name print(f"region = {region}") account_id = boto3.client('sts').get_caller_identity().get('Account') # create unique S3 bucket for saving your own data bucket_name = account_id + '-forecast-demo-taxi' print(f"bucket_name = {bucket_name}") util.create_bucket(bucket_name, region) # create prefix for 
organizing your new bucket prefix = "nyc-taxi-trips" print(f"using folder '{prefix}'") # ...and a folder for local files os.makedirs("data", exist_ok=True) # - # Connect API sessions session = boto3.Session(region_name=region) s3 = session.client(service_name='s3') forecast = session.client(service_name='forecast') forecastquery = session.client(service_name='forecastquery') # check you can communicate with Forecast APIs forecast.list_predictors() # <b>Create IAM Role for Forecast</b> <br> # Like many AWS services, Forecast will need to assume an IAM role in order to interact with your S3 resources securely. In the sample notebooks, we use the get_or_create_iam_role() utility function to create an IAM role. Please refer to "notebooks/common/util/fcst_utils.py" for implementation. # + # check sagemaker session first, if not found create a role try: from sagemaker import get_execution_role role_arn = get_execution_role() except: # Create the role to provide to Amazon Forecast. role_name = "ForecastNotebookRole-Basic" print(f"Creating Role {role_name} ...") role_arn = util.get_or_create_iam_role( role_name = role_name ) # # echo user inputs without account print(f"Success! Using role arn = {role_arn.partition('/')[2]}") # - # ## Step 1. Read data <a class="anchor" id="read"></a> # # The first thing we're going to do is read the headerless .csv file. Then we need to identify which columns map to required Amazon Forecast inputs. # # <img src="https://amazon-forecast-samples.s3-us-west-2.amazonaws.com/common/images/nyctaxi_map_fields.png" width="82%"> # <br> # # <b>In order to use Weather Index, you need a geolocation-type column.</b> The geolocation-type column connects your locations to geolocations, and can be 5-digit postal code or latitude_longitude. 
For more details, see: # <ul> # <li><a href="https://docs.aws.amazon.com/forecast/latest/dg/weather.html" target="_blank">Link to documentation about geolocations</a></li> # <li><a href="https://aws.amazon.com/blogs/machine-learning/amazon-forecast-weather-index-automatically-include-local-weather-to-increase-your-forecasting-model-accuracy/" target="_blank">Our Weather blog, which shows UI steps.</a></li> # </ul> # # The cell below shows an example reading headerless csv file with lat_lon geolocation column, "pickup_geolocation". The rest of this notebook writes headerless csv files to be able to use automation. If you are not planning on using the automation solution, .csv files with headers are allowed. # + #### # EDIT THIS FOR YOUR DATA #### ## Read cleaned, joined, featurized data from Glue ETL processing df_raw = pd.read_csv( "s3://amazon-forecast-samples/data_prep_templates/clean_features.csv", parse_dates=True, header=None, dtype={ 0: "str", 1: "str", 2: "str", 3: "str", 4: "int32", 5: "float64", 6: "str", 7: "str", 8: "str", }, names=[ "pulocationid", "pickup_hourly", "pickup_day_of_week", "day_hour", "trip_quantity", "mean_item_loc_weekday", "pickup_geolocation", "pickup_borough", "binned_max_item", ], ) # drop duplicates print(df_raw.shape) df_raw.drop_duplicates(inplace=True) df_raw['pickup_hourly'] = pd.to_datetime(df_raw["pickup_hourly"], format="%Y-%m-%d %H:%M:%S", errors='coerce') print(df_raw.shape) print(df_raw.dtypes) start_time = df_raw.pickup_hourly.min() end_time = df_raw.pickup_hourly.max() print(f"Min timestamp = {start_time}") print(f"Max timestamp = {end_time}") df_raw.sample(5) # + #### # EDIT THIS FOR YOUR DATA #### # map expected column names item_id = "pulocationid" target_value = "trip_quantity" timestamp = "pickup_hourly" location_id = None geolocation = "pickup_geolocation" if location_id is None: use_location = False else: use_location = True print(f"use_location = {use_location}") # specify array of dimensions you'll use for 
forecasting if use_location: forecast_dims = [timestamp, location_id, item_id] else: forecast_dims = [timestamp, item_id] print(f"forecast_dims = {forecast_dims}") dims_except_timestamp = [i for i in forecast_dims if i != timestamp] print(f"dims_except_timestamp = {dims_except_timestamp}") # - original_shape = df_raw.shape original_shape # ## Step 2. Correct dtypes <a class="anchor" id="fix_dtypes"></a> # + #### # EDIT THIS FOR YOUR DATA #### # # correct dtypes # df_raw['ReportingMonth'] = pd.to_datetime(df_raw["ReportingMonth"], format="%Y-%m-%d", errors='coerce') # # Use the new pandas Integer type # df_raw.ProductGroup3Id = df_raw.ProductGroup3Id.astype('Int64').astype(str) # # df_raw.ProductGroup3Id = df_raw.ProductGroup3Id.astype('Int64') print(df_raw.shape) print(df_raw.dtypes) # df_raw.sample(5) # - # ## Step 3. Make forecast choices <a class="anchor" id="choices"></a> # # Below, you need to make some choices. First, you will be asked how you want to treat target values (the values you're forecasting). # <ol> # <li><b>Do you want your target values to be floating point numbers or integers? Pro-tip: If your raw data values are already integers or if you need to forecast new items/cold-starts, leave this setting as 'False' to do nothing; otherwise, best practice is set equal 'True'.</b> If you have floating point numbers, you won't be able to use negative-binomial distribution later in the DeepAR+ algorithm. Setting 'False' will leave raw data values as they are. </li> # <li><b>Decide if you want to convert 0's to nulls. Pro-tip: Use setting 'True',</b> then let Amazon Forecast <a href="https://docs.aws.amazon.com/forecast/latest/dg/howitworks-missing-values.html" target="_blank">do automatic null-filling through its Featurization settings.</a>. Setting 'True' will convert 0's to nulls. It means you think some of your 0's were missing data instead of actual 0's. Warning: if you have cold-starts, or new product introductions, they may get dropped. 
You will need to manually re-add them to only the Inference data, since they should be dropped from Train data. However, if you believe in your data '0' always means '0', then type 'False' to do nothing to 0's. </li> # <li><b>Decide if you want to replace extreme values with mean?</b> If you think you only have one or two extreme values it might make sense to replace them. On the other hand, if you have quite a few, that is indication those values are not really extreme.</li> # <li><b>Do you want to generate future RTS that extends into future?</b> If you set this to True, all data will be used for Training. If you set it to False, a hold-out of length Forecast Horizon will be used to calculate RTS.</li> # <li><b>What is the time granularity for your forecasts?</b>. For example, if your time unit is Hour, answer = "H". </li> # <li><b>How many time units do you want to forecast?</b>. For example, if your time unit is Hour, then if you want to forecast out 1 week, that would be 24*7 = 168 hours, so answer = 168. </li> # <li><b>What is the first date you want to forecast?</b> Training data will be cut short 1 time unit before the desired first forecast snapshot date. For example, if the granularity of your data is "D" and you want your first forecast to happen on Feb 8, 2019, then the last timestamp for training will be Feb 7, 2019.</li> # <li><b>Think of a name you want to give this experiment</b>, so all files will have the same names. You should also use this same name for your Forecast DatasetGroup name, to set yourself up for reproducibility. </li> # </ol> # + # FORECAST SETTINGS ################### # INSTRUCTIONS: # 1. replace occurrences "xxx=+FORECAST_LENGTH" with correct dictionary value # 2. run this cell 1x, look at suggested "snapshot_date" # 3. use suggested "snapshot_date", run again.
################### # Round target values to integers target_to_integer = False # Replace 0's with nulls replace_all_zeroes_with_null = False # Replace extremes with mean of last 3 months replace_extremes_with_mean = False # Create RTS with unknown future data # Note: if you set this to True, all known data will be used for Training # Note: if you set this to False, a hold-out of length Forecast Horizon will be used to calculate RTS create_future_RTS_with_unknown_data = False # What is your forecast time unit granularity? # Choices are: ^Y|M|W|D|H|30min|15min|10min|5min|1min$ FORECAST_FREQ = "H" # what is your forecast horizon in number time units you've selected? # e.g. if you're forecasting in hours, how many hours out do you want a forecast? FORECAST_LENGTH = 168 # What is the first date you want to forecast? # Training data will be cut short 1 time unit before the desired first forecast snapshot date # get snapshot date (date of 1st forecast) as last time minus forecast horizon # NOTE(fix): "M" must map to dateutil's "months" keyword, not "hours" — monthly data would otherwise shift by hours AF_freq_to_dateutil_freq = {"Y":"years", "M":"months", "W":"weeks", "D":"days", "H":"hours"} #### # EDIT THIS FOR YOUR DATA #### ################### # INSTRUCTIONS: replace "xxx=FORECAST_LENGTH" with correct dictionary value from above # example if FORECAST_FREQ="W" then use "weeks=FORECAST_LENGTH" ################### end_time_train = df_raw[timestamp].max() - relativedelta(hours=FORECAST_LENGTH) snapshot_date = end_time_train.date() + relativedelta(days=1) print(f"Suggested snapshot date = {snapshot_date}") # Run entire cell 1x, to view the "Suggested snapshot date" # Change snapshot date below to match suggested SNAPSHOT_DATE = datetime.datetime(2020, 2, 23, 0, 0, 0) # What name do you want to give this experiment? # Be sure to use same name for your Forecast Dataset Group name.
EXPERIMENT_NAME = "nyctaxi" DATA_VERSION = 1 # print some validation back to user print(f"Convert your frequency to python dateutil = {AF_freq_to_dateutil_freq[FORECAST_FREQ]}") print(f"Forecast horizon = {FORECAST_LENGTH} {AF_freq_to_dateutil_freq[FORECAST_FREQ]}") AF_freq_to_dateutil_freq = {"Y":"years", "M":"hours", "W":"weeks", "D":"days", "H":"hours"} #### # EDIT THIS FOR YOUR DATA #### ################### # INSTRUCTIONS: replace "xxx=+FORECAST_LENGTH" with correct dictionary value from above # example if FORECAST_FREQ="W" then use "weeks=+FORECAST_LENGTH" ################### snapshot_end = SNAPSHOT_DATE + relativedelta(hours=+FORECAST_LENGTH) snapshot_end = snapshot_end - relativedelta(hours=1) print(f"Training data end date = {end_time_train}") print(f"Forecast start date = {SNAPSHOT_DATE}") print(f"Forecast end date = {snapshot_end}") start_time = df_raw[timestamp].min() end_time = snapshot_end snapshot_date_monthYear = SNAPSHOT_DATE.strftime("%m%d%Y") EXPERIMENT_NAME = f"{EXPERIMENT_NAME}_snap{snapshot_date_monthYear}_{FORECAST_LENGTH}{FORECAST_FREQ}" print(f"Experiment name = {EXPERIMENT_NAME}") # - # ## Step 4. Drop null item_ids <a class="anchor" id="drop_null_items"></a> # + templist = df_raw[item_id].unique() print(f"Number unique items: {len(templist)}") print(f"Number nulls: {pd.isnull(templist).sum()}") if len(templist) < 20: print(templist) # - ## Drop the null item_ids, if any exist if pd.isnull(templist).sum() > 0: print(df_raw.shape) df_raw = df_raw.loc[(~df_raw[item_id].isna()), :].copy() print(df_raw.shape) print(len(df_raw[item_id].unique())) else: print("No missing item_ids found.") # ## Step 5. 
Drop null timestamps <a class="anchor" id="drop_null_times"></a> # + # check null timestamps templist = df_raw.loc[(df_raw[timestamp].isna()), :].shape[0] print(f"Number nulls: {templist}") if (templist < 10) & (templist > 0) : print(df_raw.loc[(df_raw[timestamp].isna()), :]) # - ## Drop the null quantities and dates if templist > 0: print(df_raw.shape) df_raw = df_raw.loc[(~df_raw[timestamp].isna()), :].copy() print(df_raw.shape) print(df_raw['timestamp'].isna().sum()) else: print("No null timestamps found.") # ## Step 7. Inspect (and treat) extremes <a class="anchor" id="treat_extremes"></a> # Decide how many extreme values is considered unusual MAX_EXTREMES = 10 EXTREME_VALUE = df_raw[target_value].quantile(0.999999) print(EXTREME_VALUE) # + # Inspect extremes templist = df_raw.loc[(df_raw[target_value]>=EXTREME_VALUE), :].shape[0] print(f"Number extremes: {templist}") if (templist < MAX_EXTREMES) & (templist > 0) : print(df_raw.loc[(df_raw[target_value]>=EXTREME_VALUE), :].set_index([item_id])) # - print ("Current configuration - replace extremes with mean:", replace_extremes_with_mean) ## PLOT OVERALL TIME SERIES - TO HELP INSPECT EXTREMES df_raw.plot(x=timestamp, y=target_value, figsize=(15, 8)) # + # This is a very basic example of replacing extremes. In your implementation it should be done more carefully than this. 
# Replace extreme value with treated median last n months values months_to_use = 3 if (use_location): # with location_id if (replace_extremes_with_mean & (templist < MAX_EXTREMES) & (templist > 0) ): df_clean = df_raw.copy() extremes = df_raw.loc[(df_raw[target_value]>=EXTREME_VALUE), :][[item_id, location_id if use_location else None, 'day_hour', timestamp]] for index, keys in extremes.iterrows(): print (keys) # calculate median value from last n months before the extreme temp = df_raw.loc[((df_raw[item_id]==keys[item_id]) & (df_raw[location_id]==keys[location_id]) & (df_raw['day_hour']==keys['day_hour']) & (df_raw[timestamp]<keys[timestamp]) & (df_raw[timestamp]>keys[timestamp] - relativedelta(months = months_to_use))), :] # calculate median replace_extreme = temp[target_value].median() print(replace_extreme) # visually check if replaced value looks like median target_value display(temp) # make the replacement df_clean.loc[((df_clean[item_id]==keys[item_id]) & (df_clean[location_id]==keys[location_id]) & (df_raw['day_hour']==keys['day_hour']) & (df_clean[timestamp]==keys[timestamp])), target_value] = replace_extreme print(f"new value is {df_clean.loc[((df_clean[item_id]==keys[item_id]) & (df_clean[location_id]==keys[location_id]) & (df_clean[timestamp]==keys[timestamp])), target_value].max()}") else: print("No extreme values found or do not replace any extremes.") df_clean = df_raw.copy() else: # without location_id if (replace_extremes_with_mean & (templist < MAX_EXTREMES) & (templist > 0) ): df_clean = df_raw.copy() extremes = df_raw.loc[(df_raw[target_value]>=EXTREME_VALUE), :][[item_id, 'day_hour', timestamp]] for index, keys in extremes.iterrows(): print (keys) # calculate median value from last n months before the extreme temp = df_raw.loc[((df_raw[item_id]==keys[item_id]) & (df_raw['day_hour']==keys['day_hour']) & (df_raw[timestamp]<keys[timestamp]) & (df_raw[timestamp]>keys[timestamp] - relativedelta(months = months_to_use))), :] # calculate median 
replace_extreme = temp[target_value].median() print(replace_extreme) # visually check if replaced value looks like median target_value display(temp) # make the replacement df_clean.loc[((df_clean[item_id]==keys[item_id]) & (df_raw['day_hour']==keys['day_hour']) & (df_clean[timestamp]==keys[timestamp])), target_value] = replace_extreme print(f"new value is {df_clean.loc[((df_clean[item_id]==keys[item_id]) & (df_clean[timestamp]==keys[timestamp])), target_value].max()}") else: print("No extreme values found or do not replace any extremes.") df_clean = df_raw.copy() # - ## PLOT THE CLEAN TIME SERIES - TO HELP INSPECT EXTREMES df_clean.plot(x=timestamp, y=target_value, figsize=(15, 8)) # save some memory del df_raw # throws error if we lost some values assert original_shape[0] == df_clean.shape[0] # ## Step 8. Optional - Round negative targets up to 0 <a class="anchor" id="round_negatives"></a> # Check negative values print(df_clean.loc[(df_clean[target_value] <0), :].shape) df_clean.loc[(df_clean[target_value] <0), :].sort_values([timestamp, item_id]).head() # + # CAREFUL!! MAKE SURE ROUNDING NEGATIVES UP TO 0 MAKES SENSE FOR YOUR USE CASE # If negative values found, round them up to 0 if df_clean.loc[(df_clean[target_value] <0), :].shape[0] > 0: # Check y-value before cleaning print(df_clean[target_value].describe()) # default negative values in demand to 0 print(f"{df_clean[target_value].lt(0).sum()} negative values will be rounded up to 0") print() ts_cols = [target_value] for c in ts_cols: df_clean.loc[(df_clean[c] < 0.0), c] = 0.0 # Check y-value after cleaning print(df_clean[target_value].describe()) else: print("No negative values found.") # - # ## Step 9. Optional - Convert negative targets to nan <a class="anchor" id="negatives_to_nan"></a> # + # # Check negative values # print(df_clean.loc[(df_clean[target_value] <0), :].shape) # df_clean.loc[(df_clean[target_value] <0), :].sort_values([timestamp, item_id]).head() # + # # # CAREFUL!! 
MAKE SURE CHANGING NEGATIVES TO NAN NEGATIVES MAKES SENSE FOR YOUR USE CASE # # If negative values found, round them up to 0 # if df_clean.loc[(df_clean[target_value] <0), :].shape[0] > 0: # # Check y-value before cleaning # print(df_clean[target_value].describe()) # # default negative values in demand to 0 # print(f"{df_clean[target_value].lt(0).sum()} negative values will be rounded up to 0") # print() # ts_cols = [target_value] # print () # for c in ts_cols: # df_clean.loc[(df_clean[c] < 0.0), c] = float('nan') # # Check y-value after cleaning # print(df_clean[target_value].describe()) # else: # print("No negative values found.") # + # # throws error if we lost some values # assert original_shape[0] == df_clean.shape[0] # - # ## Step 10. Aggregate at your chosen frequency <a class="anchor" id="groupby_frequency"></a> # # Below, we show an example of resampling at hourly frequency by forecast dimensions. Modify the code to resample at other frequencies. # # Decide which aggregation-level makes sense for your data, which is a balance between desired aggregation and what the data-collection frequency will support. forecast_dims # + ## CHECK TO SEE IF YOUR TIMESERIES DIMENSIONS ARE CORRECT # checking if there are multiple entries per item_id per timestamp per location df_aux = df_clean.copy().set_index(forecast_dims) duplicates = df_aux.pivot_table(index=forecast_dims, aggfunc='size') duplicates = pd.DataFrame( duplicates, columns=["NumberPerTS"]) print (duplicates[duplicates["NumberPerTS"]>1].head()) # - # checking to see if your timeseries dimensions are correct if duplicates[duplicates["NumberPerTS"]>1].shape[0] > 0: print("WARNING: YOUR AGGREGATION ASSUMPTION THAT timestamp, item_id, location_id ARE UNIQUE IS NOT CORRECT.") print("Inspect df_aux where you see 'NumberPerTS' > 1") else: print("Success! 
timestamp, item_id, location_id is a unique grouping of your time series.") # In case your assumed dimensions are not unique, code below is to explore adding a composite column. # + #### # EDIT THIS FOR YOUR DATA #### # # inspect what is happening on these repeated items # try: # df_aux.reset_index(inplace = True) # except Exception as e: # print (e) # test_aux = df_aux.loc[((df_aux[item_id]=='PRD-05685') & (df_aux[location_id]=='STCK-00605') # & (df_aux[timestamp]=="2019-09-30 01:36:45")), :] # test_aux.sort_values(by=[timestamp]) # # Possibly extra dimension Organization Name? # + # # Since location is not unique, create new fake composite column # df_clean['timeseries_key'] = df_clean[item_id] + '-'+ df_clean[location_id] \ # + '-' + df_clean['Organization Name'] # df_clean.head(2) # + # # If you changed dimensions, re-map expected column names # item_id = "timeseries_key" # use_location = False # forecast_dims = [timestamp, item_id] # print(f"forecast_dims = {forecast_dims}") # dims_except_timestamp = [i for i in forecast_dims if i != timestamp] # print(f"dims_except_timestamp = {dims_except_timestamp}") # + # ## CHECK AGAIN TO SEE IF YOUR DATA AGGREGATION ASSUMPTION IS CORRECT # # checking if there are multiple entries per item_id per timestamp per location # df_aux = df_clean[forecast_dims + [target_value]].copy().set_index(forecast_dims) # df_aux.drop_duplicates(inplace=True) # duplicates = df_aux.pivot_table(index=forecast_dims, aggfunc='size') # duplicates = pd.DataFrame( duplicates, columns=["NumberPerTS"]) # # checking to see if your data aggregation is correct # if duplicates[duplicates["NumberPerTS"]>1].shape[0] > 0: # print (duplicates[duplicates["NumberPerTS"]>1].head()) # print("WARNING: YOUR AGGREGATION ASSUMPTION THAT timestamp, item_id, location_id ARE UNIQUE IS NOT CORRECT.") # print("Inspect df_aux where you see 'NumberPerTS' > 1") # else: # print("Success! 
timestamp, item_id, location_id is a unique grouping of your time series.") # - print(f"forecast_dims = {forecast_dims}") df_clean.head(1) # + #### # EDIT THIS FOR YOUR DATA #### # restrict columns if desired # df_clean = df_clean[[timestamp, item_id, "Prod #", location_id, "Organization Name", target_value]].copy() print(df_clean.shape) df_clean.drop_duplicates(inplace=True) print(df_clean.shape) # Put all columns besides forecast_dims that you want to keep in a dictionary of aggregations (per # pandas agg()): agg_dict = { "pickup_day_of_week": "first", "day_hour": "first", "trip_quantity": "sum", "mean_item_loc_weekday": "mean", "pickup_geolocation": "first", "pickup_borough": "first", "binned_max_item": "last", } print("Validating agg_dict...") for dim in forecast_dims: if dim in agg_dict: dim_agg = agg_dict[dim] if (type(dim_agg) == str) or not hasattr(dim_agg, "__iter__"): print( "Single aggregation on forecast dimension column not supported: Ignoring\n" f"({dim}: {dim_agg})" ) del agg_dict[dim] # + # # THIS CODE BLOCK IS AN EXAMPLE OF Weekly AGGREGATION # g_week = local_util.dataprep.aggregate_time_series( # df_clean, # agg_freq="W", # timestamp_col=timestamp, # target_col=target_value, # dimension_cols=dims_except_timestamp, # agg_dict=agg_dict, # already_grouped=False, # analyze=True, # ) # # add new time dimension since original timestamp is not weekly # g_week['year_week'] = g_week[timestamp].dt.year.astype(str) + '_' \ # + g_week[timestamp].dt.isocalendar().week.astype(str) # display(g_week.sample(5)) # + # THIS CODE BLOCK IS AN EXAMPLE OF Hourly AGGREGATION # Note: The sample data shipped with notebook is ideal - all time series have 5856 data points, # which is squarely in the Deep Learning desired data size. 
# Hourly aggregation of the cleaned data; analyze=True also prints per-series stats.
g_hour = local_util.dataprep.aggregate_time_series(
    df_clean,
    agg_freq="H",
    timestamp_col=timestamp,
    target_col=target_value,
    dimension_cols=dims_except_timestamp,
    agg_dict=agg_dict,
    already_grouped=True,
    analyze=True,
)
display(g_hour.sample(5))

# +
## TRY ANOTHER AGGREGATION LEVEL AND COMPARE TARGET_VALUE DISTRIBUTION SHAPES
agg_freq = "2H"
g_2hour = local_util.dataprep.aggregate_time_series(
    df_clean,
    "2H",
    timestamp_col=timestamp,
    target_col=target_value,
    dimension_cols=dims_except_timestamp,
    agg_dict=agg_dict,
)
display(g_2hour.sample(5))

# +
## TRY ANOTHER AGGREGATION LEVEL AND COMPARE TARGET_VALUE DISTRIBUTION SHAPES
agg_freq = "4H"
g_4hour = local_util.dataprep.aggregate_time_series(
    df_clean,
    "4H",
    timestamp_col=timestamp,
    target_col=target_value,
    dimension_cols=dims_except_timestamp,
    agg_dict=agg_dict,
)
display(g_4hour.sample(5))
# -

# <br>
#
# <b> Select the aggregation-level to keep, based on results above.</b>

# +
## USE THE GROUPING YOU SELECTED ABOVE
df = g_hour.copy()

# Delete no-longer-needed aggregations to save memory:
del df_aux
del g_hour
del g_2hour
del g_4hour

print(df.shape, df_clean.shape)
df.sample(5)
# -

# ## Step 11. Typical retail scenarios: Find top-moving items <a class="anchor" id="top_moving_items"></a>
#
# Next, we want to drill down and visualize some individual item time series. Typically customers have "catalog-type" data, where only the top 20% of their items are top-movers; the rest of the 80% of items are not top-movers. For visualization, we want to select automatically some of the top-moving items.

use_location

# %%time
# Rank series by movement; location dimension is only used when use_location is set.
print(f"Calculating per item{'+location' if use_location else ''} velocities")
top_movers, slow_movers = local_util.analysis.get_top_moving_items(
    df,
    timestamp,
    target_value,
    item_id,
    location_id if use_location else None,
)

# Fixed random_state keeps the plotted sample reproducible across runs.
n_random = 5
print(f"Selecting {n_random} random top-moving series for plotting:")
random_series = top_movers.sample(
    n_random,
    random_state=42,
).index.to_frame().reset_index(drop=True)
random_series

# +
# Alternatively, you could explicitly specify some dimension combinations you'd like to explore:
# random_series = pd.DataFrame({
#     item_id: ["79", "135"],
#     location_id: ["here", "there"],
# })
# random_series
# -

# ## Step12. Visualize time series <a class="anchor" id="visualize"></a>

# +
df_plot = local_util.dataprep.select_by_df(df, random_series)
df_plot.set_index(timestamp, inplace=True)
df_plot.head(2)

local_util.plotting.make_plots(
    df_plot,
    random_series,
    target_value,
    "Hourly quantity",
)
# -

# ## Step 13. Split train/test data <a class="anchor" id="split_train_test"></a>
#
# In forecasting, "train" data is until a last-train date, sometimes called the forecast snapshot date.
# <ul>
# <li>Train data includes all data up to your last-train date. </li>
# <li>Test data includes dates after your last-train date through end of desired forecast horizon.</li>
# <li>Validation data might exist for part or maybe all of the desired forecast horizon. </li>
# <li>TTS timestamps should start and end with Train data. </li>
# <li>RTS timestamps should start with Train data and extend out past end of TTS to end of the desired forecast horizon.</li>
# </ul>
#
# For model generalization, all processing from here on out will only be done on train data.

# +
# Forecast Horizon is number of time steps out in the future you want to predict
# Time steps are defined in the time frequency you specified in Step 5 Aggregate
# Example if aggregation was hourly, then forecast length=168 means forecast horizon of 7 days or 7*24=168 hours
print(f"Forecast horizon = {FORECAST_LENGTH}")  # = 12
print(f"Forecast unit of frequency = {AF_freq_to_dateutil_freq[FORECAST_FREQ]}")  # = 30
print(f"Forecast start date = {SNAPSHOT_DATE}")

# +
# Create train data as all except last FORECAST_HORIZON length
start_time = df[timestamp].min()
end_time = snapshot_end
start_time_test = SNAPSHOT_DATE

# NOTE(review): end_time_train is not assigned in this chunk — presumably defined
# earlier in the notebook alongside snapshot_end; verify before running standalone.
print(f"start_time = {start_time}")
print(f"end_time_train = {end_time_train}")
print(f"start_time_test = {start_time_test}")
print(f"end_time = {snapshot_end}")
# -

create_future_RTS_with_unknown_data

# +
if create_future_RTS_with_unknown_data:
    # Create train data as all data => this means RTS will extend into unknown future
    print("using all known data for training")
    train_df = df.copy()
else:
    # Create train subset with hold-out of length FORECAST_LENGTH
    print("using hold-out with train data")
    train_df = df.copy()
    train_df = train_df.loc[(train_df[timestamp] <= end_time_train), :]

# check you did the right thing
print(f"start_time = {start_time}")
print(f"end_time: {end_time}")
print()
print(f"start_time_train = {train_df[timestamp].min()}")
print(f"end_time_train = {train_df[timestamp].max()}")
train_df.head()

# +
# ERROR CHECK: DO YOU HAVE ENOUGH HISTORICAL DATA POINTS TO SUPPORT DESIRED FORECAST HORIZON?
# calculate number data points in train data
num_data_points = train_df.groupby(dims_except_timestamp).nunique()[timestamp].mean()
print(f"1/3 training data points: {np.round(num_data_points/3,0)}")

# Amazon Forecast length of forecasts can be 500 data points and 1/3 target time series dataset len
# Use boolean `and` (not bitwise &) for combining plain boolean conditions.
if (FORECAST_LENGTH < 500) and (FORECAST_LENGTH <= np.round(num_data_points/3, 0)):
    print("".join((
        f"Success, forecast horizon {FORECAST_LENGTH} is shorter than 500 data points and less ",
        "than 1/3 of the historical training data length.",
    )))
else:
    raise ValueError("".join((
        f"Error, forecast horizon {FORECAST_LENGTH} is too long. Must be fewer than 500 data ",
        "points and less than 1/3 of the historical training data length.",
    )))
# If you have too few data points, return to step above and choose smaller time granularity
# -

# ## Step 14. Prepare and Save Target Time Series (TTS) <a class="anchor" id="TTS"></a>

print(create_future_RTS_with_unknown_data)
print(f"forecast_dims: {forecast_dims}")
print(f"geolocation: {geolocation}")
train_df.head(1)

# +
## Assemble TTS required columns
####
# EDIT THIS FOR YOUR DATA
####
if geolocation is not None:
    print("Preparing TTS with geolocation data")
    # restrict train data to just tts columns
    tts = train_df[forecast_dims + [geolocation, target_value]].copy()
    tts = tts.groupby(forecast_dims + [geolocation])[[target_value]].sum()
else:
    print("Running without geolocation data")
    # restrict train data to just tts columns.
    # (A redundant second assignment using the literal [timestamp, item_id] columns was
    # removed; forecast_dims already expresses the same selection and stays correct if
    # the dimension list is re-mapped above.)
    tts = train_df[forecast_dims + [target_value]].copy()
    tts = tts.groupby(forecast_dims)[[target_value]].sum()

tts.reset_index(inplace=True)
print(f"start date = {tts[timestamp].min()}")
print(f"end date = {tts[timestamp].max()}")

# check it
print(tts.shape)
print(tts.dtypes)
tts.head(5)

# +
# check format of geolocation column
# tts[geolocation].value_counts(normalize=True, dropna=False)
# -

# <b>Optional - convert target_value to integer if this is the last step
# for TTS. </b>
#
# Note: Currently in Amazon Forecast, if you declare target_value is integer in the schema, but you have any decimals in your numbers, you will get an error.
#
# Make sure you really see integers in the code below, if you want integers!

target_to_integer

# +
# Use the new pandas Integer type
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
# Convert the target to integer when requested, falling back to pandas' nullable
# Int64 when plain numpy int conversion fails (e.g. NaNs present); otherwise
# coerce object columns to float32.
# TODO: turn this into a function
if target_to_integer:
    try:
        tts[target_value] = tts[target_value].astype(int)
        print("Success! Converted to numpy integer")
    except Exception as e:
        # numpy int cannot represent missing values; retry with nullable Int64
        print (e)
        print("Trying pandas nullable Integer type instead of numpy integer type...")
        try:
            tts[target_value] = tts[target_value].astype('Int64', errors='ignore')
            print("Success! converted to pandas integer")
        except Exception as e:
            print (e)
elif tts[target_value].dtype == 'object':
    # convert to float
    tts[target_value] = tts[target_value].astype(np.float32)
elif tts[target_value].dtype != 'object':
    # do nothing — already numeric
    print("target_value is already a float")

print(tts.dtypes)
tts.sample(5)
# -

# ## Step 15. Remove time series with no target values at all<a class="anchor" id="TTS_remove_all0"></a>

# In case there are time series which are only 0's, may as well remove them, since their forecast should be all 0's too. Another reason to remove these time series is they could bias the overall forecast toward 0, when that's not what you want.
print(tts.shape)
tts.dtypes

# +
# check if sum of all sales is 0
g = tts.groupby(dims_except_timestamp).sum()
g.fillna(0, inplace=True)
skus_with_no_sales_in_warehouse = g[g[target_value] == 0].copy()

# drop extra columns for cleaner merge
skus_with_no_sales_in_warehouse.reset_index(inplace=True)
skus_with_no_sales_in_warehouse = skus_with_no_sales_in_warehouse.iloc[:, 0:1]
skus_with_no_sales_in_warehouse.drop_duplicates(inplace=True)
display (skus_with_no_sales_in_warehouse.head(2))

if skus_with_no_sales_in_warehouse.shape[0] > 0:
    # Anti-join: keep only rows of tts NOT matching an all-zero series.
    # https://stackoverflow.com/questions/32676027/how-to-do-df1-not-df2-dataframe-merge-in-pandas
    # .drop(columns=...) replaces the positional-axis form .drop('_merge', 1),
    # which was removed in pandas 2.0.
    tts_copy = tts.merge(skus_with_no_sales_in_warehouse, how='left', on=dims_except_timestamp, indicator=True) \
        .query("_merge=='left_only'") \
        .drop(columns='_merge')
    print(tts.shape, tts_copy.shape)
    display(tts_copy.sample(5))
else:
    print("No time series found with only 0's.")

# +
# really drop skus with only 0's
if skus_with_no_sales_in_warehouse.shape[0] > 0:
    print(tts.shape, tts_copy.shape)
    tts = tts_copy.copy()
    del (tts_copy)
    # keep track of dropped dimensions and reason why dropped
    skus_with_no_sales_in_warehouse = skus_with_no_sales_in_warehouse[dims_except_timestamp].copy()
    # save the reason
    skus_with_no_sales_in_warehouse['reason'] = "All 0's"
    display(skus_with_no_sales_in_warehouse.head(2))
    print(tts.shape)
    display(tts.sample(5))
else:
    print("Didn't drop anything")
# -

# ## Step 16. Remove time series with end of life<a class="anchor" id="TTS_remove_end_of_life"></a>
#
# Check if time series have any data in last 6 months and more than 5 data points, since 5 data points is minimum for Amazon Forecast to generate forecasts.
print(tts.shape)
tts.dtypes

# +
# Define end of life = No sales in the last 6 months
# first get df of only last 6 months (6 months approximated as 6*30 days)
time_threshold = end_time - datetime.timedelta(6*30)

# check if sum of sales last 6 months is 0
tts_aux = tts[tts[timestamp] >= time_threshold].copy()
g = tts_aux.groupby(dims_except_timestamp).sum()
g.fillna(0, inplace=True)
skus_with_end_of_life = g[g[target_value] == 0].copy()

# drop extra columns for cleaner merge
skus_with_end_of_life.reset_index(inplace=True)
skus_with_end_of_life = skus_with_end_of_life.iloc[:, 0:1]
skus_with_end_of_life.drop_duplicates(inplace=True)
display (skus_with_end_of_life.head(2))

if skus_with_end_of_life.shape[0] > 0:
    # Anti-join: keep only rows NOT matching an end-of-life series.
    # https://stackoverflow.com/questions/32676027/how-to-do-df1-not-df2-dataframe-merge-in-pandas
    # .drop(columns=...) replaces the positional-axis form removed in pandas 2.0.
    tts_copy = tts.merge(skus_with_end_of_life, how='left', on=dims_except_timestamp, indicator=True) \
        .query("_merge=='left_only'") \
        .drop(columns='_merge')
    print(tts.shape, tts_copy.shape)
    display(tts_copy.sample(5))
else:
    print("No time series found with end of life.")

# +
# really drop the skus with end of life
if skus_with_end_of_life.shape[0] > 0:
    print(tts.shape, tts_copy.shape)
    tts = tts_copy.copy()
    del (tts_copy)
    display(tts.dtypes)
    # keep track of dropped dimensions and reason
    skus_with_end_of_life = skus_with_end_of_life[dims_except_timestamp].copy()
    skus_with_end_of_life['reason'] = "end of life"
    display(skus_with_end_of_life.head(2))
    print(tts.shape)
    display(tts.sample(5))
else:
    print("Didn't drop anything")
# -

# ## Step 17. Remove time series with fewer than 5 data points<a class="anchor" id="TTS_remove_too_few_data_points"></a>
#
# Minimum number of data points is 5 data points to make a forecast. <br>
#
# **Note: special consideration for cold-start or new product introductions**. For best results, do not include new items in your training data. However, do include new items in your inference data.
# Notice that there is a system constraint such that at least 5 data points need to exist for each time series. Therefore, for the item that has less than 5 observations, be sure that item's target_value is encoded as float and fill explicitly with "NaN". Also note: Cold-start forecasting only works if new items are tied to items with longer histories through Item Metadata.
#
# Run this to remove rows with <5 values (not explicitly "NaN") manually and save the list of time series with too few data points for your own reference. Otherwise if you skip this section, Forecast will automatically drop (silently) all time series with fewer than 5 data points, since that is too few to make a good forecast.

print(tts.shape)
tts.sample(5)

# <b>Replacing '0's with null</b>

replace_all_zeroes_with_null

# +
# # Null-value filling, if any
# special case: replace 0s with nulls
if (replace_all_zeroes_with_null):
    print(tts.shape)
    print(tts[target_value].describe())
    if target_to_integer:
        tts.loc[(tts[target_value]==0), target_value] = pd.NA
    else:
        tts.loc[(tts[target_value]==0), target_value] = np.nan
    print ()
    print(tts.shape)
    print(tts[target_value].describe())
else:
    # Direct column assignment: calling fillna(inplace=True) on the `tts.loc[:, col]`
    # slice operates on a temporary copy and silently leaves tts unchanged.
    tts[target_value] = tts[target_value].fillna(0)
    print("No null-filling required.")

# +
# check per time series if count of data points is at least 5
g = tts.groupby(dims_except_timestamp).count()
skus_with_too_few_sales = g[g[target_value] < 5].copy()

# drop extra columns for cleaner merge
skus_with_too_few_sales.reset_index(inplace=True)
skus_with_too_few_sales = skus_with_too_few_sales.iloc[:, 0:1]
skus_with_too_few_sales.drop_duplicates(inplace=True)
display (skus_with_too_few_sales.head(2))

if skus_with_too_few_sales.shape[0] > 0:
    # Anti-join: keep only rows NOT matching a too-sparse series.
    # https://stackoverflow.com/questions/32676027/how-to-do-df1-not-df2-dataframe-merge-in-pandas
    # .drop(columns=...) replaces the positional-axis form removed in pandas 2.0.
    tts_copy = tts.merge(skus_with_too_few_sales, how='left', on=dims_except_timestamp, indicator=True) \
        .query("_merge=='left_only'") \
        .drop(columns='_merge')
    print("TTS if you dropped items with too few data points")
    print(tts.shape, tts_copy.shape)
    display(tts_copy.sample(5))
else:
    print("No time series found with fewer than 5 datapoints.")

# +
# really drop skus with too few data points, only if more than a handful found
if skus_with_too_few_sales.shape[0] > 0:
    print(tts.shape, tts_copy.shape)
    tts = tts_copy.copy()
    del (tts_copy)
    # keep track of dropped dimensions and reason why dropped
    skus_with_too_few_sales = skus_with_too_few_sales[dims_except_timestamp].copy()
    skus_with_too_few_sales['reason'] = "Fewer than 5 datapoints"
    display(skus_with_too_few_sales.head(2))
    print(tts.shape)
    display(tts.sample(5))
else:
    print("Didn't drop anything")
# -

# <b> Keep track of dropped time series and reason why they were dropped. </b>

if skus_with_too_few_sales.shape[0] > 0:
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    dropped_dims = pd.concat([skus_with_too_few_sales,
                              skus_with_no_sales_in_warehouse,
                              skus_with_end_of_life])
    print(f"unique ts dropped = {dropped_dims.shape[0]}")
    print(f"unique ts fewer than 5 data points = {skus_with_too_few_sales.shape[0]}")
    print(f"unique ts with all 0s = {skus_with_no_sales_in_warehouse.shape[0]}")
    print(f"unique ts with end of life = {skus_with_end_of_life.shape[0]}")
    display(dropped_dims.reason.value_counts(dropna=False, normalize=True))
    display(dropped_dims.sample(1))
else:
    print("Didn't drop anything")

# +
# save list of dropped skus and reasons for reference and to check if data can be fixed
if skus_with_too_few_sales.shape[0] > 2:
    # save all the dropped dimensions fields
    local_file = "data/dropped_fields.csv"
    # Save merged file locally
    dropped_dims.to_csv(local_file, header=True, index=False)
    key = f"{prefix}/v{DATA_VERSION}/dropped_{EXPERIMENT_NAME}.csv"
    boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
# -

# <b>Optional - convert target_value to integer if this is last step for TTS.
# </b>

print(tts.shape)
display(tts.dtypes)
tts.head(5)

target_to_integer

# +
# Use the new pandas Integer type
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
# Final integer conversion pass (same strategy as the earlier optional cell):
# numpy int first, pandas nullable Int64 as fallback, float32 for object dtype.
if target_to_integer:
    try:
        tts[target_value] = tts[target_value].fillna(0).astype(int)
        print("Success! Converted to np.integer type")
    except Exception as e:
        print (e)
        print("Trying pandas nullable Integer type instead of numpy integer type...")
        try:
            tts[target_value] = tts[target_value].astype('Int64', errors='ignore')
            print("Success! Converted to nullable pd.integer type")
        except Exception as e:
            print (e)
elif tts[target_value].dtype == 'object':
    # convert to float
    tts[target_value] = tts[target_value].astype(np.float32)
elif tts[target_value].dtype != 'object':
    # do nothing — already numeric
    print("target_value is already a float")

print(tts.dtypes)
tts.sample(5)
# -

# <b> Optional - replace 0's with nulls </b>

replace_all_zeroes_with_null

# +
if replace_all_zeroes_with_null:
    tts.loc[(tts[target_value]==0), target_value] = pd.NA
    print(tts[target_value].describe())

print(tts.dtypes)
tts.sample(5)
# -

# one last check...
print(tts.shape)
tts.drop_duplicates(inplace=True)
print(tts.shape)
print(tts[timestamp].min())
print(tts[timestamp].max())

# check for nulls
print(tts.isnull().sum())
print(tts.dtypes)
tts.sample(5)

# Check input numbers of time series: original count must equal kept + dropped.
if skus_with_too_few_sales.shape[0] > 0:
    dropped = dropped_dims.groupby(dims_except_timestamp).first().shape[0]
    display(dropped)
    # check
    assert (train_df.groupby(dims_except_timestamp).first().shape[0] \
            == (tts.groupby(dims_except_timestamp).first().shape[0] + dropped))

# +
# Save tts to S3
local_file = "data/tts.csv"
# Save merged file locally (no header/index: Amazon Forecast import format)
tts.to_csv(local_file, header=False, index=False)
print(f"Saved TTS locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded TTS to s3://{bucket_name}/{key}")
# -

# ## Step 18.
# Optional - Assemble and save TTS_sparse, TTS_dense <a class="anchor" id="TTS-dense_sparse"></a>

dims_except_timestamp

# Split series into dense vs sparse by datapoint count (top quartile = dense).
dense, sparse = local_util.analysis.analyze_lengths_and_sparsity(
    tts,
    agg_freq="H",
    target_col=target_value,
    forecast_dims=dims_except_timestamp,
    dense_threshold_quantile=0.75,
)
print(f"Found {len(dense)} dense timeseries")
print("Datapoint count of densest items/timeseries:")
display(dense.head())
print(f"\nFound {len(sparse)} sparse timeseries")
print("Datapoint count of sparsest items/timeseries:")
display(sparse.tail())

# Spot check some sparse time-series:
local_util.plotting.make_plots(
    df,
    sparse.tail(3).index.to_frame(),
    target_value,
)

# +
# save sparse dimensions
local_file = "data/sparse_fields.csv"
# Save merged file locally
sparse.reset_index().to_csv(local_file, header=False, index=False)
print(f"Saved sparse dimensions locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/sparse_{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")

# +
# save dense dimensions
local_file = "data/dense_fields.csv"
# Save merged file locally
dense.reset_index().to_csv(local_file, header=False, index=False)
print(f"Saved dense dimensions locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/dense_{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")
# -

tts_dense = local_util.dataprep.select_by_df(tts, dense.index.to_frame()).copy()
print(tts_dense.shape, tts.shape)
tts_dense.sample(5)

# +
# Save tts_dense to S3
local_file = "data/tts_dense.csv"
# Save merged file locally
tts_dense.to_csv(local_file, header=False, index=False)
print(f"Saved dense-only TTS locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/tts_dense_{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")
# -

# Free up some memory:
del tts_dense
del dense
# We'll use `sparse` in an optional section later

# ## Step 19. Optional - Assemble and save tts.top, tts.slow <a class="anchor" id="TTS_top"></a>

use_location

# +
tts_top = local_util.dataprep.select_by_df(tts, top_movers.index.to_frame())
print(f"Selected {tts_top.shape} from {tts.shape}")
num_top_items = tts_top.groupby(dims_except_timestamp).first().shape[0]
print(f"Number top items = {num_top_items}")
tts_top.sample(5)

# +
# Save tts_top to S3
local_file = "data/tts_top.csv"
# Save merged file locally
tts_top.to_csv(local_file, header=False, index=False)
print(f"Saved top-moving TTS locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/tts_top_{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")

# +
tts_slow = local_util.dataprep.select_by_df(tts, slow_movers.index.to_frame())
print(f"Selected {tts_slow.shape} from {tts.shape}")
num_slow_items = tts_slow.groupby(dims_except_timestamp).first().shape[0]
print(f"Number slow items = {num_slow_items}")
tts_slow.sample(5)

# +
# Save tts_slow to S3
local_file = "data/tts_slow.csv"
# Save merged file locally
tts_slow.to_csv(local_file, header=False, index=False)
print(f"Saved slow-moving TTS locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/tts_slow_{EXPERIMENT_NAME}.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")
# -

# Free up some memory:
del tts_top
del tts_slow

# ## Step 20. Prepare and save RTS (if any) <a class="anchor" id="RTS"></a>
#
# Make sure RTS does not have any missing values, even if RTS extends into future. <br>
# Trick: create dataframe without any missing values using cross-join, faster than resample technique.
# <br>

# if you get memory allocation error in merges below, try overriding default value 0 to 1 for overcommit
# see https://www.kernel.org/doc/Documentation/vm/overcommit-accounting
# Next 2 commands - open new terminal and do these directly in terminal
# # !sudo -i
# # !echo 1 > /proc/sys/vm/overcommit_memory

# !cat /proc/sys/vm/overcommit_memory

# +
# Optionally, delete all local files to free up disk space
# # !rm data/*.csv

# +
# Regular timestamp spine covering the full history + forecast horizon.
all_times = pd.DataFrame({
    timestamp: pd.date_range(start=start_time, end=end_time, freq=FORECAST_FREQ),
})

# # Create other time-related columns if you need them in RTS
# all_times['year_week'] = all_times[timestamp].dt.year.astype(str) + '_' + all_times[timestamp].dt.month.astype(str)

print(f"Number of data points: {len(all_times)}")
print(f"Start date = {all_times[timestamp].min()}")
print(f"End date = {all_times[timestamp].max()}")
print(all_times.dtypes)
print(all_times.isna().sum())
print(all_times.shape)
all_times.sample(5)
# -

use_location

# +
# %%time
# create master template of all possible locations and items
try:
    print(f"found geolocation {geolocation}")
    items = df.groupby([item_id, geolocation])[[item_id, geolocation]].min()
except Exception:
    # No usable geolocation column (e.g. geolocation is None): fall back to items only.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    items = pd.DataFrame(list(df[item_id].unique()))
    items.columns = [item_id]
# print(items.head(2))

if use_location:
    locations = pd.DataFrame(list(df[location_id].unique()))
    locations.columns = [location_id]
    # print(locations.head(2))
    locations['key'] = 1
    items['key'] = 1
    # Do the cross-join.
    # .drop(columns=...) replaces the positional-axis form .drop("key", 1),
    # which was removed in pandas 2.0.
    master_records = locations.merge(items, on='key').drop(columns="key")
    print(master_records.shape, items.shape, locations.shape)
    num_locs = len(master_records[location_id].value_counts())
    print(f"num locations = {num_locs}")
else:
    master_records = items.copy()
    print(master_records.shape, items.shape)

# check you did the right thing
num_items = len(master_records[item_id].value_counts())
print(f"num items = {num_items}")
master_records.tail()

# CPU times: user 688 ms, sys: 66.7 ms, total: 755 ms
# Wall time: 752 ms
#
# +
# %%time
# cross-join to create master template of all possible locations and items and times
all_times['key'] = "1"
master_records['key'] = "1"
all_times.set_index('key', inplace=True)
master_records.set_index('key', inplace=True)

# Do the cross-join (outer merge on the shared constant key index)
print("doing the merge...")
full_history = master_records.merge(all_times, how="outer", left_index=True, right_index=True)
print("done w/ merge...")
full_history.reset_index(inplace=True, drop=True)

# make sure you don't have any nulls
print(full_history.shape)
print("checking nulls...")
print(full_history.isna().sum())
full_history.tail()

# CPU times: user 265 ms, sys: 27.6 ms, total: 293 ms
# Wall time: 290 ms
# -

# create small df of target_values - to merge later using dask
temp_target = df[forecast_dims + [target_value]].copy()
# add key for faster join: "timestamp-dim1-dim2..." composite string
temp_target["ts_key"] = temp_target[timestamp].astype(str).str.cat(temp_target[dims_except_timestamp], sep="-")
temp_target = temp_target.groupby('ts_key').sum()
# temp_target.drop(forecast_dims, inplace=True, axis=1)
# temp_target.set_index('ts_key', inplace=True)
print(temp_target.shape, df.shape)
display(temp_target.head(2))

# **Parallelization for faster merge**
#
# In the [local_util/distributed.py](local_util/distributed.py) utilities, we use [Dask](https://docs.dask.org/en/latest/) to support parallelizing the merge for large datasets where this may be faster.
#
# For more information, check out the documentation in the source code or just run `help(local_util.distributed)`

# +
# %%time
# CPU times: user 5.54 s, sys: 300 ms, total: 5.84 s
# Wall time: 5.91 s
# Build the same composite key on the dense template so the target can be
# left-joined onto every (time, dimension) combination.
full_history["ts_key"] = full_history[timestamp].astype(str).str.cat(
    full_history[dims_except_timestamp],
    sep="-",
)
print("full_history:")
display(full_history.head(2))

num_partitions = local_util.distributed.suggest_num_dask_partitions(
    dims_except_timestamp,
    num_items,
)
# NOTE(review): the merge below passes `num_items if use_location else 1` rather
# than the suggested num_partitions computed just above — confirm which is intended.
temp = local_util.distributed.merge(
    full_history,
    temp_target,
    num_dask_partitions=num_items if use_location else 1,
    # use_dask_if_available=False,  # Can try un-commenting this if you have memory issues
    how="left",
    left_on="ts_key",
    right_index=True,
).drop(columns=["ts_key"])

print("\nJOIN RESULT:")
print(type(temp), temp.shape)
display(temp.head(2))

print("Statistics should be unchanged after merge:")
display(pd.concat(
    { "Original temp_target": temp_target.describe(), "Merged": temp.describe()},
    axis=1,
    names=("DataFrame", "Column"),
))
print("Missing values after merge:")
print(temp.isna().sum())
# -

# Careful!!
# Really replace full_history with merged values, if the merge results above look OK
full_history = temp
del temp, temp_target
full_history.head(2)

# +
####
# EDIT THIS FOR YOUR DATA
####
# Create other time-related columns if you need them in RTS

# Candidate variables for weekly data
# full_history['month'] = full_history[timestamp].dt.month.astype(str)
# full_history['year'] = full_history[timestamp].dt.year.astype(str)
# full_history['quarter'] = full_history[timestamp].dt.quarter.astype(str)
# full_history['year_month'] = full_history['year'] + '_' + full_history['month']
# full_history['year_quarter'] = full_history['year'] + '_' + full_history['quarter']

# Candidate variables for hourly data
full_history['day_of_week'] = full_history[timestamp].dt.day_name().astype(str)
full_history['hour_of_day'] = full_history[timestamp].dt.hour.astype(str)
full_history['day_hour_name'] = full_history['day_of_week'] + "_" + full_history['hour_of_day']
# dayofweek >= 5 means Saturday/Sunday
full_history['weekend_flag'] = full_history[timestamp].dt.dayofweek
full_history['weekend_flag'] = (full_history['weekend_flag'] >= 5).astype(int)
full_history['is_sun_mon'] = 0
full_history.loc[((full_history.day_of_week=="Sunday") | (full_history.day_of_week=="Monday")), 'is_sun_mon'] = 1
print(full_history.sample(5))

# +
# Example - create feature from target_value that is sometimes useful
# # calculate mean sales per item per year
# TODO: add normalization here
# temp_year_item = train_df[['year', item_id, target_value]].copy()
# temp_year_item.year = temp_year_item.year.astype(str)
# temp_year_item = temp_year_item.groupby(['year', item_id]).mean()
# temp_year_item.reset_index(inplace=True)
# temp_year_item.rename(columns={target_value:"count_year_item"}, inplace=True)
# print(temp_year_item.dtypes)
# temp_year_item.sample(2)

# +
# # merge in year-item trend
# temp2 = full_history.copy()
# # temp.drop("count_day_loc_item", inplace=True, axis=1)
# print(temp2.shape)
# temp = temp2.merge(temp_year_item, how="left",
#     on=["year", item_id])
# print(temp.shape, temp_year_item.shape)
# # check nulls
# print(temp.isna().sum())
# temp.sample(5)

# +
# # Careful!!
# # Really replace full_history with merged values
# full_history = temp.copy()
# full_history.head(2)
# -

# zoom-in time slice so you can see patterns
df_plot = local_util.dataprep.select_by_df(full_history, random_series)
df_plot = df_plot.loc[
    (df_plot[timestamp] > "2020-01-10")
    & (df_plot[timestamp] < end_time_train)
]
print(df_plot.shape, full_history.shape)
df_plot = df_plot.groupby([timestamp]).sum()
df_plot.reset_index(inplace=True)
df_plot.sample(3)

#check: target_value distribution in full dataframe looks same as original
df_plot[target_value].hist(bins=100)

# +
# EXAMPLE HOURLY RTS
# Visualize candidate RTS variables: target on left axis, flag on right axis
plt.figure(figsize=(15, 8))
ax = plt.gca()
df_plot.plot(x=timestamp, y=target_value, ax=ax);
ax2 = ax.twinx()
df_plot.plot(x=timestamp, y='weekend_flag', color='red', alpha=0.3, ax=ax2);

# +
# EXAMPLE HOURLY RTS
# Visualize candidate RTS variables is_sun_mon
plt.figure(figsize=(15, 8))
ax = plt.gca()
df_plot.plot(x=timestamp, y=target_value, ax=ax);
ax2 = ax.twinx()
df_plot.plot(x=timestamp, y='is_sun_mon', color='red', alpha=0.3, ax=ax2);
# -

# It looks like lowest taxis rides are a combination of day and hour that seems to matter, not just day of week.

full_history.head(1)

geolocation

# +
# EXAMPLE HOURLY RTS
# Assemble RTS - include whatever columns you finally decide
if geolocation is not None:
    rts = full_history[forecast_dims + [geolocation] + ['day_hour_name']].copy()
else:
    rts = full_history[forecast_dims + ['day_hour_name']].copy()

print(rts.shape)
print(rts.isnull().sum())
print(f"rts start: {rts[timestamp].min()}")
print(f"rts end: {rts[timestamp].max()}")
rts.sample(5)

# +
# Save rts to S3
local_file = "data/rts.csv"
# Save merged file locally (no header/index: Amazon Forecast import format)
rts.to_csv(local_file, header=False, index=False)
print(f"Saved RTS locally to {local_file}")

key = f"{prefix}/v{DATA_VERSION}/{EXPERIMENT_NAME}.related.csv"
boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file)
print(f"Uploaded to s3://{bucket_name}/{key}")
# -

del all_times
del master_records

# ## Step 21. Classify Time Series <a class="anchor" id="Classify"></a>

# Using definitions given here: https://frepple.com/blog/demand-classification/
# Original article: https://robjhyndman.com/papers/idcat.pdf <br>
#
# Idea: Based on demand patterns, time series can be classified into one of 4 classes: Smooth, Intermittent, Erratic, or Lumpy. If you have more than 1 class of time series in your data, this might suggest more than 1 model for your time series predictions.
#
# Rules:
# <ol>
# <li><b>Smooth</b> demand (ADI < 1.32 and CV² < 0.49). Regular in time and in quantity. It is therefore easy to forecast and you won’t have trouble reaching a low forecasting error level. Suggested algorithm: <b>Traditional statistical such as Exponential Smoothing, Prophet, or ARIMA.</b></li>
# <li><b>Intermittent demand</b> (ADI >= 1.32 and CV² < 0.49). The demand history shows very little variation in demand quantity but a high variation in the interval between two demands. Though specific forecasting methods tackle intermittent demands, the forecast error margin is considerably higher.
# Suggested algorithm: <b>Croston smoothing or some newer research approach coupled with adjusted error metric over longer time period.</b></li>
# <li><b>Erratic</b> demand (ADI < 1.32 and CV² >= 0.49). The demand has regular occurrences in time with high quantity variations. Your forecast accuracy remains shaky. Suggested algorithm: <b>Deep Learning</b></li>
# <li><b>Lumpy</b> demand (ADI >= 1.32 and CV² >= 0.49). The demand is characterized by a large variation in quantity and in time. It is actually impossible to produce a reliable forecast, no matter which forecasting tools you use. This particular type of demand pattern is unforecastable. Suggested algorithm: <b>bootstrap</b></li>
# </ol>

# +
# !pip install squarify
import squarify

# SCRATCH - choose 4 colors
print("\nTHIS IS JUST A TEST TO CHECK COLOR CHOICES...")
colors = colorblind6[0:4]
sizes = [40, 30, 5, 25]
squarify.plot(sizes, color=colors)
plt.show();

# +
# %%time
# CPU times: user 2.54 s, sys: 209 ms, total: 2.75 s
# Wall time: 2.84 s
# Annotate every row with ADI / CV² based demand-class labels (ts_type).
full_history = local_util.analysis.classify_timeseries_set(
    full_history,
    time_col=timestamp,
    item_id_col=item_id,
    other_dimension_cols=[d for d in dims_except_timestamp if d != item_id],
    target_col=target_value,
    num_dask_partitions=num_partitions,
    # In case pandas-based method is too slow and you have sufficient memory available to
    # parallelize, set the below `True` to use Dask instead:
    use_dask_if_available=False,
)
full_history.sample(3)

# +
# Count the *series* (not data points) in each ts_type:
type_counts = full_history.groupby(
    dims_except_timestamp + ([geolocation] if geolocation else [])
)["ts_type"].first().value_counts()

# Plot the number of timeseries by type:
display(pd.DataFrame({
    "Count": type_counts,
    "Percentage": 100 * type_counts / type_counts.sum()
}))
squarify.plot(
    # Series.items() replaces iteritems(), which was removed in pandas 2.0.
    label=[f"{i} = \n{v / sum(type_counts):.2%}" for i, v in type_counts.items()],
    sizes=type_counts,
    color=colors,
    text_kwargs={ "fontsize": 14 },
)

# +
# Restore previous full_history:
#
full_history.drop(["ADI", "CV_square", "ts_type"], inplace=True, axis=1) # full_history.head(1) # - n_sample = min(type_counts.get("erratic", 0), 5) if n_sample > 0: print("SHOWING SAMPLE ERRATIC SERIES") erratic_series = full_history.loc[ full_history["ts_type"] == "erratic" ][dims_except_timestamp].drop_duplicates().sample(n_sample) display(erratic_series) local_util.plotting.make_plots( tts, erratic_series, target_value_col=target_value, ) else: print("No erratic time series found.") n_sample = min(type_counts.get("smooth", 0), 5) if n_sample > 0: print("SHOWING SAMPLE SMOOTH SERIES") smooth_series = full_history.loc[ full_history["ts_type"] == "smooth" ][dims_except_timestamp].drop_duplicates().sample(n_sample) display(smooth_series) local_util.plotting.make_plots( tts, smooth_series, target_value_col=target_value, ) else: print("No smooth time series found.") n_sample = min(type_counts.get("intermittent", 0), 5) if n_sample > 0: print("SHOWING SAMPLE INTERMITTENT SERIES") intermittent_series = full_history.loc[ full_history["ts_type"] == "intermittent" ][dims_except_timestamp].drop_duplicates().sample(n_sample) display(intermittent_series) local_util.plotting.make_plots( tts, intermittent_series, target_value_col=target_value, ) else: print("No intermittent time series found.") n_sample = min(type_counts.get("lumpy", 0), 5) if n_sample > 0: print("SHOWING LUMPY INTERMITTENT SERIES") lumpy_series = full_history.loc[ full_history["ts_type"] == "lumpy" ][dims_except_timestamp].drop_duplicates().sample(n_sample) display(lumpy_series) local_util.plotting.make_plots( tts, lumpy_series, target_value_col=target_value, ) else: print("No lumpy time series found.") # It looks like only "erratic" time series are worth predicting. All the rest look either suspiciously fake or too few data points. # ## Step 22. 
Optional - Assemble and save TTS_smooth, TTS_erratic, TTS_intermittent, TTS_lumpy <a class="anchor" id="TTS_classes"></a> # # for t in type_counts.index: print(f"\n#### Subset '{t}' ####") subset = full_history.loc[ full_history["ts_type"] == t ][dims_except_timestamp].drop_duplicates() num_subset_ts = len(subset) tts_subset = local_util.dataprep.select_by_df(tts, subset) num_subset_items = tts_subset[item_id].nunique() print(f"Selected {tts_subset.shape} from {tts.shape}") print(f"{num_subset_ts} time-series, {num_subset_items} unique item IDs") # Save file locally and upload to S3: local_file = f"data/tts_{t}.csv" tts_subset.to_csv(local_file, header=False, index=False) print(f"Saved {t} subset to {local_file}") key = f"{prefix}/v{DATA_VERSION}/tts_{t}_{EXPERIMENT_NAME}.csv" boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file) print(f"Uploaded to s3://{bucket_name}/{key}") # ## Step 23. Assemble and save metadata (if any) <a class="anchor" id="IM"></a> # Identify metadata columns im = df[dims_except_timestamp + ['pickup_borough']].copy() im = im.groupby(dims_except_timestamp).first() im.reset_index(inplace=True) # check nulls display(im.isnull().sum()) im.sample(5) # + # Additional metadata created by binning just item target_value is sometimes useful. 
# aggregate sales by item synthetic = df.copy() synthetic = (synthetic.groupby(item_id).agg({ target_value: ["max"] })) synthetic = synthetic.reset_index() synthetic.sample(5) #bin data into 4 categories cat_scales = ["Cat_{}".format(i) for i in range(1,5)] synthetic['item_cat_by_max'] = list(pd.cut(synthetic[target_value]['max'].values, 4, labels=cat_scales)) synthetic.drop(target_value, axis=1, inplace=True) synthetic.columns = synthetic.columns.get_level_values(0) print(synthetic.shape) print(synthetic.dtypes) print(synthetic.columns) display(synthetic.sample(5)) print(synthetic.item_cat_by_max.value_counts(dropna=False)) # merge synthetic features im = im.merge(synthetic, how="left", on=[item_id]) print(im.shape, synthetic.shape) im.head() # + # check metadata so far print(im.shape) if im.shape[0] < 50: display(im) else: display(im.head()) # check cardinality of metadata columns im.describe() # - sparse.head() # + # merge in sparse or not column im['is_sparse'] = 0 im.loc[(im[item_id].isin(list(sparse.index.to_frame()[item_id].unique()))), 'is_sparse'] = 1 print(im.is_sparse.value_counts(dropna=False)) im.sample(5) # + # merge in top-moving or not column im['top_moving'] = 0 im.loc[(im[item_id].isin(list(top_movers.index.to_frame()[item_id].unique()))), 'top_moving'] = 1 print(im.top_moving.value_counts(dropna=False)) im.sample(5) # + # merge in time series categories column categories_df = full_history.groupby([item_id])[item_id, 'ts_type'].first() categories_df.reset_index(inplace=True, drop=True) # categories_df.head(2) im = im.merge(categories_df, how="left", on=[item_id]) print(im.ts_type.value_counts(dropna=False)) im.sample(5) # + # Assemble metadata just columns you want im = im.iloc[:, 0:3].groupby(item_id).max() im.reset_index(inplace=True) print(im.shape) print("checking nulls..") print(im.isnull().sum()) im.sample(5) # + # Save im to S3 local_file = "data/metadata.csv" # Save merged file locally im.to_csv(local_file, header=False, index=False) 
print(f"Saved metadata to {local_file}") key = f"{prefix}/v{DATA_VERSION}/{EXPERIMENT_NAME}.metadata.csv" boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file(local_file) print(f"Uploaded to s3://{bucket_name}/{key}")
workshops/pre_POC_workshop/1.Getting_Data_Ready_nytaxi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Nuclei Image Segmentation Tutorial # In this tutorial, we will implement a UNet to solve Kaggle's [2018 Data Science Bowl Competition](https://www.kaggle.com/c/data-science-bowl-2018). The challenge asks participants to find the location of nuclei from images of cells. The source of this tutorial and instructions to reproduce this analysis can be found at the [thomasjpfan/ml-journal repo](https://github.com/thomasjpfan/ml-journal/tree/master/notebooks/nuclei-cell-image-segmentation). # + # <!-- collapse=None --> from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch from torch.optim import SGD import torch.nn as nn from torch.nn.functional import binary_cross_entropy_with_logits from sklearn.model_selection import train_test_split from skorch.helper import ( filter_requires_grad, filtered_optimizer, predefined_split, ) from skorch.net import NeuralNet from skorch.callbacks import LRScheduler from skorch.callbacks import Checkpoint from skorch.callbacks.lr_scheduler import CyclicLR from dataset import CellsDataset, PatchedDataset from model import UNet from utils import plot_cells, plot_masks, freeze_layer, plot_mask_cells torch.manual_seed(0); # - # ## Exploring the Data # We can now define the datasets training and validiation datasets: # + samples_dirs = list(d for d in Path('data/cells/').iterdir() if d.is_dir()) train_dirs, valid_dirs = train_test_split( samples_dirs, test_size=0.2, random_state=42) train_cell_ds = CellsDataset(train_dirs) valid_cell_ds = CellsDataset(valid_dirs) # - # Overall the cell images come in different sizes, and fall in three different categories: # <!-- collapse=None --> type1_id, type2_id, type3_id = 13, 6, 25 plot_cells(valid_cell_ds[type1_id][0], valid_cell_ds[type2_id][0], 
valid_cell_ds[type3_id][0].crop((200, 200, 500, 500))) # Most of the data is of Type 2. Training a single model to be able to find the nuclei for all types may not be the best option, but we will give it a try! For reference here are the corresponding masks for the above three cases: # <!-- collapse=None --> plot_masks(valid_cell_ds[type1_id][1], valid_cell_ds[type2_id][1], valid_cell_ds[type3_id][1].crop((200, 200, 500, 500))) # In order to train a neutral net, each image we feed in must be the same size. For our dataset, we break our images up into 256x256 patches. The UNet architecture typically has a hard time dealing with objects on the edge of an image. In order to deal with this issue, we pad our images by 16 using reflection. The image augmentation is handled by `PatchedDataset`. Its implementation can be found in `dataset.py`. train_ds = PatchedDataset( train_cell_ds, patch_size=(256, 256), padding=16, random_flips=True) val_ds = PatchedDataset( valid_cell_ds, patch_size=(256, 256), padding=16, random_flips=False) # ## Defining the Module # Now we define the UNet module with the pretrained `VGG16_bn` as a feature encoder. The details of this module can be found in `model.py`: module = UNet(pretrained=True) # The features generated by `VGG16_bn` are prefixed with `conv`. These weights will be frozen, which restricts training to only our decoder layers. freeze_layer(module, "conv") # Since we have froze some layers, we configure the optimizer to filter out parameters that are frozen: optimizer = filtered_optimizer(SGD, filter_requires_grad) # ## Learning Rate Scheduler # We use a Cyclic Learning Rate scheduler to train our neutral network. cyclicLR = LRScheduler(policy=CyclicLR, base_lr=0.002, max_lr=0.2, step_size_up=540, step_size_down=540) # **Why is step_size_up 540?** # # Since we are using a batch size of 32, each epoch will have about 54 (`len(train_ds)//32`) training iterations. 
# We are also setting `max_epochs` to 20, which gives a total of 1080 (`max_epochs*54`) training iterations. We construct our Cyclic Learning Rate policy to peak at the 10th epoch by setting `step_size_up` to 540. This can be shown with a plot of the learning rate:

_, ax = plt.subplots(figsize=(10, 5))
ax.set_title('Cyclic Learning Rate Scheduler')
ax.set_xlabel('Training iteration')
ax.set_ylabel('Learning Rate')
ax.plot(cyclicLR.simulate(1080, 0.002));

# ## Custom Loss Module

# Since we have padded our images and mask, the loss function will need to ignore the padding when calculating the binary log loss. We define a `BCEWithLogitsLossPadding` to filter out the padding:

class BCEWithLogitsLossPadding(nn.Module):
    """Binary cross-entropy with logits that ignores a padded image border.

    The UNet inputs were reflection-padded, so the outermost ``padding``
    pixels on every side are cropped from both the prediction and the mask
    before the loss is computed.

    Parameters
    ----------
    padding : int
        Number of border pixels to exclude on each side (0 keeps the full
        image).
    """

    def __init__(self, padding=16):
        super().__init__()
        self.padding = padding

    def forward(self, input, target):
        # An end index of ``None`` keeps the full extent when padding == 0;
        # ``[0:-0]`` would otherwise select an empty slice.
        end = -self.padding if self.padding else None
        # FIX: use the out-of-place ``squeeze`` instead of the in-place
        # ``squeeze_`` so that computing the loss does not mutate the
        # caller's prediction/target tensors as a side effect.
        input = input.squeeze(dim=1)[:, self.padding:end, self.padding:end]
        target = target.squeeze(dim=1)[:, self.padding:end, self.padding:end]
        return binary_cross_entropy_with_logits(input, target)

# ## Training Skorch NeuralNet

# Now we can define the `skorch` NeuralNet to train our UNet!

net = NeuralNet(
    module,
    criterion=BCEWithLogitsLossPadding,
    criterion__padding=16,
    batch_size=32,
    max_epochs=20,
    optimizer=optimizer,
    optimizer__momentum=0.9,
    iterator_train__shuffle=True,
    iterator_train__num_workers=4,
    iterator_valid__shuffle=False,
    iterator_valid__num_workers=4,
    train_split=predefined_split(val_ds),
    callbacks=[('cycleLR', cyclicLR),
               ('checkpoint', Checkpoint(f_params='best_params.pt'))],
    device='cuda'
)

# Let's highlight some parameters in our `NeuralNet`:
#
# 1. `criterion__padding=16` - Passes the padding to our `BCEWithLogitsLossPadding` initializer.
# 2. `train_split=predefined_split(val_ds)` - Sets the `val_ds` to be the validation set during training.
# 3. `callbacks=[(..., Checkpoint(f_params='best_params.pt'))]` - Saves the best parameters to `best_params.pt`.
# # Next we train our UNet with the training dataset: net.fit(train_ds); # Before we evaluate our model, we load the best weights into the `net` object: net.load_params('best_params.pt') # ## Evaluating our model # Now that we trained our model, lets see how we did with the three types presented at the beginning of this tutorial. Since our UNet module, is designed to output logits, we must convert these values to probabilities: val_masks = net.predict(val_ds).squeeze(1) val_prob_masks = 1/(1 + np.exp(-val_masks)) # We plot the predicted mask with its corresponding true mask and original image: # + # <!-- collapse=None --> mask_cells = [] for case_id in [45, 8, 81]: cell, mask = val_ds[case_id] mask_cells.append((mask, val_prob_masks[case_id], cell)) plot_mask_cells(mask_cells) # - # Our UNet is able to predict the location of the nuclei for all three types of cell images! # ## Whats next? # In this tutorial, we used `skorch` to train a UNet to predict the location of nuclei in an image. There are still areas that can be improved with our solution: # # 1. Since there are three types of images in our dataset, we can improve our results by having three different UNet models for each of the three types. # 2. We can use traditional image processing to fill in the holes that our UNet produced. # 3. Our loss function can include a loss analogous to the compeititons metric of intersection over union.
notebooks/nuclei-cell-image-segmentation/.ipynb_checkpoints/notebook-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/tramyynt/Techlabs/blob/master/techlabs_notebook_titanic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="5IKYN_qOcTPG" # # Visualization and cleaning data with TITANIC DATASET # + [markdown] id="7-7Fb2uhcTPI" # ## Introduction # Congratulations! So far you have learned the basics of Python, Pandas and visualization techniques to analyse and to clean data. These are valuable skills because 80% of a data scientist's job is cleaning data and getting insights through visualizations. Only 20% are building and tuning machine learning models (which you will learn more about at the end of your curriculum). # # To see how powerful visualizations can be and how enthusiastic one can become about plots, check out the Ted talk "The best stats you've ever seen" by <NAME>: # + id="Kb8axoJxcTPJ" from IPython.display import YouTubeVideo YouTubeVideo('usdJgEwMinM', width=600, height=400) # + [markdown] id="AUrnQfPPcTPQ" # ## Learning Goals # # - Learn how to clean data # - Learn how to investigate your data through visualizations # - Explore what representations exist in your dataset # - Create new data/features using the data at hand # # Everything you will need to do will be inside of this notebook, and I've marked which cells you will need to edit by saying <b><font color='red'>"TODO! COMPLETE THIS SECTION!"</font></b>. # + [markdown] id="x4t4S6yxcTPR" # ## Personal experience # One interesting plot I saw recently is the one below. The graph shows the babies per woman and child survival rates for all contries. 
Each bubble on the graph represents a country, with the size of the bubble showing the size of the country's population. # ![](https://drive.google.com/uc?export=view&id=1mK4x7eJEP_PbtbNyu0lUoMO0f6JhzvPh) # The picture is from gapminder.org and shows that our assumption about the world being divided into developing and developed contries is overdue. Contries could be divided into the two categories in the past (1965) but the world has changed completely. The developing box is nearly empty and 85% of all countries are inside the box that used to be named developed. # # What interesting plot did you come across lately? Feel free to copy-paste it in our data science channel on Slack. I would love to hear from you. # + [markdown] id="b43qOZylcTPR" # Now let's come back to the notebook and the excersices. In this notebook we will explore the Titanic dataset to practice your visualization and pandas/cleaning skills more so that you also can create interesting visualizations. # + [markdown] id="NCSSHIMWcTPS" # ## Practice with TITANIC dataset # # Before you start make sure you read the README pdf file inside the unzipped folder. # # ### import libraries # We will use these packages to help us manipulate the data and visualize the features/labels/columns. Numpy and Pandas are helpful for manipulating the dataframe and its columns and cells. Up until now you have only worked with matplotlib in your curriculum. Here we will use matplotlib along with Seaborn to visualize our data. # # Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics. And it generates prettier plots than matplotlib. # # It is ok if you don't know how to plot data with Seaborn. That's what google is for. It is standard procedure to google while coding. I also often forget how to use certain functions and just google them. 
# + id="3QOAscN3cTPT" import warnings warnings.filterwarnings('ignore') import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np # + [markdown] id="ySw-IK3tcTPW" # ### first impression and informations to the dataset # # With Pandas, we can load the training data. You should always take a look at our data table to see the values that you'll be working with. You can use the shape and head function to look at some sample data. We can also look at its keys and column names. # + colab={"base_uri": "https://localhost:8080/"} id="s-i9uzplcTPX" outputId="9ff1bf64-7306-4344-dd58-1995f205183f" # Import the csv file using pandas # from google.colab import files # uploaded = files.upload() # import io # train = pd.read_csv(io.StringIO(uploaded['train.csv'].decode('utf-8'))) from google.colab import drive drive.mount('/content/gdrive') train = pd.read_csv('/content/gdrive/MyDrive/Techlabs/train.csv') # + colab={"base_uri": "https://localhost:8080/"} id="P3sCowudcTPd" outputId="4adf45dd-8ddb-4d23-f4db-62e6f1b2f27c" # display the shape of the dataframe train.shape # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Jvq7NnidcTPj" outputId="bccbbe95-a515-4f21-a7e8-7604c0a90e5e" # use head and display the first 5 rows of your dataframe train.head(5) # + [markdown] id="W_BJc9MkcTPn" # Short description of the data # 1. The Survived variable is our outcome or dependent variable. It is a binary nominal datatype of 1 for survived and 0 for did not survive. All other variables are potential predictors or independent variables. What's important to note, more predictor variables do not make a better model, but the right variables. # 2. The PassengerID and Ticket variables are assumed to be random unique identifiers, that have no impact on the outcome variable. Thus, they will be excluded from analysis. # 3. 
The Pclass variable is an ordinal datatype for the ticket class, a proxy for socio-economic status (SES), representing 1 = upper class, 2 = middle class, and 3 = lower class. # 4. The Name variable is a nominal datatype. Besides the name of the passenger it also holds the information about the title, like master and so on. # 5. The Sex stores the information about the passengers sex as string (female or male) # 6. Embarked variables are a nominal datatype and hold the information about the port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton) # 6. The Age and Fare variable are continuous quantitative datatypes. # 7. The SibSp represents number of related siblings/spouse aboard and Parch represents number of related parents/children aboard. Both are discrete quantitative datatypes. # 8. The Cabin represents the cabin number. # # Let's check how many entries are nan # + colab={"base_uri": "https://localhost:8080/"} id="vbEvq7QncTPo" outputId="390105b1-47b5-4984-8b6d-e8c72da2d51d" # use the isnull method to find out which columns contain null values train.isnull().sum() # + [markdown] id="RaB_u3XLcTPr" # There are NaN values in our data set in the age column. Furthermore, the Cabin column has a lot of missing values as well. These NaN values will get in the way of training a machine learning model. You need to fill in the NaN values with replacement values in order for the model to have a complete prediction for every row in the data set. This process is known as imputation and you will practice how to replace the missing data as well as get a feeling what story the data is telling you by using visualizations. # # Let's start with the age columns. # + [markdown] id="pOI68ljTcTPs" # ### visualize and clean data: Age # + [markdown] id="GOzZ0G0ScTPs" # Let's look how the age is dristributed. First create a copy of the dataframe to avoid maniplulating the main dataframe while doing first investigations. 
# # Then plot the distribution of the age column by using seaborns displot function (you can google "seaborn distribution" to see how the function is used). # + colab={"base_uri": "https://localhost:8080/", "height": 316} id="s-xLds-AcTPu" outputId="f2fe9a55-5089-4a82-e773-9cb5cdf5d2a2" # make a copy of the dataframe copy = train.copy() # drop all nan values of the age column using dropna. # don't forget to set the inplace parameter to True copy.dropna(subset= ['Age'], inplace= True) count_nan = copy['Age'].isnull().sum() print ('Count of NaN: ' + str(count_nan)) # plot the distribution of the age by using seaborns displot function # input the Age column of your copy dataframe. # And set the bins parameter to 15 sns.distplot(a= copy['Age'],bins=15, hist=True) # + [markdown] id="BpPIo5JAcTPx" # The distribution does not look crazy skewed. thus let's try using the mean value of the age to fill all nan values (if you want to use median instead feel free to try it) and display again the distribution: # + colab={"base_uri": "https://localhost:8080/", "height": 351} id="mW7dhCK2cTP0" outputId="2a283033-22bf-4a2d-dffa-735e92409aef" # make a copy of the dataframe copy = train.copy() # fill NaN values in age column with mean value. use the .fillna method. # Don't forget to set the inplace parameter to True copy.fillna(value = copy['Age'].mean(), inplace= True) # we will create a subplot with 2 figures to look how the age distribution changed fig, ax = plt.subplots(1, 2, figsize=(15, 5)) # this will plot the age distribution without nan values. # the ax parameter is set to ax[0] so that the subplot knows where to place the plot sns.distplot(train.dropna().Age, bins=15, ax=ax[0]) # plot here the age distribution in the copy dataframe where you # replaced all nan values with the mean age value. # don't forget to set the ax parameter to ax[1] sns.distplot(copy.Age, bins=15, ax=ax[1]) # + [markdown] id="xtxDJb43cTP3" # now there is a very large peak. 
# maybe just the mean value is not a very good idea. another idea is to generate random numbers between (mean - std) and (mean + std). For this you will generate an age_generator function below.

# + id="zqNDF4d9cTP4"
def age_generator(df):
    """Fill the NaN entries of ``df['Age']`` with random integer ages.

    Replacement ages are drawn uniformly from
    ``[mean - std, mean + std)`` of the observed (non-NaN) ages.
    The dataframe is modified in place and also returned.
    """
    # save the mean value of the age column here
    age_avg = df['Age'].mean()

    # save the standard deviation of the age column here
    age_std = df['Age'].std()

    # we need the number of null values to know how much random ages to generate
    # use the isnull and sum functions
    age_null_count = df['Age'].isnull().sum()

    # this will generate a list of random numbers between (mean - std) and (mean + std)
    age_null_random_list = np.random.randint(
        age_avg - age_std, age_avg + age_std, size=age_null_count)

    # select all nan ages and set them equal to the list of random ages.
    # FIX: assign through .loc instead of chained indexing
    # (df['Age'][np.isnan(df['Age'])] = ...), which raises
    # SettingWithCopyWarning and is not guaranteed to modify df at all.
    df.loc[df['Age'].isnull(), 'Age'] = age_null_random_list
    return df


# + [markdown] id="5XVIS5SUcTP7"
# After successfully implementing the age_generator you can now use it.

# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="LTFT2KHmcTP8" outputId="c3177833-0c02-4d56-b5a5-43d5cb48d0de"
# again let's first create a copy of our dataframe
copy = train.copy()

# apply the age_generator function to the copied dataframe
copy = age_generator(copy)

# and here we will plot again the distribution of the age from the raw dataframe
# and the dataframe where you replaced all nan values
fig, ax = plt.subplots(1, 2, figsize=(15, 5))

# plot the age column of the train data.
# don't forget to drop the nan values otherwise an error will occur.
# set the bins and ax like in the plot above
sns.distplot(train.dropna().Age, bins=15, ax=ax[0])

# plot the age column of the copy data.
# you don't have to drop the nan values since you replaced them
# with the age_generator.
# set the bins and ax like in the plot above
sns.distplot(copy.Age, bins = 15, ax = ax[1])

# + [markdown] id="qUjvg5kScTP_"
# now the distribution is slightly skewed right, but this seems better than just using the mean value.
So let's use the random age distribtion as cleaning method # + colab={"base_uri": "https://localhost:8080/"} id="LS9d0FlmcTQC" outputId="472fce3b-6adb-4acd-e028-f0617af03efe" # generate random age between (mean - std) and (mean + std) # this time we will apply it onto our raw dataframe since # we want to keep this change train = age_generator(train) # the age column should be clean now # display the sum of nan values for each column # use the isnull and sum functions train.isnull().sum() # + [markdown] id="-aBp2WtdcTQJ" # All NaN values should be removed from the age column. Great work. The only nan values left are in the Cabin column. But we will focus on the Cabin column later. # # Now let's get a feeling if the age column alone is a good feature to predict who survived. What do you think? Will it be a good predictor? # # We can analyze this for example with two different plots: a boxplot or as before by using a distribution plot. Let's start with a box plot # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="cG0ZI-t_cTQL" outputId="d167e989-adc9-4f3c-b02c-a1139122c0b8" # call the boxplot function from seaborn and set the x parameter # to the "Survived" column and the y parameter to the "Age" column. # also set the data parameter to the train dataset. # feel free to google seaborn boxplot if you are not sure # how to use it sns.boxplot(x= 'Survived', y = 'Age', data = train) # + [markdown] id="G2DSd8FhcTQQ" # We see that the age for both survived and not survived has nearly the same range. Thus the age alone won't be a good predictor if we want to know who survived and who didn't. # # As promised we can also use the distribution plot to visualize that the age of survived passengers and not survived passengers have a significant overlap. 
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="uE3Wxs5-cTQR" outputId="3deac105-3055-4ded-d9af-84b5db5e722d" # slice/filter the train dataframe to plot first only the survived passengers # and second only the not survived passengers sns.distplot(train.loc[train['Survived'] ==1 , 'Age'], color='black', label='Survived') sns.distplot(train.loc[train['Survived'] == 0, 'Age'], color='blue', label='not Survived') plt.legend() # + [markdown] id="MVdYbvSXcTQT" # Here you should see that the distribution of survived passengers has two peaks. The big one is very close to the peak of not survived passengers, but the second smaller peak is in the range of small children. This means a lot more small children survived the tragic accident. # + [markdown] id="JJJPZBHecTQU" # ### visualize and clean data: Sex # + [markdown] id="7SbiGN8ucTQV" # Let's now anaylse the sex of the passengers. What do you think? Did more women or men survive the tragic titanc accident? # # Let's find out by plotting a barplot of the Sex column. # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="i0yYOsoDcTQX" outputId="355778ff-2278-4eeb-9017-587dc6dcac8a" # use the barplot function of seaborn. # enter the train dataset as data and plot Sex (x) # against Survived (y) sns.barplot(x= 'Sex', y='Survived', data = train) # + [markdown] id="y7VHZ5aMcTQZ" # The plot shows us that more female passengers survived than male passengers. # # Until now we have analysed the age and sex information. We saw that the age alone was not an important feature to predict who survived and who didn't, but the sex is. Do you think there is an age difference between survived and not survived inside each sex? 
# # Use boxplots to find out: # + colab={"base_uri": "https://localhost:8080/", "height": 368} id="FZ3Au0M_cTQb" outputId="35444579-06fa-4b41-ef8d-38d2c537d6bd" # this will create a subplot figure with two plot side by side fig, ax = plt.subplots(1, 2, figsize=(15, 5)) # first plot the data for females by # slicing/filtering the train data with 'female' sns.boxplot(x="Survived", y="Age", data=train[train.Sex == 'female'], ax=ax[0]) # second plot the data for males by slicing the train data with 'male' # don't forget to set the ax to ax[1] sns.boxplot(x="Survived", y="Age", data=train[train.Sex == 'male'], ax=ax[1]) plt.legend() # + [markdown] id="KDcCJd0HcTQd" # You should now see that both gender groups have an overlapping age range regarding survived and not survived passengers. # + [markdown] id="iX3H1gYwcTQe" # ### visualize and clean data: Pclass # + [markdown] id="cAV5Dx-HcTQg" # Recall that Pclass holds the information about the class each passenger had with 1 = first class, 2 = middle class and 3 = low class. Let's find out how the survival rate is distributed over the different classes. # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="bEg-oAzLcTQi" outputId="51dba07a-36e2-4ea4-e47c-c020bd4bbb92" # use seaborns barplot function to plot the information of Pclass against Survived sns.barplot(y= 'Survived', x = 'Pclass', data = train) # + [markdown] id="Iy2iUCNXcTQm" # Did you anticipate the result? More people from the first class survived than from the second class and from the second class more people survived than from the thrid class. # # What does this distribution look like if we make the same analysis but differentiate between the two genders. 
You can do this by using the "hue" parameter of the barplot function in seaborn: # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="s52toJt5cTQn" outputId="977f5f59-09fb-43a2-b47b-2075530577ff" # create a barplot with sex plotted against survived and set hue to 'Pclass' sns.barplot(x = 'Sex', y ='Survived',data = train , hue='Pclass') # + [markdown] id="R1EdTBLwcTQu" # You should see a plot where each, male and female, have three bars representing the three different classes. # # Now we don't see a linear decline in survived passengers acrose the different classes. What you can see instead is that much more men from the first class survived. In contrast the percentage of survived women in the first and second class is nearly the same and much higher than the survival rate of the third class. # + [markdown] id="xleeNmhHcTQu" # ### clean data: Embarked and Cabin # Let's also clean the embarked and cabin column since it also has nan values: # + colab={"base_uri": "https://localhost:8080/"} id="zVCMJ9e-cTQw" outputId="77693d59-b278-4c60-9669-430ade25c415" train.isnull().sum() # + [markdown] id="y_70tRmXcTQ1" # the embarked column has some missing value. Fill those with the most occurred value ('S'). # + colab={"base_uri": "https://localhost:8080/"} id="Y_Q7XxmjcTQ4" outputId="dd615727-8c39-4998-b405-9291f8b77973" # use the fillna method to replace the nan's with 'S' train['Embarked'] = train['Embarked'].fillna('S') train['Embarked'].isnull().sum() # + [markdown] id="XL58dMLLcTQ8" # The cabin column has too many missing values (687 out of 891). Thus it is hard to replace the nan values with something meaningful. 
# That's why we can just drop the column

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="8Eh3sSv4cTRC" outputId="310f92e2-525e-4086-b1de-7c37cf2ba983"
# drop the cabin column
# FIX: the drop was commented out and, even when uncommented, its result was
# never assigned back, so the Cabin column (687 missing values) survived the
# "cleaning" step. Drop it in place so the null check below really is clean.
train.drop(['Cabin'], axis=1, inplace=True)
train

# + [markdown] id="X2WhNtJ1cTRF"
# Let's check once more if everything is clean

# + colab={"base_uri": "https://localhost:8080/"} id="WkCWLaf5cTRG" outputId="11d5cb94-ca48-4df3-953b-6966aaa7ec0d"
train.isnull().sum()

# + [markdown] id="BaopGs24cTRJ"
# YAHOO!!! You successfully managed to clean the dataframe.

# + [markdown] id="-fIV_1rrcTRK"
# ### create new feature
# Besides analysing features in a dataset the task of a good data scientist is also to use the existing dataset to come up with new features. This is not an easy task but it is a task where you can be very creative. What do you think could be another interesting feature? You can scroll to the description of the dataset if you want to remind yourself what other information is stored in the dataframe and what you could use to create new features.
#
# If you don't want to because you are busy and need to finish this notebook fast, here are some suggestions we are going to look at :)
#
# 1. family size
# 2. alone
#
# but feel free to also try out your own ideas.

# + id="en44i-WBcTRL"


# + [markdown] id="3Xf11vV_cTRN"
# ### new feature: family size
# With the number of siblings/spouses from the column SibSp and the number of children/parents from the Parch column we can create a new feature called family size.

# + id="1f4AvEsRcTRP"
# create the family size feature by adding the SibSp and Parch columns
# (don't forget to add 1 because the person with the family is also a member of the family)
# FIX: the original code omitted the "+ 1" that its own comment asks for, so a
# passenger travelling alone got FamilySize == 0 and the later
# `FamilySize == 1` IsAlone check flagged the wrong passengers.
train['FamilySize'] = train['SibSp'] + train['Parch'] + 1

# + [markdown] id="q7gjDXVDcTRV"
# Great! You now created the FamilySize feature. Let's have a look at the question if bigger or smaller families survived.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="6qIbJW3ocTRY" outputId="7e1d4ec0-e29e-4aba-db16-a668a7bcf87b" # use a boxplot and plot Survived against FamilySize # use the train dataframe as data input into the boxplot method sns.boxplot( x = 'Survived', y='FamilySize', data = train) # + [markdown] id="D-i7o9qycTRh" # you should see that both boxplots have an overlap but the the family sizes of survived passengers are distributed over a larger range. Does this mean that bigger families had a bigger chance to survive? # + [markdown] id="vqXo-RuJcTRh" # ### new feature: is alone # + [markdown] id="6yCeWIuPcTRi" # In the last section you successfully created a new feature called FamilySize. Now you can leverage that feature to create another one. This time you will create the "is alone" feature. This will denote if a passenger was alone on the ship (= 1) or if he was there with his family (= 0). # + colab={"base_uri": "https://localhost:8080/"} id="OyjN1cXwcTRk" outputId="d8646bd5-43f0-4ac8-c6b3-d18ad576011e" # create a IsAlone column (1 = is alone, 0 = is not alone). # first create the column and set it so 0 train['IsAlone'] = 0 # now filter the dataframe and set IsAlone to 1 where FamilySize is equal to 1 train.loc[train['FamilySize'] == 1,'IsAlone'] = 1 train['IsAlone'] # + [markdown] id="hU2-ZuwNcTRp" # Now check how many passengers survived who were alone and how many survived who were not alone # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="ui74bSmzcTRs" outputId="ab79289b-0d61-4c48-931c-4775de45886c" # use a barplot again and plot the IsAlone column against the Survived column sns.barplot(y = 'Survived', x = 'IsAlone', data = train) # + [markdown] id="_OJbqQBZcTRv" # You should see that approximately 50% of passengers survived who were not alone and that sadly only 30 of lone passengers survived. # # Since we now have a lot of interesting features, we can also ask and analyse interesting questions. 
# For example we saw that more women than men survived and that inside both groups the
# age wasn't a big differentiator. But what do you think? Did more lone women than
# accompanied women survive? And how did being alone affect the men?
#
# To answer this you can again use a barplot.

# +
# use seaborns barplot function and plot Sex against Survived
# and set the hue to IsAlone
sns.barplot( x ='Sex', y = 'Survived', hue= 'IsAlone', data = train)

# + [markdown]
# You should now see that being alone affected the genders and their chance of
# survival differently. More lone females survived the titanic accident but in
# contrast more lone males died in the titanic accident.

# + [markdown]
# ## machine learning model
# If you reached this part - well done :) you should now be very good at cleaning,
# investigating and visualizing data. This part is an outlook for you. Cleaning data
# is a very important, if not the most important, part of every data science project.
#
# Here you will see a model trained on your cleaned data. Don't be confused if you
# don't understand what is happening below -- at the end of the semester you will be
# able to write the same machine learning model and even more complex ones. This part
# is here to motivate you and to show you that your data science journey has just
# started :)

# +
from sklearn.model_selection import train_test_split
from data_set_helper import dataset

# split the dataset in training and testing data. This is an important step in
# every machine learning project. But you will learn about this in detail in
# your curriculum videos and exercises.
# features: every column except the target 'Survived'
X = train.loc[:, train.columns != 'Survived']
y = train.loc[:, 'Survived']
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    random_state=0
)

# we will create a dataset object and store the
# training data in it
ds = dataset(X_train)

# we will run the preparation pipeline to
# clean the data so that it can be used in a
# machine learning model
ds.preparation_pipeline('train')

# we will do the same for the test dataset
ds.add_dataset('test', X_test)
ds.preparation_pipeline('test')

# + [markdown]
# As an example we use a machine learning model called K-Nearest-Neighbors, which you
# will learn about in your curriculum

# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.model_selection import GridSearchCV

knn_clf = KNeighborsClassifier()

# hyperparameter grid searched exhaustively by GridSearchCV, scored by accuracy
parameters_knn = {
    "n_neighbors": [x for x in range(1,15)],
    "weights": ["uniform", "distance"],
    "algorithm": ["auto", "ball_tree", "kd_tree"],
    "leaf_size": [20, 30, 50]}

grid_knn = GridSearchCV(knn_clf,
                        parameters_knn,
                        scoring=make_scorer(accuracy_score))
grid_knn.fit(ds.data['train'], y_train)

# refit the best estimator on the full training set, then score on held-out data
knn_clf = grid_knn.best_estimator_
knn_clf.fit(ds.data['train'], y_train)
pred_knn = knn_clf.predict(ds.data['test'])
acc_knn = accuracy_score(y_test, pred_knn)

print("The Score for KNeighbors is: " + str(acc_knn))

# + [markdown]
# The score means that our created model has an accuracy of approximately 80%, thus
# it predicts 80% correctly of whether a passenger survived or didn't survive.
#
# Such models can be used on tragic events, as the titanic accident, to get clues on
# missing passengers and how likely it is that they survived.
#
# If you want, you can enter your data below to see whether you would have survived.

# +
# enter a class of your choice (1, 2 or 3)
pclass = 1
# enter your sex
sex = 'male'
# enter your age
age = 30
# enter the number of siblings
sibsp = 1
# enter the number of parents
parch = 2
# enter your name
name = 'kemal'

# single-row frame matching the training schema; Fare/Embarked/Cabin/Name/
# PassengerId/Ticket are placeholder values the preparation pipeline expects
my_data = pd.DataFrame({
    'Pclass': [pclass],
    'Sex': sex,
    'Age': age,
    'SibSp': sibsp,
    'Parch': parch,
    'Fare': 32.20,
    'Embarked': 'S',
    'Cabin': 'x',
    'Name': name,
    'PassengerId': 123,
    'Ticket': 123
})

ds.add_dataset('my_data', my_data)
ds.preparation_pipeline('my_data')
sur = knn_clf.predict(ds.data['my_data'])

if sur == 0:
    print("you wouldn't have survived")
elif sur == 1:
    print("you would have survived")

# +
notebooks/techlabs_notebook_titanic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Handle MIDI file converted to CSV
# *Uses MIDICSV/CSVMIDI linux program through bash*

import pandas as pd
import os

# *I want to extract the main melody voice or melodic line*<br>Example for 1 file

# +
pathstr = './csv/'
n_song = 23
filename = 'songs{}.format'.replace('format', 'csv').format(n_song) if False else 'songs{}.csv'.format(n_song)

# column names for the midicsv output
header_names = ['track', 'time', 'title', 'text', 'note', 'velocity']

data = pd.read_csv('{}{}'.format(pathstr, filename),
                   names=header_names,
                   encoding='ISO-8859-1')
data.head(4)
# -

# Create DataFrame array. One per track.

# +
# map each MIDI track number to the sub-frame of its events
tracks_df = {}
for idx in data['track'].unique():
    tracks_df[idx] = data.loc[data['track'] == idx]
# -

# *Checking the first 3 rows from every new dataframe*

# iterate over the actual track keys -- the original used
# range(len(tracks_df)), which raises KeyError whenever the MIDI track
# numbers are not contiguous integers starting at 0
for track_id, track_frame in tracks_df.items():
    print(track_frame.head(3), '\n---------------------\n')

# output locations (renamed from `dir`, which shadowed the builtin)
csv_dir = './csv/'
song_dir = '{}{}'.format(csv_dir, n_song)
song_file = '{}/{}.csv'.format(song_dir, n_song)
midi_file = '{}/{}.mid'.format(song_dir, n_song)
print(song_file)

# ! mkdir {song_dir}

# concatenate the metadata track (0) and the chosen melody track (4);
# pd.concat replaces the deprecated unbound pd.DataFrame.append(a, b) call
ndf = pd.concat([tracks_df[0], tracks_df[4]])
ndf.to_csv(song_file, index=False)

# ! csvmidi {song_file} {midi_file} -v
csv_midi_handler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Electromagnetics: 3-loop model # # In the first part of this notebook, we consider a 3 loop system, consisting of a transmitter loop, receiver loop, and target loop. # # <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/em/FEM3Loop/SurveyParams.png?raw=true" style="width: 60%; height: 60%"> </img> # # # ## Import Necessary Packages # %matplotlib inline from geoscilabs.em.FDEM3loop import interactfem3loop from geoscilabs.em.FDEMpipe import interact_femPipe from matplotlib import rcParams rcParams['font.size'] = 14 # # Your Default Parameters should be: # # <table> # <tr> # <th>Parameter </th> # <th>Default value</th> # </tr> # <tr> # <td>Inductance:</td> # <td>L = 0.1</td> # </tr> # <tr> # <td>Resistance:</td> # <td>R = 2000</td> # </tr> # <tr> # <td>X-center of target loop:</td> # <td>xc = 0</td> # </tr> # <tr> # <td>Y-center of target loop:</td> # <td>yc = 0</td> # </tr> # <tr> # <td>Z-center of target loop:</td> # <td>zc = 1</td> # </tr> # <tr> # <td>Inclination of target loop:</td> # <td>dincl = 0</td> # </tr> # <tr> # <td>Declination of target loop:</td> # <td>ddecl = 90</td> # </tr> # <tr> # <td>Frequency:</td> # <td>f = 10000 </td> # </tr> # <tr> # <td>Sample spacing:</td> # <td>dx = 0.25 </td> # </tr> # </table> # # To use the default parameters below, either click the box for "default" or adjust the sliders for R, zc, and dx. When answering the lab questions, make sure all the sliders are where they should be! # ## Run FEM3loop Widget fem3loop = interactfem3loop() fem3loop # # Pipe Widget # # In the following app, we consider a loop-loop system with a pipe taget. Here, we simulate two surveys, one where the boom is oriented East-West (EW) and one where the boom is oriented North-South (NS). 
# # <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/em/FEM3Loop/model.png?raw=true" style="width: 40%; height: 40%"> </img> # # The variables are: # # - alpha: # $$\alpha = \frac{\omega L}{R} = \frac{2\pi f L}{R}$$ # - pipedepth: Depth of the pipe center # # We plot the percentage of Hp/Hs ratio in the Widget. pipe = interact_femPipe() pipe
notebooks/em/FDEM_ThreeLoopModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # <h2> 4.5ppm setting </h2> # No warning that there are too few retention correction groups. Not too many peak-data insertion problems # + import time import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from sklearn import preprocessing from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import StratifiedShuffleSplit from sklearn.cross_validation import cross_val_score #from sklearn.model_selection import StratifiedShuffleSplit #from sklearn.model_selection import cross_val_score from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import roc_curve, auc from sklearn.utils import shuffle from scipy import interp # %matplotlib inline # + def remove_zero_columns(X, threshold=1e-20): # convert zeros to nan, drop all nan columns, the replace leftover nan with zeros X_non_zero_colum = X.replace(0, np.nan).dropna(how='all', axis=1).replace(np.nan, 0) #.dropna(how='all', axis=0).replace(np.nan,0) return X_non_zero_colum def zero_fill_half_min(X, threshold=1e-20): # Fill zeros with 1/2 the minimum value of that column # input dataframe. Add only to zero values # Get a vector of 1/2 minimum values half_min = X[X > threshold].min(axis=0)*0.5 # Add the half_min values to a dataframe where everything that isn't zero is NaN. 
# then convert NaN's to 0 fill_vals = (X[X < threshold] + half_min).fillna(value=0) # Add the original dataframe to the dataframe of zeros and fill-values X_zeros_filled = X + fill_vals return X_zeros_filled toy = pd.DataFrame([[1,2,3,0], [0,0,0,0], [0.5,1,0,0]], dtype=float) toy_no_zeros = remove_zero_columns(toy) toy_filled_zeros = zero_fill_half_min(toy_no_zeros) print toy print toy_no_zeros print toy_filled_zeros # - # <h2> Import the dataframe and remove any features that are all zero </h2> # + ### Subdivide the data into a feature table data_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/processed/MTBLS315/'\ 'uhplc_pos/xcms_result_4.5.csv' ## Import the data and remove extraneous columns df = pd.read_csv(data_path, index_col=0) df.shape df.head() # Make a new index of mz:rt mz = df.loc[:,"mz"].astype('str') rt = df.loc[:,"rt"].astype('str') idx = mz+':'+rt df.index = idx df # separate samples from xcms/camera things to make feature table not_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax', 'npeaks', 'uhplc_pos', ] samples_list = df.columns.difference(not_samples) mz_rt_df = df[not_samples] # convert to samples x features X_df_raw = df[samples_list].T # Remove zero-full columns and fill zeroes with 1/2 minimum values X_df = remove_zero_columns(X_df_raw) X_df_zero_filled = zero_fill_half_min(X_df) print "original shape: %s \n# zeros: %f\n" % (X_df_raw.shape, (X_df_raw < 1e-20).sum().sum()) print "zero-columns repalced? 
shape: %s \n# zeros: %f\n" % (X_df.shape, (X_df < 1e-20).sum().sum()) print "zeros filled shape: %s \n#zeros: %f\n" % (X_df_zero_filled.shape, (X_df_zero_filled < 1e-20).sum().sum()) # Convert to numpy matrix to play nicely with sklearn X = X_df.as_matrix() print X.shape # - # <h2> Get mappings between sample names, file names, and sample classes </h2> # + # Get mapping between sample name and assay names path_sample_name_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\ 'MTBLS315/metadata/a_UPLC_POS_nmfi_and_bsi_diagnosis.txt' # Index is the sample name sample_df = pd.read_csv(path_sample_name_map, sep='\t', index_col=0) sample_df = sample_df['MS Assay Name'] sample_df.shape print sample_df.head(10) # get mapping between sample name and sample class path_sample_class_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\ 'MTBLS315/metadata/s_NMFI and BSI diagnosis.txt' class_df = pd.read_csv(path_sample_class_map, sep='\t') # Set index as sample name class_df.set_index('Sample Name', inplace=True) class_df = class_df['Factor Value[patient group]'] print class_df.head(10) # convert all non-malarial classes into a single classes # (collapse non-malarial febril illness and bacteremia together) class_map_df = pd.concat([sample_df, class_df], axis=1) class_map_df.rename(columns={'Factor Value[patient group]': 'class'}, inplace=True) class_map_df binary_class_map = class_map_df.replace(to_replace=['non-malarial febrile illness', 'bacterial bloodstream infection' ], value='non-malarial fever') binary_class_map # - # convert classes to numbers le = preprocessing.LabelEncoder() le.fit(binary_class_map['class']) y = le.transform(binary_class_map['class']) # <h2> Plot the distribution of classification accuracy across multiple cross-validation splits - Kinda Dumb</h2> # Turns out doing this is kind of dumb, because you're not taking into account the prediction score your classifier assigned. Use AUC's instead. 
You want to give your classifier a lower score if it is really confident and wrong, than vice-versa # + def rf_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1, n_estimators=1000): cross_val_skf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf = RandomForestClassifier(n_estimators=n_estimators, random_state=random_state) scores = cross_val_score(clf, X, y, cv=cross_val_skf) sns.violinplot(scores,inner='stick') rf_violinplot(X,y) # TODO - Switch to using caret for this bs..? # + # Do multi-fold cross validation for adaboost classifier def adaboost_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1, n_estimators=200): cross_val_skf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf = AdaBoostClassifier(n_estimators=n_estimators, random_state=random_state) scores = cross_val_score(clf, X, y, cv=cross_val_skf) sns.violinplot(scores,inner='stick') adaboost_violinplot(X,y) # + # TODO PQN normalization, and log-transformation, # and some feature selection (above certain threshold of intensity, use principal components), et def pqn_normalize(X, integral_first=False, plot=False): ''' Take a feature table and run PQN normalization on it ''' # normalize by sum of intensities in each sample first. 
Not necessary if integral_first: sample_sums = np.sum(X, axis=1) X = (X / sample_sums[:,np.newaxis]) # Get the median value of each feature across all samples mean_intensities = np.median(X, axis=0) # Divde each feature by the median value of each feature - # these are the quotients for each feature X_quotients = (X / mean_intensities[np.newaxis,:]) if plot: # plot the distribution of quotients from one sample for i in range(1,len(X_quotients[:,1])): print 'allquotients reshaped!\n\n', #all_quotients = X_quotients.reshape(np.prod(X_quotients.shape)) all_quotients = X_quotients[i,:] print all_quotients.shape x = np.random.normal(loc=0, scale=1, size=len(all_quotients)) sns.violinplot(all_quotients) plt.title("median val: %f\nMax val=%f" % (np.median(all_quotients), np.max(all_quotients))) plt.plot( title="median val: ")#%f" % np.median(all_quotients)) plt.xlim([-0.5, 5]) plt.show() # Define a quotient for each sample as the median of the feature-specific quotients # in that sample sample_quotients = np.median(X_quotients, axis=1) # Quotient normalize each samples X_pqn = X / sample_quotients[:,np.newaxis] return X_pqn # Make a fake sample, with 2 samples at 1x and 2x dilutions X_toy = np.array([[1,1,1,], [2,2,2], [3,6,9], [6,12,18]], dtype=float) print X_toy print X_toy.reshape(1, np.prod(X_toy.shape)) X_toy_pqn_int = pqn_normalize(X_toy, integral_first=True, plot=True) print X_toy_pqn_int print '\n\n\n' X_toy_pqn = pqn_normalize(X_toy) print X_toy_pqn # - # <h2> pqn normalize your features </h2> X_pqn = pqn_normalize(X) print X_pqn # <h2>Random Forest & adaBoost with PQN-normalized data</h2> rf_violinplot(X_pqn, y) # Do multi-fold cross validation for adaboost classifier adaboost_violinplot(X_pqn, y) # <h2> RF & adaBoost with PQN-normalized, log-transformed data </h2> # Turns out a monotonic transformation doesn't really affect any of these things. # I guess they're already close to unit varinace...? 
X_pqn_nlog = np.log(X_pqn)

rf_violinplot(X_pqn_nlog, y)

adaboost_violinplot(X_pqn_nlog, y)


def roc_curve_cv(X, y, clf, cross_val, path='/home/irockafe/Desktop/roc.pdf',
                 save=False, plot=True):
    """Cross-validated ROC curve for `clf`.

    Fits `clf` on each train split of `cross_val`, scores the test split, and
    interpolates every fold's ROC curve onto a common 100-point FPR grid so the
    folds can be averaged.

    Returns (tpr_list, auc_list, mean_fpr):
      tpr_list -- per-fold TPR values interpolated onto `mean_fpr`
      auc_list -- per-fold area-under-curve values
      mean_fpr -- the shared FPR grid
    """
    t1 = time.time()
    # collect vals for the ROC curves
    tpr_list = []
    mean_fpr = np.linspace(0, 1, 100)
    auc_list = []

    # Get the false-positive and true-positive rate for each fold
    for i, (train, test) in enumerate(cross_val):
        clf.fit(X[train], y[train])
        # probability of the positive class (column 1)
        y_pred = clf.predict_proba(X[test])[:, 1]
        # get fpr, tpr
        fpr, tpr, thresholds = roc_curve(y[test], y_pred)
        roc_auc = auc(fpr, tpr)
        # interpolate this fold's curve onto the common grid
        tpr_list.append(interp(mean_fpr, fpr, tpr))
        # force every curve to start at (0, 0)
        tpr_list[-1][0] = 0.0
        auc_list.append(roc_auc)
        if (i % 10 == 0):
            # progress report every 10 folds
            print '{perc}% done! {time}s elapsed'.format(perc=100*float(i)/cross_val.n_iter,
                                                         time=(time.time() - t1))

    # get mean tpr and fpr across folds
    mean_tpr = np.mean(tpr_list, axis=0)
    # make sure it ends up at 1.0
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(auc_list)

    if plot:
        # plot mean auc
        plt.plot(mean_fpr, mean_tpr,
                 label='Mean ROC - AUC = %0.2f $\pm$ %0.2f' % (mean_auc, std_auc),
                 lw=5, color='b')
        # plot luck-line (the no-skill diagonal)
        plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
                 label='Luck', alpha=0.5)
        # plot a 1-std band around the mean curve
        std_tpr = np.std(tpr_list, axis=0)
        tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
        tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
        plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey',
                         alpha=0.2, label=r'$\pm$ 1 stdev')
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC curve, {iters} iterations of {cv} cross validation'.format(
            iters=cross_val.n_iter,
            cv='{train}:{test}'.format(test=cross_val.test_size,
                                       train=(1-cross_val.test_size))))
        plt.legend(loc="lower right")
        if save:
            plt.savefig(path, format='pdf')
        plt.show()
    return tpr_list, auc_list, mean_fpr


# +
rf_estimators = 1000
n_iter = 3
# Random-forest ROC over a small number of stratified shuffle splits
test_size = 0.3
random_state = 1
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                      random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
rf_graph_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/rf_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=rf_estimators,
                                                                            cv=n_iter)
print cross_val_rf.n_iter
print cross_val_rf.test_size
tpr_vals, auc_vals, mean_fpr = roc_curve_cv(X_pqn, y, clf_rf, cross_val_rf,
                                            path=rf_graph_path, save=False)

# +
# For adaboosted trees, same protocol
n_iter = 3
test_size = 0.3
random_state = 1
adaboost_estimators = 200
adaboost_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\
isaac_feature_tables/uhplc_pos/adaboost_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=adaboost_estimators,
                                                                                  cv=n_iter)
cross_val_adaboost = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                            random_state=random_state)
clf = AdaBoostClassifier(n_estimators=adaboost_estimators, random_state=random_state)
adaboost_tpr, adaboost_auc, adaboost_fpr = roc_curve_cv(X_pqn, y, clf, cross_val_adaboost,
                                                        path=adaboost_path)
# -

# <h2> Great, you can classify things. But make null models and do a sanity check to
# make sure you aren't just classifying garbage </h2>

# +
# Make a null model AUC curve
def make_null_model(X, y, clf, cross_val, random_state=1, num_shuffles=5, plot=True):
    '''
    Runs the true model, then sanity-checks by:
    Shuffles class labels and then builds cross-validated ROC curves from them.
    Compares true AUC vs. shuffled auc by t-test (assumes normality of AUC curve)

    Returns (auc_true, null_aucs): the per-fold AUCs with the real labels, and
    a list (one entry per shuffle) of per-fold AUC lists with shuffled labels.
    '''
    null_aucs = []
    print y.shape
    print X.shape
    # ROC with the real labels
    tpr_true, auc_true, fpr_true = roc_curve_cv(X, y, clf, cross_val)
    # shuffle y lots of times
    for i in range(0, num_shuffles):
        # Iterate through the shuffled y vals and repeat with appropriate params
        # Retain the auc vals for final plotting of distribution
        y_shuffle = shuffle(y)
        # NOTE(review): mutates the cross-val object's internals so the
        # stratified splits follow the shuffled labels -- this relies on the old
        # sklearn.cross_validation implementation; re-verify if sklearn is upgraded
        cross_val.y = y_shuffle
        cross_val.y_indices = y_shuffle
        print 'Number of differences b/t original and shuffle: %s' % (y == cross_val.y).sum()
        # Get auc values for number of iterations
        # (NB: this rebinds the local name `auc`, shadowing sklearn.metrics.auc
        # inside this function from here on)
        tpr, auc, fpr = roc_curve_cv(X, y_shuffle, clf, cross_val, plot=True)
        null_aucs.append(auc)
    # plot the outcome: null AUC distribution vs the true AUCs
    if plot:
        flattened_aucs = [j for i in null_aucs for j in i]
        my_dict = {'true_auc': auc_true, 'null_auc': flattened_aucs}
        df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
        df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
                          value_name='auc', var_name='AUC_type')
        sns.violinplot(x='AUC_type', y='auc', inner='points', data=df_tidy)
        # Plot distribution of AUC vals
        plt.title("Distribution of aucs")
        plt.xlabel('AUC')
        plt.show()
    # TODO: a quick t-test to see the odds of randomly getting an AUC that good
    return auc_true, null_aucs


# +
# Make a null model AUC curve & compare it to the true model
# Random forest magic!
rf_estimators = 1000
n_iter = 50
test_size = 0.3
random_state = 1
cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                      random_state=random_state)
clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state)
true_auc, all_aucs = make_null_model(X_pqn, y, clf_rf, cross_val_rf, num_shuffles=5)

# +
# make dataframe from true and null aucs, then compare their distributions
flattened_aucs = [j for i in all_aucs for j in i]
my_dict = {'true_auc': true_auc, 'null_auc': flattened_aucs}
df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T
df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'],
                  value_name='auc', var_name='AUC_type')
print df_tidy.head()
sns.violinplot(x='AUC_type', y='auc', inner='points', data=df_tidy, bw=0.7)
plt.show()
# -

# <h2> Let's check out some PCA plots </h2>

# +
from sklearn.decomposition import PCA

# Check PCA of things
def PCA_plot(X, y, n_components, plot_color, class_nums, class_names, title='PCA'):
    '''
    Scatter the first two principal components of X, colored by class.
    plot_color / class_nums / class_names are parallel sequences.
    (The `title` parameter is currently unused; the title is hard-coded below.)
    '''
    pca = PCA(n_components=n_components)
    X_pca = pca.fit(X).transform(X)
    print zip(plot_color, class_nums, class_names)
    for color, i, target_name in zip(plot_color, class_nums, class_names):
        # plot one class at a time
        xvals = X_pca[y == i, 0]
        print xvals.shape
        yvals = X_pca[y == i, 1]
        plt.scatter(xvals, yvals, color=color, alpha=0.8, label=target_name)
    plt.legend(bbox_to_anchor=(1.01,1), loc='upper left', shadow=False)
    plt.title('PCA of Malaria data')
    plt.show()


# compare the PQN-normalized vs raw feature space
PCA_plot(X_pqn, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever'])
PCA_plot(X, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever'])
# -

# <h2> What about with all three classes? </h2>

# +
# convert classes to numbers (three classes this time)
le = preprocessing.LabelEncoder()
le.fit(class_map_df['class'])
y_three_class = le.transform(class_map_df['class'])
print class_map_df.head(10)
print y_three_class
print X.shape
print y_three_class.shape
y_labels = np.sort(class_map_df['class'].unique())
print y_labels
colors = ['green', 'red', 'blue']
print np.unique(y_three_class)
PCA_plot(X_pqn, y_three_class, 2, colors, np.unique(y_three_class), y_labels)
PCA_plot(X, y_three_class, 2, colors, np.unique(y_three_class), y_labels)
notebooks/MTBLS315/exploratory/Old_notebooks/MTBLS315_uhplc_pos_classifer-4.5ppm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] Collapsed="false" # # <NAME> -- using cubic splines # # **<NAME>, PhD** # # This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>. # # Original (Matlab) CompEcon file: **demdp01b.m** # # Running this file requires the Python version of CompEcon. This can be installed with pip by running # # # !pip install compecon --upgrade # # <i>Last updated: 2021-Oct-01</i> # <hr> # - # ## About # # Profit maximizing owner of a commercial tree stand must decide when to clearcut the stand. # # + Collapsed="false" import numpy as np from compecon import NLP, demo, BasisSpline import matplotlib.pyplot as plt # + Collapsed="false" price = 1.0 # price of biomass kappa = 0.2 # clearcut-replant cost smax = 0.5 # stand carrying capacity gamma = 0.1 # biomass growth parameter delta = 0.9 # discount factor # + [markdown] Collapsed="false" # ### Code the growth function # + Collapsed="false" def h(s): return np.array(s + gamma*(smax - s)) # + [markdown] Collapsed="false" # ## SOLUTION # # ### Code the approximant and the residual # + Collapsed="false" ns = 200 vhat = BasisSpline(ns,0,smax,k=3) # + Collapsed="false" def vhat1(s): return price*s - kappa + delta * vhat(h(0)) def vhat0(s): return delta * vhat(h(s)) # + Collapsed="false" def resid(c,s=vhat.nodes): vhat.c = c return vhat(s) - np.maximum(vhat0(s), vhat1(s)) # + [markdown] Collapsed="false" # ### Solve collocation equation # + Collapsed="false" cc = NLP(resid).broyden(vhat.c) # + [markdown] Collapsed="false" # ### Compute critical biomass # + Collapsed="false" scrit = NLP(lambda s: vhat0(s)-vhat1(s)).broyden(0.0)[0] # + [markdown] 
Collapsed="false" # ## ANALYSIS # + [markdown] Collapsed="false" # ### Compute refined state grid # + Collapsed="false" ss = np.linspace(0,smax,1000) # + [markdown] Collapsed="false" # ### Plot Conditional Value Functions # + Collapsed="false" fig1 =demo.figure('Conditional Value Functions','Biomass','Value of Stand') plt.plot(ss,vhat0(ss),label='Grow') plt.plot(ss,vhat1(ss),label='Clear-Cut') plt.legend() vcrit = vhat(scrit) ymin = plt.ylim()[0] plt.vlines(scrit, ymin,vcrit,'grey',linestyles='--') demo.annotate(scrit,ymin,'$s^*$',ms=10) demo.bullet(scrit,vcrit) print(f'Optimal Biomass Harvest Level = {scrit:.4f}') # + [markdown] Collapsed="false" # ### Plot Value Function Residual # + Collapsed="false" fig2 = demo.figure('Bellman Equation Residual', 'Biomass', 'Percent Residual') plt.plot(ss, 100*resid(cc,ss) / vhat(ss)) plt.hlines(0,0,smax,linestyles='--') # + [markdown] Collapsed="false" # ### Compute ergodic mean annual harvest # + Collapsed="false" s = h(0) for n in range(100): if s > scrit: break s = h(s) print(f'Ergodic Mean Annual Harvest = {s/n:.4f} after {n+1} iterations') # + Collapsed="false" #demo.savefig([fig1,fig2])
notebooks/dp/01b Timber Harvesting -- cubic spline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/probml/JSL/blob/main/JSL_notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="rX9X_KFwEtoK" outputId="89fbddbe-e2b5-4fc5-b190-2a30852625be" # !pip install --upgrade git+https://github.com/google/flax.git # !pip install --upgrade tensorflow-probability # + id="zs54LQC9u9qL" outputId="e573820f-bdd6-4a3e-ff74-dc895f8c9829" colab={"base_uri": "https://localhost:8080/"} # #!pip install git+git://github.com/blackjax-devs/blackjax.git # #!pip install git@github.com:blackjax-devs/blackjax.git # #!pip install --upgrade git+https://github.com/blackjax-devs/blackjax.git # !pip install blackjax # + id="ARlJdPVrvllp" outputId="b9e87cfe-d06c-4dd3-eeca-01e3eb39814b" colab={"base_uri": "https://localhost:8080/"} # #!pip install git+git://github.com/deepmind/distrax.git. # !pip install distrax # + id="ye9LqR2JE-bD" import tensorflow as tf import tensorflow_probability as tfp # + colab={"base_uri": "https://localhost:8080/"} id="asDC0nQvFXee" outputId="d65cebdd-d399-48de-cf44-1dc13e2f6c0b" # !git clone https://github.com/probml/JSL.git # #!pip install git+https://github.com/probml/jsl # + colab={"base_uri": "https://localhost:8080/", "height": 590} id="RHRJ6MA-I6c9" outputId="2874c232-39e4-4c55-b04a-212a10bd82e0" # %cd /content/JSL/ # !pip install -e . 
# + id="OmuXLXtHwRaH" outputId="6478494f-fcdb-425e-fd51-0fca226b1c53" colab={"base_uri": "https://localhost:8080/"} # !pwd # + id="Sq2dul_2wd83" outputId="6fc8c1c8-5505-47ea-b4b9-9050b30da961" colab={"base_uri": "https://localhost:8080/"} # !ls # + colab={"base_uri": "https://localhost:8080/", "height": 813} id="YhvXkCeiI4aX" outputId="d6f7b479-b2b7-459f-efb2-2d2b455c4bb9" # %run /content/JSL/jsl/demos/kf_tracking.py # + colab={"base_uri": "https://localhost:8080/", "height": 813} id="RMKqBKfDFbcp" outputId="57cf142f-1c09-40d2-c503-f44996aa60ef" from jsl.demos import kf_tracking as demo figures = demo.main() #print(figures) # + colab={"base_uri": "https://localhost:8080/", "height": 218} id="_b8lNuou9RaT" outputId="7165d3db-0250-479b-83cc-77157beb6e56" from jsl.demos import hmm_casino as demo figures = demo.main() print(figures) # + id="CWEhPo9YJuut"
JSL_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="ngQsy2AT256U"
# # Build Framework: Cloning and Building Darknet
#

# + colab={"base_uri": "https://localhost:8080/"} id="HCs4VQmESACk" executionInfo={"status": "ok", "timestamp": 1624899847515, "user_tz": -60, "elapsed": 4607, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="d99612d8-a838-4cea-995e-80b9503e468e"
# clone darknet
# !git clone https://github.com/AlexeyAB/darknet

# + colab={"base_uri": "https://localhost:8080/"} id="QOyDql4iR9MQ" executionInfo={"status": "ok", "timestamp": 1624899847910, "user_tz": -60, "elapsed": 406, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="cdf2f6bb-f5ff-4872-d904-b4313c7c3c04"
# change makefile to have GPU and OPENCV enabled
# %cd darknet
# !sed -i 's/OPENCV=0/OPENCV=1/' Makefile
# !sed -i 's/GPU=0/GPU=1/' Makefile
# !sed -i 's/CUDNN=0/CUDNN=1/' Makefile
# !sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/' Makefile

# + colab={"base_uri": "https://localhost:8080/"} id="A4PimdvDSAwX" executionInfo={"status": "ok", "timestamp": 1624899847911, "user_tz": -60, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="ed9e1382-c82e-468a-e731-6b7c3250d4cb"
# verify CUDA
# !/usr/local/cuda/bin/nvcc --version

# + colab={"base_uri": "https://localhost:8080/"} id="JLnqPB9iSElt" executionInfo={"status": "ok", "timestamp": 1624899919028, "user_tz": -60, "elapsed": 71127, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="9e2ea46f-f6af-4b7e-e5ee-9a035b765a45"
# make darknet
# !make

# + [markdown] id="tRMGzNEJJ63X"
# Using the below helper function to view images

# + id="w4Pm0VoqmGia"
# define helper functions
def imShow(path):
    """Read the image at `path` and display it enlarged 3x with matplotlib."""
    import cv2
    import matplotlib.pyplot as plt
    # %matplotlib inline

    image = cv2.imread(path)
    height, width = image.shape[:2]
    resized_image = cv2.resize(image, (3 * width, 3 * height), interpolation = cv2.INTER_CUBIC)

    fig = plt.gcf()
    fig.set_size_inches(18, 10)
    plt.axis("off")
    # BUG FIX: display the enlarged image; the original computed
    # `resized_image` but then showed the untouched `image`, so the
    # 3x resize had no effect. OpenCV loads BGR, matplotlib expects RGB.
    plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))
    plt.show()


# use this to upload files
def upload():
    """Upload files through the Colab file picker and save them locally."""
    from google.colab import files
    uploaded = files.upload()
    for name, data in uploaded.items():
        with open(name, 'wb') as f:
            f.write(data)
            print('saved file', name)


# use this to download a file
def download(path):
    """Download `path` from the Colab VM to the local machine."""
    from google.colab import files
    files.download(path)


# + [markdown] id="8ppsTIG0CtX3"
# # Move files: Uploading Google Drive Files
#

# + colab={"base_uri": "https://localhost:8080/"} id="j9lmJEnGEu-7" executionInfo={"status": "ok", "timestamp": 1624899966719, "user_tz": -60, "elapsed": 47715, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="c9b23cb4-cd57-47cb-c5af-b55dbb65c72b"
# %cd ..
from google.colab import drive
drive.mount('/content/gdrive')

# + id="yreDPcJdoo29" executionInfo={"status": "ok", "timestamp": 1624900031023, "user_tz": -60, "elapsed": 541, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}}
# !ln -s /content/gdrive/Shareddrives/FishOASIS_ML-Detector/ /drive

# + colab={"base_uri": "https://localhost:8080/"} id="JafIRIU0Z6UQ" executionInfo={"status": "ok", "timestamp": 1624899967513, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="8c11c31c-8b20-471f-cadb-d5bd4f911e66"
# # cd back into the darknet folder to run detections
# %cd ..
# %cd /content/darknet # + [markdown] id="i5Lv7beNwopq" # # TRAINING: # + [markdown] id="h4F90N1lhXR2" # # # In order to create a custom YOLOv4-TINY detector we will need the following: # # * Labeled Dataset # * .cfg file # * obj.data and obj.names files # * train.txt file, test.txt # # + [markdown] id="DWaLc6AZozBl" # # Step 1: Gathering and Labeling a Custom Dataset # In order to create an object detector you need a labeled dataset, this may be the most important area to focus on in order to obtain high accuracy models # # # + [markdown] id="Dye1-pgt4WS0" # # Step 2: Moving Your Custom Dataset Into Your Cloud VM # I recommend renaming the folder with your images and text files on your local machine to be called '**obj**' and then creating a .zip folder of the 'obj' folder. # + id="VfCJDYbQbknQ" executionInfo={"status": "ok", "timestamp": 1624899989649, "user_tz": -60, "elapsed": 435, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # !mkdir data/test # !mkdir data/train # + id="V9hUFUaPexEi" executionInfo={"status": "ok", "timestamp": 1624903062137, "user_tz": -60, "elapsed": 3021160, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # # copy the .zip files into the root directory of cloud VM # !cp -r /drive/CLARE/Data/one_train_d data/train # !cp -r /drive/CLARE/Data/one_valid_d data/test # + [markdown] id="bVst_Nyhkq0B" # # Step 3: Configuring Files for Training # This step involves properly configuring your custom .cfg file, obj.data, obj.names and train.txt file. 
# # + [markdown] id="9n3Y9Uozgua9" # ``` # set batch=24, this means we will be using 24 images for every training step # set subdivisions=8, the batch will be divided by 8 to decrease GPU VRAM requirement # width= 608 (multiples of 32) # height= 608 (increasing: more prescision, less speed) # maxbatches= max(# classes * 3000, # images, 6000) (How long to train for) # steps=(80% of maxbatch & 90% of maxbatch) # filters=(# of classes + 5) * 3 = 18 # for memory issues set random = 0 # ``` # + id="cLfh7LmoTd4O" executionInfo={"status": "ok", "timestamp": 1624904058172, "user_tz": -60, "elapsed": 1379, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # upload the yolov4-tiny .cfg back to cloud VM from Google Drive # !cp /drive/CLARE/YOLO_main/yolov4-tiny.cfg ./cfg # + [markdown] id="c_6pLfgvToDK" # Uploading obj.names & obj.data into darknet data folder # + id="qBmbHd7M16lK" executionInfo={"status": "ok", "timestamp": 1624904060224, "user_tz": -60, "elapsed": 965, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # upload the obj.names and obj.data files to cloud VM from Google Drive # !cp /drive/CLARE/YOLO_main/obj.names ./data # !cp /drive/CLARE/YOLO_main/obj.data ./data # + [markdown] id="Qtq9K_-1T3It" # Uploading scripts to create a txt file of the train.zip & test.zip # + id="cKBeIp7V44nu" executionInfo={"status": "ok", "timestamp": 1624904061962, "user_tz": -60, "elapsed": 772, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # upload the generate_train.py script to cloud VM from Google Drive # !cp /drive/CLARE/YOLO_main/generate_namelist.py ./ # + [markdown] id="k9Ikig-H5wpw" # Now run the python script # + id="o4HBBgk3503F" executionInfo={"status": "ok", "timestamp": 1624904063759, "user_tz": -60, "elapsed": 299, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # !python generate_namelist.py train # !python 
generate_namelist.py test # + colab={"base_uri": "https://localhost:8080/"} id="CIFjMSlX6DfP" executionInfo={"status": "ok", "timestamp": 1624904065552, "user_tz": -60, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="a554e07e-8c20-4ecd-e84d-0ec90c284618" # verify train.txt and text.txt can be seen in our darknet/data folder # !ls data/ # + colab={"base_uri": "https://localhost:8080/"} id="S7g-7thuac12" executionInfo={"status": "ok", "timestamp": 1624904068382, "user_tz": -60, "elapsed": 264, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="3ea1474f-794f-428f-bb1c-0d7962bc54cc" # %cd /content/darknet # + [markdown] id="DKDmFrUH7JHy" # # Step 4: Download pre-trained weights for the convolutional layers. # Using pretrained weights allows a more accurate detection with less training time # + id="ltk94yaF6DKq" executionInfo={"status": "ok", "timestamp": 1624904072538, "user_tz": -60, "elapsed": 2287, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} # upload pretrained convolutional layer weights # # !wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights # !cp /drive/CLARE/YOLO_main/yolov4-tiny.conv.29 ./ # + id="Pw-ctLAt5yLG" # + [markdown] id="FaQeCZ0sUoJ7" # #Hint: # + [markdown] id="VHw00Cro8ONr" # To avoid colab logging you out hit (CTRL + SHIFT + i) to open consol # # Paste the following code into your console window and hit enter # ``` # function ClickConnect(){ # console.log("Working"); # document.querySelector("colab-toolbar-button#connect").click() # } # setInterval(ClickConnect,60000) # ``` # + [markdown] id="r047MQB-7Irb" # # Step 5: Train the model # # # # + colab={"base_uri": "https://localhost:8080/"} id="XkRPr5qPZKfz" executionInfo={"status": "ok", "timestamp": 1624904076236, "user_tz": -60, "elapsed": 546, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": 
"12997874546661597620"}} outputId="b55b41eb-f5b4-4553-ad04-ee3a7ebc9fb5" # # cd back into the darknet folder to run detections # %cd .. # %cd /content/darknet # + colab={"base_uri": "https://localhost:8080/"} id="8YtF9yq3h9nB" executionInfo={"status": "ok", "timestamp": 1624944757472, "user_tz": -60, "elapsed": 36736850, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}} outputId="27a5a982-da67-4cf2-da84-30f9f53ddf94" # train your yolov4-tiny detector # # %%capture training_output # !./darknet detector train data/obj.data cfg/yolov4-tiny.cfg yolov4-tiny.conv.29 -dont_show -map # + colab={"base_uri": "https://localhost:8080/"} id="UyNVRamGDfMG" executionInfo={"elapsed": 1084, "status": "ok", "timestamp": 1618212976674, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="b2cc5902-1c77-434f-fc3a-4f8565dd9636" # %cd /content/darknet # + id="iBDGEb58AcMG" # start training again from previous weights # !./darknet detector train data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_last.weights -dont_show -map # + [markdown] id="xCYF19wDArJz" # Start training again from previous weights # # + [markdown] id="HUEicw_d0hZW" # # Step 6: Checking the Mean Average Precision (mAP) of Your Model # + colab={"base_uri": "https://localhost:8080/"} id="GdG5058aH8m0" executionInfo={"elapsed": 171452, "status": "ok", "timestamp": 1623063748475, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="b8e963f4-c75c-4ce4-aad6-07039387ecc2" # !./darknet detector map data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_1000.weights # + colab={"base_uri": "https://localhost:8080/"} id="J0OLqLSM0LlJ" executionInfo={"elapsed": 74034, "status": "ok", "timestamp": 1623075290768, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} 
outputId="5fe57f01-23d4-4bd4-e192-87088f8db2f3" # !./darknet detector map data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_2000.weights # + colab={"base_uri": "https://localhost:8080/"} id="1-aICAZG8HRD" executionInfo={"elapsed": 660, "status": "ok", "timestamp": 1623077372132, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="420b8c2e-a291-41b6-d4b2-688fc6741b8e" # !./darknet detector map data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_3000.weights # + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="XY93UJ8XBRMt" outputId="33e1903e-5307-48b6-dff0-e36782f4bc8e" # !./darknet detector map data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_4000.weights # + id="PBb6FHdVBUoX" # !./darknet detector map data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_5000.weights # + colab={"base_uri": "https://localhost:8080/"} id="7S0Ow7nN0jkP" executionInfo={"elapsed": 1500964, "status": "ok", "timestamp": 1623054116056, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="e0f801c8-7963-4d42-d89d-a7932e74d65c" # !./darknet detector test data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_final.weights -dont_show < data/train.txt > result.txt # + colab={"base_uri": "https://localhost:8080/"} id="4LBvW-c7eonq" executionInfo={"elapsed": 497758, "status": "ok", "timestamp": 1623054617762, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="09679d15-289a-4c77-a4ae-e5d4a2e15efb" # !./darknet detector test data/obj.data cfg/yolov4-tiny.cfg /mydrive/INTERNS/CLARE/YOLO_main/backup/yolov4-tiny_final.weights -dont_show < data/test.txt > result_test.txt # + [markdown] id="N29uHgipYsY2" # # Step 7: Run Your Custom 
Object Detector!!! # You have done it! You now have a custom object detector to make your very own detections. Time to test it out and have some fun! # + id="X84FMzlWOjLc" # + id="JWQlu6bTwXNO" # run your custom detector on image # !./darknet detector test data/obj.data cfg/yolov4-tiny.cfg /mydrive/YOLO/backup/yolov4-tiny_2700.weights data/test/180716_190328.JPG -Threshold imShow('predictions.jpg') # + colab={"base_uri": "https://localhost:8080/"} id="I3Ny2jyVz9MI" executionInfo={"elapsed": 1965, "status": "ok", "timestamp": 1618223024277, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12997874546661597620"}, "user_tz": -60} outputId="11fe8159-641e-4277-cf7e-b09a775c44b1" # !ls # + [markdown] id="OaQ3uCJGJ5RY" # # Cheers!
Notebooks/YoloV4-FishOASIS-Train-D.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + deletable=true editable=true
from __future__ import print_function

from ipyleaflet import (
    Map, Marker, TileLayer, ImageOverlay, Polyline, Polygon,
    Rectangle, Circle, CircleMarker, GeoJSON, DrawControl
)
from traitlets import link

# + deletable=true editable=true
center = [34.6252978589571, -77.34580993652344]
zoom = 10

# + deletable=true editable=true
m = Map(center=center, zoom=zoom)
m

# + deletable=true editable=true
m.zoom

# + [markdown] deletable=true editable=true
# Now create the `DrawControl` and add it to the `Map` using `add_control`. We also
# register a handler for draw events. This will fire when a drawn path is created,
# edited or deleted (these are the actions). The `geo_json` argument is the
# serialized geometry of the drawn path, along with its embedded style.

# + deletable=true editable=true
dc = DrawControl()


def handle_draw(control, action, geo_json):
    # Echo every draw event: the action name and the drawn geometry.
    print(action)
    print(geo_json)


dc.on_draw(handle_draw)
m.add_control(dc)

# + [markdown] deletable=true editable=true
# In addition, the `DrawControl` also has `last_action` and `last_draw` attributes
# that are created dynamically anytime a new drawn path arrives.

# + deletable=true editable=true
dc.last_action

# + deletable=true editable=true
dc.last_draw

# + [markdown] deletable=true editable=true
# Let's draw a second map and try to import this GeoJSON data into it.

# + deletable=true editable=true
m2 = Map(center=center, zoom=zoom, layout=dict(width='600px', height='400px'))
m2

# + [markdown] deletable=true editable=true
# We can use `link` to synchronize traitlets of the two maps:

# + deletable=true editable=true
map_center_link = link((m, 'center'), (m2, 'center'))
map_zoom_link = link((m, 'zoom'), (m2, 'zoom'))

# + deletable=true editable=true
new_poly = GeoJSON(data=dc.last_draw)

# + deletable=true editable=true
m2.add_layer(new_poly)

# + [markdown] deletable=true editable=true
# Note that the style is preserved! If you wanted to change the style, you could
# edit the `properties.style` dictionary of the GeoJSON data. Or, you could even
# style the original path in the `DrawControl` by setting the `polygon` dictionary
# of that object. See the code for details.

# + [markdown] deletable=true editable=true
# Now let's add a `DrawControl` to this second map. For fun we will disable lines
# and enable circles as well and change the style a bit.

# + deletable=true editable=true
dc2 = DrawControl(polygon={'shapeOptions': {'color': '#0000FF'}},
                  polyline={},
                  circle={'shapeOptions': {'color': '#0000FF'}})
m2.add_control(dc2)
examples/DrawControl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# +
# code for loading the format for the notebook
import os

# remember where we started so we can change back after loading the style
notebook_path = os.getcwd()
os.chdir('../notebook_format')
from formats import load_style
load_style()

# +
os.chdir(notebook_path)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# %matplotlib inline
# %load_ext watermark
# %load_ext autoreload
# %autoreload 2

from scipy.stats import beta
from collections import namedtuple
# %watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,scipy
# -

# # Multi-Armed Bandits
#
# Imagine this scenario: You’re in a casino. There are many different slot machines
# (known as "one-armed bandits", as they’re known for robbing you), each with a lever
# (an arm, if you will). You think that some slot machines payout more frequently than
# others do, and your goal is to walk out of the casino with the most money.
#
# The question is, how do you learn which slot machine is the best and get the most
# money in the shortest amount of time? You could try all the slot machines out to get
# a sense of the expected return from playing each machine. But remember, each time you
# play a poor performing machine, you lower your take that you walk out of the casino
# with that night. In order to maximize how much money you walk out of the casino with,
# you will have to be efficient with how you collect your data.
#
# Rewriting the scenario above into a business language. Each time a shopper comes
# to a webpage, we show them one of the $K$ variations of the webpage. They either
# click on it or do not, and we log this information about the (binary) reward for
# each $K$ variations.
# Next, we proceed to the next shopper or have to choose one of $K$ webpage variations again.
#
#
# ## Differences Between A/B Testing and Bandit Testing
#
# In both scenarios above, we would normally determine our "winner" (the slot machine that pays the most, or the best webpage variation that gets the most clicks) using the well-known **A/B testing** approach. The **A/B testing** approach consists of a period of pure exploration, where you’re randomly assigning equal numbers of users to one of the $K$ variations and run the test until it's valid. After that, it jumps into pure exploitation, where you send 100% of your users to the more successful version of your site.
#
# Two possible problems with the classical **A/B testing** approach are that:
#
# - It jumps discretely from exploration to exploitation, when you might be able to transition more smoothly.
# - During the exploratory phase (the test), it wastes resources exploring inferior options in order to gather as much data as possible.
#
# Given the exploration - exploitation dilemma stated above, the **bandit testing** approach tries to account for this. The following graph depicts the difference between the two types of testing method:
#
# <img src=compare_testing.png width=700 height=700>
#
# If we have three variations that we wish to test, with the **A/B testing** approach we try out each of the three variations with equal proportions until we run our test at week 5, and then select the variation with the highest value.
#
# As for **bandit testing**, it attempts to use what it knows about each variation from the very beginning, and it continuously updates the probabilities that it will select each variation throughout the optimization process. In the above chart we can see that with each new week, the **bandit testing** reduces how often it selects the lower performing options and increases how often it selects the highest performing option.
#
# > You need to explore in order to figure out what works and what doesn’t. On the other hand, if you exploit you take advantage of what you have learned. The **bandit testing** approach highlights the fact that collecting data also has its cost.
#
# To be specific, **bandit testing** algorithms will try to minimize what’s known as regret, which is the difference between your actual payoff and the payoff you would have collected had you played the optimal (best) options at every opportunity. There are tons of different bandit methods; in the next section we'll look at some of the more common ones.

# ## Bandit Algorithms
#
# Before introducing the algorithms and trying them out through simulations, we'll denote some notations and terminologies to formally define the problem:
#
# - **arms** are simply the variations that we're testing (webpages that we're testing) and there will be $K$ of them in total.
# - In a simulation of t turns (how many samples in a simulation), we'll maintain **empirical means** of the reward for each arm (e.g. after trying out arm A for 10 turns, it got 3 clicks, the empirical means is simply 0.3) that are updated at every turn t.
# - $u_i(t)$ is the empirical mean of arm $i$ after $t$ turns.
# - $p_i(t)$ is the probability of picking arm $i$ at turn $t$.
#
# Let's look at our simulated data before diving into each algorithm (hopefully the docstrings are self-explanatory).
# +
def generate_bernoulli_bandit_data(n_simulations, K):
    """
    Generate simulated Bernoulli bandit data (success / trial outcomes).

    Parameters
    ----------
    n_simulations : int
        the total number of turns in a simulation

    K : int
        the total number of arms

    Returns
    -------
    ctr : float 1d-array, shape(K, )
        the randomly generated empirical click through rate for each arm

    rewards : boolean 2d-array, shape(n_simulations, K)
        given the empirical ctr, simulate in each turn of the simulation,
        whether the arm gets pulled will obtain the reward or not
        (whether the webpage gets clicked)
    """
    ctr = np.random.rand(K)

    # broadcasting ctr against the (n_simulations, K) uniform draws is
    # equivalent to the former np.tile(ctr, (n_simulations, 1)), without
    # materializing the tiled array
    rewards = np.random.rand(n_simulations, K) < ctr
    return ctr, rewards


K = 2
n_simulations = 5
ctr, rewards = generate_bernoulli_bandit_data(n_simulations, K)
print(ctr)
print(rewards)
# -

# ### Algorithm 1 - Epsilon Greedy
#
# At each round $t = 1, 2, ...$ the **Epsilon Greedy** algorithm will:
#
# - Choose a random arm with the probability of $\epsilon$.
# - Choose the arm with the current best empirical mean with probability of $1-\epsilon$.
#
# In mathematical notations:
#
# $$
# p_i(t+1)=
# \begin{cases}
# 1 - \epsilon + \epsilon \big/ K & \quad \text{if i = } argmax_{j = 1, ..., K} \ u_j(t) \\
# \epsilon \big/ K & \quad otherwise
# \end{cases}
# $$
#
# Or more intuitively:
#
# - When a new visitor comes to the site, the algorithm flips a coin that comes up tail with the probability of $\epsilon$. When it does in fact come up tail, the algorithm is going to explore. The exploration phase is simply randomly choosing between any possible arm with equal (uniform) probability and showing it to the visitor.
# - On the other hand, the algorithm will exploit the best known solution with the probability of $1- \epsilon$. To exploit, the algorithm simply looks up the current empirical means and shows the best one to the visitor.
#
# The image below sums up the algorithm pretty well.
#
# <img src=epsilon_greedy.png width = 700 height = 700>


# +
def epsilon_greedy(counts, epsilon = 0.5, decrease_const = 1000):
    """
    Adaptive (annealed) epsilon greedy arm selection.

    Parameters
    ----------
    counts : int 2d-array, shape(K, 2), where K = the total number of arms
        success and failures for each arm
        where column 0 represents success, 1 represents failure

    epsilon : float
        the initial probability of choosing a random arm;
        1 - epsilon is the probability of choosing the current best arm

    decrease_const : int
        parameter for the adaptive (annealing) epsilon

    Returns
    -------
    (int) the chosen arm
    """
    # calculate the empirical means and the total number of simulations that were ran
    totals = counts.sum(axis = 1)
    successes = counts[:, 0]
    empirical_means = successes / totals
    total_counts = counts.sum()

    # anneal epsilon: the more turns we have observed, the less we explore
    epsilon /= (1 + total_counts / decrease_const)
    if np.random.rand() > epsilon:
        # exploit: the arm with the current best empirical mean
        return np.argmax(empirical_means)
    else:
        # explore: pick any arm uniformly at random
        return np.random.randint(0, empirical_means.shape[0])


# counts : stores the counts of success and failures for each arm
# where column 0 represents success, 1 represents failure.
# each arm's count is initialized as 1 to ensure that each arm is
# played at least once, to prevent "cold start" problem and
# 0 division in the beginning
counts = np.ones((K, 2))
print(counts)
epsilon_greedy(counts)
# -

# The `decrease_const` parameter in the function above may look unfamiliar.
#
# For the **Epsilon Greedy** algorithm, setting the $\epsilon$ can be a bit tricky. If it’s too small, exploration will be slow at the beginning, and you will be slow to react to changes. If we happen to sample, say, the second-best arm the first few times, it may take a long time to discover that another arm is actually better. If $\epsilon$ is too big, you’ll waste many trials pulling random arms without gaining much.
#
# To accommodate for this situation, we will set the $\epsilon$ value at a higher value in the beginning and anneal (gradually lower) it over time. Intuitively, this simply means that after exploring around for a while, we become more certain about each arms' empirical means. After that, it's better to exploit.
#
# In the function call above, the $\epsilon$ at turn $t$ will become:
#
# $$\epsilon(t) = \epsilon(0) \Big/ (1 + t/T)$$
#
# Where $T$ is a new parameter that represents a decreasing constant.
#
# Note that there are different ways of annealing a parameter, but the spirit is the same.

# +
# show adaptive learning rate
epsilon = 0.5
decrease_const = 1000

# the epsilon value after 10 turns
total_counts = 10
print(epsilon / (1 + total_counts / decrease_const))

# after 10000 turns
total_counts = 10000
print(epsilon / (1 + total_counts / decrease_const))
# -

# ### Algorithm 2 - Boltzmann Exploration (Softmax)
#
# The **Softmax** algorithm picks each arm with a probability that is proportional to its average reward.
#
# $$ p_i(t+1)= \frac{ e^{u_i(t) / \tau} }{ \sum_{j=1}^K e^{u_j(t) / \tau} }$$
#
# Where $\tau$ is a temperature parameter, controlling the randomness of the choice. When $\tau$ = 0, the algorithm acts like pure greedy. As $\tau$ grows to infinity, the algorithm will pick arms uniformly at random.


# +
def softmax(counts):
    """
    Adaptive (annealed) Boltzmann exploration (softmax) arm selection.

    Parameters
    ----------
    counts : int 2d-array, shape(K, 2), where K = the total number of arms
        success and failures for each arm
        where column 0 represents success, 1 represents failure

    Returns
    -------
    (int) the chosen arm
    """
    # calculate the empirical means and the total number of simulations that were ran
    totals = counts.sum(axis = 1)
    successes = counts[:, 0]
    empirical_means = successes / totals
    total_counts = counts.sum()

    # annealing (adaptive learning rate): temperature shrinks as data accumulates
    tau = 1 / np.log(total_counts + 0.000001)
    probs_n = np.exp(empirical_means / tau)
    probs_d = probs_n.sum()
    probs = probs_n / probs_d

    # inverse-transform sampling over the arm probabilities
    cum_prob = 0.
    z = np.random.rand()
    for idx, prob in enumerate(probs):
        cum_prob += prob
        if cum_prob > z:
            return idx

    # ROBUSTNESS FIX: floating point rounding can leave cum_prob a hair
    # below z after the last arm, which previously fell through and
    # returned None; fall back to the last arm instead
    return len(probs) - 1


counts = np.ones((K, 2))
softmax(counts)
# -

# ### Algorithm 3 - Upper Confidence Bounds (UCB)
#
# In the world of statistics, whenever you estimate some unknown parameter (such as the mean of a distribution) using random samples, there is a way to quantify the uncertainty inherent in your estimate. For example, the true mean of a fair six-sided die is 3.5. But if you only roll it once and get a 2, your best estimate of the mean is just 2. Obviously that estimate is not very good, and we can quantify the confidence we have for our estimate. There are confidence bounds which can be written, for example, as: "The mean of this die is 2, with a 95-th percentile lower bound of 1.4 and a 95-th percentile upper bound of 5.2."
#
# The upper confidence bound (UCB) family of algorithms, as its name suggests, simply selects the arm with the largest upper confidence bound at each turn. The intuition is this: the more times you roll the die, the tighter the confidence bounds, on the other hand, if you roll the die an infinite number of times then the width of the confidence bound is zero. In short, as the number of rolls increases, the uncertainty decreases, and so does the width of the confidence bound.
#
# Thus, unlike the **Epsilon Greedy** and **Softmax** algorithm that only keeps track of the empirical means, the **UCB** algorithm also maintains the number of times that each arm has been played, denoted by $n_i(t)$. Initially, each arm is played once. Afterwards, at round t, the algorithm greedily picks the arm $j(t)$ as follows:
#
# $$j(t) = argmax_{i = 1, ..., K} \left( u_i + \sqrt{\frac{2lnt}{n_i}} \right)$$
#
# We can see that the **UCB** algorithm will try to learn about arms that we don't know enough about. The main advantages of these types of algorithms are:
#
# - Take uncertainty of sample mean estimate into account in a smart way.
# - No parameters (e.g. epsilon, annealing) to validate.


# +
def ucb(counts):
    """
    Upper Confidence Bound (UCB1) arm selection.

    (The original docstring said "adaptive softmax" - a copy/paste slip.)

    Parameters
    ----------
    counts : int 2d-array, shape(K, 2), where K = the total number of arms
        success and failures for each arm
        where column 0 represents success, 1 represents failure

    Returns
    -------
    (int) the chosen arm
    """
    # calculate the empirical means and the total number of simulations that were ran
    totals = counts.sum(axis = 1)
    successes = counts[:, 0]
    empirical_means = successes / totals
    total_counts = counts.sum()

    # exploration bonus shrinks as an arm accumulates plays
    bonus = np.sqrt(2 * np.log(total_counts) / totals)
    return np.argmax(empirical_means + bonus)


counts = np.ones((K, 2))
# BUG FIX: this demo previously called softmax(counts) (copy/paste error);
# it should exercise the ucb function defined right above
ucb(counts)
# -

# ## Experimenting With Bandit Algorithms
#
# In this section, we'll use our simulated data to experiment with our algorithms. To do this we'll also need a metric to calculate how well we are doing. Recall the absolute *best* we can do is to always pick the webpage (arm) with the largest click through rate (ctr). Denote this best arm's probability of $w_{opt}$. Our score should be relative to how well we would have done had we chosen the best arm from the beginning. This motivates the *total regret* of a strategy, defined as:
#
# $$
# \begin{align}
# R_T & = \sum_{t=1}^{T} \left( w_{opt} - w_{I(t)} \right) \nonumber \\
# & = Tw_{opt} - \sum_{t=1}^{T} \; w_{I(t)} \nonumber
# \end{align}
# $$
#
# Where $T$ is the total number of samples in the experiment, $w_{I(t)}$ is the probability of obtaining the reward (getting clicked) of the chosen arm in the $t_{th}$ turn. A total regret of 0 means the strategy is attaining the best possible score. This is likely not possible, as initially our algorithm will often make the wrong choice. Ideally, a strategy's total regret should flatten as it learns the best bandit. (Mathematically, we achieve $w_{I(t)} = w_{opt}$ often)
#
# We'll run the experiment and plot the cumulative regret of the three algorithms below:


def run_bandit_algo(rewards, ctr, algo, **kwargs):
    """
    Run different types of bandit algorithms

    Parameters
    ----------
    rewards, ctr :
        Return value of the `generate_bernoulli_bandit_data` function

    algo : bandit function
        [epsilon_greedy, softmax, ucb]

    **kwargs :
        additional parameters to pass in to the algo

    Returns
    -------
    cum_regret : 1d-array, shape(n_simulations, )
        The total regret accumulated over the experiment, where the regret
        is measured by the maximum ctr - the chosen arm's ctr

    opt_arm_percentage : float
        The percentage of plays in which the optimal arm is pulled
    """
    n_simulations, K = rewards.shape

    # counts : success and failures for each arm where column 0 represents
    # success, 1 represents failure. Each arm's count is initialized as 1
    # to ensure that each arm is played at least once, to prevent "cold start"
    # problem and 0 division in the beginning
    counts = np.ones((K, 2), dtype = int)
    regret = np.zeros(n_simulations)

    max_ctr_count = 0
    max_ctr = np.max(ctr)
    max_ctr_idx = np.argmax(ctr)

    for i in range(n_simulations):
        # 1. run the algorithm to obtain the arm that got pulled
        # 2. update the success / failure according to the generated rewards
        # 3. update the expected regret for each turn of the simulation
        # 4. if the arm that got pulled is the one with the opt ctr, increment this count
        arm = algo(counts, **kwargs)
        if rewards[i, arm] == 1:
            counts[arm, 0] += 1
        else:
            counts[arm, 1] += 1

        regret[i] = max_ctr - ctr[arm]
        if arm == max_ctr_idx:
            max_ctr_count += 1

    cum_regret = np.cumsum(regret)
    opt_arm_percentage = max_ctr_count / n_simulations
    return cum_regret, opt_arm_percentage


# +
K = 5
n_simulations = 10000
algorithms = [epsilon_greedy, softmax, ucb]


def run_experiment(K, n_simulations, algorithms):
    """
    Run the bandit algorithm's simulation by the specified number of samples
    for simulation, the number of arms and the different version of algorithm

    Parameters
    ----------
    n_simulations : int
        the total number of turns in a simulation

    K : int
        the total number of arms

    algorithms : list of functions
        the list of bandit algorithms to simulate

    Returns
    -------
    ctr : float 1d-array, shape(K, )
        the randomly generated empirical click through rate for each arm

    algo_opt_arm_percentage : float list
        the percentage of simulations that chose the best arm

    algo_cum_regret : float 2d-array, shape(n_simulations, length of the algorithm)
        each column stores the cumulative regret for one algorithm

    fig : matplotlib figure
        the cumulative regret for each bandit algorithm
    """
    algo_opt_arm_percentage = []
    algo_cum_regret = np.zeros((n_simulations, len(algorithms)))

    fig = plt.figure(figsize = (10, 7))
    ctr, rewards = generate_bernoulli_bandit_data(n_simulations, K)

    for idx, algo in enumerate(algorithms):
        cum_regret, opt_arm_percentage = run_bandit_algo(rewards, ctr, algo = algo)
        algo_cum_regret[:, idx] = cum_regret
        algo_opt_arm_percentage.append(opt_arm_percentage)

        # log-scale y axis so early (large) and late (flattening) regret both show
        plt.semilogy(cum_regret, label = algo.__name__)

    plt.title('Simulated Bandit Performance for K = {}'.format(K))
    plt.ylabel('Cumulative Expected Regret')
    plt.xlabel('Round Index')
    plt.legend(loc = 'lower right')
    return ctr, algo_opt_arm_percentage, algo_cum_regret, fig


# +
# change default figure size and font size
plt.rcParams['figure.figsize'] = 8, 6 plt.rcParams['font.size'] = 12 ctr, algo_opt_arm_percentage, algo_cum_regret, fig = run_experiment(K, n_simulations, algorithms) fig.show() print(ctr) print(algo_opt_arm_percentage) # - # **Section Conclusion:** The plot of the cumulative expected regret of the experiment above showed that all three different algorithms have converged (the cumulative expected regret gradually decreases to a steady level). And the **UCB** seems to be doing better than the other two algorithms in this limited horizon. # # Bayesian Bandits # # All of that was great, so what's next? Well, it turns out that none of the algorithms we used in the last section are really suitable in real world applications. Why? # # Recall that in the experiment above, we tested different kinds of bandit algorithms with the assumption that there is no delay between pulling an arm and observing the result. Or, more precisely, if there is a delay, it should happen before the next timing to pull an arm. This means the following timeline is impossible: 12:00 Visitor A sees Variation 1. 12:01 visitor B sees Variation 2. 12:02 Visitor A converts. # # Also if you have limited computational resources, which means that you can only update your observed data in batch every 2 hours. For these kinds of delayed batch case, the algorithms described in the last section will pull the same arm every time for those 2 hours because it is deterministic in the absence of immediate updates. To sum up, the algorithms we just described needs the distributions to be updated every single round to work properly, which may not be applicable in a lot of practical cases .... # # Having that caveat in mind, we'll introduce a Bayesian method that is more "immune" to this delayed feedback problem, namely **Thompson Sampling**. # Recall that the the problem we want to solve is the following. You have come up with $K$ different variations of the webpage (e.g. 
different layout) that now you wish to find the ones with the best click through rate (CTR), e.g. clicking to sign-up for the newsletter. Let's represent each CTR by $\theta_i$ - i.e., $\theta_i$ is the true probability that an individual user will click when they were shown with the $i_{th}$ webpage. It is important to note that we don't actually know what $\theta_i$ is - if we did, we could simply choose ii for which $\theta_i$ was largest and move on. We're simply pretending that we know in order to simulate the performance of the algorithm. # # Using the Bayesian approach we will construct a prior probability distribution which represents our original belief about what the actual value of $\theta_i$, our ctr for the $i_{th}$ webpage is. The prior we'll use is the Beta distribution. Here's a quick recap of the distribution: # # ## Beta Distribution # # The Beta distribution is very useful in Bayesian statistics. A random variable $X$ has a Beta distribution, with parameters $(\alpha, \beta)$, if its density function is: # # $$f_X(x | \; \alpha, \beta ) = \frac{ x^{(\alpha - 1)}(1-x)^{ (\beta - 1) } }{B(\alpha, \beta) }$$ # # where $B$ is the [Beta function](http://en.wikipedia.org/wiki/Beta_function) (hence the name). The random variable $X$ is only allowed in [0,1], making the Beta distribution a popular distribution for decimal values, probabilities and proportions. The values of $\alpha$ and $\beta$, both positive values, provide great flexibility in the shape of the distribution. 
Below we plot some Beta distributions with different $\alpha$ and $\beta$ values: # + plt.figure( figsize = (12, 5) ) x = np.linspace(0.01, .99, 100) params = [ (2, 5), (1, 1), (5, 5), (20, 4) ] for a, b in params: y = beta.pdf(x, a, b) lines = plt.plot( x, y, label = "(%.1f,%.1f)" % (a, b), lw = 2 ) plt.fill_between( x, 0, y, alpha = 0.2, color = lines[0].get_color() ) plt.autoscale(tight = True) plt.legend(loc = 'upper left', title = "(a,b)-parameters") plt.show() # - # There are two important things to note about the Beta distribution: # # The first is the presence of the flat distribution above, specified by parameters $(1,1)$. This is the Uniform distribution. Hence the Beta distribution is a generalization of the Uniform distribution. # # The second is that there is an interesting connection between the Beta distribution and the Binomial distribution. Suppose we are interested in some unknown proportion or probability $p$. We assign a $\text{Beta}(\alpha, \beta)$ prior to $p$. We observe some data generated by a Binomial process, say $X \sim \text{Binomial}(N, p)$, with $p$ still unknown. Then our posterior *is again a Beta distribution*, i.e. $p | X \sim \text{Beta}( \alpha + X, \beta + N -X )$. Succinctly, one can relate the two by "a Beta prior with Binomial observations creates a Beta posterior". # # In light of the above two paragraphs, if we start with a $\text{Beta}(1,1)$ prior on $p$ (which is a Uniform), observe data $X \sim \text{Binomial}(N, p)$, then our posterior is $\text{Beta}(1 + X, 1 + N - X)$. # ## Thompson Sampling # # So after assuming the priors on the probability of ctr for each webpage. To be explicit on the phrase "assuming the priors", we will assume that we're completely ignorant of these probabilities. So a very natural prior is the flat prior over 0 to 1, $\text{Beta}(\alpha=1,\beta=1)$. The algorithm then proceeds as follows: # # For each turn: # # 1. 
# Sample a random variable $X_i$ from the prior of arm $i$, for all $i$ ($K$ in total).
# 2. Select the arm with largest sample, i.e. select $i = \text{argmax}\; X_i$.
# 3. Observe the result of pulled arm $i$, and update your prior with that arm $i$.
# 4. Return to 1.
#
# Like all the algorithms we've introduced before, **Thompson Sampling** suggests that we should not discard losers, but we should pick them at a decreasing rate as we gather confidence that there exist *better* webpages (arms). This follows because there is always a non-zero chance that a webpage with a lower ctr will get chosen, but the probability of this event decreases as we play more rounds.


class BayesianBandit:
    """
    Thompson Sampling

    Parameters
    ----------
    K : int
        total number of arms

    prior_params : list of float length 2 tuple, default None, (optional)
        each element of the list is a tuple, where each tuple contains
        the alpha and beta parameter that represents the prior beta
        distribution for each arm. If not supplied it will assume that
        all arms' priors start with a uniform distribution

    Attributes
    ----------
    trials, success : int 1d-array, shape( K, )
        stores the trials and successes for each arm, e.g. trial = [ 1, 1 ]
        and success = [ 0, 1 ] means that both arms have been pulled once
        and arm 1 has generated the reward (clicked)
    """

    def __init__(self, K, prior_params = None):
        if prior_params:
            priors = namedtuple("priors", ["alpha", "beta"])
            prior = [priors(*p) for p in prior_params]
            self.alphas = np.array([p.alpha for p in prior])
            self.betas = np.array([p.beta for p in prior])
        else:
            # Beta(1, 1) is the uniform distribution, i.e. a flat prior
            self.alphas = np.ones(K)
            self.betas = np.ones(K)

        self.trials = np.zeros(K, dtype = int)
        self.success = np.zeros(K, dtype = int)

    def get_recommendation(self):
        """
        for all arms, construct their beta distribution and draw a random
        sample from it, then return the arm with the maximum random sample
        """
        # posterior for each arm is Beta(alpha + successes, beta + failures)
        theta = np.random.beta(self.alphas + self.success,
                               self.betas + self.trials - self.success)
        return np.argmax(theta)

    def update_result(self, arm, converted):
        """
        update the trials and success arrays; the success array is
        only incremented if the pull generated a reward
        """
        self.trials[arm] += 1
        if converted:
            self.success[arm] += 1

        return self


def experiment(T, K = None, ctr = None, prior_params = None):
    """
    run the experiment for Thompson Sampling; pass in ctr, the fixed ctr
    for each arm, or K, the total number of arms to run the experiment —
    if only K is supplied then the ctr will be randomly generated

    Parameters
    ----------
    T : int
        number of simulations in an experiment

    K : int, default = None, (optional)
        total number of arms

    ctr : float sequence, len = K, default = None, (optional)
        the empirical click through rate for each arm

    prior_params : list of float length 2 tuple, default None, (optional)
        each element of the list is a tuple, where each tuple contains
        the alpha and beta parameter that represents the prior beta
        distribution for each arm. If not supplied it will assume that
        all arms' priors start with a uniform distribution

    Returns
    -------
    ctr : float sequence, len = K
        the supplied or the randomly generated ctr

    trials, success : 2d-array, shape( T, K )
        trials and successes recorded for each turn of the experiment

    alphas, betas : float 1d-array, shape( K, )
        the alpha and beta parameters for each arm
    """
    # NOTE(review): truth-testing assumes ctr is a plain Python sequence
    # (tuple/list); a numpy array here would raise on `if ctr:` — confirm callers
    if ctr:
        K = len(ctr)
    else:
        ctr = np.random.rand(K)

    trials = np.zeros((T, K), dtype = int)
    success = np.zeros((T, K), dtype = int)
    bb = BayesianBandit(K, prior_params)
    for t in range(T):
        arm = bb.get_recommendation()
        converted = np.random.rand() < ctr[arm]
        bb.update_result(arm, converted)
        # snapshot the cumulative counts after every turn
        trials[t] = bb.trials
        success[t] = bb.success

    return ctr, trials, success, bb.alphas, bb.betas


def experiment_plot(ctr, trials, success):
    """
    Pass in the ctr, trials and success returned by the `experiment`
    function and plot the Cumulative Number of Turns For Each Arm and
    the CTR's Convergence Plot side by side
    """
    T, K = trials.shape
    n = np.arange(T) + 1
    fig = plt.figure(figsize = (14, 7))

    plt.subplot(121)
    for i in range(K):
        plt.loglog(n, trials[:, i], label = "arm {}".format(i + 1))

    plt.legend(loc = "upper left")
    plt.xlabel("Number of turns")
    plt.ylabel("Number of turns/arm")
    plt.title("Cumulative Number of Turns For Each Arm")

    plt.subplot(122)
    for i in range(K):
        plt.semilogx(n, np.zeros(T) + ctr[i], label = "arm {}'s CTR".format(i + 1))

    # sum the cumulative successes across *all* arms; the original summed only
    # the first two columns, which was only correct for K = 2
    plt.semilogx(n, success.sum(axis = 1) / n, label = "CTR at turn t")

    plt.axis([0, T, 0, 1])
    plt.legend(loc = "upper left")
    plt.xlabel("Number of turns")
    plt.ylabel("CTR")
    plt.title("CTR's Convergence Plot")
    return fig


# +
# number of simulation in an experiment
T = 10000

# the empirical click through rate for each arm
ctr = 0.25, 0.35

ctr, trials, success, alphas, betas = experiment(T = T, ctr = ctr)
fig = experiment_plot(ctr, trials, success)
fig.show()
# -

# In our simulation, we gave the Bayesian bandit two webpages (arms) - one had a CTR of 0.25, the other
had a CTR of 0.35. To start with, both webpages were displayed to the user with roughly equal probability. Over time, evidence accumulated that arm 2 was considerably better than arm 1. At this point the algorithm switched to displaying primarily webpage 1, and the overall CTR of the experiment converged to 0.35 (the optimal CTR). # # We can also visualize our Beta distribution for each arms in different turns. def plot_beta_dist(ctr, trials, success, alphas, betas, turns): """ Pass in the ctr, trials and success, alphas, betas returned by the `experiment` function and the number of turns and plot the beta distribution for all the arms in that turn """ subplot_num = len(turns) / 2 x = np.linspace(0.001, .999, 200) fig = plt.figure( figsize = (14, 7) ) for idx, turn in enumerate(turns): plt.subplot(subplot_num, 2, idx + 1) for i in range( len(ctr) ): y = beta( alphas[i] + success[turn, i], betas[i] + trials[turn, i] - success[ turn, i ] ).pdf(x) line = plt.plot( x, y, lw = 2, label = "arm {}".format( i + 1 ) ) color = line[0].get_color() plt.fill_between(x, 0, y, alpha = 0.2, color = color) plt.axvline(x = ctr[i], color = color, linestyle = "--", lw = 2) plt.title("Posteriors After {} turns".format(turn) ) plt.legend(loc = "upper right") return fig turns = [1, 100, 1000, 9999] posterior_fig = plot_beta_dist(ctr, trials, success, alphas, betas, turns) posterior_fig.show() # As you can see, we started out with some prior Beta distributions that represents the our initial beliefs about possible ctr values for each arm. We then update these beliefs based on evidence by showing different webpages to other randomly chosen users and observing the ctr. After doing this for many number of turns, we incrementally narrow the width of each arm's Beta distribution. Meaning that as we gather more data, we become more confident about each arm's actual ctr. 
#
# Note that we don't really care how accurate we become about the inference of the hidden probabilities — for this problem we are more interested in becoming more confident in choosing the best arm. This is basically why at the end of the experiment, arm 1's distribution is wider. The algorithm is ignorant about what that hidden probability might be, but we are reasonably confident that it is not the best, so the algorithm chooses to ignore it.
#
# From the above, we can see that starting after 100 pulls, the majority of arm 2's distribution already leads the pack, hence the algorithm will almost always choose this arm. This is good, as this arm is indeed better.

# ## Notes On Bandit Testings
#
# In real-world settings, we need to account for situations such as delayed batch updates or delayed feedback. In such cases, algorithms such as **Epsilon Greedy**, **Softmax**, **UCB** need the distributions to be updated every single round to work properly. On the other hand, **Thompson Sampling** is more realistic as it relies on random samples, which will be different every time even if the distributions of each arm aren't updated for a while.
#
# So, after gaining some knowledge on **bandit testings**, the question that comes to mind is: "In general, when is it preferred over the classical **A/B testing**?"

# ### Short-term testing
#
# > "Whenever you have a small amount of time for both exploration and exploitation, use a bandit testing."
#
# **Bandit testing** is conducive to short tests for clear reasons – if you were to run a classic **A/B testing** you'd not even be able to enjoy the period of pure exploitation. Instead, **bandit testing** allows you to adjust in real time and send more traffic, more quickly, to the better variation. Here are some possible use cases:
#
# - **Headlines:** News has a short life cycle.
Why would you run **A/B testing** on a headline if by the time you learn which variation is best, the time where the answer is applicable is over? # - **Holiday Campaigns:** e.g. If you’re running tests on an ecommerce site for Black Friday, an A/B testing isn’t that practical – you might only be confident in the result at the end of the day. On the other hand, a **bandit testing** will drive more traffic to the better-performing variation – and that in turn can increase revenue. # # # ### Long-term testing # # Because **Bandit testing** automatically shift traffic to higher performing (at the time) variations, thus it is effective in long term (or ongoing) testing where you can set it and forget about it. e.g. Serving specific ads and content to user sets (targeting ads). # ## Reference # # - [Notes: Algorithms for the multi-armed bandit problem](http://www.cs.mcgill.ca/~vkules/bandits.pdf) # - [Blog: Bandits for Recommendation Systems](http://engineering.richrelevance.com/bandits-recommendation-systems/) # - [Blog: When to Run Bandit Tests Instead of A/B Tests](http://conversionxl.com/bandit-tests/) # - [Blog: Bayesian Bandits - optimizing click throughs with statistics](https://www.chrisstucchio.com/blog/2013/bayesian_bandit.html) # - [Blog: Balancing Earning with Learning: Bandits and Adaptive Optimization](http://conductrics.com/balancing-earning-with-learning-bandits-and-adaptive-optimization/) # - [Notebook: Bayesian Methods for Hackers Chapter 6](http://nbviewer.jupyter.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter6_Priorities/Ch6_Priors_PyMC2.ipynb)
bandits/bandits.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Q# # language: qsharp # name: iqsharp # --- # + [markdown] slideshow={"slide_type": "slide"} # # Message("Introduction to Q#: a quantum development language for everyone"); # ### Seminar for the [Portland Quantum Computing Meetup Group](https://www.meetup.com/Portland-Quantum-Computing-Meetup-Group) # # Dr. <NAME> | [@crazy4pi314](twitter.com/crazy4pi314) | 28 September 2020 # # --- # Talk slides/Jupyter Notebook can be found at [bit.ly/pqc-qsharp](http://bit.ly/pqc-qsharp) # # <center> # <a src="http://unitary.fund"><img src="https://img.shields.io/badge/Supported%20By-UNITARY%20FUND-brightgreen.svg?style=flat" width="400px" align="left"/> </a> # <img src="https://img.shields.io/github/license/crazy4pi314/pqc-qsharp-intro" width="170px" align="left"/> # <a src="https://mybinder.org/v2/gh/crazy4pi314/pqc-qsharp-intro/master?filepath=presentation.ipynb"><img src="https://mybinder.org/badge_logo.svg" width="242px" align="left"/> </a> # </center> # # # # # # + [markdown] slideshow={"slide_type": "skip"} # ### Abstract # As the field of Quantum Computing expands from the academic to the industry realm, we need a way that we can continue to collaborate and innovate in both regimes. # Open source quantum software development platforms like the Quantum Development Kit and Q# from Microsoft, serve as a bridge to connect research ideas to reality. # In this talk, I will give you a tour of what you can do with Q# and show you an example of how I am using it in my own research on qRAMs. # After this talk, you will have the resources you need to dive into using Q# for your own research projects! 
# # --- # # #### Installation instructions for running this notebook on your machine can be found [here](https://docs.microsoft.com/quantum/install-guide/python?view=qsharp-preview) or you can run this presentation in your browser [here](TODO). # + [markdown] slideshow={"slide_type": "slide"} # ## whoami: author, streamer, community builder... # <br> # <center> # <img src="media/kaiser-bio.png" width="55%" align="center"/> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## ... and a researcher 👩‍💻 # # <br> # <center> # <img src="media/kaiser-lab.jpg" width="35%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## 📃Agenda # # 1. Introduce you to a <span style="color:#54296D;">**research project**</span> I have been working on: a [Q# library for qRAM](https://github.com/qsharp-community/qram), # 2. give you a <span style="color:#54296D;">**tour of Q#**</span> and why we are using it for our research, # 3. show you some of what our <span style="color:#54296D;">**qRAM library**</span> looks like and how we use it, and # 4. share some <span style="color:#54296D;">**tools and resources**</span> for how you can leverage Q# for your own research and studies! # # + [markdown] slideshow={"slide_type": "slide"} # # Part 1: A quantum memory problem # + [markdown] slideshow={"slide_type": "slide"} # # This presentation runs on RAM # # - Classical RAM or _random access memory_ is cheap, fast and plentiful (colorful?) # - Implemented with transistors # - Generally layed out as arrays of _cells_ that can be **read** from, or **written** to in any order. 
# # <br> # <center> # <img src="https://media.giphy.com/media/XyUgv8u6TRrVmFPpUo/giphy.gif" width="30%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## Quantum applications _might_ need memory # # - We need ways to transfer **classical data** to a **quantum system** # - _Some_ quantum algorithms, particularly quantum machine learning, assume access to a quantum RAM to load and store data during calculations. # - Queries at address $x$ can take many forms: # - Bit value as a phase (Grover's Algorithm): $\left|x\right\rangle\mapsto(-1)^{b_x}\left|x\right\rangle$ # - Bit value as a qubit (Element distinctness): $\left|x\right\rangle\left|0\right\rangle\mapsto\left|x\right\rangle\left|b_x\right\rangle$ # - Bit value as a complex vector of amplitudes (HHL Algorithm): $(b_0...b_n)\mapsto\sum\limits_{j} b_j\left|j\right\rangle$ # # # + [markdown] slideshow={"slide_type": "slide"} # # Quantum Memories (aka qRAM) # # **Problem:** It is not clear if we will be able to do this efficiently at all, let alone in a fault-tolerant setting. # # ❗ _An algorithmic speedup **may not** translate to an actual speedup in an application if it is not efficient to use data in the first place!_ # # 😓 Physical limitations like coherence time, error rates, hardware supported gates, etc. contribute to the difficulty. # # 💡 There are many different approaches, each optimizing for a particular resource. # # + [markdown] slideshow={"slide_type": "slide"} # ## Deep Dive: qRAM approaches and tradeoffs: # # #### http://bit.ly/between-the-bitlines # # <br> # <center> # <img src="media/olivia-talk-title.png" width="48%"> # # </center> # + [markdown] slideshow={"slide_type": "slide"} # ### So what's the path forward? # # - To find out **if qRAM will ever work**, we need to have a good way to **evaluate different proposals**. # # - How can we compare tons of papers when none provide implementations? 
# # <br> # <center> # <img src="https://media.giphy.com/media/lQ6iahDJqm9oldX5gh/source.gif" width="30%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # # Part 2: A tour of Q# and why it worked for us # + [markdown] slideshow={"slide_type": "slide"} # ## So many options... # # https://qosf.org/project_list/ # # <br> # <center> # <img src="media/qsof.gif" width="60%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ### What we need: # + [markdown] slideshow={"slide_type": "fragment"} # - We want to implement _algorithms_ # - We want to think about qRAM algorithms at a high level, not always specifically at the gate level # - Need the flexibility to create gate level optimizations as well # + [markdown] slideshow={"slide_type": "fragment"} # - We want to build tools to enable research and collaboration # - Need ways to packages our work and make it easy to share, collaborate, and reproduce # - We want to work with a community where we are all included # + [markdown] slideshow={"slide_type": "slide"} # # 📊 What do you know about Q#? # + [markdown] slideshow={"slide_type": "slide"} # # Q\# : Microsoft's quantum programming language # # - New open source language that is domain-specific for quantum computing # - Used with the [_Quantum Development Kit_](https://www.microsoft.com/en-us/quantum/development-kit) which provides lots of tools for writing and running your programs. # - Designed to be integrated with a number of languages/platforms like Python and .NET # + [markdown] slideshow={"slide_type": "slide"} # <center> # <img src="media/stack.png" width="80%"> # </center> # # + [markdown] slideshow={"slide_type": "slide"} # ## What do I write in a Q# program? # # - **Functions**: `Sin`, `ln`, reversing arrays, etc. 
# - Deterministic actions, similar to mathematical definition for functions # - **Operations**: Everything else 😁 # - Working with qubits is always an operation # + [markdown] slideshow={"slide_type": "slide"} # ## Q# Hello World # + slideshow={"slide_type": "-"} function Greeting(name : String) : Unit { Message($"Hello World! Nice to meet you {name} 💖"); } # + slideshow={"slide_type": "-"} %simulate Greeting name="Sarah" # + [markdown] slideshow={"slide_type": "slide"} # ## `using` Qubits in Q# # # - Qubits are a resource that are requested from the runtime when you need them and returned when you are done. # + slideshow={"slide_type": "-"} open Microsoft.Quantum.Measurement; # + slideshow={"slide_type": "-"} operation Qrng() : Result { using(qubit = Qubit()) { // Preparing the qubit H(qubit); // Do operation H return MResetZ(qubit); // Measure and reset qubit } } # + slideshow={"slide_type": "slide"} %simulate Qrng # + [markdown] slideshow={"slide_type": "slide"} # ## Generating new operations in QSharp # + [markdown] slideshow={"slide_type": "-"} # The _functors_ `Adjoint` and `Controlled` allow you to generate new operations without changes to your code to implement those versions. # + slideshow={"slide_type": "-"} operation ApplyX(qubit : Qubit) : Unit is Adj + Ctl { X(qubit); } operation ApplyMultiControlNOT(control: Qubit[], target : Qubit) : Unit { Controlled ApplyX(control, target); } # + slideshow={"slide_type": "slide"} open Microsoft.Quantum.Diagnostics; operation UseCtlFunctor() : Unit { using((controls, target) = (Qubit[2], Qubit())){ ApplyToEach(H, controls); ApplyMultiControlNOT(controls, target); DumpMachine(); ResetAll(controls + [target]); } } # + [markdown] slideshow={"slide_type": "slide"} # ### Q# programs can be run from: # # - the command line, if built as stand-alone applications # - Python or .NET language programs (C#, F#, etc.) 
for easy data processing and visualization # - Jupyter notebooks with either Q# or Python kernels # # + slideshow={"slide_type": "slide"} %simulate UseCtlFunctor # + [markdown] slideshow={"slide_type": "slide"} # ## More than just simulation... # + slideshow={"slide_type": "-"} %lsmagic # + [markdown] slideshow={"slide_type": "slide"} # ### For example: Resource estimation! # + slideshow={"slide_type": "-"} %estimate UseCtlFunctor # + [markdown] slideshow={"slide_type": "slide"} # ## Unit testing in Q# # # - Great way to check that what we have typed matches paper results 😊 # + slideshow={"slide_type": "-"} open Microsoft.Quantum.Arrays; operation AllocateQubitRegister(numQubits : Int) : Unit { Fact(numQubits > 0, "Expected a positive number."); using (register = Qubit[numQubits]) { ApplyToEach(AssertQubit(Zero, _), register); } Message("Test passed!"); } # + slideshow={"slide_type": "slide"} %simulate AllocateQubitRegister numQubits=5 # + [markdown] slideshow={"slide_type": "slide"} # ## Unit testing in Q# cont. # # - Helpful to test if qRAM optimizations still do the same thing # - Q# uses the _Choi–Jamiłkowski isomorphism_ to make an assertion of operation equivalence to one about preparing states # <!--- We know that if you apply an operation and then it's adjoint, that should be an Identity operation # - Make a channel that applies your operation under test, and then the adjoint of your reference operation. 
# - If you start with a maximally entangled state, then apply the channel to one half of a maximally entangled state, then you can use state assertions to verify that you still have that same maximally entangled state.--> # # + slideshow={"slide_type": "-"} open Microsoft.Quantum.Diagnostics; operation ApplyCNOT(register : Qubit[]) : Unit is Adj + Ctl { CNOT(register[0], register[1]); } # + slideshow={"slide_type": "slide"} operation ApplyCNOTTheOtherWay(register : Qubit[]) : Unit is Adj + Ctl { within { ApplyToEachCA(H, register); } apply { CNOT(register[1], register[0]); } } operation CheckThatThisWorks() : Unit { AssertOperationsEqualReferenced(2, ApplyCNOT, ApplyCNOTTheOtherWay); Message("It works!"); } # + slideshow={"slide_type": "-"} %simulate CheckThatThisWorks # + [markdown] slideshow={"slide_type": "slide"} # ## Our plan: # # 🧰 **Step 1:** Use functions and operations to implement the qRAM proposals # # 📚 **Step 2:** Use libraries, functors, and other features to reduce how much code we need to write # # 🧪 **Step 3:** Use testing features to make sure our implementations are correct # # 💰 **Step 4:** Profit! 
(also publish 📃) # + [markdown] slideshow={"slide_type": "slide"} # # Part 3: The qRAM library # - # ### https://github.com/qsharp-community/qram # <br> # <center> # <img src="media/github-screencap.png" width="50%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## Basic layout: # # ``` # ├───📃 docs 📃 # ├───🔮 samples 🔮 # │ ├───BucketBrigade # │ ├───Grover # │ ├───Qrom # │ ├───ResourceEstimation # │ └───SelectSwap # ├───✨ src ✨ # └───🧪 tests 🧪 # ``` # + [markdown] slideshow={"slide_type": "slide"} # # `src`: where qRAMs are implemented # <br> # <center> # <img src="media/src-screenshot.png" width="60%"> # # </center> # + [markdown] slideshow={"slide_type": "slide"} # ### Currently implemented proposals: # **qRAM** # - Bucket Brigade (bit and phase queries) [0708.1879](https://arxiv.org/abs/0708.1879) # # **qROM** # - Simple [0807.4994](https://arxiv.org/abs/0807.4994) # - SELECTSWAP [1812.00954](https://arxiv.org/abs/1812.00954) # + [markdown] slideshow={"slide_type": "slide"} # ## Custom Types for quantum memories # + [markdown] slideshow={"slide_type": "-"} # ``` # newtype QRAM = ( # QueryPhase : ((AddressRegister, MemoryRegister, Qubit[]) => Unit is Adj + Ctl), # QueryBit : ((AddressRegister, MemoryRegister, Qubit[]) => Unit is Adj + Ctl), # Write : ((MemoryRegister, MemoryCell) => Unit), # AddressSize : Int, # DataSize : Int # ); # ``` # + [markdown] slideshow={"slide_type": "slide"} # ## Using a qROM # + slideshow={"slide_type": "-"} %package QSharpCommunity.Libraries.Qram::0.1.35 # + [markdown] slideshow={"slide_type": "slide"} # ## Libraries help us bootstrap # + slideshow={"slide_type": "-"} open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Arithmetic; open Microsoft.Quantum.Canon; open Microsoft.Quantum.Convert; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Measurement; open Qram; # + [markdown] slideshow={"slide_type": "slide"} # ## Custom types and functions allow us to process memory contents # + slideshow={"slide_type": 
"-"} function ExampleMemoryData() : MemoryBank { let numDataBits = 3; let data = [ (0, IntAsBoolArray(0, numDataBits)), (2, IntAsBoolArray(5, numDataBits)), (4, IntAsBoolArray(2, numDataBits)), (5, IntAsBoolArray(3, numDataBits)) ]; return GeneratedMemoryBank(Mapped(MemoryCell, data)); } # + [markdown] slideshow={"slide_type": "slide"} # ### Using a qROM (Read Only Memory) # + slideshow={"slide_type": "-"} operation QueryAndMeasureQrom(memory : QROM, queryAddress : Int) : Int { using ((addressRegister, targetRegister) = (Qubit[memory::AddressSize], Qubit[memory::DataSize])) { ApplyPauliFromBitString (PauliX, true, IntAsBoolArray(queryAddress, memory::AddressSize), addressRegister); memory::Read(LittleEndian(addressRegister), targetRegister); ResetAll(addressRegister); return MeasureInteger(LittleEndian(targetRegister)); } } # + slideshow={"slide_type": "slide"} operation QueryQrom(queryAddress : Int) : Int { // Generate a (Int, Bool[]) array of data. let data = ExampleMemoryData(); // Create the QRAM. let memory = QromOracle(data::DataSet); // Measure and return the data value stored at `queryAddress`. 
return QueryAndMeasureQrom(memory, queryAddress); } # + [markdown] slideshow={"slide_type": "slide"} # ### Using a qROM # + [markdown] slideshow={"slide_type": "-"} # ``` # // data: {(0, 0), (2, 5), (4, 2), (5, 3)} # ``` # + slideshow={"slide_type": "-"} %simulate QueryQrom queryAddress=2 # + slideshow={"slide_type": "-"} %estimate QueryQrom queryAddress=2 # + [markdown] slideshow={"slide_type": "slide"} # # `tests`: ✔ our work # - Can run small instances on simulators # - Can verify resource counts on larger instances # <br> # <center> # <img src="media/tests-screenshot.png" width="40%"> # # </center> # # + [markdown] slideshow={"slide_type": "slide"} # # `docs`: help others use our work💪 # # <figure style="text-align: left;"> # <caption> # </caption> # <img src="media/docs.png" width="60%"> # # </figure> # + [markdown] slideshow={"slide_type": "slide"} # # What's next for the qRAM library? # # 🔍 Detailed resource counting for subroutines of our programs # # 📓 More documentation in an interactive browser # # 📄 Research paper compiling our results # # ❓ More qRAM/qROM proposals # + [markdown] slideshow={"slide_type": "slide"} # <center> # <img src="media/milestones.png" width="50%"> # # </center> # + [markdown] slideshow={"slide_type": "slide"} # # Part 4: How can you start your own Q# project? # + [markdown] slideshow={"slide_type": "slide"} # ## 1. Learn more about about Q#! # # 📑 [docs.microsoft.com/quantum](docs.microsoft.com/quantum) # # 📗 _Learn Quantum Computing with Python and Q#_: [bit.ly/qsharp-book](bit.ly/qsharp-book) # # 📺 Live quantum development on Twitch: [twitch.tv/crazy4pi314](twitch.tv/crazy4pi314) # # + [markdown] slideshow={"slide_type": "slide"} # ## 2. Check out [qsharp.community](qsharp.community) # # <br> # <center> # <img src="media/qsc.png" width="50%"> # </center> # + [markdown] slideshow={"slide_type": "slide"} # ## Q# community Mission: # # We want to _empower everyone_ to get involved in quantum development. 
# - Ensure we can support **members of all skill levels**
presentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="pRzUe5x6548Y" executionInfo={"status": "ok", "timestamp": 1643262945028, "user_tz": -330, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} # # !pip install ghostscript # # !pip install camelot-py # + colab={"base_uri": "https://localhost:8080/"} id="Ja-aB4hQGmju" executionInfo={"status": "ok", "timestamp": 1643262948856, "user_tz": -330, "elapsed": 3841, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} outputId="7d4f0d76-d4f2-4ab7-ffe1-1b387d8c3fa1" # !pip install pdf2image # + id="zB3siMsFG7TB" executionInfo={"status": "ok", "timestamp": 1643265016337, "user_tz": -330, "elapsed": 404, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # + colab={"base_uri": "https://localhost:8080/"} id="Fek3GfjzOkbJ" executionInfo={"status": "ok", "timestamp": 1643265099940, "user_tz": -330, "elapsed": 23697, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} outputId="94aeac3c-40dd-4733-d822-d60a6096f634" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} id="EeJ7BuPFOjym" executionInfo={"status": "ok", "timestamp": 1643265168242, "user_tz": -330, "elapsed": 57510, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} outputId="f635de5e-e7a9-4917-dc89-f1dedcf82f60" # !pip install https://github.com/myhub/tr/archive/1.5.1.zip # + id="4DMj-iA0O69b" executionInfo={"status": "ok", "timestamp": 1643265244482, "user_tz": -330, "elapsed": 1128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} from tr import * from PIL import Image, ImageDraw, ImageFont # + colab={"base_uri": "https://localhost:8080/"} id="8DSKf0KkPbU8" executionInfo={"status": "ok", "timestamp": 1643265282511, "user_tz": -330, "elapsed": 1018, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQH3OVyccfvnwsvKXXjF469cxJ1DXlgNvOdmMM=s64", "userId": "01698765501272568242"}} outputId="05caac54-c1bb-4206-f613-af3fc9989bd7" img_pil = Image.open("/content/drive/MyDrive/NLP/img1.jpg") MAX_SIZE = 2000 if img_pil.height > MAX_SIZE or img_pil.width > MAX_SIZE: scale = max(img_pil.height / MAX_SIZE, img_pil.width / MAX_SIZE) new_width = int(img_pil.width / scale + 0.5) 
# If you want to set a certain part of a page, you can use the `area` option.
Sentiment Analysis/Table Extract.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Think Bayes # # Second Edition # # Copyright 2020 <NAME> # # License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) import numpy as np import pandas as pd # ## Bayes's Theorem # # Bayes's Theorem states: # # $P(H|D) = P(H) ~ P(D|H) ~/~ P(D)$ # # where # # * $H$ stands for "hypothesis", and # # * $D$ stands for "data". # # Each term in this equation has a name: # # * $P(H)$ is the "prior probability" of the hypothesis, which represents how confident you are that $H$ is true prior to seeing the data, # # * $P(D|H)$ is the "likelihood" of the data, which is the probability of seeing $D$ if the hypothesis is true, # # * $P(D)$ is the "total probability of the data", that is, the chance of seeing $D$ regardless of whether $H$ is true or not. # # * $P(H|D)$ is the "posterior probability" of the hypothesis, which indicates how confident you should be that $H$ is true after taking the data into account. # ## The cookie problem # # Here's a problem I got from Wikipedia a long time ago: # # > Suppose you have two bowls of cookies. Bowl 1 contains 30 vanilla and 10 chocolate cookies. Bowl 2 contains 20 of each kind. # > # > You choose one of the bowls at random and, without looking into the bowl, choose one of the cookies at random. It turns out to be a vanilla cookie. # > # > What is the chance that you chose Bowl 1? # # We'll assume that there was an equal chance of choosing either bowl and an equal chance of choosing any cookie in the bowl. # We can solve this problem using Bayes's Theorem. First, I'll define $H$ and $D$: # # * $H$ is the hypothesis that the bowl you chose is Bowl 1. 
# # * $D$ is the datum that the cookie is vanilla ("datum" is the rarely-used singular form of "data"). # # What we want is the posterior probability of $H$, which is $P(H|D)$. It is not obvious how to compute it directly, but if we can figure out the terms on the right-hand side of Bayes's Theorem, we can get to it indirectly. # 1. $P(H)$ is the prior probability of $H$, which is the probability of choosing Bowl 1 before we see the data. If there was an equal chance of choosing either bowl, $P(H)$ is $1/2$. # # 2. $P(D|H)$ is the likelihood of the data, which is the chance of getting a vanilla cookie if $H$ is true, in other words, the chance of getting a vanilla cookie from Bowl 1, which is $30/40$ or $3/4$. # # 3. $P(D)$ is the total probability of the data, which is the chance of getting a vanilla cookie whether $H$ is true or not. In this example, we can figure out $P(D)$ directly: because the bowls are equally likely, and they contain the same number of cookies, you were equally likely to choose any cookie. Combining the two bowls, there are 50 vanilla and 30 chocolate cookies, so the probability of choosing a vanilla cookie is $50/80$ or $5/8$. # # Now that we have the terms on the right-hand side, we can use Bayes's Theorem to combine them. prior = 1/2 prior likelihood = 3/4 likelihood prob_data = 5/8 prob_data posterior = prior * likelihood / prob_data posterior # The posterior probability is $0.6$, a little higher than the prior, which was $0.5$. # # So the vanilla cookie makes us a little more certain that we chose Bowl 1. # **Exercise:** What if we had chosen a chocolate cookie instead; what would be the posterior probability of Bowl 1? # + # Solution goes here # - # ## The Bayes table # # In the cookie problem we were able to compute the probability of the data directly, but that's not always the case. In fact, computing the total probability of the data is often the hardest part of the problem. 
# Bayes-table solution to the cookie problem: a pandas DataFrame with
# one row per hypothesis (which bowl the cookie came from).
table = pd.DataFrame(index=['Bowl 1', 'Bowl 2'])

# Prior column: each bowl is equally likely to have been chosen.
table['prior'] = [1/2, 1/2]

# Likelihood column: chance of drawing a vanilla cookie from each bowl.
table['likelihood'] = [3/4, 1/2]

# Unnormalized posterior -- prior times likelihood, row by row.
table['unnorm'] = table['prior'] * table['likelihood']

# Total probability of the data (the normalizing constant).
prob_data = table['unnorm'].sum()
prob_data
# games on a television show called "Let's Make a Deal".
# Bayes table for the Monty Hall problem: one row per door that might
# hide the car, given that you picked Door 1 and Monty opened Door 3.
table3 = pd.DataFrame(index=['Door 1', 'Door 2', 'Door 3'])

# Prior: the car is equally likely to be behind any of the three doors.
table3['prior'] = Fraction(1, 3)

# Likelihood that Monty opens Door 3 under each hypothesis:
#   car behind Door 1 -> he picks Door 2 or Door 3 at random (1/2);
#   car behind Door 2 -> he is forced to open Door 3 (1);
#   car behind Door 3 -> he never opens the car door (0).
table3['likelihood'] = [Fraction(1, 2), 1, 0]

# The normalize-and-divide step is identical for every Bayes table.
table3['unnorm'] = table3['prior'] * table3['likelihood']
prob_data3 = table3['unnorm'].sum()
table3['posterior'] = table3['unnorm'] / prob_data3
table3
[Monty Hall problem](https://en.wikipedia.org/wiki/Monty_Hall_problem)
code/chap01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="jbZfUNRMWRM_" colab_type="code" colab={} from __future__ import print_function from tensorflow import keras import numpy as np import numpy as np import tensorflow as tf from tensorflow.contrib import rnn import random import collections import time # + id="m43fSYzvKpuY" colab_type="code" colab={} import numpy as np from operator import itemgetter from nltk import word_tokenize, sent_tokenize from string import punctuation from nltk.util import ngrams import re from sklearn.model_selection import train_test_split # + id="9o4OB5GTMGjr" colab_type="code" outputId="bffe7e8b-1f67-4992-8858-5a5c5d0c64da" colab={"base_uri": "https://localhost:8080/", "height": 69} import nltk nltk.download('punkt') # + id="2mWk36_zK7bI" colab_type="code" outputId="a1625787-c37a-4e0d-8fe4-c37a1d6cd99c" colab={"base_uri": "https://localhost:8080/", "height": 86} # Pre-Processing Dataset -Filtering out symbols, newlines. 
#adding to vocabulary list if it does not exist, else increasing its frequency by 1 if it exists
def elapsed(sec):
    """Format a duration in seconds as a human-readable string.

    Returns seconds below one minute, minutes below one hour, and hours
    otherwise (same thresholds and string formatting as before).
    """
    if sec < 60:
        return str(sec) + " sec"
    elif sec < (60 * 60):
        return str(sec / 60) + " min"
    else:
        return str(sec / (60 * 60)) + " hr"


def read_data(content):
    """Flatten an iterable of sentence strings into a 1-D numpy array of tokens.

    Each entry of `content` is stripped of surrounding whitespace and
    split on whitespace; the resulting words are concatenated in order.
    """
    # Iterate the lines directly instead of the original's
    # range(len(...)) index loop, and avoid rebinding `content` twice.
    words = [word for line in content for word in line.strip().split()]
    return np.array(words)


def build_dataset(words):
    """Build word<->id mappings ordered by descending word frequency.

    Returns (dictionary, reverse_dictionary): `dictionary` maps each word
    to an integer id (0 = most frequent; ties keep first-seen order, as
    Counter.most_common does), and `reverse_dictionary` is the inverse.
    """
    count = collections.Counter(words).most_common()
    # enumerate() assigns consecutive ids in frequency order, replacing
    # the original's manual len(dictionary) counter.
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    reverse_dictionary = {idx: word for word, idx in dictionary.items()}
    return dictionary, reverse_dictionary
def RNN(x, weights, biases):
    """Build the unrolled recurrent network and return output-layer logits.

    x: placeholder of shape [batch, n_input, 1] holding n_input word ids
       (see the placeholder definition above).
    weights, biases: output projection, each a dict with key 'out'.
    Returns a [batch, vocab_size] logits tensor for the next word.
    Relies on the module-level `n_input`, `n_hidden`, `tf`, and `rnn`
    (tensorflow.contrib.rnn) names.
    """
    # reshape to [1, n_input]
    x = tf.reshape(x, [-1, n_input])

    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x, n_input, 1)

    # Alternative cells tried during experimentation, kept for reference:
    # 2-layer LSTM, each layer with n_hidden units:
    #rnn_cell = rnn.MultiRNNCell([rnn.LSTMCell(n_hidden, reuse =tf.AUTO_REUSE),rnn.LSTMCell(n_hidden, reuse =tf.AUTO_REUSE)])
    # 1-layer LSTM with n_hidden units:
    #rnn_cell = rnn.LSTMCell(n_hidden, reuse =tf.AUTO_REUSE)
    # 2-layer GRU:
    #rnn_cell = rnn.MultiRNNCell([rnn.GRUCell(n_hidden), rnn.GRUCell(n_hidden)])

    # 1-layer vanilla RNN with n_hidden units (the cell actually used).
    rnn_cell = rnn.BasicRNNCell(n_hidden)

    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # there are n_input outputs but we only want the last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
if offset > (len(training_data)-end_offset): offset = random.randint(0, n_input+1) symbols_in_keys = [ [dictionary[ str(training_data[i])]] for i in range(offset, offset+n_input) ] symbols_in_keys = list(np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])) symbols_out_onehot = np.zeros([vocab_size], dtype=float) symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0 symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1]) _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \ feed_dict={x: symbols_in_keys, y: symbols_out_onehot}) loss_total += loss loss_overall += loss acc_total += acc if (step+1) % display_step == 0: print("No.= " + str((step+1)//display_step) + ", Average Loss= " + \ "{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \ "{:.2f}%".format(100*acc_total/display_step)) acc_total = 0 loss_total = 0 symbols_in = [training_data[i] for i in range(offset, offset + n_input)] symbols_out = training_data[offset + n_input] symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())] print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred)) step += 1 offset += (n_input+1) print("Optimization Finished!") print("Elapsed time: ", elapsed(time.time() - start_time)) num_start = 0 n_input = 4 words = [word_index['<PAD>']] * (n_input-1) + ['<s>'] overall = list(words) sentence = '' num_words = 0 while num_start < n_input and num_words < 500: try: symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))] keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1]) onehot_pred = session.run(pred, feed_dict={x: keys}) onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval()) if onehot_pred_index == dictionary['</s>']: sentence += ". 
" num_start += 1 else: sentence = "%s %s" % (sentence,reverse_dictionary[onehot_pred_index]) symbols_in_keys = list(symbols_in_keys[1:]) symbols_in_keys.append(onehot_pred_index) except: onehot_pred_index = dictionary['the'] sentence = "%s %s" % (sentence,reverse_dictionary[onehot_pred_index]) symbols_in_keys = list(symbols_in_keys[1:]) symbols_in_keys.append(onehot_pred_index) num_words += 1 print(sentence) # + id="AnToOoNPxvGE" colab_type="code" colab={} train_perplexity = tf.exp(loss) # + id="_ERkh3Fiur0E" colab_type="code" outputId="8cbbbceb-e95a-4de6-95ab-7f26638bb3d1" colab={"base_uri": "https://localhost:8080/", "height": 34} with tf.Session() as session: print(session.run(train_perplexity)) # + id="kTsd0-JYVvr6" colab_type="code" outputId="9828f60f-b48b-43f5-950e-eb75096e741a" colab={"base_uri": "https://localhost:8080/", "height": 975} # Parameters # learning_rate = 0.0001 # # training_iters = 10 * len(training_data) #10000 # training_iters = 100 # display_step = 10 # n_input = 4 # number of units in RNN cell # n_hidden = 128 # tf Graph input x = tf.placeholder("float", [None, n_input, 1]) y = tf.placeholder("float", [None, vocab_size]) # RNN output node weights and biases weights = { 'out': tf.Variable(tf.random_normal([n_hidden, vocab_size])) } biases = { 'out': tf.Variable(tf.random_normal([vocab_size])) } def RNN(x, weights, biases): # reshape to [1, n_input] x = tf.reshape(x, [-1, n_input]) # Generate a n_input-element sequence of inputs # (eg. 
[had] [a] [general] -> [20] [6] [33]) x = tf.split(x,n_input,1) # 1-layer LSTM with n_hidden units rnn_cell = rnn.LSTMCell(n_hidden, reuse =tf.AUTO_REUSE) outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32) # there are n_input outputs but # we only want the last output return tf.matmul(outputs[-1], weights['out']) + biases['out'] pred = RNN(x, weights, biases) # Loss and optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost) # Model evaluation correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initializing the variables init = tf.global_variables_initializer() # Launch the graph with tf.Session() as session: session.run(init) step = 0 offset = random.randint(0,n_input+1) end_offset = n_input + 1 acc_total = 0 loss_total = 0 loss_overall = 0 writer.add_graph(session.graph) while step < training_iters: # Generate a minibatch. Add some randomness on selection process. 
if offset > (len(training_data)-end_offset): offset = random.randint(0, n_input+1) symbols_in_keys = [ [dictionary[ str(training_data[i])]] for i in range(offset, offset+n_input) ] symbols_in_keys = list(np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])) symbols_out_onehot = np.zeros([vocab_size], dtype=float) symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0 symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1]) _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \ feed_dict={x: symbols_in_keys, y: symbols_out_onehot}) loss_total += loss loss_overall += loss acc_total += acc if (step+1) % display_step == 0: print("No.= " + str((step+1)//display_step) + ", Average Loss= " + \ "{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \ "{:.2f}%".format(100*acc_total/display_step)) acc_total = 0 loss_total = 0 symbols_in = [training_data[i] for i in range(offset, offset + n_input)] symbols_out = training_data[offset + n_input] symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())] print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred)) step += 1 offset += (n_input+1) print("Optimization Finished!") print("Elapsed time: ", elapsed(time.time() - start_time)) num_start = 0 # n_input = 4 words = [word_index['<PAD>']] * (n_input-1) + ['<s>'] overall = list(words) sentence = '' num_words = 0 while num_start < 5 and num_words < 500: try: symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))] keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1]) onehot_pred = session.run(pred, feed_dict={x: keys}) onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval()) if onehot_pred_index == dictionary['</s>']: sentence += ". 
# Perplexity as exp of the cross-entropy loss.
# NOTE(review): `loss` here is the value from the *last* training
# minibatch only, so this is a rough proxy rather than the perplexity of
# the whole corpus -- confirm this is what the assignment intends.
train_perplexity = tf.exp(loss)

# A fresh Session suffices: `loss` is already a concrete numeric value
# returned by session.run during training, so tf.exp wraps a constant
# and no variables need initializing here.
with tf.Session() as session:
    print(session.run(train_perplexity))
Assignment 2/Q2Bi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # 1: Intro to Bayes Theorem and how we want to find p(y = ci/ X = x) # 2: Naive assumption and how that simplifies our work # 3: How to estimate probabilities using training data assuming discrete valued features # 4: Handling zeroes using Laplace correction # 5: How to find probabilities for continuous valued features # 6: How to use sklearn for Naive bayes classification # 7: Bayesian Belief Networks # - # ## INTRODUCTION TO BAYES THEOREM # Bayes Theorem defines probability of an event based on the prior knowledge of factors that might be related to an event. # # #### Mathematical Statement of Bayes Theorem is as follows : # ![title](bayestheorem.png) # Now, basically for a data point xi, we have to predict the class that the current output Y belongs to. Assume, there are total 'j' number of classes for output.<br/> # Then, <br/> # P(y=c1|x=xi) ---> tells us that for given input xi what is the probability that y is c1. <br/> # P(y=c2|x=xi) ---> tells us that for given input xi what is the probability that y is c2. <br/> # and so on till cj. <br/><br/> # # Out of all these probabilities calculations, y belongs to that particular class which has maximum probability. # We will be using Bayes theorem to doing these probability calculations. <br/> # ![title](1nb.png) # This gives us the probability that the output belongs to jth class for the current values of data point(xi). <br/> # Since for all the classes 1,2,...,j the denominator will have the same value, so we can ignore this while doing comparison. Hence, we obtain the given formula to calculate probabilities. # ![title](2nb.png) # ### NAIVE ASSUMPTION # The estimate for probability P(y=cj), can be done directly from the number of training points. 
<br/>
# Suppose there are 100 training points and 3 output classes: 10 belong to class C1, 30 belong to class C2 and the remaining 60 belong to class C3. <br/>
# The estimated values of the class probabilities will be : <br/>
# P(y = C1) = 10/100 = 0.1 <br/>
# P(y = C2) = 30/100 = 0.3 <br/>
# P(y = C3) = 60/100 = 0.6 <br/>
# To make the probability estimate for P(x=xi|y=cj), the Naive Bayes classification algorithm assumes <b>all the features to be independent</b>. So, we can calculate this by individually multiplying the probabilities obtained for all these features (assuming the features to be independent), for the output of the jth class.
#
# P(x=xi|y=cj) = P(x=xi<sup>1</sup>|y=cj) P(x=xi<sup>2</sup>|y=cj) .... P(x=xi<sup>n</sup>|y=cj)
#
# here, xi<sup>1</sup> denotes the value of the 1st feature of the ith data point and xi<sup>n</sup> denotes the value of the nth feature of the ith data point.
# After taking up the naive assumption, we can easily calculate the individual probabilities and then, by simply multiplying the results, calculate the final probability P'.
# ![title](3nb.png)
# ### ESTIMATE PROBABILITIES USING TRAINING DATA (TAKING DISCRETE VALUE FEATURES)
# Using the above formula, we can calculate the probability that the output y belongs to the jth class, for the given ith data point. Class probabilities [ P(y = cj) ] will be calculated from the given data, and the individual probabilities [ P(x=xi<sup>k</sup>|y = cj) ] will be calculated by dividing the data class-wise and calculating these for the jth class.
# ## Handling zeroes using Laplace correction
# Let’s consider the following situation: you’ve trained a Naive Bayes algorithm to differentiate between spam and not-spam mails. What happens if the word “Casino” doesn’t show up in your training data set, but appears in a test sample?
#
# Well, your algorithm has never seen it before, so it sets the probability that <b>"Casino" appears in a spam document</b> to <b>0</b>. So every time this word appears in the test data, the classifier will strongly tend (since it has P = 0) to mark the mail as not-spam, just because that word never occurred in the spam part of the training data. This makes the model much less reliable, and thus we want to avoid it. We want to keep in mind that any word we have not seen (or, for that matter, have seen only in the not-spam part of the training data) may still have a probability greater than 0 of being a word used in spam mails. The same is true for each word being a part of not-spam mails.
#
# To avoid such issues with unseen values for features, as well as to combat overfitting to the data set, we pretend as if we’ve seen each word 1 (or k, if you’re smoothing by k) time more than we’ve actually seen it, and adjust the denominator of our frequency divisions by the size of the overall vocabulary to account for the “pretence”, which actually works well in practice.
#
# If you take the smoothing factor k equal to 1, this becomes the Laplace correction.
# The equations below show the Laplace correction for the example taken.
# <br>Without correction :
# <img src="L_corr.png">
# With correction :
# <img src="L_corr1.png" width="400px">
# ### FIND PROBABILITIES FOR CONTINUOUS VALUE FEATURES
# For all the continuous-valued features, we take the distribution to be GAUSSIAN. <br/>
# When a Gaussian distribution is used for continuous data, the model is called a GAUSSIAN NAIVE BAYES CLASSIFIER. Other types of classifiers are BERNOULLI, MULTINOMIAL, etc. <br/>
# The standard formula below is used to find the probability estimates for continuous data, as shown.
# ![title](normaldistribution.png) # ![title](normaldistributiongraph.png) # ### SKLEARN FOR NAIVE BAYES CLASSIFICATION from sklearn import datasets from sklearn.metrics import confusion_matrix import numpy as np # The sklearn.naive_bayes module implements Naive Bayes algorithms. # These are supervised learning methods based on applying Bayes’ theorem ## with strong (naive) feature independence assumptions. from sklearn import naive_bayes iris = datasets.load_iris() iris.data iris.target gnb = naive_bayes.GaussianNB() # GAUSSIAN NAIVE BAYES CLASSIFIER gnb.fit(iris.data, iris.target) y_pred = gnb.predict(iris.data) y_pred np.unique(y_pred, return_counts = True) # + print("CONFUSION MATRIX") print() print(confusion_matrix(iris.target, y_pred)) # iris.target - true value - shown horizontally in confusion matrix # y_pred - predicted values - shown vertically in confusion matrix
notes/13 naive bayes/NaiveBayesClassification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Import Packages import os import matplotlib.pyplot as plt import scipy.io as sio import torch import numpy as np import pandas as pd import logging import re from train_models import FNO1dComplexChooseModes, SpectralConv1dModes, OneStepDataSet # from train_models_no_spacetime import FNO1dComplexNoSpacetime # + tags=[] # %load_ext autoreload # %autoreload 1 # %aimport plotting_utils # - # # Load Data and Models # + DATA_DIR = '/local/meliao/projects/fourier_neural_operator/data/2021-08-14_NLS_data_files' MODEL_DIR = '/local/meliao/projects/fourier_neural_operator/experiments/21_use_other_frequencies/models' PLOTS_DIR = '/local/meliao/projects/fourier_neural_operator/experiments/21_use_other_frequencies/plots/Compare_N_X_datasets' RESULTS_DIR = '/local/meliao/projects/fourier_neural_operator/experiments/21_use_other_frequencies/results' # - model_fp_dd = {'Dataset 0': os.path.join(MODEL_DIR, 'dset_00_time_1_ep_1000'), 'Dataset 1': os.path.join(MODEL_DIR, 'dset_01_time_1_ep_1000'), 'Dataset 2': os.path.join(MODEL_DIR, 'dset_02_time_1_ep_1000'), 'Dataset 3': os.path.join(MODEL_DIR, 'dset_03_time_1_ep_1000'), 'Dataset 4': os.path.join(MODEL_DIR, 'dset_04_time_1_ep_1000') } model_dd = {k: torch.load(v, map_location='cpu') for k,v in model_fp_dd.items()} if not os.path.isdir(PLOTS_DIR): os.mkdir(PLOTS_DIR) # + tags=[] data_fp_dd = {'Dataset 0': os.path.join(DATA_DIR, '00_test.mat'), 'Dataset 1': os.path.join(DATA_DIR, '01_test.mat'), 'Dataset 2': os.path.join(DATA_DIR, '02_test.mat'), 'Dataset 3': os.path.join(DATA_DIR, '03_test.mat'), 'Dataset 4': os.path.join(DATA_DIR, '04_test.mat')} data_dd = {k: sio.loadmat(v) for k,v in data_fp_dd.items()} dataset_dd = {k: OneStepDataSet(v['output'], v['t'], v['x']) for k,v in data_dd.items()} # - # # Prediction 
Differences Between Similar ICs def prepare_input(X): # X has shape (nbatch, 1, grid_size) s = X.shape[-1] n_batches = X.shape[0] # Convert to tensor X_input = torch.view_as_real(torch.tensor(X, dtype=torch.cfloat)) # FNO code appends the spatial grid to the input as below: x_grid = torch.linspace(-np.pi, np.pi, 1024).view(-1,1) X_input = torch.cat((X_input, x_grid.repeat(n_batches, 1, 1)), axis=2) return X_input def l2_normalized_error(pred, actual): """Short summary. Parameters ---------- pred : type Description of parameter `pred`. actual : type Description of parameter `actual`. Returns ------- types Description of returned object. """ errors = pred - actual error_norms = torch.linalg.norm(torch.tensor(errors), dim=-1, ord=2) actual_norms = torch.linalg.norm(torch.tensor(actual), dim=-1, ord=2) normalized_errors = torch.divide(error_norms, actual_norms) return normalized_errors # + tags=[] preds_dd = {} errors_dd = {} for k in model_dd.keys(): model_k = model_dd[k] dset_k = dataset_dd[k] input = prepare_input(dset_k.X[:,0]) target = dset_k.X[:,1] preds_k = model_k(input) preds_dd[k] = preds_k errors_dd[k] = l2_normalized_error(preds_k, target) print("Finished with model ", k) # + def quick_boxplot(errors_dd, ref_hline=None, fp=None, title=None): error_lst = [] key_lst = [] for k, errors in errors_dd.items(): error_lst.append(errors) key_lst.append(k) fig, ax = plt.subplots() ax.set_yscale('log') ax.set_ylabel('L2 Normalized Error') ax.set_xlabel('FNO Model') ax.set_title(title) ax.set_xticklabels(labels=key_lst, rotation=45, ha='right') if ref_hline is not None: ax.hlines(ref_hline, xmin=0.5, xmax=len(key_lst)+ 0.5, linestyles='dashed') fig.patch.set_facecolor('white') ax.boxplot(error_lst) fig.tight_layout() if fp is not None: plt.savefig(fp) else: plt.show() plt.close(fig) # - quick_boxplot(errors_dd) for k in model_dd.keys(): for i in range(5): preds_dd = {'Preds_1': output1_dd[k].numpy()[i], 'Preds_2': output2_dd[k].numpy()[i]} ic_dd = {'IC_1': 
dset1.X[i,0].numpy(), 'IC_2': dset2.X[i,0]} soln_dd = {'Soln_1': dset1.X[i,1].numpy(), 'Soln_2': dset2.X[i,1].numpy()} solns = dset1.X[i, 1].numpy() title = 'Test case ' + str(i) + ', model trained on ' + k fp_i = os.path.join(PLOTS_DIR, 'compare_predictions_model_{}_test_case_{}.png'.format(model_name_dd[k], i)) plot_two_solutions_only_DFT(preds_dd, ic_dd, soln_dd, np.linspace(-np.pi, np.pi, 1024), title=title, fp=fp_i) for k in no_W_model_dd.keys(): for i in range(5): preds_dd = {'Preds_1': no_W_output1_dd[k].numpy()[i], 'Preds_2': no_W_output2_dd[k].numpy()[i]} ic_dd = {'IC_1': dset1.X[i,0].numpy(), 'IC_2': dset2.X[i,0].numpy()} soln_dd = {'Soln_1': dset1.X[i,1].numpy(), 'Soln_2': dset2.X[i,1].numpy()} # solns = dset1.X[i, 1].numpy() title = 'Test case ' + str(i) + ', No W channel, model trained on ' + k fp_i = os.path.join(PLOTS_DIR, 'no_W_compare_predictions_model_{}_test_case_{}.png'.format(model_name_dd[k], i)) plot_two_solutions_only_DFT(preds_dd, ic_dd, soln_dd, np.linspace(-np.pi, np.pi, 1024), title=title, fp=fp_i) # + def make_rescaled_predictions(model, dset): """ """ # print(ones_input.shape) preds = torch.zeros_like(dset.X) x_vals = torch.zeros((dset.X.shape[1], dset.X.shape[2])) errors = torch.zeros((dset.X.shape[0], dset.X.shape[1])) # print(x_vals.shape) x_vals[0] = dset.x_grid.reshape((1,-1)) preds[:, 0] = dset.X[:, 0] for t_idx in range(1, dset.n_tsteps+1): time = dset.t[t_idx] rescaled_ICs = prepare_input(dset.rescaled_ICs[:,t_idx]) # print(rescaled_ICs.shape) # x_vals[t_idx] = rescaled_ICs[0, :,2] predictions_i = model(rescaled_ICs) # inv_root_t = 1 / torch.sqrt(time) root_t = torch.sqrt(time) predictions_i = root_t * predictions_i preds[:, t_idx] = predictions_i errors_i = l2_normalized_error(predictions_i, dset.X[:,t_idx]) errors[:,t_idx] = errors_i # print("Finished predictions at ", t_idx, inv_root_t) return preds, errors # def make_composed_predictions(model, dset): # """ # """ # ones_input = torch.tensor(1.).repeat(dset.n_batches, 1,1) # 
# print(ones_input.shape) # preds = torch.zeros_like(dset.X) # errors = torch.zeros((dset.X.shape[0], dset.X.shape[1])) # preds[:, 0] = dset.X[:, 0] # inputs_i = prepare_input(dset.X[:, 0]) # for t_idx in range(1, dset.n_tsteps+1): # time = dset.t[t_idx] # # rescaled_ICs = dset.make_x_train_rescaled_batched(dset.X[:, 0], time) # predictions_i = model(inputs_i, ones_input) # preds[:, t_idx] = predictions_i # inputs_i = prepare_input(predictions_i) # errors_i = l2_normalized_error(predictions_i, dset.X[:,t_idx]) # errors[:,t_idx] = errors_i # # print("Finished predictions at ", t_idx) # return preds, errors # + tags=[] preds_dd = {} errors_dd = {} with torch.no_grad(): for k, model in model_dd.items(): preds_i, errors_i = make_rescaled_predictions(model, scaling_dset) preds_dd[k] = preds_i errors_dd[k] = errors_i print("Finished with ", k) # preds_composed, errors_composed = make_composed_predictions(model, time_dset) # preds_rescaled, x_vals_rescaled, errors_rescaled = make_rescaled_predictions(model, scaling_dset) # + tags=[] errors_dd_i = {k: np.delete(v.numpy(), [59], axis=0) for k,v in errors_dd.items()} fp_time_errors = os.path.join(PLOTS_DIR, 'scaling_time_errors.png') plotting_utils.plot_time_errors(errors_dd_i, title='Time-Rescaling Preds with FNO trained on different ICs') #, fp=fp_time_errors) # + tags=[] test_cases_for_plot = list(range(3)) for test_case in test_cases_for_plot: solns = scaling_dset.X.numpy()[test_case] for k,v in preds_dd.items(): fp_i = os.path.join(PLOTS_DIR, 'model_{}_test_case_{}.png'.format(model_name_dd[k], test_case)) print("Working on model {}, case {}".format(model_name_dd[k], test_case)) preds_dd_i = {k: v.numpy()[test_case]} plotting_utils.plot_one_testcase_panels(preds_dd_i, solns, plot_errors=True, show_n_timesteps=10, fp=fp_i) # break # + tags=[] pred_arr = preds_dd['Mixed ICs'] print(pred_arr.shape) plt.plot(np.real(pred_arr[0,2,:].numpy())) # + train_pattern = os.path.join(RESULTS_DIR, '{}_train_FNO_train.txt') 
test_pattern = os.path.join(RESULTS_DIR, '{}_train_FNO_test.txt') for k,v in model_name_dd.items(): train_fp_i = train_pattern.format(v) test_fp_i = test_pattern.format(v) train_df = pd.read_table(train_fp_i) test_df = pd.read_table(test_fp_i) title_i = 'Training set: ' + k fp_i = os.path.join(PLOTS_DIR, 'train_test_{}.png'.format(v)) plotting_utils.make_train_test_plot(train_df, test_df, log_scale=True, title=title_i, fp=fp_i) # - DATA_DIR = '/local/meliao/projects/fourier_neural_operator/data/' NEW_PLOTS_DIR = '/local/meliao/projects/fourier_neural_operator/experiments/18_train_with_rescaling/plots/mixed_IC_model' if not os.path.isdir(NEW_PLOTS_DIR): os.mkdir(NEW_PLOTS_DIR) test_dset_fp_dd = {'ICs freq [1, ..., 5]': os.path.join(DATA_DIR, '2021-06-24_NLS_data_04_test.mat'), 'ICs freq [6, ..., 10]': os.path.join(DATA_DIR, '2021-07-22_NLS_data_06_test.mat'), 'ICs freq [11, ..., 15]': os.path.join(DATA_DIR, '2021-08-04_NLS_data_09_test.mat'), 'ICs freq [16, ..., 20]': os.path.join(DATA_DIR, '2021-08-04_NLS_data_10_test.mat'), 'Mixed ICs': os.path.join(DATA_DIR, '2021-08-08_NLS_mixed_IC_data_test.mat'), } # + test_data_dd = {k: sio.loadmat(v) for k,v in test_dset_fp_dd.items()} test_dset_dd = {k: TimeScalingDataSet(v['output'], v['t'], v['x']) for k,v in test_data_dd.items()} # + tags=[] preds_dd = {} errors_dd = {} mixed_model = model_dd['Mixed ICs'] with torch.no_grad(): for k, dset in test_dset_dd.items(): preds_i, _ , errors_i = make_rescaled_predictions(mixed_model, dset) preds_dd[k] = preds_i errors_dd[k] = errors_i print("Finished with ", k) # preds_composed, errors_composed = make_composed_predictions(model, time_dset) # preds_rescaled, x_vals_rescaled, errors_rescaled = make_rescaled_predictions(model, scaling_dset) # + tags=[] errors_dd_i = {k: v.numpy() for k,v in errors_dd.items()} t = 'Model trained on Mixed ICs and tested on different datasets' fp = os.path.join(NEW_PLOTS_DIR, 'mixed_ICs_time_errors.png') plotting_utils.plot_time_errors(errors_dd_i, 
title=t, fp=fp) # + tags=[] test_cases_for_plot = list(range(3)) for test_case in test_cases_for_plot: for k, dset in test_dset_dd.items(): solns = dset.X.numpy()[test_case] preds_dd_i = {k: preds_dd[k].numpy()[test_case]} fp_i = os.path.join(NEW_PLOTS_DIR, 'panels_dset_{}_test_case_{}.png'.format(model_name_dd[k], test_case)) plotting_utils.plot_one_testcase_panels(preds_dd_i, solns, show_n_timesteps=10, fp=fp_i) print("Finished dset {} and test case {}".format(model_name_dd[k], test_case)) # break # -
experiments/21_use_other_frequencies/Compare_datasets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('..') from datetime import datetime import pandas as pd from bcb import sgs df = sgs.get({'IPCA': 433, 'IGPM': 189}, start_date='2002-01-01', end_date='2021-01-01', join=True) df
notebooks/sgs get series with join.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Create Endpoint from Job # ## Disclaimer! # # You should update in conf.py the following variables: # # - training_job_name # - model_data # + import conf import sagemaker from sagemaker import get_execution_role from sagemaker.model import Model role = get_execution_role() print(role) bucket = conf.SESSION_BUCKET sess = sagemaker.Session(default_bucket=bucket) # - model_name = sess.create_model_from_job(training_job_name=conf.training_job_name, name=conf.model_name, role=role, ) model = Model(model_data=conf.model_data, image=conf.container_image, role=role, sagemaker_session=sess) object_detector = model.deploy(initial_instance_count=conf.initial_instance_count, instance_type=conf.deploy_instance_type, endpoint_name=conf.endpoint_name) sess.sagemaker_client.describe_endpoint(EndpointName=conf.endpoint_name)
2.0-amazon-sagemaker/2.2-hardhat-object-detection/03-CreateEndpointFromJob.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Load Data # # Load sales data from S3 / HDFS. We use the built-in "csv" method, which can use the first line has column names and which also supports infering the schema automatically. We use both and save some code for specifying the schema explictly. # # We also peek inside the data by retrieving the first five records. # + from pyspark.sql.functions import * raw_data = spark.read\ .option("header","true")\ .option("inferSchema","false")\ .csv("s3://dimajix-training/data/kc-house-data") raw_data.limit(5).toPandas() # - # ## Inspect Schema # # Now that we have loaded the data and that the schema was inferred automatically, let's inspect it. raw_data.printSchema() # + from pyspark.sql.types import * data = raw_data.withColumn("price", raw_data.price.cast(FloatType())) \ .withColumn("bedrooms", raw_data.bedrooms.cast(FloatType())) \ .withColumn("bathrooms", raw_data.bathrooms.cast(FloatType())) \ .withColumn("sqft_living", raw_data.sqft_living.cast(FloatType())) \ .withColumn("sqft_lot", raw_data.sqft_lot.cast(FloatType())) \ .withColumn("floors", raw_data.floors.cast(FloatType())) \ .withColumn("sqft_above", raw_data.sqft_above.cast(FloatType())) \ .withColumn("sqft_basement", raw_data.sqft_basement.cast(FloatType())) \ .withColumn("yr_built", raw_data.yr_built.cast(IntegerType())) \ .withColumn("yr_renovated", raw_data.yr_renovated.cast(IntegerType())) \ .withColumn("sqft_living15", raw_data.sqft_living15.cast(FloatType())) \ .withColumn("sqft_lot15", raw_data.sqft_lot15.cast(FloatType())) data.printSchema() # - # # Initial Investigations # # As a first step to get an idea of our data, we create some simple visualizations. 
We use the Python matplot lib package for creating simple two-dimensional plots, where the x axis will be one of the provided attributes and the y axis will be the house price. # %matplotlib inline # Import relevant Python packages import pandas as pd import numpy as np import matplotlib.pyplot as plt # ## House Price in Relation to sqft_living # # Probably one of the most important attributes is the size of the house. This is provided in the data in the column "sqft_living". We extract the price column and the sqft_living column and create a simple scatter plot. # + # Extract price and one of the attributes price = data.select("price").toPandas() sqft_living = data.select("sqft_living").toPandas() # Create simple scatter plot plt.plot(sqft_living, price, ".") # - # ## House Price in Relation to sqft_lot # # Another interesting attribute for predicting the house price might be the size of the whole lot, which is provided in the column "sqft_lot". So let's create another plot, now with "price" and "sqft_lot". # + price = data.select("price").toPandas() sqft_lot = data.select("sqft_lot").toPandas() plt.plot(sqft_lot, price, ".") # - # # Perform Linear Regression # # Let's try to fit a line into the picture by performing a linear regression. This is done in two steps: # 1. Extract so called features from the raw data. The features have to be stored in a new column of type "Vector" # 2. Train a linear regression model # + from pyspark.ml.feature import * from pyspark.ml.regression import * # Extract features using VectorAssembler vector_assembler = VectorAssembler(inputCols=['sqft_living'], outputCol='features') features = vector_assembler.transform(data) # Traing linear regression model regression = LinearRegression(featuresCol='features',labelCol='price') model = regression.fit(features) # - # ## Inspect Model # # Let's inspect the generated linear model. It has two fields, "intercept" and "coefficients" which completely describe the model. 
# # The basic formular of the model is # # y = SUM(coeff[i]*x[i]) + intercept # # where y is the prediction variable, and x[i] are the input feature. print("Intercept: " + str(model.intercept)) print("Coefficients: " + str(model.coefficients)) # ## Plot Data and Model # # Now let's overlay the original scatter plot with the trained model. The model encodes a line, which can be overlayed by an additional invocation of "plt.plot". # + # For plotting the model, we need to generate input and output values. Input values are stored in "model_x" model_x = np.linspace(0,14000,100) # model_y contains the model applied to model_x. The model has only one feature and an intercept model_y = model_x * model.coefficients[0] + model.intercept plt.plot(sqft_living, price, ".") plt.plot(model_x, model_y, "r") # - # # Measuring Fit # # Now the important question of course is, how well does the model approximate the real data. We can find our by transforming our input data using the model. This is done by using the function # # model.transform # # which accepts one parameter and adds a new column "prediction" to input data, which contains the evaluated model for each record. prediction = model.transform(features) prediction.limit(5).toPandas() # ## Manually Calculate RMSE # # Using SQL we compute the root mean squared error (RMSE). Formally it is calculated as # # SQRT(SUM((price - prediction)**2) / n) # # where n is the number of records. prediction.selectExpr("sqrt(sum((price - prediction)*(price-prediction)) / count(*)) as RMSE").toPandas() # ## Use Built in Functionality to Measure the Fit # Of course Spark ML already contains evaluators for the most relevant metrics # + from pyspark.ml.evaluation import * evaluator = RegressionEvaluator(labelCol="price", predictionCol="prediction", metricName="rmse") evaluator.evaluate(prediction) # - # # Measuring Generalization of Model # # Now we have an idea how well the model approximates the given data. 
But for machine learning it is more important to understand how well a model generalizes from the training data to new data. New data could contain different outliers. # # In order to measure the generalization of the model, we need to change our high level approach. Our new approach needs to provide distinct sets of training data and test data. We can create such data using the Spark method "randomSplit". # + train_data, test_data = features.randomSplit([0.8,0.2], seed=0) # Train a linear regression model regression = LinearRegression(featuresCol='features',labelCol='price') model = regression.fit(train_data) # Now create predictions, but this time for the "test_data" and NOT for the training data itself prediction = model.transform(test_data) # Evaluate model using RegressionEvaluator again, but this time using the "prediction" data frame evaluator = RegressionEvaluator(labelCol="price", predictionCol="prediction", metricName="rmse") evaluator.evaluate(prediction) # - # # Improving Prediction # # Now that we have a metric and a valid approachm, the next question is: How can we improve the model? So far we only used the column "sqft_living" for building the model, but we have much more information about the houses. A very simple way is to include more attributes into the feature vector. 
# # Remember that the schema looked as follows: # # root # |-- id: long (nullable = true) # |-- date: string (nullable = true) # |-- price: decimal(7,0) (nullable = true) # |-- bedrooms: integer (nullable = true) # |-- bathrooms: double (nullable = true) # |-- sqft_living: integer (nullable = true) # |-- sqft_lot: integer (nullable = true) # |-- floors: double (nullable = true) # |-- waterfront: integer (nullable = true) # |-- view: integer (nullable = true) # |-- condition: integer (nullable = true) # |-- grade: integer (nullable = true) # |-- sqft_above: integer (nullable = true) # |-- sqft_basement: integer (nullable = true) # |-- yr_built: integer (nullable = true) # |-- yr_renovated: integer (nullable = true) # |-- zipcode: integer (nullable = true) # |-- lat: double (nullable = true) # |-- long: double (nullable = true) # |-- sqft_living15: integer (nullable = true) # |-- sqft_lot15: integer (nullable = true) # # We simply use all real numeric columns. Some columns like "condition", "grade", "zipcode" are categorical variables, which we don't want to use now. # + # Extract features using VectorAssembler vector_assembler = VectorAssembler(inputCols=[ 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'sqft_living15', 'sqft_lot15'], outputCol='features') features = vector_assembler.transform(data) # Again split into training and test data train_data, test_data = features.randomSplit([0.8,0.2], seed=0) # Traing linear regression model regression = LinearRegression(featuresCol='features',labelCol='price') model = regression.fit(train_data) prediction = model.transform(test_data) # Evaluate model evaluator = RegressionEvaluator(labelCol="price", predictionCol="prediction", metricName="rmse") evaluator.evaluate(prediction) # -
spark-training/spark-python/jupyter-ml-house-prices/House Prices Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import numpy as np import pandas as pd import datetime as dt # + deletable=true editable=true #loading data shopInfoFile = '../dataset/shop_info.txt' shopInfo = pd.read_table(shopInfoFile, sep = ',', header = None) shopInfo.columns = ['shopID', 'city', 'locationID', 'perPay', 'score', 'commentCnt', 'shopLevel', 'cate1', 'cate2', 'cate3'] # + deletable=true editable=true #informationi on shops in different cities cities = np.unique(shopInfo['city']) shopByCity = {} shopNumByCity = {} for city in cities: shopNumByCity[city] = shopInfo[shopInfo['city'] == city].shape[0] shopByCity[city] = np.array(shopInfo[shopInfo['city'] == city]['shopID']) # + deletable=true editable=true # generate city code cities = np.unique(shopInfo['city']) cityDic = {} for index, city in enumerate(cities): cityDic[city] = shopNumByCity[city] cityDF = pd.DataFrame(cityDic, index = ['code']).T cityDF.to_csv('../preprocess/cityEncoding.csv', header = False) # + deletable=true editable=true # generate category code categoryDic = {} classFirst = np.unique(shopInfo['cate1']) count = 1 cur = 1 interval = 1000 countMS = len(classFirst) * 1000 intervalMS = 30 for index1, class1 in enumerate(classFirst): categoryDic[class1] = {} classSecond = np.unique(shopInfo[shopInfo['cate1'] == class1]['cate2']) for class2 in classSecond: categoryDic[class1][class2] = {} classThird = np.unique(shopInfo[shopInfo['cate1'] == class1][shopInfo['cate2'] == class2]['cate3']) for class3 in classThird: if class1 == '美食': categoryDic[class1][class2][class3] = countMS countMS = countMS + 1 else: categoryDic[class1][class2][class3] = cur cur = cur + 1 if class1 == '美食': countMS = countMS + intervalMS - 1 else: cur = cur + intervalMS - 1 if class1 != '美食': cur = count * 
# NOTE(review): this chunk begins mid-cell; the loop that maintained
# `interval` and `count` lives above the visible region.  Its trailing
# statement was `count = count + 1` -- re-attach it to that loop when the
# upstream cell is restored.

# + deletable=true editable=true
# Encode each shop's city and 3-level category into the integer codes
# prepared earlier (cityDic / categoryDic come from a previous cell).
cityCode = []
categoryCode = []
for shopID in shopInfo['shopID']:
    # DataFrame.ix was removed in pandas 1.0; .iloc is the equivalent
    # positional lookup here (shopInfo uses the default RangeIndex).
    record = shopInfo.iloc[shopID - 1]
    city = record['city']
    cate1 = record['cate1']
    cate2 = record['cate2']
    cate3 = record['cate3']
    cityCode.append(cityDic[city])
    categoryCode.append(categoryDic[cate1][cate2][cate3])

# Rebuild the shop-info table keeping only numeric columns:
# city/category become integer codes, location and raw categories dropped.
shopInfo_new = pd.read_table(shopInfoFile, sep=',', header=None)
shopInfo_new.columns = ['shopID', 'city', 'locationID', 'perPay', 'score',
                        'commentCnt', 'shopLevel', 'cate1', 'cate2', 'cate3']
shopInfo_new['city'] = cityCode
del shopInfo_new['cate1']
del shopInfo_new['locationID']
del shopInfo_new['cate2']
del shopInfo_new['cate3']
shopInfo_new['category'] = categoryCode
# fillna replaces the original chained assignment (df[col][mask] = v),
# which is unreliable under pandas copy semantics (SettingWithCopyWarning).
shopInfo_new['score'] = shopInfo_new['score'].fillna(0.0)
shopInfo_new['commentCnt'] = shopInfo_new['commentCnt'].fillna(0.0)
shopInfo_new.to_csv('../preprocess/shopInfo.csv', header=False, index=False,
                    date_format='int32')
shopInfo_load = pd.read_csv('../preprocess/shopInfo.csv', header=None,
                            dtype='int32')
shopInfo_load.columns = ['shopID', 'city', 'perPay', 'score', 'commentCnt',
                         'shopLevel', 'category']


def _basic_info_rows(startDate, endDate):
    """Build one row of static shop features per (shop, day).

    Returns a dict keyed by `columns`, mapping to parallel lists that cover
    every shop in shopInfo_load for every day in [startDate, endDate]
    inclusive.  Replaces the four copy-pasted loops of the original.
    """
    rows = {column: [] for column in columns}
    for shopID in shopInfo_load['shopID']:
        curDate = startDate
        stop = endDate + dt.timedelta(days=1)
        while curDate != stop:
            # static per-shop features, repeated for each day
            for shopCol in shopInfo_load.columns:
                rows[shopCol].append(shopInfo_load[shopCol][shopID - 1])
            rows['year'].append(curDate.year)
            rows['month'].append(curDate.month)
            rows['day'].append(curDate.day)
            curDate = curDate + dt.timedelta(days=1)
    return rows


# + deletable=true editable=true
# train/validation windows
startDateTrain = dt.date(2016, 9, 20)
endDateTrain = dt.date(2016, 10, 17)
startDateTest = dt.date(2016, 10, 18)
endDateTest = dt.date(2016, 10, 31)
columns = ['shopID', 'year', 'month', 'day', 'city', 'perPay', 'score',
           'commentCnt', 'shopLevel', 'category']

# + deletable=true editable=true
shopDataTrain = _basic_info_rows(startDateTrain, endDateTrain)

# + deletable=true editable=true
trainFeatures_basicInfo = pd.DataFrame(shopDataTrain, columns=columns)
trainFeatures_basicInfo.to_csv('../preprocess/trainValidFeatures_basicInfo.csv',
                               header=False, index=False, date_format='int32')

# + deletable=true editable=true
shopDataTest = _basic_info_rows(startDateTest, endDateTest)

# + deletable=true editable=true
testFeatures_basicInfo = pd.DataFrame(shopDataTest, columns=columns)
testFeatures_basicInfo.to_csv('../preprocess/validFeatures_basicInfo.csv',
                              header=False, index=False, date_format='int32')

# + deletable=true editable=true
# train/test windows for the final submission
startDateTrain = dt.date(2016, 10, 4)
endDateTrain = dt.date(2016, 10, 31)
startDateTest = dt.date(2016, 11, 1)
endDateTest = dt.date(2016, 11, 14)

# + deletable=true editable=true
shopDataTrain = _basic_info_rows(startDateTrain, endDateTrain)

# + deletable=true editable=true
trainFeatures_basicInfo = pd.DataFrame(shopDataTrain, columns=columns)
trainFeatures_basicInfo.to_csv('../preprocess/trainTestFeatures_basicInfo.csv',
                               header=False, index=False, date_format='int32')

# + deletable=true editable=true
shopDataTest = _basic_info_rows(startDateTest, endDateTest)

# + deletable=true editable=true
testFeatures_basicInfo = pd.DataFrame(shopDataTest, columns=columns)
testFeatures_basicInfo.to_csv('../preprocess/testFeatures_basicInfo.csv',
                              header=False, index=False, date_format='int32')
src/featureExtraction/featureExtraction_basicInfo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Django Shell-Plus
#     language: python
#     name: django_extensions
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc" style="margin-top: 1em;"><ul class="toc-item"></ul></div>
# -

# Bayesian logistic regression over transcript segment vectors, trained with
# Pyro SVI and refined through an active-learning labelling loop.
from esper.prelude import *
import torch
import torch.nn as nn
import pyro
import pyro.distributions as dist
import pyro.optim as optim
import pyro.infer as infer
from torch.utils.data import DataLoader
from transcript_utils import *
from timeit import default_timer as now
from custom_mlp import MLP, Exp
import itertools

# Mutual-information score of each vocabulary n-gram with the topic
# 'immigration'; n-grams missing from the MI table get prior weight 0.
mi_dict = {ngram: score for [ngram, score] in mutual_info('immigration')}
mi_priors = torch.tensor([mi_dict[ngram] if ngram in mi_dict else 0 for ngram in vocabulary])
# spot-check a few topical words
for k in ['immigration', 'border', 'healthcare']:
    print('{} {:.4f}'.format(k, mi_priors[vocabulary.index(k)].item()))

# +
# compute_vectors(video_list(), vocabulary, SEGMENT_SIZE, SEGMENT_STRIDE)
# -

class RegressionModel(nn.Module):
    """Logistic regression sigmoid(Wx + b).  `model`/`guide` wrap it into a
    Bayesian model + variational guide pair for Pyro SVI."""

    def __init__(self, p):
        # p = number of features
        super(RegressionModel, self).__init__()
        self.linear = nn.Linear(p, 1)
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
        self.p = p

    def forward(self, x):
        # probability that each segment is on-topic
        return self.sigmoid(self.linear(x))

    def model(self, x, y):
        """Pyro model: wide Normal(0, 10) priors over weight and bias,
        Bernoulli likelihood over the observed labels y."""
        # Create unit normal priors over the parameters
        loc, scale = torch.zeros(self.p), torch.ones(self.p) * 10
        bias_loc, bias_scale = torch.zeros(1), torch.ones(1) * 10
        # NOTE(review): .independent() / pyro.iarange are pre-0.3 Pyro
        # spellings of .to_event() / pyro.plate -- this file pins the old API.
        w_prior = dist.Normal(loc, scale).independent(1)
        b_prior = dist.Normal(bias_loc, bias_scale).independent(1)
        priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
        # lift module parameters to random variables sampled from the priors
        lifted_module = pyro.random_module("module", self, priors)
        # sample a regressor (which also samples w and b)
        lifted_reg_model = lifted_module()
        with pyro.iarange("map", x.shape[0]):
            # run the regressor forward conditioned on data
            prediction_mean = lifted_reg_model(x).squeeze(-1)
            # condition on the observed data
            pyro.sample("obs", dist.Bernoulli(prediction_mean), obs=y)

    def guide(self, x, y):
        """Mean-field Normal guide; weight means are initialised from the
        mutual-information priors, scales start narrow (softplus(-3))."""
        # define our variational parameters
        w_loc = torch.tensor(mi_priors)
        # note that we initialize our scales to be pretty narrow
        w_log_sig = torch.tensor(-3.0 * torch.ones(1, self.p) + 0.05 * torch.randn(1, self.p))
        b_loc = torch.tensor(0.5) + 0.05 * torch.randn(1)
        b_log_sig = torch.tensor(-3.0 * torch.ones(1) + 0.05 * torch.randn(1))
        # register learnable params in the param store
        mw_param = pyro.param("guide_mean_weight", w_loc)
        sw_param = self.softplus(pyro.param("guide_log_scale_weight", w_log_sig))
        mb_param = pyro.param("guide_mean_bias", b_loc)
        sb_param = self.softplus(pyro.param("guide_log_scale_bias", b_log_sig))
        # guide distributions for w and b
        w_dist = dist.Normal(mw_param, sw_param).independent(1)
        b_dist = dist.Normal(mb_param, sb_param).independent(1)
        dists = {'linear.weight': w_dist, 'linear.bias': b_dist}
        # overload the parameters in the module with random samples
        # from the guide distributions
        lifted_module = pyro.random_module("module", self, dists)
        # sample a regressor (which also samples w and b)
        return lifted_module()

# Unlabelled pool of segment vectors plus the matching raw text (for the
# labelling widget further down).
unsup_dataset = SegmentVectorDataset(video_list(), vocab_size=vocab_size, inmemory=True)
text_dataset = SegmentTextDataset(video_list())
unsup_loader = DataLoader(unsup_dataset, batch_size=100, shuffle=True)

def get_accuracy(model_gen, x, y, iters=100):
    """Monte-Carlo estimate of (accuracy, fp-rate, fn-rate) over `iters`
    sampled regressors from model_gen; returns (means, stds) as lists."""
    samples = []
    for _ in range(iters):
        model = model_gen()
        y_pred = model(x).squeeze(-1).round()
        fp = torch.sum((y_pred != y) & (y_pred == 1)).item()
        fn = torch.sum((y_pred != y) & (y_pred == 0)).item()
        acc = torch.sum(y_pred == y).item()
        n = float(y_pred.shape[0])
        samples.append(torch.tensor([acc/n, fp/n, fn/n]))
    return torch.mean(torch.stack(samples), dim=0).tolist(), torch.std(torch.stack(samples), dim=0).tolist(),

# +
def format_data(labels):
    """Turn a labelled-segment mapping into (train_x, train_y, val_x, val_y)
    tensors with a fixed 2/3 : 1/3 split (no shuffling)."""
    dataset = LabeledSegmentDataset(unsup_dataset, labels, categories=2)
    x_data, y_data, _ = unzip(list(dataset))
    # y is one-hot over 2 categories; keep the positive-class column
    y_data = torch.tensor([y[1] for y in y_data])
    x_data = torch.stack(x_data)
    split = int(len(x_data) * 2 / 3)
    (train_x, val_x) = (x_data[:split], x_data[split:])
    (train_y, val_y) = (y_data[:split], y_data[split:])
    return train_x, train_y, val_x, val_y

def add_labels(labels, train_x, train_y, val_x, val_y):
    """Append newly labelled segments (from the active loop) to the existing
    train/val tensors, preserving the same split discipline."""
    active_train_x, active_train_y, active_val_x, active_val_y = format_data(labels)
    train_x = torch.cat((train_x, active_train_x))
    train_y = torch.cat((train_y, active_train_y))
    val_x = torch.cat((val_x, active_val_x))
    val_y = torch.cat((val_y, active_val_y))
    return train_x, train_y, val_x, val_y

# +
# Baseline: score segments directly against the raw MI weights (disabled).
# mi_priors_raw = torch.tensor([mi_dict[ngram] if ngram in mi_dict else 0 for ngram in vocabulary])

# def baseline_model(x):
#     return torch.mm(x, mi_priors_raw.unsqueeze(0).t()).squeeze()

# acc = get_accuracy(lambda: baseline_model,
#                    torch.cat((train_x, val_x)), torch.cat((train_y, val_y)), iters=2)[0][0]
# print('Baseline accuracy: {:.4f}'.format(acc))

# +
model_name = 'regression_active'
regression_model = RegressionModel(vocab_size)

def weights_path(iteration, epoch):
    # per-(active-iteration, epoch) checkpoint of the Pyro param store
    return '/app/data/models/transcript_{}_weights_iter{:02d}_epoch{:04d}.pt'.format(model_name, iteration, epoch)

def best_weights_path(iteration):
    # best checkpoint (by validation accuracy) for one active iteration
    return '/app/data/models/transcript_{}_best_weights_iter{:02d}.pt'.format(model_name, iteration)

def torch_trainer():
    """Plain (non-Bayesian) MSE trainer; returns (step_fn, model_getter).
    NOTE(review): size_average= is deprecated in modern torch (use
    reduction='sum'); kept as written for the pinned version."""
    loss_fn = nn.MSELoss(size_average=False)
    optim = torch.optim.Adam(regression_model.parameters(), lr=0.05)
    def step(x, y):
        # run the model forward on the data
        y_pred = regression_model(x).squeeze(-1)
        # calculate the mse loss
        loss = loss_fn(y_pred, y)
        # initialize gradients to zero
        optim.zero_grad()
        # backpropagate
        loss.backward()
        # take a gradient step
        optim.step()
        return loss.item()
    return step, lambda: regression_model

def pyro_trainer():
    """SVI trainer over (model, guide); returns (step_fn, sampler) where
    sampler() draws a concrete regressor from the current posterior."""
    pyro.clear_param_store()
    opt = optim.Adam({"lr": 0.01})
    svi = infer.SVI(regression_model.model, regression_model.guide, opt, loss=infer.Trace_ELBO())
    def step(x, y):
        return svi.step(x, y)
    return step, lambda: regression_model.guide(None, None)

def train(iteration, step, model_gen, train_x, train_y, val_x, val_y, epochs=100, checkpoint_frequency=5, verbose=False):
    """Full-batch training loop; checkpoints the param store every
    checkpoint_frequency epochs and reloads the checkpoint with the best
    validation accuracy.  Returns that best epoch index."""
    accs = []
    for epoch in range(epochs):
        loss = step(train_x, train_y)
        if epoch % checkpoint_frequency == 0:
            [tacc, tfp, tfn], _ = get_accuracy(model_gen, train_x, train_y)
            [vacc, vfp, vfn], [vaccstd, vfpstd, vfnstd] = get_accuracy(model_gen, val_x, val_y)
            if verbose:
                print("[iteration %04d] loss: %.0f, train: acc %.3f, val: acc %.3f (+/- %.3f) fp %.3f (+/- %.3f) fn %.3f (+/ %.3f)" % (epoch, loss, tacc, vacc, vaccstd, vfp, vfpstd, vfn, vfnstd))
            pyro.get_param_store().save(weights_path(iteration, epoch))
            accs.append(vacc)
    best_epoch = torch.tensor(accs).argmax().item() * checkpoint_frequency
    pyro.get_param_store().load(weights_path(iteration, best_epoch))
    pyro.get_param_store().save(best_weights_path(iteration))
    return best_epoch

def model_uncertainty(model_gen, x, iters=20):
    """Per-segment std of rounded predictions across `iters` posterior
    samples -- high std means the posterior disagrees about the label."""
    ys_pred = []
    for _ in range(iters):
        model = model_gen()
        ys_pred.append(model(x).squeeze(-1).round())
    return torch.stack(ys_pred).std(dim=0)

def max_uncertainty(model_gen, batches=None):
    """Scan the unlabelled pool (optionally only `batches` batches) and
    return the indices of the 1000 most uncertain segments, shuffled so the
    labelling widget shows a mixed sample."""
    all_std = []
    all_idx = []
    loader = itertools.islice(unsup_loader, batches) if batches is not None else unsup_loader
    for x, i in tqdm(loader):
        all_std.append(model_uncertainty(model_gen, x))
        all_idx.append(i)
    all_std = torch.cat(all_std)
    all_idx = torch.cat(all_idx)
    top_std, top_idx = all_std.topk(1000)
    top_idx = top_idx.tolist()
    random.shuffle(top_idx)
    return top_idx
# -

# Initial supervised fit on the pre-labelled segments.
step, model_gen = pyro_trainer()
print(train(0, step, model_gen, *format_data(pcache.get('labeled_segments')), verbose=True))

train_x, train_y, val_x, val_y = format_data(pcache.get('labeled_segments'))

# +
step, model_gen = pyro_trainer()

def train_and_label(iteration, train_x, train_y, val_x, val_y):
    """One round of the active-learning loop: train, report val metrics,
    pick the most uncertain segments, hand them to the labelling widget,
    then recurse with the enlarged dataset once labels come back."""
    best_epoch = train(iteration, step, model_gen, train_x, train_y, val_x, val_y)
    def eval_model(it):
        pyro.get_param_store().load(best_weights_path(it))
        [vacc, vfp, vfn], [vaccstd, vfpstd, vfnstd] = get_accuracy(model_gen, val_x, val_y, iters=1000)
        print("[iteration %d] acc %.3f (+/- %.3f) fp %.3f (+/- %.3f) fn %.3f (+/ %.3f)" % (it, vacc, vaccstd, vfp, vfpstd, vfn, vfnstd))
    if iteration > 0:
        eval_model(iteration - 1)
    eval_model(iteration)
    indices = max_uncertainty(model_gen)
    def done_callback(labels):
        print('Added {} labels'.format(len(labels)))
        pcache.set('active_labels_{}'.format(iteration), labels)
        train_and_label(iteration + 1, *add_labels(labels, train_x, train_y, val_x, val_y))
    label_widget(text_dataset, indices, done_callback)

train_and_label(0, *format_data(pcache.get('labeled_segments')))
# -

def eval_model(path):
    """Load a saved param store and score a fresh model's guide on the
    current val split (uses notebook-global val_x/val_y)."""
    pyro.get_param_store().load(path)
    old_model = RegressionModel(vocab_size)
    return get_accuracy((lambda: old_model.guide(None, None)), val_x, val_y, iters=1000)

eval_model('/app/data/models/transcript_regression_weights_epoch00080.pt')

eval_model('/app/data/models/transcript_regression_active_weights_epoch00060.pt')

# NOTE(review): x_data here relies on leftover notebook session state -- it is
# only a local inside format_data in this file.  Verify before re-running.
data = regression_model.guide(None, None)(x_data).squeeze(-1)
print(data)
print(data.round())

list(regression_model.named_parameters())

# Inspect learned parameters: vocabulary sorted by descending learned weight.
print("Learned parameters:")
for name, param in regression_model.named_parameters():
    if name == 'linear.weight':
        weights = param.data.numpy().squeeze()
        idx = np.argsort(weights)[::-1]
        print(weights[idx])
        print(np.array(vocabulary)[idx][:100])
app/notebooks/problang/transcript_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:qa]
#     language: python
#     name: conda-env-qa-py
# ---

# Scratch notebook: exercise the DeepCorrect punctuation/case restoration
# model on a few hand-written queries.

# !ls data/deep_punct/

import tensorflow as tf

print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

# Limit TF's GPU appetite so this notebook can share the card.
# The original cell assigned to the *class* attribute
# tf.GPUOptions.per_process_gpu_memory_fraction (a no-op for any session
# created later) and ended with a dangling `tf.compat.v1.GPUOptions.`
# expression, which is a SyntaxError.  Enabling memory growth on each
# physical GPU is the working TF2 equivalent.
for _gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(_gpu, True)
# NOTE(review): bare tf.device('/gpu:2') outside a `with` block does nothing;
# wrap model construction in `with tf.device('/gpu:2'):` if GPU pinning is
# actually wanted.
tf.device('/gpu:2')

from deepcorrect import DeepCorrect

# Wikipedia-trained params + checkpoint; loading is slow.
corrector = DeepCorrect('data/deep_punct/deeppunct_params_en',
                        'data/deep_punct/deeppunct_checkpoint_wikipedia')

# Expected output style: "How are you?"
#corrector.correct('michelle obama, spouse of barack obama')
corrector.correct('of what nationality is ken mcgoogan')

corrector.correct('michelle obama, spouse of barack obama')

corrector.correct('michelle obama, spouse of barack obama')

corrector.correct('<NAME>, voice actor of the last unicorn')

corrector.correct('Michelle Obama spouse Barack Obama')

corrector.correct('<NAME>, voice actor of The Last Unicorn, character role in The Unicorn')

corrector.correct('the actor is named <NAME>')

corrector.correct('the movie is named the last unicorn')

corrector.correct('what is ben moses\'s nationality?')

corrector.correct('what is ben moses\'s nationality?')

corrector.correct('what is ben moses\'s nationality?')

corrector.correct('the actor is named <NAME>')
playground-deepcorrect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Encoder generator: 002

# +
import os
import random
import numpy

import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer, GradientDescentOptimizer
import torch

import sys
sys.path.append("..")

from wordsToNumbers import Corpus
from wordsToNumbers import fibonacci_vocabulary
from wordsToQubits import put_word_on_sphere
from utils import get_corpus_from_directory, working_window, get_word_from_sphere
from qencode.initialize import setAux
from qencode.encoders import e2_classic
from qencode.training_circuits import swap_t
from qencode.qubits_arrangement import QubitsArrangement
from training.cost.swap_cost import sw_cost, sw_fidelity
# -

np.random.seed(73)

# ## Corpus

# Disabled alternative: load a corpus from a directory instead of the inline
# lyrics below.
"""
corpus_path="C:/Users/tomut/Documents/GitHub/Qountry/CountryMixt/"
corpus_tex = get_corpus_from_directory(corpus_path, limit=1)
corpus= Corpus(corpus_tex)
print(corpus.prop())
"""

# Only the first clause is live; the rest of the lyrics are commented out.
corpus_text = "Same old dive, same old end of the work week drink Bartender knows my name,"#but I don't mind She kicks 'em up strong, serves me up right And here I go again I'm drinkin' one, I'm drinkin' two I got my heartache medication, a strong dedication To gettin' over you, turnin' me loose On that hardwood jukebox lost in neon time My heartache medication, well it suits me fine And I'm drinkin' enough to take you off my mind I got my heartache medication
corpus = Corpus(corpus_text)
print(corpus.prop())

# NOTE(review): "vovabulary"/"lenghth" typos kept -- they are used
# consistently and renaming is out of scope for this fix.
parameterize_vovabulary = fibonacci_vocabulary(corpus.vocabulary)

# ## Training set

history_lenghth = 3
x, y = working_window(history_lenghth, splited_text=corpus.split_text)
print("len training set:", len(x))

# ## Working principles

# ## Just a simple encoder

# ### BIG encoder

# +
shots = 2500
nr_trash = 2
nr_latent = 2
spec_big = QubitsArrangement(nr_trash, nr_latent, nr_swap=1, nr_ent=0)
print("Qubits:", spec_big.qubits)

# set up the device
dev = qml.device("default.qubit", wires=spec_big.num_qubits)
# -

# +
# circuit initializer
def circuit_initializer(words, qubits):
    """Load one word per qubit via its Bloch-sphere embedding.
    (Removed the original per-call debug print, which fired on every
    circuit evaluation during training.)"""
    for i in range(len(words)):
        put_word_on_sphere(words[i], qubit=qubits[i])


@qml.qnode(dev)
def encoder_e2(init_params, encoder_params, spec=spec_big, reinit_state=None):
    """Encode `init_params` (word vectors), apply the e2 encoder layers, and
    return the swap-test qubit probabilities (compression fidelity proxy)."""
    # initialization
    circuit_initializer(init_params, qubits=[*spec.latent_qubits, *spec.trash_qubits])
    # encoder -- fixed to use `spec` consistently (the original mixed `spec`
    # and the global `spec_big`; identical with the default argument, but
    # wrong for any other spec passed in)
    for params in encoder_params:
        e2_classic(params, [*spec.latent_qubits, *spec.trash_qubits])
    # swap test
    swap_t(spec)
    return [qml.probs(i) for i in spec.swap_qubits]
# -

# #### Training parameters

# +
epochs = 50
batch_size = 2
num_samples = 0.8  # proportion of the data used for training
learning_rate = 0.0003
beta1 = 0.9
beta2 = 0.999
opt = AdamOptimizer(learning_rate, beta1=beta1, beta2=beta2)

# +
# Each sample is the `history_lenghth` context words plus the target word,
# all mapped through the fibonacci vocabulary parameterisation.
training_data = []
for i in range(int(len(x) * num_samples)):
    w_l = [parameterize_vovabulary[w] for w in x[i]]
    w_l.append(parameterize_vovabulary[y[i]])
    training_data.append(w_l)
training_data = torch.tensor(training_data)

test_data = []
for i in range(int(len(x) * num_samples), len(x)):
    w_l = [parameterize_vovabulary[w] for w in x[i]]
    w_l.append(parameterize_vovabulary[y[i]])
    test_data.append(w_l)
test_data = torch.tensor(test_data)

print("data example:", training_data[0])
# -

def iterate_batches(X, batch_size):
    """Shuffle X and split it into batches of `batch_size` (last batch may
    be smaller).

    Fixes two bugs in the original: (1) it iterated the unshuffled X, so the
    shuffle had no effect; (2) when a batch filled up, the current element
    was dropped instead of starting the next batch, silently losing one
    sample at every batch boundary.
    """
    shuffled = [item for item in X]
    random.shuffle(shuffled)
    batch_list = []
    batch = []
    for item in shuffled:
        batch.append(item)
        if len(batch) == batch_size:
            batch_list.append(batch)
            batch = []
    if len(batch) != 0:
        batch_list.append(batch)
    return batch_list


batch_list = iterate_batches(X=training_data, batch_size=2)
batch_list

# ### training

# +
# initialize random encoder parameters
nr_encod_qubits = len(spec_big.trash_qubits) + len(spec_big.latent_qubits)
nr_par_encoder = 15 * int(nr_encod_qubits * (nr_encod_qubits - 1) / 2)
encoder_params = np.random.uniform(size=(1, nr_par_encoder), requires_grad=True)
#print(qml.draw(encoder_e2)(init_params=training_data[0], encoder_params=encoder_params, spec=spec_big))

# +
def cost(encoder_params, X):
    """SVI-style swap-test cost over a batch of samples."""
    return sw_cost(encoder_params, input_data=X, circuit=encoder_e2, reinit_state=None)


def fidelity(encoder_params, X):
    """Swap-test fidelity over a batch of samples."""
    return sw_fidelity(encoder_params, input_data=X, circuit=encoder_e2, reinit_state=None)


loss_hist = []
fid_hist = []
loss_hist_test = []
fid_hist_test = []

for epoch in range(epochs):
    batches = iterate_batches(X=training_data, batch_size=batch_size)
    for xbatch in batches:
        encoder_params = opt.step(cost, encoder_params, X=xbatch)

    if epoch % 5 == 0:
        # evaluate on the full training and test sets every 5 epochs
        loss_training = cost(encoder_params, [training_data])
        fidel = fidelity(encoder_params, training_data)
        loss_hist.append(loss_training)
        fid_hist.append(fidel)
        print("Epoch:{} | Loss:{} | Fidelity:{}".format(epoch, loss_training, fidel))

        loss_test = cost(encoder_params, test_data)
        fidel = fidelity(encoder_params, test_data)
        loss_hist_test.append(loss_test)
        fid_hist_test.append(fidel)
        print("Test-Epoch:{} | Loss:{} | Fidelity:{}".format(epoch, loss_test, fidel))

        # Disabled: checkpoint the encoder parameters to disk.
        """
        experiment_parameters={"experiment":"Encoder_002","params":encoder_params}
        f=open("Encoder_002_/params"+str(epoch)+".txt","w")
        f.write(str(experiment_parameters))
        f.close()
        """
# -
working_notebook/.ipynb_checkpoints/Encoder_002-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Coding a deep neural network using Keras
# Keras is a friendly (and powerful) python library designed for developing
# and evaluating deep learning models (https://keras.io/).
#
# In this tutorial, we will construct a deep neural network using Keras,
# train it with a dataset for a simple classification task, and evaluate its
# performance.

# import libraries
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# # First step: Load and split the data
# We use the Pima Indians diabetes dataset: 768 instances, 8 numerical
# attributes (age, blood pressure, BMI, ...) plus a binary label in the 9th
# column (1 = onset of diabetes within five years).  All patients are females
# at least 21 years old of Pima Indian heritage.
# Details: https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.names

# +
# load the dataset
dataset = np.loadtxt('../pima-indians-diabetes.csv', delimiter=',')

# Uncomment and rerun for a quick view on pars distribution
#from aux_func import plot_hist
#plot_hist(dataset)
# -

# To properly evaluate our model, we split the dataset into training and
# testing subsets, so performance is measured on data the model never saw.

# +
# define some useful variables for splitting
p = 10                   # percentage of data held out for testing
nr = len(dataset[:, 0])  # total number of instances (rows)
# number of *test* instances.  The original used int(nr/p), which only
# equals p% of the data by coincidence when p == 10; nr*p/100 is correct
# for any percentage.
nt = int(nr * p / 100)

# split into input (X) and output (y) variables for training and testing
X_train = dataset[:-nt, 0:8]
y_train = dataset[:-nt, 8]
# for testing
X_test = dataset[-nt:, 0:8]
y_test = dataset[-nt:, 8]

# checking that no row was lost in the split
if nr == (len(y_test) + len(y_train)):
    print('Correct split')
else:
    print('Incorrect split')
# -

# # Second step: Build the model
# A Sequential model (https://keras.io/api/models/sequential/) stacking
# fully-connected (Dense) layers: 8 inputs -> 12 -> 8 -> 1 sigmoid output.
# Note: the first Dense layer both declares the input shape (input_dim=8)
# and defines the first hidden layer.

# +
# define the keras model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))  # relu: rectified linear unit activation function
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))             # sigmoid: squashes output to a probability

# checking summary
print(model.summary())

# draw the network (this just works for the architecture 8-12-8-1)
from IPython.display import Image
Image("../img/nn_1_draw.png", width=500)
# -

# How do we know the number of layers and their types?  Often the best
# network structure is found through trial-and-error experimentation; it
# just needs to be large enough to capture the structure of the problem.

# # Third step: Compile the model
# Compiling lets Keras pick the backend representation and fixes how
# training is conducted: loss function, optimizer, and tracked metrics.
# - binary_crossentropy: https://keras.io/api/losses/
# - adam: https://keras.io/api/optimizers/adam/
# - accuracy: https://keras.io/api/metrics/accuracy_metrics/

# compile the keras model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# # Fourth step: Fit the model to the dataset
# Training runs over 'epochs' (one pass through all training rows), each
# split into 'batches' (rows considered before each weight update).
# Here: 500 epochs, batch size 10.  (The original prose said 300 while the
# code used 500 -- aligned to the code.)

# fit the keras model on the dataset
model.fit(X_train, y_train, epochs=500, batch_size=10, verbose=True)

# # Fifth step: Evaluate the model
# evaluate() returns [loss, accuracy]; we report accuracy as a percentage of
# correct predictions.  First on the training data...

# evaluate the keras model on training data
loss, accuracy = model.evaluate(X_train, y_train)
print('Accuracy: %.2f' % (accuracy * 100))

# Around 80%? Sounds great -- but it is easy to perform well on data the
# model has already seen.  The true test is the held-out testing data.

# evaluate the keras model on test data
loss, accuracy = model.evaluate(X_test, y_test)
print('Accuracy: %.2f' % (accuracy * 100))

# Not bad, right? What? Were you expecting a 100%? 🤣?

# # Sixth step: Make predictions
# Use the trained model to classify a new, unseen instance from the same
# population.

# input new instance
x_new = np.array([[5, 166, 72, 19, 175, 25.8, 0.587, 51]])

# make probability predictions with the model
predictions = model.predict(x_new)
print('Output: ' + str(np.round(predictions[0][0])))

# An output of 1.0 means the person potentially has diabetes -- bad for the
# person but good for us on catching that!

# Finally, run the next command to see where all this machine learning thing
# is going to...
from IPython.display import Image
Image("../img/rand/r_everywhere.jpg", width=500)

# # Final Notes
# Covered: load/split data, define, compile, fit, evaluate, and predict with
# a Keras model.  More datasets to play with:
# https://archive.ics.uci.edu/ml/index.php.  Good luck!
master/.ipynb_checkpoints/nn_1_sol-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Scrape CoinMarketCap watchlist counts for the top-3 coins by market cap,
# polling a few times and appending the rows to a CSV.

import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import datetime as dt
import time  # needed for time.sleep() in the polling loop (missing in the original -> NameError)

# +
# API: top-3 cryptocurrencies by market cap
api = 'https://api.coinmarketcap.com/data-api/v3/cryptocurrency/listing?start=1&limit=3&sortBy=market_cap&sortType=desc&convert=USD&cryptoType=all&tagType=all&audited=false'
r = requests.get(api)
jsondata = r.json()

name = []
for item in jsondata['data']['cryptoCurrencyList']:
    name.append(item['name'])

# slug-ify names for the /currencies/<slug> pages: spaces -> dashes,
# drop anything from the first dot onward
coin = [x.replace(" ", "-") for x in name]
coin = [x[:x.index('.')] if '.' in x else x for x in coin]

# coin slugs become DataFrame columns; each poll appends one timestamped row
df = pd.DataFrame(columns=coin)

# +
def func():
    """Scrape the watchlist count of every tracked coin and append one
    timestamped row to the global df.

    NOTE(review): relies on CoinMarketCap's current CSS class names
    ('sc-16r8icm-0 bILTHz', 'namePill') -- brittle against site redesigns.
    """
    # renamed from `time` so the local no longer shadows the time module
    stamp = dt.datetime.now()
    for i in coin:
        url = requests.get(f'https://coinmarketcap.com/currencies/{i}').text
        soup = BeautifulSoup(url, 'lxml')
        tag = soup.find('div', class_='sc-16r8icm-0 bILTHz')
        block = tag.find_all('div', class_='namePill')
        # last pill holds the watchlist count, e.g. "On 1,234 watchlists"
        wl = block[-1].text.split()[1].replace(',', '')
        wl = int(wl)
        df.loc[stamp, i] = wl


# poll three times, 10 seconds apart
n = 0
while n < 3:
    func()
    time.sleep(10)
    n += 1
# -

df

df.reset_index()

df.index

# append (mode='a') so repeated runs accumulate history in the same file
df.to_csv('my_csv.csv', mode='a', header=False)

df_data_min = pd.read_csv('my_csv.csv')

df_data_min

df


class Crypto:
    """OO scaffold for the scraping code above (work in progress)."""

    # initialize class
    def __init__(self):
        # Fixes vs. the original: `self` was missing from the signature, the
        # loop read the module-global `jsondata` instead of this response,
        # the backslash-continued string embedded spaces in the URL, and the
        # results were bound to locals instead of instance attributes.
        api = ('https://api.coinmarketcap.com/'
               'data-api/v3/cryptocurrency/listing?start=1&limit=3&'
               'sortBy=market_cap&sortType=desc&convert=USD&'
               'cryptoType=all&tagType=all&audited=false')
        r = requests.get(api)
        data = r.json()
        name = []
        for item in data['data']['cryptoCurrencyList']:
            name.append(item['name'])
        coin = [x.replace(" ", "-") for x in name]
        coin = [x[:x.index('.')] if '.' in x else x for x in coin]
        # coin slugs as columns of the per-instance dataframe
        self.coin = coin
        self.df = pd.DataFrame(columns=coin)

    def save_data(self):
        # TODO: persist self.df (see module-level df.to_csv above)
        pass
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Fine-tuning a Pretrained Network for Style Recognition # # In this example, we'll explore a common approach that is particularly useful in real-world applications: take a pre-trained Caffe network and fine-tune the parameters on your custom data. # # The advantage of this approach is that, since pre-trained networks are learned on a large set of images, the intermediate layers capture the "semantics" of the general visual appearance. Think of it as a very powerful generic visual feature that you can treat as a black box. On top of that, only a relatively small amount of data is needed for good performance on the target task. # First, we will need to prepare the data. This involves the following parts: # (1) Get the ImageNet ilsvrc pretrained model with the provided shell scripts. # (2) Download a subset of the overall Flickr style dataset for this demo. # (3) Compile the downloaded Flickr dataset into a database that Caffe can then consume. # + caffe_root = '../' # this file should be run from {caffe_root}/examples (otherwise change this line) import sys sys.path.insert(0, caffe_root + 'python') import caffe caffe.set_device(0) caffe.set_mode_gpu() import numpy as np from pylab import * # %matplotlib inline import tempfile # Helper function for deprocessing preprocessed images, e.g., for display. def deprocess_net_image(image): image = image.copy() # don't modify destructively image = image[::-1] # BGR -> RGB image = image.transpose(1, 2, 0) # CHW -> HWC image += [123, 117, 104] # (approximately) undo mean subtraction # clamp values in [0, 255] image[image < 0], image[image > 255] = 0, 255 # round and cast from float32 to uint8 image = np.round(image) image = np.require(image, dtype=np.uint8) return image # - # ### 1. 
Setup and dataset download # # Download data required for this exercise. # # - `get_ilsvrc_aux.sh` to download the ImageNet data mean, labels, etc. # - `download_model_binary.py` to download the pretrained reference model # - `finetune_flickr_style/assemble_data.py` downloads the style training and testing data # # We'll download just a small subset of the full dataset for this exercise: just 2000 of the 80K images, from 5 of the 20 style categories. (To download the full dataset, set `full_dataset = True` in the cell below.) # + # Download just a small subset of the data for this exercise. # (2000 of 80K images, 5 of 20 labels.) # To download the entire dataset, set `full_dataset = True`. full_dataset = False if full_dataset: NUM_STYLE_IMAGES = NUM_STYLE_LABELS = -1 else: NUM_STYLE_IMAGES = 2000 NUM_STYLE_LABELS = 5 # This downloads the ilsvrc auxiliary data (mean file, etc), # and a subset of 2000 images for the style recognition task. import os os.chdir(caffe_root) # run scripts from caffe root # !data/ilsvrc12/get_ilsvrc_aux.sh # !scripts/download_model_binary.py models/bvlc_reference_caffenet # !python examples/finetune_flickr_style/assemble_data.py \ # --workers=-1 --seed=1701 \ # --images=$NUM_STYLE_IMAGES --label=$NUM_STYLE_LABELS # back to examples os.chdir('examples') # - # Define `weights`, the path to the ImageNet pretrained weights we just downloaded, and make sure it exists. import os weights = os.path.join(caffe_root, 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel') assert os.path.exists(weights) # Load the 1000 ImageNet labels from `ilsvrc12/synset_words.txt`, and the 5 style labels from `finetune_flickr_style/style_names.txt`. 
# +
# Load ImageNet labels to imagenet_labels
imagenet_label_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
imagenet_labels = list(np.loadtxt(imagenet_label_file, str, delimiter='\t'))
assert len(imagenet_labels) == 1000
print 'Loaded ImageNet labels:\n', '\n'.join(imagenet_labels[:10] + ['...'])

# Load style labels to style_labels
style_label_file = caffe_root + 'examples/finetune_flickr_style/style_names.txt'
style_labels = list(np.loadtxt(style_label_file, str, delimiter='\n'))
if NUM_STYLE_LABELS > 0:
    style_labels = style_labels[:NUM_STYLE_LABELS]
print '\nLoaded style labels:\n', ', '.join(style_labels)
# -

# ### 2. Defining and running the nets
#
# We'll start by defining `caffenet`, a function which initializes the
# *CaffeNet* architecture (a minor variant on *AlexNet*), taking arguments
# specifying the data and number of output classes.

# +
from caffe import layers as L
from caffe import params as P

# lr_mult/decay_mult multipliers for layers whose weights ARE updated...
weight_param = dict(lr_mult=1, decay_mult=1)
bias_param = dict(lr_mult=2, decay_mult=0)
learned_param = [weight_param, bias_param]

# ...and for frozen layers (lr_mult=0 means no parameter update).
frozen_param = [dict(lr_mult=0)] * 2


def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1,
              param=learned_param,
              weight_filler=dict(type='gaussian', std=0.01),
              bias_filler=dict(type='constant', value=0.1)):
    """Build a Convolution layer followed by an in-place ReLU.

    Returns the (conv, relu) pair so callers can name both tops.
    """
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group,
                         param=param, weight_filler=weight_filler,
                         bias_filler=bias_filler)
    return conv, L.ReLU(conv, in_place=True)


def fc_relu(bottom, nout, param=learned_param,
            weight_filler=dict(type='gaussian', std=0.005),
            bias_filler=dict(type='constant', value=0.1)):
    """Build an InnerProduct (fully connected) layer followed by an
    in-place ReLU; returns the (fc, relu) pair."""
    fc = L.InnerProduct(bottom, num_output=nout, param=param,
                        weight_filler=weight_filler,
                        bias_filler=bias_filler)
    return fc, L.ReLU(fc, in_place=True)


def max_pool(bottom, ks, stride=1):
    """Build a MAX Pooling layer with kernel size `ks`."""
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)


def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto
    text specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    # Freeze conv1..fc7 unless learn_all is requested; fc8 is always learned.
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    # Dropout only at training time; at test time feed relu6/relu7 directly.
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name
# -

# Now, let's create a *CaffeNet* that takes unlabeled "dummy data" as input,
# allowing us to set its input images externally and see what ImageNet classes
# it predicts.
dummy_data = L.DummyData(shape=dict(dim=[1, 3, 227, 227]))
imagenet_net_filename = caffenet(data=dummy_data, train=False)
imagenet_net = caffe.Net(imagenet_net_filename, weights, caffe.TEST)


# Define a function `style_net` which calls `caffenet` on data from the Flickr
# style dataset.
#
# The new network will also have the *CaffeNet* architecture, with differences
# in the input and output:
#
# - the input is the Flickr style data we downloaded, provided by an
#   `ImageData` layer
# - the output is a distribution over 20 classes rather than the original 1000
#   ImageNet classes
# - the classification layer is renamed from `fc8` to `fc8_flickr` to tell
#   Caffe not to load the original classifier (`fc8`) weights from the
#   ImageNet-pretrained model

def style_net(train=True, learn_all=False, subset=None):
    """Return the filename of a CaffeNet prototxt fed by the Flickr style
    ImageData layer (batch size 50, 227x227 crops, mean-subtracted)."""
    if subset is None:
        subset = 'train' if train else 'test'
    source = caffe_root + 'data/flickr_style/%s.txt' % subset
    # mirror (random horizontal flips) only during training
    transform_param = dict(mirror=train, crop_size=227,
        mean_file=caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto')
    style_data, style_label = L.ImageData(
        transform_param=transform_param, source=source,
        batch_size=50, new_height=256, new_width=256, ntop=2)
    return caffenet(data=style_data, label=style_label, train=train,
                    num_classes=NUM_STYLE_LABELS,
                    classifier_name='fc8_flickr',
                    learn_all=learn_all)


# Use the `style_net` function defined above to initialize
# `untrained_style_net`, a *CaffeNet* with input images from the style dataset
# and weights from the pretrained ImageNet model.
#
# Call `forward` on `untrained_style_net` to get a batch of style training
# data.

untrained_style_net = caffe.Net(style_net(train=False, subset='train'),
                                weights, caffe.TEST)
untrained_style_net.forward()
style_data_batch = untrained_style_net.blobs['data'].data.copy()
style_label_batch = np.array(untrained_style_net.blobs['label'].data, dtype=np.int32)

# Pick one of the style net training images from the batch of 50 (we'll
# arbitrarily choose #8 here). Display it, then run it through `imagenet_net`,
# the ImageNet-pretrained network to view its top 5 predicted classes from the
# 1000 ImageNet classes.
#
# Below we chose an image where the network's predictions happen to be
# reasonable, as the image is of a beach, and "sandbar" and "seashore" both
# happen to be ImageNet-1000 categories. For other images, the predictions
# won't be this good, sometimes due to the network actually failing to
# recognize the object(s) present in the image, but perhaps even more often
# due to the fact that not all images contain an object from the (somewhat
# arbitrarily chosen) 1000 ImageNet categories. Modify the `batch_index`
# variable by changing its default setting of 8 to another value from 0-49
# (since the batch size is 50) to see predictions for other images in the
# batch. (To go beyond this batch of 50 images, first rerun the *above* cell
# to load a fresh batch of data into `style_net`.)

# +
def disp_preds(net, image, labels, k=5, name='ImageNet'):
    """Set `image` as `net`'s input and print its top-`k` predicted labels."""
    input_blob = net.blobs['data']
    net.blobs['data'].data[0, ...] = image
    # start='conv1' skips the data layer since we set the blob directly
    probs = net.forward(start='conv1')['probs'][0]
    top_k = (-probs).argsort()[:k]
    print 'top %d predicted %s labels =' % (k, name)
    print '\n'.join('\t(%d) %5.2f%% %s' % (i+1, 100*probs[p], labels[p])
                    for i, p in enumerate(top_k))


def disp_imagenet_preds(net, image):
    """Convenience wrapper: show top ImageNet-label predictions."""
    disp_preds(net, image, imagenet_labels, name='ImageNet')


def disp_style_preds(net, image):
    """Convenience wrapper: show top style-label predictions."""
    disp_preds(net, image, style_labels, name='style')
# -

batch_index = 8
image = style_data_batch[batch_index]
plt.imshow(deprocess_net_image(image))
print 'actual label =', style_labels[style_label_batch[batch_index]]

disp_imagenet_preds(imagenet_net, image)

# We can also look at `untrained_style_net`'s predictions, but we won't see
# anything interesting as its classifier hasn't been trained yet.
#
# In fact, since we zero-initialized the classifier (see `caffenet` definition
# -- no `weight_filler` is passed to the final `InnerProduct` layer), the
# softmax inputs should be all zero and we should therefore see a predicted
# probability of 1/N for each label (for N labels). Since we set N = 5, we get
# a predicted probability of 20% for each class.

disp_style_preds(untrained_style_net, image)

# We can also verify that the activations in layer `fc7` immediately before
# the classification layer are the same as (or very close to) those in the
# ImageNet-pretrained model, since both models are using the same pretrained
# weights in the `conv1` through `fc7` layers.

diff = untrained_style_net.blobs['fc7'].data[0] - imagenet_net.blobs['fc7'].data[0]
error = (diff ** 2).sum()
assert error < 1e-8

# Delete `untrained_style_net` to save memory. (Hang on to `imagenet_net` as
# we'll use it again later.)

del untrained_style_net

# ### 3. Training the style classifier
#
# Now, we'll define a function `solver` to create our Caffe solvers, which are
# used to train the network (learn its weights). In this function we'll set
# values for various parameters used for learning, display, and
# "snapshotting" -- see the inline comments for explanations of what they
# mean. You may want to play with some of the learning parameters to see if
# you can improve on the results here!

# +
from caffe.proto import caffe_pb2

def solver(train_net_path, test_net_path=None, base_lr=0.001):
    """Write a SolverParameter for SGD training of `train_net_path` to a
    temporary file and return that file's name."""
    s = caffe_pb2.SolverParameter()

    # Specify locations of the train and (maybe) test networks.
    s.train_net = train_net_path
    if test_net_path is not None:
        s.test_net.append(test_net_path)
        s.test_interval = 1000  # Test after every 1000 training iterations.
        s.test_iter.append(100) # Test on 100 batches each time we test.

    # The number of iterations over which to average the gradient.
    # Effectively boosts the training batch size by the given factor, without
    # affecting memory utilization.
    s.iter_size = 1

    s.max_iter = 100000     # # of times to update the net (training iterations)

    # Solve using the stochastic gradient descent (SGD) algorithm.
    # Other choices include 'Adam' and 'RMSProp'.
    s.type = 'SGD'

    # Set the initial learning rate for SGD.
    s.base_lr = base_lr

    # Set `lr_policy` to define how the learning rate changes during training.
    # Here, we 'step' the learning rate by multiplying it by a factor `gamma`
    # every `stepsize` iterations.
    s.lr_policy = 'step'
    s.gamma = 0.1
    s.stepsize = 20000

    # Set other SGD hyperparameters. Setting a non-zero `momentum` takes a
    # weighted average of the current gradient and previous gradients to make
    # learning more stable. L2 weight decay regularizes learning, to help
    # prevent the model from overfitting.
    s.momentum = 0.9
    s.weight_decay = 5e-4

    # Display the current training loss and accuracy every 1000 iterations.
    s.display = 1000

    # Snapshots are files used to store networks we've trained. Here, we'll
    # snapshot every 10K iterations -- ten times during training.
    s.snapshot = 10000
    s.snapshot_prefix = caffe_root + 'models/finetune_flickr_style/finetune_flickr_style'

    # Train on the GPU. Using the CPU to train large networks is very slow.
    s.solver_mode = caffe_pb2.SolverParameter.GPU

    # Write the solver to a temporary file and return its filename.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(str(s))
        return f.name
# -

# Now we'll invoke the solver to train the style net's classification layer.
#
# For the record, if you want to train the network using only the command line
# tool, this is the command:
#
# <code>
# build/tools/caffe train \
#     -solver models/finetune_flickr_style/solver.prototxt \
#     -weights models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel \
#     -gpu 0
# </code>
#
# However, we will train using Python in this example.
#
# We'll first define `run_solvers`, a function that takes a list of solvers
# and steps each one in a round robin manner, recording the accuracy and loss
# values each iteration. At the end, the learned weights are saved to a file.

def run_solvers(niter, solvers, disp_interval=10):
    """Run solvers for niter iterations,
       returning the loss and accuracy recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""
    blobs = ('loss', 'acc')
    # one per-iteration array per solver, for each recorded blob
    loss, acc = ({name: np.zeros(niter) for name, _ in solvers}
                 for _ in blobs)
    for it in range(niter):
        for name, s in solvers:
            s.step(1)  # run a single SGD step in Caffe
            loss[name][it], acc[name][it] = (s.net.blobs[b].data.copy()
                                             for b in blobs)
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = '; '.join('%s: loss=%.3f, acc=%2d%%' %
                                  (n, loss[n][it], np.round(100*acc[n][it]))
                                  for n, _ in solvers)
            print '%3d) %s' % (it, loss_disp)
    # Save the learned weights from both nets.
    weight_dir = tempfile.mkdtemp()
    weights = {}
    for name, s in solvers:
        filename = 'weights.%s.caffemodel' % name
        weights[name] = os.path.join(weight_dir, filename)
        s.net.save(weights[name])
    return loss, acc, weights


# Let's create and run solvers to train nets for the style recognition task.
# We'll create two solvers -- one (`style_solver`) will have its train net
# initialized to the ImageNet-pretrained weights (this is done by the call to
# the `copy_from` method), and the other (`scratch_style_solver`) will start
# from a *randomly* initialized net.
#
# During training, we should see that the ImageNet pretrained net is learning
# faster and attaining better accuracies than the scratch net.

# +
niter = 200  # number of iterations to train

# Reset style_solver as before.
style_solver_filename = solver(style_net(train=True))
style_solver = caffe.get_solver(style_solver_filename)
style_solver.net.copy_from(weights)

# For reference, we also create a solver that isn't initialized from
# the pretrained ImageNet weights.
scratch_style_solver_filename = solver(style_net(train=True))
scratch_style_solver = caffe.get_solver(scratch_style_solver_filename)

print 'Running solvers for %d iterations...' % niter
solvers = [('pretrained', style_solver),
           ('scratch', scratch_style_solver)]
loss, acc, weights = run_solvers(niter, solvers)
print 'Done.'

train_loss, scratch_train_loss = loss['pretrained'], loss['scratch']
train_acc, scratch_train_acc = acc['pretrained'], acc['scratch']
style_weights, scratch_style_weights = weights['pretrained'], weights['scratch']

# Delete solvers to save memory.
del style_solver, scratch_style_solver, solvers
# -

# Let's look at the training loss and accuracy produced by the two training
# procedures. Notice how quickly the ImageNet pretrained model's loss value
# (blue) drops, and that the randomly initialized model's loss value (green)
# barely (if at all) improves from training only the classifier layer.

plot(np.vstack([train_loss, scratch_train_loss]).T)
xlabel('Iteration #')
ylabel('Loss')

plot(np.vstack([train_acc, scratch_train_acc]).T)
xlabel('Iteration #')
ylabel('Accuracy')

# Let's take a look at the testing accuracy after running 200 iterations of
# training. Note that we're classifying among 5 classes, giving chance
# accuracy of 20%. We expect both results to be better than chance accuracy
# (20%), and we further expect the result from training using the ImageNet
# pretraining initialization to be much better than the one from training from
# scratch. Let's see.
def eval_style_net(weights, test_iters=10):
    """Build a test-mode style net from `weights` (a .caffemodel path),
    average its accuracy over `test_iters` forward passes, and return
    the (net, accuracy) pair.

    NOTE(review): the `weights` parameter shadows the module-level
    `weights` variable defined earlier in this notebook.
    """
    test_net = caffe.Net(style_net(train=False), weights, caffe.TEST)
    accuracy = 0
    for it in xrange(test_iters):
        accuracy += test_net.forward()['acc']
    accuracy /= test_iters
    return test_net, accuracy


test_net, accuracy = eval_style_net(style_weights)
print 'Accuracy, trained from ImageNet initialization: %3.1f%%' % (100*accuracy, )
scratch_test_net, scratch_accuracy = eval_style_net(scratch_style_weights)
print 'Accuracy, trained from random initialization: %3.1f%%' % (100*scratch_accuracy, )

# ### 4. End-to-end finetuning for style
#
# Finally, we'll train both nets again, starting from the weights we just
# learned. The only difference this time is that we'll be learning the weights
# "end-to-end" by turning on learning in *all* layers of the network, starting
# from the RGB `conv1` filters directly applied to the input image. We pass
# the argument `learn_all=True` to the `style_net` function defined earlier in
# this notebook, which tells the function to apply a positive (non-zero)
# `lr_mult` value for all parameters. Under the default, `learn_all=False`,
# all parameters in the pretrained layers (`conv1` through `fc7`) are frozen
# (`lr_mult = 0`), and we learn only the classifier layer `fc8_flickr`.
#
# Note that both networks start at roughly the accuracy achieved at the end of
# the previous training session, and improve significantly with end-to-end
# training. To be more scientific, we'd also want to follow the same
# additional training procedure *without* the end-to-end training, to ensure
# that our results aren't better simply because we trained for twice as long.
# Feel free to try this yourself!

# +
end_to_end_net = style_net(train=True, learn_all=True)

# Set base_lr to 1e-3, the same as last time when learning only the classifier.
# You may want to play around with different values of this or other
# optimization parameters when fine-tuning. For example, if learning diverges
# (e.g., the loss gets very large or goes to infinity/NaN), you should try
# decreasing base_lr (e.g., to 1e-4, then 1e-5, etc., until you find a value
# for which learning does not diverge).
base_lr = 0.001

style_solver_filename = solver(end_to_end_net, base_lr=base_lr)
style_solver = caffe.get_solver(style_solver_filename)
style_solver.net.copy_from(style_weights)

scratch_style_solver_filename = solver(end_to_end_net, base_lr=base_lr)
scratch_style_solver = caffe.get_solver(scratch_style_solver_filename)
scratch_style_solver.net.copy_from(scratch_style_weights)

print 'Running solvers for %d iterations...' % niter
solvers = [('pretrained, end-to-end', style_solver),
           ('scratch, end-to-end', scratch_style_solver)]
_, _, finetuned_weights = run_solvers(niter, solvers)
print 'Done.'

style_weights_ft = finetuned_weights['pretrained, end-to-end']
scratch_style_weights_ft = finetuned_weights['scratch, end-to-end']

# Delete solvers to save memory.
del style_solver, scratch_style_solver, solvers
# -

# Let's now test the end-to-end finetuned models. Since all layers have been
# optimized for the style recognition task at hand, we expect both nets to get
# better results than the ones above, which were achieved by nets with only
# their classifier layers trained for the style task (on top of either
# ImageNet pretrained or randomly initialized weights).

test_net, accuracy = eval_style_net(style_weights_ft)
print 'Accuracy, finetuned from ImageNet initialization: %3.1f%%' % (100*accuracy, )
scratch_test_net, scratch_accuracy = eval_style_net(scratch_style_weights_ft)
print 'Accuracy, finetuned from random initialization: %3.1f%%' % (100*scratch_accuracy, )

# We'll first look back at the image we started with and check our end-to-end
# trained model's predictions.

plt.imshow(deprocess_net_image(image))
disp_style_preds(test_net, image)

# Whew, that looks a lot better than before! But note that this image was from
# the training set, so the net got to see its label at training time.
#
# Finally, we'll pick an image from the test set (an image the model hasn't
# seen) and look at our end-to-end finetuned style model's predictions for it.

batch_index = 1
image = test_net.blobs['data'].data[batch_index]
plt.imshow(deprocess_net_image(image))
print 'actual label =', style_labels[int(test_net.blobs['label'].data[batch_index])]

disp_style_preds(test_net, image)

# We can also look at the predictions of the network trained from scratch. We
# see that in this case, the scratch network also predicts the correct label
# for the image (*Pastel*), but is much less confident in its prediction than
# the pretrained net.

disp_style_preds(scratch_test_net, image)

# Of course, we can again look at the ImageNet model's predictions for the
# above image:

disp_imagenet_preds(imagenet_net, image)

# So we did finetuning and it is awesome. Let's take a look at what kind of
# results we are able to get with a longer, more complete run of the style
# recognition dataset. Note: the below URL might be occasionally down because
# it is run on a research machine.
#
# http://demo.vislab.berkeleyvision.org/
examples/02-fine-tuning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem 1

# +
import csv

germplasm = open('Germplasm.tsv', 'r')
locus = open('LocusGene.tsv', 'r')

data_germplasm = []
data_locus = []

# Collect the first column (the AGI locus code) from each TSV file.
with germplasm as file:
    reader = csv.reader(file, delimiter="\t")
    for row in reader:
        data_germplasm.append(row[0])

with locus as file:
    reader = csv.reader(file, delimiter="\t")
    for row in reader:
        data_locus.append(row[0])

if data_germplasm == data_locus:
    print("The AGI Locus Code sequence list are the same in both files")
else:
    print("The lists are not equal")
# -

# # Problem 2: Design and create the database.
#
# It should have two tables - one for each of the two data files.
# The two tables should be linked in a 1:1 relationship
# you may use either sqlMagic or pymysql to build the database

# %load_ext sql
# %sql mysql+pymysql://root:root@127.0.0.1:3306/mysql

# +
# %sql show databases;
# -

# %sql create database germplasm;

# %sql show databases

# %sql use germplasm;

# %sql CREATE TABLE germplasm(locus VARCHAR (50) NOT NULL PRIMARY KEY, germplasm VARCHAR (100) NOT NULL, phenotype VARCHAR (1000) NOT NULL, pubmed INTEGER NOT NULL);

# %sql DESCRIBE germplasm

# %sql CREATE TABLE locusgene(locus VARCHAR (50) NOT NULL PRIMARY KEY, gene VARCHAR(10) NOT NULL, proteinlength INTEGER NOT NULL);

# %sql DESCRIBE locusgene

# %sql DESCRIBE locusgene

# # Problem 3: Fill the database with the contents of the two data files.

# +
import csv
import pymysql.cursors

# Re-open the TSV files (the Problem 1 handles were closed by their
# `with` blocks) and parse them with DictReader so columns are accessed
# by header name.
locusgene_file = open('LocusGene.tsv', 'r')
germplasm_file = open('Germplasm.tsv', 'r')
locus_data = csv.DictReader(locusgene_file, delimiter="\t", quotechar='"')
germplasm_data = csv.DictReader(germplasm_file, delimiter="\t", quotechar='"')

connection = pymysql.connect(host='localhost',
                             user='root',
                             password='<PASSWORD>',
                             db='germplasm',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)

try:
    with connection.cursor() as cursor:
        # Parameterized queries (%s placeholders) let the driver do the
        # quoting/escaping, avoiding both SQL injection and the quoting
        # bugs that hand-built INSERT strings are prone to.
        for row in locus_data:
            cursor.execute(
                "INSERT INTO locusgene (locus, gene, proteinlength) "
                "VALUES (%s, %s, %s)",
                (row["Locus"], row["Gene"], row["ProteinLength"]))
        for row in germplasm_data:
            cursor.execute(
                "INSERT INTO germplasm (locus, germplasm, phenotype, pubmed) "
                "VALUES (%s, %s, %s, %s)",
                (row["Locus"], row["germplasm"], row["phenotype"], row["pubmed"]))
    connection.commit()
finally:
    connection.close()
    locusgene_file.close()
    germplasm_file.close()
# -

# %sql SELECT * FROM locusgene

# %sql SELECT * FROM germplasm

# ## Problem 4: Create reports, written to a file
#
# 1. Create a report that shows the full, joined, content of the two database tables (including a header line)
#
# 2. Create a joined report that only includes the Genes SKOR and MAA3
#
# 3. Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx)
#
# 4. Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx)
#
# When creating reports 2 and 3, remember the "Don't Repeat Yourself" rule!
#
# All reports should be written to **the same file**.  You may name the file anything you wish.

# %sql SHOW TABLES;

# report1 = %sql SELECT * FROM germplasm RIGHT JOIN locusgene ON \
#                germplasm.locus = locusgene.locus
Accelerated_Intro_WilkinsonExams/Exam_2_Answers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.model_selection import train_test_split
# %matplotlib inline
import scipy.stats as ss
import matplotlib.style as ms
import seaborn as sns
from sklearn import ensemble

#sns.set('whitegrid')
plt.rc('font', family='Arial')
#mpl.rcParams['font.sans-serif'] = ['Arial']
#mpl.rcParams['axes.unicode_minus']=False

features = ['Barren', 'Cultivated and Managed Vegetation',
            'Deciduous Broadleaf Trees', 'Evergreen Broadleaf Trees',
            'Evergreen Deciduous Needleleaf Trees', 'Herbaceous Vegetation',
            'Mixed-Other Trees', 'Regularly Flooded Vegetation', 'Shrubs',
            'Snow-Ice', 'distance_light', 'distance_water', 'Temperature',
            'Surface Pressure', 'U_Wind', 'V_wind', 'Precipitation', 'NDVI',
            'Pop', 'month']
#features = ['Temperature','NDVI','Evergreen Broadleaf Trees','Barren','Herbaceous Vegetation', 'distance_light', 'distance_water','Cultivated and Managed Vegetation']
#f = 'distance_light'


def plot_light(path, file, unit, outpath=None, title=None):
    """Plot the partial-dependence curve and predictor-frequency histogram
    for one feature and save the figure as <outpath>/<feature>.png.

    Parameters
    ----------
    path : str
        Directory containing `hist_csv/hist_<file>.csv` and
        `pdp_csv/pdp_<file>.csv`.
    file : str
        Feature name (e.g. 'distance_light').
    unit : str
        Unit string for the x-axis label (e.g. 'Km').
    outpath : str, optional
        Output directory for the figure; defaults to `<path>/plot_figure`.
    title : str, optional
        Region prefix for the title, rendered as '<title>ern China'.
    """
    #os.chdir(r'F:\8.18数据整理\variable select\Open water')
    f = file
    histfile = path + '/hist_csv/' + "hist_%s.csv" % f
    pdpfile = path + '/pdp_csv/' + "pdp_%s.csv" % f
    hist = pd.read_csv(histfile)
    pdp = pd.read_csv(pdpfile)
    # distances are stored in metres; convert to kilometres for plotting
    if (f == 'distance_light') | (f == 'distance_water'):
        hist[f] = hist[f].apply(lambda x: x / 1000)
        pdp[f] = pdp[f].apply(lambda x: x / 1000)

    fig, ax2 = plt.subplots(figsize=(8, 6))
    #ax1.spines['left'].set_color('royalblue')
    ax2.spines['left'].set_color('b')
    ax2.spines['right'].set_color('orangered')

    # partial-dependence curve (right-hand concept, drawn on the base axes)
    sns.lineplot(ax=ax2, data=pdp, x=pdp.iloc[:, 0],
                 y=pdp['partial_dependence'], color='lightsalmon', lw=3)
    ax2.tick_params(axis='y', labelsize=20, color='orangered',
                    labelcolor='orangered')
    ax2.tick_params(axis='x', labelsize=20)
    ax2.set_ylim([0.4, 0.6])
    ax2.set_yticks([0.4, 0.45, 0.5, 0.55, 0.6])
    #ax2.yaxis.set_major_locator(plt.MultipleLocator(0.1))
    ax2.set_ylabel(ylabel='distribution probability',
                   fontdict={'size': 24, 'color': 'orangered'})
    ax2.axhline(y=0.5, ls=':', lw=3, c='black')  # 0.5 = chance level
    ax2.set_xlabel(xlabel='Distance to light' + ' (%s)' % unit,
                   fontdict={'size': 24})
    ax2.set_xticks(list(range(0, 401, 100)))
    ax2.set_xlim([-5, 405])

    # predictor frequency (histogram) on a twin axis
    ax1 = ax2.twinx()
    ax1.plot(hist.iloc[:, 0], hist['hist'], ls='--', lw='3', color='b',
             alpha=0.7)
    #ax1.fill_between(hist[f],hist['hist'],alpha = 0.4,lw=0,color = 'royalblue')
    ax1.tick_params(axis='y', labelsize=18, color='b', labelcolor='b')
    ax1.set_ylim([0, 0.5])
    ax1.margins(0.02)
    # fixed typo: "preditor" -> "predictor"
    ax1.set_ylabel(ylabel='predictor frequency',
                   fontdict={'size': 24, 'color': 'b'})

    plt.margins(0.02)
    #plt.title(label='Partial Dependence Plot of %s'%f,fontdict = {'size':16})
    plt.title(label='%sern China' % title, fontdict={'size': 28})
    if outpath is None:
        outpath = os.path.join(path, 'plot_figure')
    # os.path.join is portable; the original hard-coded a Windows '\' separator
    plt.savefig(os.path.join(outpath, "%s.png" % f), dpi=300,
                bbox_inches='tight')


# East: Surface Pressure -> y-range [0, 0.7]
# West: water            -> y-range [0, 1]
# West: Temperature      -> y-range [0, 0.6]
# West: Surface Pressure -> y-range [0, 0.7]
#

# + jupyter={"outputs_hidden": true}
features = [['distance_light', 'Km']]  # [['distance_water','Km']]
for f, u in features:
    inpath = r'D:\china avian distribution\20200806\plot_file\region_setR1\east'
    outpath = r'D:\china avian distribution\20200806\plot_file\region_setR1\east'
    plot_light(inpath, file=f, outpath=outpath, unit=u, title='East')
    print(f, u)

# +
features = [['distance_light', 'Km']]  # [['distance_water','Km']]
for f, u in features:
    inpath = r'D:\china avian distribution\20200806\plot_file\region_setR1\west'
    outpath = r'D:\china avian distribution\20200806\plot_file\region_setR1\west'
    plot_light(inpath, file=f, outpath=outpath, unit=u, title='West')
    print(f, u)
# -
code/modeling/plot_pdp_light_Rset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Redo partitions

# In this notebook, we re-write the any matrix tables that are too sparse to
# have fewer partitions.

# # Setup

from datetime import datetime
import hail as hl
import os
import time

# ## Define constants

# Source matrix table to be re-partitioned.
MT = 'gs://fc-secure-fd6786bf-6c28-4f33-ac30-3860fbeee5bb/data/merged/20210621/merged-filtered-chr2.mt'
# Target (smaller) number of partitions for the rewritten matrix table.
SMALLER_NUM_MT_PARTITIONS = 1000

# +
RESULT_BUCKET = os.getenv("WORKSPACE_BUCKET")
DATESTAMP = time.strftime('%Y%m%d')
TIMESTAMP = time.strftime('%Y%m%d_%H%M%S')
# NOTE: `!pwd` is an IPython line magic; WORK_DIR is only defined when this
# runs inside Jupyter.
# WORK_DIR = !pwd

# Outputs
FEWER_PARTITIONS_MT = f'{os.getenv("WORKSPACE_BUCKET")}/data/merged/{DATESTAMP}/{os.path.basename(MT)}'
HAIL_LOG = f'{WORK_DIR[0]}/hail-redo-partitions-{TIMESTAMP}.log'
HAIL_LOG_DIR_FOR_PROVENANCE = f'{os.getenv("WORKSPACE_BUCKET")}/hail-logs/{DATESTAMP}/'
# -

# ## Check access

# !gsutil ls {MT}

# In general, this should not exist
print(FEWER_PARTITIONS_MT)

# !gsutil ls {FEWER_PARTITIONS_MT}

# ## Start Hail

# +
# See also https://towardsdatascience.com/fetch-failed-exception-in-apache-spark-decrypting-the-most-common-causes-b8dff21075c
# See https://spark.apache.org/docs/2.4.7/configuration.html
EXTRA_SPARK_CONFIG = {
    # If set to "true", performs speculative execution of tasks. This means if
    # one or more tasks are running slowly in a stage, they will be
    # re-launched.
    'spark.speculation': 'true',  # Default is false.

    # Fraction of tasks which must be complete before speculation is enabled
    # for a particular stage.
    'spark.speculation.quantile': '0.95',  # Default is 0.75

    # Default timeout for all network interactions. This config will be used
    # in place of spark.core.connection.ack.wait.timeout,
    # spark.storage.blockManagerSlaveTimeoutMs,
    # spark.shuffle.io.connectionTimeout, spark.rpc.askTimeout or
    # spark.rpc.lookupTimeout if they are not configured.
    'spark.network.timeout': '180s',  # Default is 120s

    # (Netty only) Fetches that fail due to IO-related exceptions are
    # automatically retried if this is set to a non-zero value. This retry
    # logic helps stabilize large shuffles in the face of long GC pauses or
    # transient network connectivity issues.
    'spark.shuffle.io.maxRetries': '10',  # Default is 3

    # (Netty only) How long to wait between retries of fetches. The maximum
    # delay caused by retrying is 15 seconds by default, calculated as
    # maxRetries * retryWait.
    'spark.shuffle.io.retryWait': '15s',  # Default is 5s

    # Number of failures of any particular task before giving up on the job.
    # The total number of failures spread across different tasks will not
    # cause the job to fail; a particular task has to fail this number of
    # attempts. Should be greater than or equal to 1. Number of allowed
    # retries = this value - 1.
    'spark.task.maxFailures': '10',  # Default is 4.

    # Number of consecutive stage attempts allowed before a stage is aborted.
    'spark.stage.maxConsecutiveAttempts': '10'  # Default is 4.
}
# -

hl.init(spark_conf=EXTRA_SPARK_CONFIG,
        min_block_size=50,
        default_reference='GRCh38',
        log=HAIL_LOG)

# Check the configuration.
sc = hl.spark_context()
config = sc._conf.getAll()
config.sort()
config

# # Read AoU matrix table

mt = hl.read_matrix_table(MT)

mt.n_partitions()

mt.describe()

# # Re-partition the matrix table
#
# From https://discuss.hail.is/t/improving-pipeline-performance/1344

start = datetime.now()
print(start)

# +
# https://discuss.hail.is/t/improving-pipeline-performance/1344
# Compute balanced interval boundaries for the smaller partition count, then
# re-read the source table with those intervals and write it out.
intervals = mt._calculate_new_partitions(SMALLER_NUM_MT_PARTITIONS)

hl.read_matrix_table(MT, _intervals=intervals).write(FEWER_PARTITIONS_MT)
# -

end = datetime.now()
print(end)
print(end - start)

# # Provenance

# Copy the Hail log to the workspace bucket so that we can retain it.

# !gzip --keep {HAIL_LOG}

# !gsutil cp {HAIL_LOG}.gz {HAIL_LOG_DIR_FOR_PROVENANCE}

print(datetime.now())

# !pip3 freeze
aou_workbench_pooled_analyses/matrix_table_creation/redo_partitions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mixing the easy and the hard # # Often in computation, a program is a mix of easy and hard problems. Some jobs only take a fraction of a second, others hours on end. It may be beneficial to mix these in a way that the hard jobs are outsourced to a compute node, while we don't want to spend minutes or even days waiting in the queue to do something very simple.
notebooks/hard_and_easy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Remote data access using pandas

# The pandas library enables access to data displayed on websites using the
# `read_html()` function and access to the API endpoints of various data
# providers through the related `pandas-datareader` library.

import os
import pandas as pd  # fixed: `pd` is used throughout but was never imported
import pandas_datareader.data as web
from datetime import datetime
from pprint import pprint

# ## Download html table with SP500 constituents

# The download of the content of one or more html tables works as follows,
# for instance for the constituents of the S&P500 index from Wikipedia

sp_url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
sp500_constituents = pd.read_html(sp_url, header=0)[0]

sp500_constituents.info()

sp500_constituents.head()

# ## pandas-datareader for Market Data

# `pandas` used to facilitate access to data providers' APIs directly, but
# this functionality has moved to the related pandas-datareader library. The
# stability of the APIs varies with provider policies, and as of June 2018 at
# version 0.7, the following sources are available

# See [documentation](https://pandas-datareader.readthedocs.io/en/latest/);
# functionality frequently changes as underlying provider APIs evolve.

# ### Yahoo Finance

# +
start = '2014'
end = datetime(2017, 5, 24)
yahoo = web.DataReader('FB', 'yahoo', start=start, end=end)
yahoo.info()
# -

# ### IEX

# IEX is an alternative exchange started in response to the HFT controversy
# and portrayed in Michael Lewis' controversial Flash Boys. It aims to slow
# down the speed of trading to create a more level playing field and has been
# growing rapidly since launch in 2016 while still small with a market share
# of around 2.5% in June 2018.

# +
start = datetime(2015, 2, 9)
# end = datetime(2017, 5, 24)
iex = web.DataReader('FB', 'iex', start)
iex.info()
# -

iex.tail()

# #### Book Data
#
# In addition to historical EOD price and volume data, IEX provides real-time
# depth of book quotations that offer an aggregated size of orders by price
# and side. This service also includes last trade price and size information.
#
# DEEP is used to receive real-time depth of book quotations direct from IEX.
# The depth of book quotations received via DEEP provide an aggregated size of
# resting displayed orders at a price and side, and do not indicate the size
# or number of individual orders at any price level. Non-displayed orders and
# non-displayed portions of reserve orders are not represented in DEEP.
#
# DEEP also provides last trade price and size information. Trades resulting
# from either displayed or non-displayed orders matching on IEX will be
# reported. Routed executions will not be reported.

# Only works on trading days.
book = web.get_iex_book('AAPL')

list(book.keys())

orders = pd.concat([pd.DataFrame(book[side]).assign(side=side)
                    for side in ['bids', 'asks']])

orders.head()

# Some entries are scalars rather than table-shaped; fall back to printing
# the raw value when DataFrame construction fails.
for key in book.keys():
    try:
        print(f'\n{key}')
        print(pd.DataFrame(book[key]))
    except Exception:
        print(book[key])

pd.DataFrame(book['trades']).head()

# ### Quandl

# +
symbol = 'FB.US'
quandl = web.DataReader(symbol, 'quandl', '2015-01-01')
quandl.info()
# -

# ### FRED

# +
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
gdp = web.DataReader('GDP', 'fred', start, end)
gdp.info()
# -

inflation = web.DataReader(['CPIAUCSL', 'CPILFESL'], 'fred', start, end)
inflation.info()

# ### Fama/French

from pandas_datareader.famafrench import get_available_datasets

get_available_datasets()

ds = web.DataReader('5_Industry_Portfolios', 'famafrench')
print(ds['DESCR'])

# ### World Bank

from pandas_datareader import wb

gdp_variables = wb.search('gdp.*capita.*const')
gdp_variables.head()

wb_data = wb.download(indicator='NY.GDP.PCAP.KD',
                      country=['US', 'CA', 'MX'],
                      start=1990,
                      end=2019)

wb_data.head()

# ### OECD

df = web.DataReader('TUD', 'oecd', end='2015')

df[['Japan', 'United States']]

# ### EuroStat

df = web.DataReader('tran_sf_railac', 'eurostat')

df.head()

# ### Stooq

# Google finance stopped providing common index data download. The Stooq site
# had this data for download for a while but is currently broken, awaiting
# release of [fix](https://github.com/pydata/pandas-datareader/issues/594)

index_url = 'https://stooq.com/t/'

ix = pd.read_html(index_url)

len(ix)

f = web.DataReader('^SPX', 'stooq', start='20000101')

f.info()

f.head()

# ### NASDAQ Symbols

from pandas_datareader.nasdaq_trader import get_nasdaq_symbols

symbols = get_nasdaq_symbols()
symbols.info()

url = 'https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ'

res = pd.read_html(url)

len(res)

for r in res:
    print(r.info())

# ### Tiingo

# Requires [signing up](https://api.tiingo.com/) and storing API key in
# environment

df = web.get_data_tiingo('GOOG', api_key=os.getenv('TIINGO_API_KEY'))

df.info()
Chapter02/02_data_providers/01_datareader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # Dependencies # - from __future__ import division from itertools import * # + deletable=true editable=true import os falange = r"/run/user/1000/gvfs/smb-share:server=falange,share=homes" #os.listdir(falange) # + deletable=true editable=true import numpy as np import matplotlib.pyplot as plt # %matplotlib nbagg # + [markdown] deletable=true editable=true # ```$ ipcluster start -n 4 # ``` # + deletable=true editable=true import matplotlib.patches as mpatches import seaborn as sns from matplotlib.colors import ListedColormap sns.set_palette("Spectral") current_palette = sns.color_palette() my_cmap = ListedColormap((["#000000"]+current_palette.as_hex())[::-1]) # + deletable=true editable=true from ipyparallel import Client rc = Client(profile="parallel") # + deletable=true editable=true rc.shutdown() # + deletable=true editable=true dv = rc[:] # + deletable=true editable=true dv # + deletable=true editable=true rc.ids # + deletable=true editable=true with dv.sync_imports(): import numpy # + [markdown] deletable=true editable=true # # Mandelbrot # + deletable=true editable=true @dv.parallel(block = True) def mandel3(x, y, max_iters=50): c = complex(x, y) z = 0.0j for i in range(max_iters): z = z*z + c if z.real*z.real + z.imag*z.imag >= 4: return i return max_iters #could return X,Y, max_iters to try block=False # + deletable=true editable=true #zooming def zoombrot(zooms, n=500, target = [-0.748202,-0.161082]): for zoom in range(zooms): window = 1./10**zoom x = np.linspace(target[0]-1*window,target[0]+1*window,n) y = np.linspace(target[1]-1*window,target[1]+1*window,n) X, Y = np.meshgrid(x, y) im3 = numpy.reshape(mandel3.map(X.ravel(), Y.ravel()), (len(y), len(x))) fig, axes = plt.subplots(1, 1) 
axes.grid(False) values = np.unique(im3.ravel()) sns.set_palette("Spectral",len(values)-1) current_palette = sns.color_palette() my_cmap = ListedColormap((["#000000"]+current_palette.as_hex())[::-1]) im = axes.imshow(im3, cmap=my_cmap,extent=[x[0],x[-1],y[0],y[-1]]) #im = axes.imshow(ship, cmap=my_cmap) #colors = [ im.cmap(im.norm(value)) for value in values] #patches = [ mpatches.Patch(color=colors[i], label="{l}".format(l=values[i]) ) for i in range(len(values)) ] #plt.legend(handles=patches, loc='best', title="Iterations", frameon=True, fancybox=True) plt.title('Mandelbrot w/ %s Points'%(len(im3)**2)) plt.show() # + deletable=true editable=true zoombrot(5) # + [markdown] deletable=true editable=true # # Burning Ship # + deletable=true editable=true @dv.parallel(block=True) def burning_ship(x,y,max_iters=100): c = complex(x,y) z = 0.0j for i in range(max_iters): z = (abs(z.real)+abs(z.imag)*1j)**2 + c if z.real*z.real + z.imag*z.imag >= 4: return i return max_iters #could return X,Y, max_iters to try block=False # + deletable=true editable=true #x = np.arange(-2, 1, 0.0005) #y = np.arange(-1, 1, 0.0005) n = 5000 n = 10000 #n = 500 y = np.linspace(-0.015,0.0025,1/3 * n) x = np.linspace(-1.87,-1.83,n) xstart = -1.871 x = np.linspace(xstart,xstart+(y[-1]-y[0])*3,n) X, Y = np.meshgrid(x, y) # + deletable=true editable=true # %%time shipwide = numpy.reshape(burning_ship.map(X.ravel(), Y.ravel()), (len(y), len(x))) # + def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." 
a, b = tee(iterable) next(b, None) return izip(a, b) def colour_grad(cs,n): grad = [] for c1,c2 in pairwise(cs): for step in np.linspace(0,1,n/(len(cs)-1),endpoint=False): grad += [tuple(c1*step + c2*(1-step))] return grad # - sns.set_palette(['orange','white']) sns.set_palette(colour_grad(np.array(sns.color_palette()),len(np.unique(shipwide)))) #sns.set_palette("YlOrRd_d",len(np.unique(shipwide))) sns.set_palette("OrRd",len(np.unique(shipwide))) current_palette = sns.color_palette() #my_cmap = ListedColormap((["#000000"]+current_palette.as_hex()[::-1])[::-1]) #my_cmap = ListedColormap(current_palette.as_hex()[::-1]) my_cmap = ListedColormap(current_palette.as_hex()) # + fig, axes = plt.subplots(1, 1) axes.grid(False) im = axes.imshow(shipwide, cmap=my_cmap,extent=[x[0],x[-1],y[-1],y[0]],interpolation='bessel') axes.set_xticks([]) axes.set_yticks([]) fig.tight_layout() fig.savefig('burningshipwide.png', format='png', dpi=1200,bbox_inches='tight') #plt.show() # - # # Other # + deletable=true editable=true # %%time im3 = numpy.reshape(mandel3.map(X.ravel(), Y.ravel()), (len(y), len(x))) # + deletable=true editable=true fig, axes = plt.subplots(1, 1) axes.grid(False) # im = axes.imshow(im3, cmap=my_cmap) im = axes.imshow(ship, cmap=my_cmap) axes.set_xticks([]) axes.set_yticks([]) if legending: values = np.unique(im) # get the colors of the values, according to the # colormap used by imshow colors = [ im.cmap(im.norm(value)) for value in values] # create a patch (proxy artist) for every color patches = [ mpatches.Patch(color=colors[i*10], label="{l}".format(l=values[i*10]) ) for i in range(len(values)/10 +1) ] # put those patched as legend-handles into the legend plt.legend(handles=patches, loc='best', title="Iterations", frameon=True, fancybox=True) plt.title('Mandelbrot w/ %s Points'%(len(im3)**2)) fig. 
plt.show() # + [markdown] deletable=true editable=true # ## Magic Methods # + deletable=true editable=true # %%time # %px a = numpy.random.random(1000) print np.array(dv['a']).mean() # + [markdown] deletable=true editable=true # Parallel Magic Funcs # + deletable=true editable=true # %%px # %%time # %matplotlib inline import seaborn as sns x = numpy.random.normal(numpy.random.randint(-10, 10), 1, 1000) sns.kdeplot(x);
Parallel Computing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Interactive plotting demo: cufflinks wires plotly's interactive charts
# directly onto pandas DataFrames via the .iplot() accessor.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from plotly import __version__
import cufflinks as cf
# %matplotlib inline
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot

# Configure plotly/cufflinks for offline (no-account) rendering in the notebook.
init_notebook_mode(connected = True)
cf.go_offline()

# df1: 100 rows of random data in four columns; df2: a small categorical frame.
df1 = pd.DataFrame(np.random.randn(100,4), columns = "A B C D".split())
df2 = pd.DataFrame({'Category':'A B C'.split(), 'Values':[32,4,50]})
df2

# BUG FIX: the original cell called df.plot(), but no variable named `df`
# is ever defined in this notebook — the random frame is df1.
df1.plot()

# Interactive scatter of column A vs B.
df1.iplot(kind = "scatter", x ='A', y = 'B', mode = "markers")

# Bar chart of the categorical frame, and of the per-column sums of df1.
df2.iplot(kind = "bar", x = "Category", y = "Values")
df1.sum().iplot(kind = "bar")

# Box plot of all four random columns.
df1.iplot(kind = "box")

# 3-D surface from three numeric columns.
df3 = pd.DataFrame({'x':[1,2,3,4,5],
                    'y':[10,20,30,20,10],
                    'z':[500,400,300,200,100]})
df3
df3.iplot(kind = "surface")

# Bubble chart: C controls marker size.
df1.iplot(kind = "bubble", x = "A", y = "B", size = "C")
InteractivePlots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: platzi # language: python # name: platzi # --- # # Módulo 2: HTML: Requests y BeautifulSoup # ## Parsing Pagina12 # # <img src='https://www.pagina12.com.ar/assets/media/logos/logo_pagina_12_n.svg?v=1.0.178' width=300></img> # En este módulo veremos cómo utilizar las bibliotecas `requests` y `bs4` para programar scrapers de sitios HTML. Nos propondremos armar un scraper de noticias del diario <a href='www.pagina12.com.ar'>Página 12</a>. # Supongamos que queremos leer el diario por internet. Lo primero que hacemos es abrir el navegador, escribir la URL del diario y apretar Enter para que aparezca la página del diario. Lo que ocurre en el momento en el que apretamos Enter es lo siguiente: # 1. El navegador envía una solicitud a la URL pidiéndole información. # 2. El servidor recibe la petición y procesa la respuesta. # 3. El servidor envía la respuesta a la IP de la cual recibió la solicitud. # 4. Nuestro navegador recibe la respuesta y la muestra **formateada** en pantalla. # # Para hacer un scraper debemos hacer un programa que replique este flujo de forma automática para luego extraer la información deseada de la respuesta. Utilizaremos `requests` para realizar peticiones y recibir las respuestas y `bs4` para *parsear* la respuesta y extraer la información.<br> # Te dejo unos links que tal vez te sean de utilidad: # - [Códigos de status HTTP](https://developer.mozilla.org/es/docs/Web/HTTP/Status) # - [Documentación de requests](https://requests.kennethreitz.org/en/master/) # - [Documentación de bs4](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) import requests url = 'https://www.pagina12.com.ar/' p12 = requests.get(url) p12.status_code p12.content # Muchas veces la respuesta a la solicitud puede ser algo que no sea un texto: una imagen, un archivo de audio, un video, etc. 
p12.text # Analicemos otros elementos de la respuesta p12.headers p12.request.headers # El contenido de la request que acabamos de hacer está avisando que estamos utilizando la biblioteca requests para python y que no es un navegador convencional. Se puede modificar # ## Clase 4 # Ya obtuvimos el código HTML de la página. En esta clase veremos cómo extraer de él la información deseada. from bs4 import BeautifulSoup s = BeautifulSoup(p12.text, 'lxml') type(s) print(s.prettify()) # Primer ejercicio: obtener un listado de links a las distintas secciones del diario.<br> # DOM: estructura jerárquica, html<br> # Usar el inspector de elementos para ver dónde se encuentra la información. Ojo cuando la página es responsive s.find('ul', attrs={'class':'hot-sections'}).find_all('li')
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 2_ HTML_ Requests y BeautifulSoup/M2C2 - Parseando HTML con BS - Script.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 32-bit
#     name: python3
# ---

# Break a Caesar (shift) cipher by frequency analysis: compare the
# ciphertext's letter distribution, under every candidate shift, against
# standard English letter frequencies and pick the shift with the smallest
# statistical distance.

ciphertext = "pixxg qa pm epw pia bpm xczm bzcbp qv pqu pm eqtt zmozmb vw aikzqnqkm bpib smmxa qb"

LETTERS = "abcdefghijklmnopqrstuvwxyz"

# Tally how often each letter occurs in the ciphertext (spaces are skipped),
# and count the total number of letters seen.
counts = {letter: 0 for letter in LETTERS}
total = 0
for ch in ciphertext:
    if ch in counts:
        counts[ch] += 1
        total += 1
print(counts)
print(total)

# Convert raw counts into relative frequencies.
probs_cipher = {letter: counts[letter] / total for letter in LETTERS}
print(probs_cipher)

# Standard relative frequencies of letters in English text.
probs_eng = {"a": 0.082, "b": 0.015, "c": 0.028, "d": 0.042, "e": 0.127,
             "f": 0.022, "g": 0.020, "h": 0.061, "i": 0.070, "j": 0.001,
             "k": 0.008, "l": 0.040, "m": 0.024, "n": 0.067, "o": 0.075,
             "p": 0.019, "q": 0.001, "r": 0.060, "s": 0.063, "t": 0.090,
             "u": 0.028, "v": 0.010, "w": 0.024, "x": 0.001, "y": 0.020,
             "z": 0.001}

# For every candidate shift, sum the absolute differences between the English
# frequency of each letter and the ciphertext frequency of that letter shifted
# forward by the candidate amount.  The key is the shift with minimum distance.
stat_dist = {str(shift): 0.0 for shift in range(26)}
for shift in range(26):
    for j in range(26):
        # BUG FIX: the original wrapped with % 25, which can never produce
        # index 25 ('z') and mis-aligns the tail of the alphabet.  A Caesar
        # shift over 26 letters must wrap modulo 26.
        stat_dist[str(shift)] += abs(
            probs_eng[LETTERS[j]] - probs_cipher[LETTERS[(j + shift) % 26]]
        )
for shift in range(26):
    # Halving gives the total-variation-distance convention.
    stat_dist[str(shift)] = 0.5 * stat_dist[str(shift)]

key = min(stat_dist, key=stat_dist.get)
print(key)

# Decrypt: shift every ciphertext letter back by the recovered key,
# preserving spaces as-is.
plaintext = ""
for letter in ciphertext:
    if letter == " ":
        plaintext += letter
    else:
        idx = LETTERS.find(letter)
        plaintext += LETTERS[(idx - int(key)) % 26]
print(plaintext)
Assignment_1/code_a.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="NEBQsRW0Lb33" # **Defining the question** # # **Descrie the Data Analysis Question** # # Analysis to infer on electric car usage in a car-sharing service company # # **Define Metric for Success** # # The main objective of this analysis is to be conscious of the rate at which electric car is used. # In order to meet this objective the analysis will be able to give a solution to the following research questions: # # Which time in paris do a company receives more incoming request for a shared electric car (Bluecar)? # # What is the most popular hour for returning cars? # # What station is the most popular? # # Overall? # At the most popular picking hour? # # What postal code is the most popular for picking up Blue cars? # Does the most popular station belong to that postal code? # # Overall? # At the most popular picking hour? # # Do the results change if you consider Utilib and Utilib 1.4 instead of Blue cars? # # **Recording Experimental Design** # # Import libraries. # # Load Datasets. 
# # Explore # # Clean Datasets # # Data Analysis # # Recommendation # # # # # # + [markdown] id="n3suGqdJT1_s" # **Import Python Libraries** # + id="_-ArxjcRTgsg" # Import Python Libraries import csv import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly as py import plotly.graph_objs as go from sklearn.cluster import KMeans import warnings warnings.filterwarnings("ignore") # + [markdown] id="31LVOQ8PUALM" # **Load the Datasets** # + id="Bq6ZZuJOUHeP" #upload the file #Load the datasets url= '/content/Autolib_dataset (2).csv' df=pd.read_csv(url) # + colab={"base_uri": "https://localhost:8080/", "height": 779} id="lXOfkOBQe9-N" outputId="a73dfd4e-0a1d-4fc3-f3a9-0146f451bc0d" #preview the dataset df # + [markdown] id="QfRcD5axf3Pv" # **Explore the Dataset** # + colab={"base_uri": "https://localhost:8080/"} id="xYlft3eGf-Hy" outputId="b8a4d8da-2d6a-43ad-a8e4-69ee5f3fb67b" df.info() # + [markdown] id="x3v_k7sxHc1N" # Select a sample # + colab={"base_uri": "https://localhost:8080/", "height": 445} id="Oy4ylSKXgnVC" outputId="eaf9c36c-b0e8-4fcf-c5f7-4d5ae3adfc0c" # preview the first five records df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 377} id="YDXd6ozkHEbq" outputId="8b4aaffc-4eac-4f35-fc48-bc33e037c128" # view the last 5 records df.tail() # + colab={"base_uri": "https://localhost:8080/", "height": 736} id="4Hh9ndNPIyTy" outputId="4d498366-1885-4b38-9676-6e5ff3a8f940" sample=df.sample(10) sample # + [markdown] id="UG02Sx7uHqGE" # Select training and Test samples # + colab={"base_uri": "https://localhost:8080/"} id="neS1UYEDJPu5" outputId="d1b95a8a-a388-4a84-8fe9-ac05403025fe" #separate numeric data from attribute labels NumericArray=sample.values print(NumericArray) # + colab={"base_uri": "https://localhost:8080/"} id="UJcEoQ89K0_-" outputId="1df449ca-4097-4c48-8204-56711d2f95fd" # slice the numeric data into Training and Test samples split=10 
TrainingSample,TestSample=NumericArray[:split,:] , NumericArray[split:,:] print("Training Sample\n" , TrainingSample) print("\nTest Sample\n" , TestSample) # + id="2kjeau_uN9Cj" colab={"base_uri": "https://localhost:8080/"} outputId="86e11d73-9c94-4098-8839-433286e98dd6" #split Training Sample into input and output attribute X_Attributes_TrainingSample=TrainingSample[: , 0: -1] Y_Attributes_TrainingSample=TrainingSample[:, -1] print("input attributes Sample\n" , X_Attributes_TrainingSample) print("\n Output attributes Sample\n" , Y_Attributes_TrainingSample) # + [markdown] id="YWeLr8_ZUrKH" # **Data Cleaning** # + colab={"base_uri": "https://localhost:8080/"} id="PgvmG7e5s9cz" outputId="0ddbf677-1a4d-44fb-a857-fc36af1c76c8" # preview the dataset column names df.columns # + [markdown] id="sQFFs7UzvS1S" # *Perform a validation check* # # Drop column cars since it has same values with the column for Blue cars. # # Drop column Address since we can get it through postal code. # # Drop column for Displayed comment since it has got alot of missing value # + id="Z5XIo7Eru8r6" # Drop unnecesary column df.drop(['Displayed comment', 'Address', 'Cars'], axis = 1, inplace = True) # + id="jpPyYHLCsfcM" #create a list of column names columnns=['Blue_CarCounter' , 'Utilib_Counter' , 'Utilib1.4_Counter', 'Charge_Slots', 'Charge_Status', 'City', 'ID', 'Kind', 'Geo_Point', 'Postal_Code', 'Public_Name', 'Rental_Status', 'Scheduled_At','Slots', 'Station_Type', 'Status', 'Subscription_Status', 'Year', 'Month', 'Day', 'Hour', 'Minute'] # + colab={"base_uri": "https://localhost:8080/"} id="nfOQ8HGbso6J" outputId="6b56c800-385d-4177-f51a-7cfd95ca9fc7" #preview column names #use strip() to get lead of leading and trailing characters df.columns.str.strip() # + id="tVKGOD3WsxI3" #Change to lower case df.columns = map(str.lower, df.columns) # + colab={"base_uri": "https://localhost:8080/"} id="LO-RGzPir8kA" outputId="ab7474b6-b56d-44aa-b34e-1aef8fb17e6a" # preview the columns df.columns # + 
[markdown] id="EuqddXOYVbZI" # Check for missing values # + colab={"base_uri": "https://localhost:8080/"} id="acI_9B4aYaJ7" outputId="5887a5af-d345-4dc4-9157-edb2f23d2345" # we count the number of non - missing values in the df dataframe df.count() # + colab={"base_uri": "https://localhost:8080/"} id="EtbdS2gTYrkA" outputId="923f987e-a16a-4072-8fe1-b3ab4e029270" #subtract the no. of non-missing rows from the total number num_rows= df.shape[0] num_rows - df.count() # + colab={"base_uri": "https://localhost:8080/", "height": 473} id="bcXabdnyUwE7" outputId="33d2eb7d-a5f6-46e6-824e-89f2e7b5f25b" #check for null values df.isnull() # + [markdown] id="B5_nTWEbbQ3y" # Filling Missing Values # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="J4gR_mWDilbD" outputId="638aaa79-7173-4d60-9c65-db0623c924df" # use forward fill to fill in missing values df_fill_forward=df.fillna(method='ffill') # preview the dataset df_fill_forward # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="EcNDhxopjtrJ" outputId="14bd8f19-964f-4caa-b706-e81873a43f89" #use backward fill to replace missing values df_fill_backward=df_fill_forward.fillna(method='bfill') #use backward fill method to replace null values df_fill_backward # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="cypaY6-Jn4VM" outputId="c90e8e9c-d244-46d5-e0aa-a475c5efe080" # Define a new variable and assign it data with non missing values df2=df_fill_backward df2 # + [markdown] id="I-CQYfxolqJE" # Smoothening noisy data # # # + colab={"base_uri": "https://localhost:8080/"} id="_812voAtl4l7" outputId="93523041-758a-4e35-80a2-d83b7e761757" #Calculating our first, third quantiles and then later our IQR Q1 = df2.quantile(0.25) Q3 = df2.quantile(0.75) IQR = Q3 - Q1 IQR # + id="mKay5FdOrqQw" #Removing outliers based on the IQR range and stores the result in the data frame 'df_out' df_out = df2[~((df2 < (Q1 - 1.5 * IQR)) | (df2 > (Q3 + 1.5 * IQR))).any(axis=1)] # + colab={"base_uri": 
"https://localhost:8080/"} id="s-UqMxVNsSgS" outputId="76f97ca9-3d2f-4928-8941-e43037f1d34b" # Printing the shape of our new dataset df_out.shape # + [markdown] id="lj5DPqkqwkCc" # Resolving Incosistencies # # We use Data transformation techniques # + colab={"base_uri": "https://localhost:8080/"} id="DIxq4Mk_xoKs" outputId="60254327-2761-4910-819e-f3463714723b" #labelling the data #import the necessary libraries from sklearn import preprocessing # perform data Labelling Encoder=preprocessing.LabelEncoder() Encoded_df=df2.apply(preprocessing.LabelEncoder().fit_transform) print("Encoded_df:\n", Encoded_df) # + [markdown] id="8PBI_7BT2gzQ" # Normalizing the Data # + colab={"base_uri": "https://localhost:8080/"} id="iUIjjJNA2tda" outputId="1f3d59a9-f129-4add-9f3c-c791f1e056dd" # import the necessary package from sklearn.preprocessing import Normalizer from numpy import set_printoptions #perform data labelling Encoder=preprocessing.LabelEncoder() Encoded_df=df2.apply(preprocessing.LabelEncoder().fit_transform) #separate numeric data from attribute labels Numeric_Data= Encoded_df.values print("\n Un Normalized Data:\n" , Y_Attributes_TrainingSample) # + colab={"base_uri": "https://localhost:8080/"} id="uzeGVy9J3p8_" outputId="8281f49b-aa03-42e2-b683-39e8cfd0c65c" #split Training samples into input and output attributes x_Attributes_TrainingSample=TrainingSample[:, 0: -1] Y_Attributes_TrainingSample=TrainingSample[:, -1] print("\n Un Normalized Data:\n" , X_Attributes_TrainingSample) print("\n Output attribute Sample:\n" , Y_Attributes_TrainingSample) # + id="yUGW-7Rp-PTl" # Normalize input training sample #normalized_X_Attributes= preprocessing.normalize(X_Attributes_TrainingSample.astype(float)) #print("\nNormalized Input Attributes:\n" , normalized_X_Attributes) # + [markdown] id="uFIcgiETTf_a" # **Analysis** # + [markdown] id="602ML02nGO9U" # Research Questions # + colab={"base_uri": "https://localhost:8080/", "height": 377} id="WcKgnxFbAdcx" 
outputId="b4843eb1-bdc7-42f0-e056-6eba4cb02813" # Question One #identify all Electric Cars in Paris BlueCarCounter=df[df['city'] =='Paris'] #prevuew all Electric Cars in Paris BlueCarCounter.head() # + id="BfeHTykiALtF" #split by city city=df.groupby('city') # + colab={"base_uri": "https://localhost:8080/", "height": 677} id="XYTsEjVVGpmo" outputId="8779d5b2-fbf7-4925-e9a2-56e729cebd31" # create a new column to store the difference df['blue_car_difference']=df['bluecar counter'].diff() #preview the dataset df # + colab={"base_uri": "https://localhost:8080/"} id="Xpk4DlVoHlYn" outputId="13a302ed-3235-4c5f-e8a3-31124bf477b5" # # positive value means a car was returned # Negative value means a car was taken # Identify the most popular hour of the day for picking up a shared electric car (Bluecar) in the city of Paris over the month of April 2018 df4_blue=BlueCarCounter[BlueCarCounter['blue_car_difference'] < 0].groupby('hour')['hour'].count().sort_values(ascending= False) df4_blue # + id="YbX4AtHaK379" # Question Two #What is the most popular hour for returning cars? 
BlueCarCounter[BlueCarCounter['blue_car_difference'] > 0].groupby('hour')['hour'].count().sort_values(ascending= False) # + [markdown] id="JcPHV26lLPKS" # **Visualization** # + colab={"base_uri": "https://localhost:8080/", "height": 390} id="-WAX-8uMQBZX" outputId="62b677ae-a2f7-42ad-b975-ccebac20225a" #Density estimation of values using distplot plt.figure(1 , figsize = (15 , 6)) feature_list = ['blue_car_difference'] pos = 1 for i in feature_list: plt.subplot(1 , 3 , pos) plt.subplots_adjust(hspace = 0.5 , wspace = 0.5 ) plt.subplots_adjust(hspace = 0.5 , wspace = 0.5 ) sns.distplot(df[i], bins=20, kde = True) pos = pos + 1 plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="DBSEchCkRg8q" outputId="ec0d8aa1-5d9c-497e-b91f-4aa52f2b673e" # A histogram showing frequency of a Blue Car df['blue_car_difference'].plot.hist(bins=20) plt.ylabel('frequency') plt.xlabel("blue_car_difference")
Moringa_Data_Science_Prep_W4_Independent_Project_2021_21_Dennis_Kiarie__Python_Data_Analysis_ipynb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Evaluation of $\langle \sigma v \rangle $ # # # This is an interactive python demonstration that shows how the rate of a reaction varies with energy and temperature in the absence of resonances (i.e. assuming $S_0 = const$). # + import numpy as np # see https://www.astropy.org/ for documentation import astropy.units as u import astropy.constants as c # %matplotlib notebook from IPython.display import display from ipywidgets import * import matplotlib.pyplot as plt def mass_numbers(z1=1,z2=2,n1=0,n2=0): a1,a2 = z1+n1,z2+n2 return a1,a2 def reduced_A(a1,a2): a1,a2 = mass_numbers(a1,a2) A_r = (a1*a2)/(a1 + a2) return A_r def beta(z1 = 1, z2 = 2, n1 = 0, n2 = 0): a1,a2 = mass_numbers(z1,z2,n1,n2) A_r = reduced_A(a1,a2) beta = 31.28*z1*z2*A_r**(0.5) * (u.keV)**0.5 return beta def E0(z1 = 1, z2 = 1, n1 = 0, n2 = 0, T = 1.5e6 * u.K): b = beta(z1,z2,n1,n2) E0 = (0.5*b*c.k_B*T)**(2./3.) return E0.to(u.keV) def Delta(z1 = 1, z2 = 1, n1 = 0, n2 = 0, T = 1.5e6 * u.K): e0 = E0(z1,z2,n1,n2,T) Delta = 4/3**0.5 *(e0 * c.k_B * T)**0.5 return Delta.to(u.keV) def energy_range(z1 =1, z2 = 1, n1 = 0, n2 = 0, T = 1.5e6*u.K): e0 = E0(z1,z2,n1,n2,T) D = Delta(z1,z2,n1,n2,T) emin = np.max([0,(e0-4*D).value]) emax = (e0 + 4*D).value energy = np.arange(emin,emax,(emax-emin)/1000.) 
return energy*u.keV def SigmaV(z1=1, z2=1, n1=0, n2=0, T = 1.5e6, S0 = 1*u.keV*u.barn): energy = energy_range(z1,z2,n1,n2,T) a1,a2 = mass_numbers(z1,z2,n1,n2) b = beta(z1,z2,n1,n2) A = reduced_A(a1,a2) mu = A*u.u f1 = (8/(mu*np.pi))**0.5 f2 = S0/(c.k_B*T)**(3./2) f3 = np.exp(-energy/(T*c.k_B)) f4 = np.exp(-b*energy**(-0.5)) return (f1*f2*f3*f4).cgs def plot_sv(T = 1e6*u.K, z1 = 1, z2 = 1, n1 = 0, n2 = 0, S0 = 1*u.keV*u.barn): energy = energy_range(z1,z2,n1,n2,T) sv = SigmaV(z1,z2,n1,n2,T,S0) plt.xlabel('Energy (kEV)') plt.plot(energy,sv,label=r'$\sigma v$') plt.legend() @interact def interactive_plot_sv(T = (1e6,1e10,1e6), #T = [1e6,5e6,1e7,5e7,1e8,5e8,1e9,5e9,1e10]*u.K, z1 = [1.,2.,3.,4.,5.], z2 = [1.,2.,3.,4.,5.], n1 = [0.,1.,2.,3.,4.,5.], n2 = [0.,1.,2.,3.,4.,5.], S0 = [1,10,100,1000]*u.keV*u.barn): plt.cla() energy = energy_range(z1,z2,n1,n2,T*u.K) sv = SigmaV(z1,z2,n1,n2,T*u.K,S0) I = np.trapz(sv,energy) logT_grid = np.arange(6,11,0.1) tg = 10**logT_grid svg = np.array([SigmaV(z1,z2,n1,n2,temp*u.K,S0).value.sum() for temp in tg]) f = plt.figure(1) f.clear() plt.subplot(121) plt.xlabel('Energy (kEV)',size=18) plt.ylabel(r'$\sigma v$ normalized ',size=18) plt.plot(energy,sv/sv.max()) plt.title(r"$\langle \sigma v \rangle=$ {:2E}".format(I),size=13) ax = f.add_subplot(122) ax.yaxis.tick_right() plt.subplot(122) plt.xlabel(r'$\log [Temperature (K)]$',size=18) plt.plot(logT_grid,np.log10(svg),label=r'$\log( \langle \sigma v \rangle)$',color='red') plt.title(r"Z$_1$={:};Z$_2$={:}".format(int(z1),int(z2)),size=13) plt.legend() f.canvas.draw() # -
demos/Nuc_Lecture_2_Interactive_Rates.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Summary-statistics walkthrough for a single price series with pandas.

# +
import pandas as pd

# Thirteen closing prices kept in a named list, then wrapped in a one-column
# DataFrame under the ticker "AAPL".
_prices = [1045.85, 1070.08, 1140.99, 1113.65, 1193.32, 1231.54, 1215.00,
           1207.15, 1248.84, 1166.27, 1138.85, 1003.63, 1080.91]
aapl_df = pd.DataFrame({"AAPL": _prices})

# Render the frame in the notebook.
aapl_df
# -

# Generate the standard summary statistics (count, mean, std, quartiles,
# min/max) for the price column.
aapl_df.describe()
Big_Picture_Analysis/Big_Picture_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # The following command executes training of Deep Convolutional GAN on MNIST dataset. It takes a couple of minutes on an average GPU. # %run nnabla-examples/mnist-collection/dcgan.py -c cudnn -i 7000 # Run the following block to show generated fake images using the trained model. # + language="markdown" # * itr=999 # ![image](tmp.monitor.dcgan/Fake-images/000999.png) # * itr=1999 # ![image](tmp.monitor.dcgan/Fake-images/001999.png) # * itr=2999 # ![image](tmp.monitor.dcgan/Fake-images/002999.png) # * itr=3999 # ![image](tmp.monitor.dcgan/Fake-images/003999.png) # * itr=4999 # ![image](tmp.monitor.dcgan/Fake-images/004999.png) # * itr=5999 # ![image](tmp.monitor.dcgan/Fake-images/005999.png) # * itr=6999 # ![image](tmp.monitor.dcgan/Fake-images/006999.png) # - # In this demo, the training is stopped at 7000 iteration. More iterations should give better quality results. # # You can find the source code of [dcgan.py on GitHub](https://github.com/sony/nnabla-examples/blob/master/mnist-collection/dcgan.py).
docker/tutorial/run-nnabla-examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Generating input for KEGGDecoder # The KEGGDecoder tool uses files output by KEGG ghostKOALA or kofamscan by default. This page walks you through how to generate these files using kofamscan. If you would like to generate them another way, the input files to KEGGDecoder look like this: # ``` # NORP96_1 # NORP96_2 # NORP96_3 # NORP96_4 # NORP96_5 # NORP96_6 K04764 # NORP96_7 K01890 # NORP96_8 K01889 # NORP96_9 K02887 # NORP96_10 K02916 # NORP96_11 K02520 # NORP96_12 K01868 # NORP96_13 # NORP96_14 # NORP96_15 # NORP96_16 # NORP96_17 # NORP96_18 K07334 # ``` # We did not binder-ize kofamscan because it requires too much compute to be executed on a binder cloud computer. However, you can copy and paste the code on your local computer and try it there. # # The documentation states that kofamscan needs to be run on linux, but we have successfully run it on unix using conda. # Download the databases and executables using: # ``` # # download the ko list # wget ftp://ftp.genome.jp/pub/db/kofam/ko_list.gz # # download the hmm profiles # wget ftp://ftp.genome.jp/pub/db/kofam/profiles.tar.gz # # download kofamscan tool # wget ftp://ftp.genome.jp/pub/tools/kofamscan/kofamscan.tar.gz # # download README # wget ftp://ftp.genome.jp/pub/tools/kofamscan/README.md # ``` # # And then unzip and untar the relevant files: # # ``` # gunzip ko_list.gz # tar xf profiles.tar.gz # tar xf kofamscan.tar.gz # ``` # # Next, make a conda environment using miniconda: # # ``` # conda create -n kofamscan hmmer parallel # conda activate kofamscan # conda install -c conda-forge ruby # ``` # Then copy the template config file and add the relative paths of my newly downloaded kofamscan databases. 
The config file should look like this: # # `config.yml`: # # ``` # # Path to your KO-HMM database # # A database can be a .hmm file, a .hal file or a directory in which # # .hmm files are. Omit the extension if it is .hal or .hmm file # profile: ./profiles # # # Path to the KO list file # ko_list: ko_list # # # Path to an executable file of hmmsearch # # You do not have to set this if it is in your $PATH # # hmmsearch: /usr/local/bin/hmmsearch # # # Path to an executable file of GNU parallel # # You do not have to set this if it is in your $PATH # # parallel: /usr/local/bin/parallel # # # Number of hmmsearch processes to be run parallelly # cpu: 8 # ``` # Lastly, run kofamscan: # # ``` # ./exec_annotation -o sb1_out sb1_proteins.faa # ```
example_notebooks/generating_KO_file.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import pandas as pd
import matplotlib.pyplot as plt
import math as mt
import numpy as np
import seaborn as sns
import plotly.express as px

from pathlib import Path
PROJECT_ROOT = Path(os.path.abspath('')).resolve().parents[0]
# -

df = pd.read_csv(os.path.join(PROJECT_ROOT, 'data', 'H2.csv'))
df

# +
# Organise the features by type so each group can be profiled appropriately.
target = ['IsCanceled']

numeric_feat = ['LeadTime', 'ArrivalDateYear', 'ArrivalDateWeekNumber', 'ArrivalDateDayOfMonth',
                'StaysInWeekendNights', 'StaysInWeekNights', 'Adults', 'Children', 'Babies',
                'PreviousCancellations', 'PreviousBookingsNotCanceled', 'BookingChanges',
                'Agent', 'Company', 'DaysInWaitingList', 'ADR', 'RequiredCarParkingSpaces',
                'TotalOfSpecialRequests']

binary_feat = ['IsRepeatedGuest']

categorical_feat = ['ArrivalDateMonth', 'Meal', 'Country', 'MarketSegment', 'DistributionChannel',
                    'ReservedRoomType', 'AssignedRoomType', 'DepositType', 'CustomerType',
                    'ReservationStatus']

date = ['ReservationStatusDate']


# +
# Data Quality Report for Continuous Features
def DescribeContinuousFeatures(Continuous_Features, dataset):
    """Build a data-quality report (one row per feature) for continuous features.

    Parameters
    ----------
    Continuous_Features : list of str
        Names of the continuous columns of ``dataset`` to profile.
    dataset : pandas.DataFrame
        The data to profile.

    Returns
    -------
    pandas.DataFrame
        Indexed by feature name, with count, % missing, cardinality and the
        five-number summary plus mean and standard deviation.
    """
    Continuous_Head = ['Count', 'Missing Values (%)', 'Cardinality', 'Minimum', '1st Qrt.',
                       'Mean', 'Median', '3rd Qrt.', 'Maximum', 'Std. Dev.']
    Continuous_Describe = pd.DataFrame(index=Continuous_Features, columns=Continuous_Head)
    Continuous_Describe.index.name = 'Feature Name'
    columns = dataset[Continuous_Features]

    # Total number of (non-null) instances.
    Continuous_Describe[Continuous_Head[0]] = columns.count()
    # Percentage of instances with missing values (no continuous feature is
    # expected to carry missings in this dataset).
    Continuous_Describe[Continuous_Head[1]] = columns.isnull().sum() * 100 / len(dataset)
    # Cardinality: number of distinct values per feature.
    Continuous_Describe[Continuous_Head[2]] = columns.nunique()
    # Minimum value.
    Continuous_Describe[Continuous_Head[3]] = columns.min()
    # 1st quartile.
    Continuous_Describe[Continuous_Head[4]] = columns.quantile(0.25)
    # Mean.
    Continuous_Describe[Continuous_Head[5]] = round(columns.mean(), 2)
    # Median.
    Continuous_Describe[Continuous_Head[6]] = columns.median()
    # 3rd quartile.
    Continuous_Describe[Continuous_Head[7]] = columns.quantile(0.75)
    # Maximum value.
    Continuous_Describe[Continuous_Head[8]] = columns.max()
    # Standard deviation.
    Continuous_Describe[Continuous_Head[9]] = round(columns.std(), 2)
    return Continuous_Describe


DescribeContinuousFeatures(numeric_feat, df)


# +
# Data Quality Report for Categorical Features
def DescribeCategoricalFeatures(Categorical_Features, dataset):
    """Build a data-quality report (one row per feature) for categorical features.

    Parameters
    ----------
    Categorical_Features : list of str
        Names of the categorical columns of ``dataset`` to profile.
    dataset : pandas.DataFrame
        The data to profile.

    Returns
    -------
    pandas.DataFrame
        Indexed by feature name, with count, % missing ('?'-encoded),
        cardinality, and the first and second modes with their frequencies.
    """
    Categorical_Head = ['Count', 'Missing Values (%)', 'Cardinality', 'Mode', 'Mode frequency',
                        'Mode (%)', '2nd Mode', '2nd Mode frequency', '2nd Mode (%)']
    Categorical_Describe = pd.DataFrame(index=Categorical_Features, columns=Categorical_Head)
    Categorical_Describe.index.name = 'Feature Name'
    columns = dataset[Categorical_Features]

    # Total number of (non-null) instances.
    Categorical_Describe[Categorical_Head[0]] = columns.count()

    # Missing values are encoded as '?' in this dataset, so count those
    # rather than NaN.
    missings = []
    for col in columns:
        missings.append(len(dataset[dataset[col] == '?']) * 100 / len(dataset))
    Categorical_Describe[Categorical_Head[1]] = missings

    # Cardinality: number of distinct values per feature.
    Categorical_Describe[Categorical_Head[2]] = columns.nunique()

    # describe() on object columns yields 'top'/'freq'/'count'; compute the
    # transposed summary once instead of once per statistic.
    desc = columns.describe().T
    # .mode() may return several rows on ties; the first row is the mode per
    # feature and stays index-aligned with the report (the old `.mode().T`
    # broke whenever any feature had tied modes).
    Categorical_Describe[Categorical_Head[3]] = columns.mode().iloc[0]
    Categorical_Describe[Categorical_Head[4]] = desc['freq']
    Categorical_Describe[Categorical_Head[5]] = desc['freq'] / desc['count']

    snd_modes = []
    snd_modes_freq = []
    for col in columns:
        counts = columns[col].value_counts()
        if len(counts) > 1:
            # Use positional access (.iloc[1]): the old `counts[1]` is a
            # *label* lookup and silently picks the wrong row for features
            # whose categories are integers.
            snd_modes.append(counts.index[1])
            snd_modes_freq.append(counts.iloc[1])
        else:
            # A single-level feature has no 2nd mode (old code raised here).
            snd_modes.append(np.nan)
            snd_modes_freq.append(0)
    Categorical_Describe[Categorical_Head[6]] = snd_modes
    Categorical_Describe[Categorical_Head[7]] = snd_modes_freq
    Categorical_Describe[Categorical_Head[8]] = Categorical_Describe[Categorical_Head[7]] / desc['count']
    return Categorical_Describe


DescribeCategoricalFeatures(categorical_feat, df)


# -

def plot_bar(features_list, direction='h'):
    """Plot absolute-frequency bar charts for every feature in *features_list*.

    Charts are laid out on a 2-column grid; ``direction`` selects vertical
    ('v') or horizontal (any other value) bars. Reads the module-level ``df``.
    """
    # ceil, not round: round() under-allocates rows for odd feature counts
    # (e.g. 9 features -> round(4.5) == 4 rows -> IndexError on the 9th plot).
    fig, axarr = plt.subplots(mt.ceil(len(features_list) / 2), 2, figsize=(20, 25))
    a = 0
    b = 0
    c = 1
    for feat in features_list:
        if direction == 'v':
            df[feat].value_counts().plot.bar(
                ax=axarr[a][b], fontsize=18, color='tomato')
            axarr[a][b].set_title(feat, fontsize=20)
            plt.subplots_adjust(hspace=0.3, wspace=0.6)
            plt.suptitle('Numerical Variables Absolute Frequencies', fontsize=30, x=0.43, y=0.93)
        else:
            df[feat].value_counts().plot.barh(
                ax=axarr[a][b], fontsize=18, color='tomato')
            axarr[a][b].set_title(feat, fontsize=20)
            plt.subplots_adjust(hspace=0.3, wspace=0.6)
            plt.suptitle('Categorical Variables Absolute Frequencies', fontsize=30, x=0.43, y=0.93)
        # Walk the grid column-first: flip the column, advance the row every
        # two plots.
        if b == 0:
            b = 1
        else:
            b = 0
        if c % 2 == 0:
            a += 1
        c += 1
    plt.show()


plot_bar(categorical_feat, 'h')

# +
# Share of cancelled bookings per country, drawn as a choropleth map.
country_cancel = df['Country'].value_counts().rename("Occurrences").to_frame()
country_cancel['canceled'] = df[df['IsCanceled'] == 1]['Country'].value_counts()
country_cancel = country_cancel.fillna(0)
country_cancel['%canceled'] = country_cancel['canceled'] / country_cancel['Occurrences']

fig = px.choropleth(country_cancel,
                    locations=country_cancel.index,
                    color='%canceled',
                    hover_name=country_cancel.index,  # column to add to hover information
                    color_continuous_scale=px.colors.sequential.Sunset,
                    title='Country % of cancelations')
fig.show()
fig.write_image("map.png")
# -

# country_cancel['%canceled'].sort_values(ascending=False).head(30)
a = country_cancel.sort_values(by=['Occurrences'], ascending=False).head(15)
a['%canceled'].sort_values(ascending=False)


def bar_charts_categorical(df, feature, target):
    """Draw side-by-side stacked bar charts of *feature* split by the binary
    *target* column: absolute frequencies on the left, proportions on the right.
    """
    cont_tab = pd.crosstab(df[feature], df[target], margins=True)
    # Drop the 'All' margin row from the plotted categories.
    categories = cont_tab.index[:-1]

    fig = plt.figure(figsize=(15, 5))

    plt.subplot(121)
    p1 = plt.bar(categories, cont_tab.iloc[:-1, 0].values, 0.55, color="gray")
    p2 = plt.bar(categories, cont_tab.iloc[:-1, 1].values, 0.55,
                 bottom=cont_tab.iloc[:-1, 0], color="skyblue")
    plt.legend((p2[0], p1[0]), ('$y_i=1$', '$y_i=0$'))
    plt.xticks(rotation='vertical')
    plt.title("Frequency bar chart")
    plt.xlabel(feature)
    plt.ylabel("$Frequency$")

    # auxiliary data for 122: per-category class proportions (column 2 is the
    # 'All' margin, i.e. the row totals).
    obs_pct = np.array([np.divide(cont_tab.iloc[:-1, 0].values, cont_tab.iloc[:-1, 2].values),
                        np.divide(cont_tab.iloc[:-1, 1].values, cont_tab.iloc[:-1, 2].values)])

    plt.subplot(122)
    p1 = plt.bar(categories, obs_pct[0], 0.55, color="gray")
    p2 = plt.bar(categories, obs_pct[1], 0.55, bottom=obs_pct[0], color="skyblue")
    plt.xticks(rotation='vertical')
    plt.legend((p2[0], p1[0]), ('$y_i=1$', '$y_i=0$'))
    plt.title("Proportion bar chart")
    plt.xlabel(feature)
    plt.ylabel("$p$")

    plt.show()


bar_charts_categorical(df, 'Adults', 'IsCanceled')

# +
# Bin LeadTime into 9 equal-width intervals before plotting.
bins_df = df[['IsCanceled', 'LeadTime']].copy()
# bins_df.loc[(bins_df['ADR'] > 5000), 'ADR'] = 0
min_value = bins_df['LeadTime'].min()
max_value = bins_df['LeadTime'].max()
bins = np.linspace(min_value, max_value, 10)
labels = [1, 2, 3, 4, 5, 6, 7, 8, 9]
bins_df['LeadTime'] = pd.cut(bins_df['LeadTime'], bins=bins, labels=labels, include_lowest=True)

bar_charts_categorical(bins_df, 'LeadTime', 'IsCanceled')
# -

bar_charts_categorical(df, 'TotalOfSpecialRequests', 'IsCanceled')

bar_charts_categorical(df, 'DepositType', 'IsCanceled')

bar_charts_categorical(df, 'MarketSegment', 'IsCanceled')

bar_charts_categorical(df, 'ReservedRoomType', 'IsCanceled')
BC2_predicting_cancellations/scripts/Data Understanding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
##################################################################
# Open-source code for "Python Machine Learning and Practice:
# From Zero to Kaggle Competitions" (2023 Edition)
# ----------------------------------------------------------------
# Section: 4.3.3 (reading / writing Excel file data)
# Author: Fan Miao
# E-mail: <EMAIL>
# Weibo: https://weibo.com/fanmiaothu
# Official QQ group: 561500762
##################################################################

# +
import pandas as pd

# Load the Excel workbook into a DataFrame with pandas.
df = pd.read_excel('../datasets/bitcoin/bitcoin.xlsx')

# Inspect df.
df

# +
# Slice out the first 10 rows and first 5 columns as df_part.
df_part = df.iloc[:10, 0:5]

# Inspect df_part.
df_part
# -

# Persist df_part as an Excel file, without the index column.
df_part.to_excel('../datasets/bitcoin/bitcoin_part.xlsx', index=False)

# +
# Re-read the file just written into df_new.
df_new = pd.read_excel('../datasets/bitcoin/bitcoin_part.xlsx')

# Display df_new so it can be compared against df_part.
df_new
Chapter_4/Section_4.3.3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev1 toc-item"><a href="#Can-a-Neural-Network-Fit-Random-Data?" data-toc-modified-id="Can-a-Neural-Network-Fit-Random-Data?-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Can a Neural Network Fit Random Data?</a></div><div class="lev2 toc-item"><a href="#Training-and-Saving" data-toc-modified-id="Training-and-Saving-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Training and Saving</a></div><div class="lev2 toc-item"><a href="#Loading" data-toc-modified-id="Loading-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Loading</a></div><div class="lev2 toc-item"><a href="#Old-School-Saving-and-Loading" data-toc-modified-id="Old-School-Saving-and-Loading-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Old School Saving and Loading</a></div><div class="lev1 toc-item"><a href="#My-Notes" data-toc-modified-id="My-Notes-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>My Notes</a></div><div class="lev2 toc-item"><a href="#Unsolved-Mysteries" data-toc-modified-id="Unsolved-Mysteries-21"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Unsolved Mysteries</a></div><div class="lev3 toc-item"><a href="#DISAMBIGUATING-GRAPH,-METAGRAPH,-VARIABLES,-OPS,-AND-MORE" data-toc-modified-id="DISAMBIGUATING-GRAPH,-METAGRAPH,-VARIABLES,-OPS,-AND-MORE-211"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>DISAMBIGUATING GRAPH, METAGRAPH, VARIABLES, OPS, AND MORE</a></div><div class="lev2 toc-item"><a href="#SavedModelBuilder" data-toc-modified-id="SavedModelBuilder-22"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>SavedModelBuilder</a></div><div class="lev3 toc-item"><a href="#Trimmed-Impl-of-SavedModelBuilder" data-toc-modified-id="Trimmed-Impl-of-SavedModelBuilder-221"><span 
class="toc-item-num">2.2.1&nbsp;&nbsp;</span>Trimmed Impl of SavedModelBuilder</a></div><div class="lev2 toc-item"><a href="#tf.train.Saver" data-toc-modified-id="tf.train.Saver-23"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>tf.train.Saver</a></div><div class="lev2 toc-item"><a href="#Misc" data-toc-modified-id="Misc-24"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Misc</a></div><div class="lev1 toc-item"><a href="#Copy-Pastes-for-Plane-Ride" data-toc-modified-id="Copy-Pastes-for-Plane-Ride-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Copy-Pastes for Plane Ride</a></div><div class="lev2 toc-item"><a href="#SavedModelBuilder" data-toc-modified-id="SavedModelBuilder-31"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>SavedModelBuilder</a></div><div class="lev2 toc-item"><a href="#Loader" data-toc-modified-id="Loader-32"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Loader</a></div> # - # [Link to mnist.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist.py) # # Can a Neural Network Fit Random Data? # + import tensorflow as tf import numpy as np import os num_features = 28 num_classes = 5 batch_size = 32 hidden_size = 64 num_batches = 50 fake_inputs = np.random.random(size=(num_batches, batch_size, num_features)) fake_labels = np.random.randint(0, num_classes, size=(num_batches, batch_size)) # - # Use tf.placeholder for values we will set in sess.run() calls. input_layer = tf.placeholder( dtype=tf.float32, shape=(None, num_features), name='input_layer') # Define the two layers with ReLU activation functions. hidden_layer_1 = tf.layers.dense( inputs=input_layer, units=hidden_size, activation=tf.nn.relu, name='hidden_layer_1') hidden_layer_2 = tf.layers.dense( inputs=hidden_layer_1, units=hidden_size, activation=tf.nn.relu, name='hideen_layer_2') # Project to output layer of dimensionality equal to the number of classes to predict. # This layer is often called the "logits". 
output_layer = tf.layers.dense( inputs=hidden_layer_2, units=num_classes, name='output_layer') # Define our loss function and training operation. # + labels = tf.placeholder(dtype=tf.int32, shape=(None,), name='labels') onehot_labels = tf.one_hot(labels, num_classes, name='onehot_labels') loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=output_layer) global_step = tf.get_variable( 'global_step', shape=(), dtype=tf.int32, trainable=False) train_op = tf.contrib.layers.optimize_loss( loss=loss, global_step=global_step, learning_rate=0.0001, optimizer='Adam') # - # We will evaluate our model by computing the number of correct predictions it makes in a given batch. correct = tf.nn.in_top_k(output_layer, labels, k=1) num_correct = tf.reduce_sum(tf.to_int32(correct)) # Place the objects we want to use in future `sess.run` calls in collections, so that we can easily access them upon loading models from file. # + tf.add_to_collection('fetches', num_correct) tf.add_to_collection('fetches', train_op) tf.add_to_collection('feed_dict', input_layer) tf.add_to_collection('feed_dict', labels) # - # ## Training and Saving # + num_epochs = 200 export_dir = 'out' def run_training_epoch(sess): input_layer, labels = tf.get_collection('feed_dict') num_correct_total = 0 for batch_idx in range(num_batches): outputs = sess.run(tf.get_collection('fetches'), feed_dict={ input_layer: fake_inputs[batch_idx], labels: fake_labels[batch_idx]}) num_correct_total += outputs[0] return num_correct_total with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for epoch_idx in range(num_epochs): num_correct_total = run_training_epoch(sess) if epoch_idx % 100 == 0: print('Epoch {}: accuracy={:.3%}'.format( epoch_idx, float(num_correct_total) / (num_batches * batch_size))) builder = tf.saved_model.builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( sess=sess, tags=[tf.saved_model.tag_constants.TRAINING]) builder.save() # - # ## Loading # 
[loader.py](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/python/saved_model/loader.py) tf.reset_default_graph() with tf.Session() as sess: tf.saved_model.loader.load( sess=sess, tags=[tf.saved_model.tag_constants.TRAINING], export_dir=export_dir) for epoch_idx in range(num_epochs): num_correct_total = run_training_epoch(sess) if epoch_idx % 100 == 0: print('Epoch {}: accuracy={:.3%}'.format(epoch_idx, float(num_correct_total) / (num_batches * batch_size))) # ## Old School Saving and Loading saver = tf.train.Saver() with tf.Session() as sess: save_path = saver.save(sess, 'out/0/saver.ckpt') tf.reset_default_graph() v1 = tf.get_variable('v1', shape=[3]) # create ur graph as usual saver = tf.train.Saver() # even create saver as usual yo! with tf.Session() as sess: saver.restore(sess, '/tmp/model.ckpt') # oh shit whaddup # check ur vars breh print('v1 :', v1.eval()) # [tf.train.import_meta_graph](https://www.tensorflow.org/api_docs/python/tf/train/import_meta_graph) # # Recreates a Graph saved in a MetaGraphDef proto. # # This function takes a MetaGraphDef protocol buffer as input. If the argument is a file containing a MetaGraphDef protocol buffer , it constructs a protocol buffer from the file content. The function then adds all the nodes from the graph_def field to the current graph, recreates all the collections, and returns a saver constructed from the saver_def field. # # In combination with export_meta_graph(), this function can be used to # # Serialize a graph along with other Python objects such as QueueRunner, Variable into a MetaGraphDef. # Restart training from a saved graph and checkpoints. # Run inference from a saved graph and checkpoints. # + ... # Create a saver. saver = tf.train.Saver(...variables...) # Remember the training_op we want to run by adding it to a collection. 
tf.add_to_collection('train_op', train_op) sess = tf.Session() for step in xrange(1000000): sess.run(train_op) if step % 1000 == 0: # Saves checkpoint, which by default also exports a meta_graph # named 'my-model-global_step.meta'. saver.save(sess, 'my-model', global_step=step) # TWO....DAYS...LATER... with tf.Session() as sess: new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta') new_saver.restore(sess, 'my-save-dir/my-model-10000') # tf.get_collection() returns a list. In this example we only want the # first one. train_op = tf.get_collection('train_op')[0] for step in xrange(1000000): sess.run(train_op) # - # > NOTE: Restarting training from saved meta_graph only works if the device assignments have not changed. # # # # My Notes # ## Unsolved Mysteries # > When you want to save and load variables, the graph, and the graph's metadata. # # There is some redundancy/sloppiness going on in that sentence, right? What's the difference between each of those exactly? Ok, it is about time I dive into this rabbit hole: # # ------------------------------------ # # ### DISAMBIGUATING GRAPH, METAGRAPH, VARIABLES, OPS, AND MORE # # __tf.Graph__: "A Graph contains a set of # - tf.Operation objects, which represent units of computation # - tf.Tensor objects, which represent the units of data that flow between operations." # # Ok if you actually read the code (and protos), you find that, more technically, _a Graph is a set of Nodes, and a Node is mainly defined by an Operation and the input tensor names for that operation._ # # OK I THINK I GET IT NOW (after reading protos below): A SavedModelBuilder literally saves a list of MetaGraphDefs, each of which specifies: # 1. A GraphDef. This is literally a list of NodeDefs, each of which specify a tf.Operation and a set of tf.Tensors that are fed as input to the operation. 
Notice how this is purely _structural_ and isn't concerned at all with particular values for any of those tensors (CRUCIAL TO UNDERSTAND THAT SENTENCE). # 2. A SaverDef. Tells how to save and restore __variables__. This is how we get access to the particular values of tensors in our graph (via their corresponding tf.Variable). # 3. A {string => CollectionDef} map. # 4. A {string => SignatureDef} map. A SignatureDef contains two {string => TensorInfo} maps, one for inputs and one for outputs. # 5. A list of AssetFileDefs. # # # ------------------- # ## SavedModelBuilder # # - [saved_model.proto](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/core/protobuf/saved_model.proto) # - [model_builder_impl.py](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/python/saved_model/builder_impl.py#L39) # # # ```c # message SavedModel { # // CTor just inits this to tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION # int64 saved_model_schema_version = 1; # repeated MetaGraphDef meta_graphs = 2; # } # # message MetaGraphDef { # MetaInfoDef meta_info_def = 1; # # // GRAPH DEF AND NODE DEF # message GraphDef { # message NodeDef { # string name = 1; # string op = 2; # repeated string input = 3; # string device = 4; # map<string, AttrValue> attr = 5; # } # repeated NodeDef node = 1; # VersionDef versions = 2; # } # GraphDef graph_def = 2; # # // SAVER DEF # message SaverDef { # string filename_tensor_name = 1; # string save_tensor_name = 2; // saving Operation() # string restore_op_name = 3; # int32 max_to_keep = 4; # bool sharded = 5; # float keep_checkpoint_ever_n_hours = 6; # } # SaverDef saver_def = 3; # # // COLLECTION DEF # message CollectionDef { # message NodeList { repeated string value = 1; } # message BytesList { repeated bytes value = 1; } # message Int64List { repeated int64 value = 1 [packed = true]; } # message FloatList { repeated float value = 1 [packed = true]; } # message AnyList { repeated google.protobuf.Any value = 1; } # # oneof kind 
{ # NodeList node_list = 1; # BytesList bytes_list = 2; # Int64List int64_list = 3; # FloatList float_list = 4; # AnyList any_list = 5; # } # } # map<string, CollectionDef> collection_def = 4; # # // SIGNATURE DEF # message SignatureDef { # map<string, TensorInfo> inputs = 1; # map<string, TensorInfo> outputs = 2; # } # map<string, SignatureDef> signature_def = 5; # # // ASSET FILE DEF AND TENSOR INFO # message AssetFileDef { # message TensorInfo { # message CooSparse { values_tensor_name, indices_tensor_name, dense_shape_tensor_name } // pseudo-proto # oneof encoding { # string name; // For dense Tensors # CooSparse coo_sparse; // COO encoding for sparse tensors # } # DateType dtype; # TensorShapeProto tensor_shape; # } # TensorInfo tensor_info = 1; # string filename = 2; # } # repeated AssetFileDef asset_file_def = 6; # } # # // ------------------------------------ # // GRAPH STUFF # // ----------------------------------- # # # # # ``` # ### Trimmed Impl of SavedModelBuilder # + from tensorflow.core.protobuf import * from tensorflow.python.saved_model import constants class SavedModelBuilder: def __init__(self, export_dir): self._saved_model = saved_model_pb2.SavedModel( saved_model_schema_version=constants.SAVED_MODEL_SCHEMA_VERSION) self._export_dir = export_dir # Real impl checks that it doesnt exist & then makes it. def add_meta_graph_and_variables(self, sess, tags, signature_def_map=None, assets_collections=None, legacy_init_op=None, clear_devices=False, main_op=None): if main_op is not None: self._maybe_add_legacy_init_op(legacy_init_op) else: self._add_main_op(main_op) saver = tf.train.Saver( var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS), sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True) # Save the variables. saver.save(sess, variables_path, write_meta_graph=False, write_state=False) # Export the meta graph def. 
meta_graph_def = saver.export_meta_graph_def(clear_devices=clear_devices) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) # - saved_model_proto = saved_model_pb2.SavedModel() meta_graph_proto = meta_graph_pb2() # ## tf.train.Saver # - [API Documentation](https://www.tensorflow.org/api_docs/python/tf/train/Saver) # # Saves and restores variables. # # The Saver class adds ops to save and restore variables to and from checkpoints. # It also provides convenience methods to run these ops. # # Checkpoints are binary files in a proprietary format which map variable names to tensor values. # The best way to examine the contents of a checkpoint is to load it using a Saver. # # Savers can automatically number checkpoint filenames with a provided counter. # This lets you keep multiple checkpoints at different steps while training a model. # For example you can number the checkpoint filenames with the training step number. # To avoid filling up disks, savers manage checkpoint files automatically. # For example, they can keep only the N most recent files, or one checkpoint for every N hours of training. # # You number checkpoint filenames by passing a value to the optional global_step argument to save(): # # ```python # saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0' # ... # saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000' # ``` # Additionally, optional arguments to the Saver() constructor let you control the proliferation of checkpoint files on disk: # * max_to_keep indicates the maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 
# * keep_checkpoint_every_n_hours: In addition to keeping the most recent max_to_keep checkpoint files, you might want to keep one checkpoint file for every N hours of training. This can be useful if you want to later analyze how a model progressed during a long training session. For example, passing keep_checkpoint_every_n_hours=2 ensures that you keep one checkpoint file for every 2 hours of training. The default value of 10,000 hours effectively disables the feature. # # # Note that you still have to call the save() method to save the model. Passing these arguments to the constructor will not save variables automatically for you. # # A training program that saves regularly looks like: # # ```python # # Create a saver. # saver = tf.train.Saver(...variables...) # # Launch the graph and train, saving the model every 1,000 steps. # sess = tf.Session() # for step in xrange(1000000): # sess.run(..training_op..) # if step % 1000 == 0: # # Append the step number to the checkpoint name: # saver.save(sess, 'my-model', global_step=step) # ``` # # In addition to checkpoint files, savers keep a protocol buffer on disk with the list of recent checkpoints. This is used to manage numbered checkpoint files and by latest_checkpoint(), which makes it easy to discover the path to the most recent checkpoint. That protocol buffer is stored in a file named 'checkpoint' next to the checkpoint files. # # If you create several savers, you can specify a different filename for the protocol buffer file in the call to save(). 
# # __init__ : # ```python # __init__( # var_list=None, # reshape=False, # sharded=False, # max_to_keep=5, # keep_checkpoint_every_n_hours=10000.0, # name=None, # restore_sequentially=False, # saver_def=None, # builder=None, # defer_build=False, # allow_empty=False, # write_version=tf.train.SaverDef.V2, # pad_step_number=False, # save_relative_paths=False, # filename=None # ) # ``` # ## Misc # REALLY USEFUL DESCRIPTION OF DEVICE NAME SEMANTICS IN [node_def.proto](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/core/framework/node_def.proto): # # ```c # // A (possibly partial) specification for the device on which this # // node should be placed. # // The expected syntax for this string is as follows: # // # // DEVICE_SPEC ::= PARTIAL_SPEC # // # // PARTIAL_SPEC ::= ("/" CONSTRAINT) * # // CONSTRAINT ::= ("job:" JOB_NAME) # // | ("replica:" [1-9][0-9]*) # // | ("task:" [1-9][0-9]*) # // | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") ) # // # // Valid values for this string include: # // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) # // * "/job:worker/device:GPU:3" (partial specification) # // * "" (no specification) # // # // If the constraints do not resolve to a single device (or if this # // field is empty or not present), the runtime will attempt to # // choose a device automatically. # ``` # # Note: may be useful to see how Estimators save their shit... # + [markdown] heading_collapsed=true # # Copy-Pastes for Plane Ride # + [markdown] hidden=true # ## SavedModelBuilder # + hidden=true export_dir = ... ... builder = tf.saved_model_builder.SavedModelBuilder(export_dir) with tf.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING], signature_def_map=foo_signatures, assets_collection=foo_assets) ... # Add a second MetaGraphDef for inference. with tf.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph([tag_constants.SERVING]) ... 
builder.save() # + [markdown] hidden=true # ## Loader # + hidden=true export_dir = ... ... with tf.Session(graph=tf.Graph()) as sess: tf.saved_model.loader.load(sess, [tag_constants.TRAINING], export_dir) ...
section1/notebooks/TFBasics_SavingRestoringAndHighLevelAPIs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Simulating any compartmental model using the `Spp` class
#
# In the present notebook, we show how (most) compartmental models can be
# simulated using the `pyross.deterministic.Spp` class. We allow users to
# specify any number of epidemiological classes, as well as any linear or
# infectious coupling between them.

# %%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../../')
# %run setup.py install
os.chdir(owd)

# %matplotlib inline
import numpy as np
import pyross
import matplotlib.pyplot as plt
#from matplotlib import rc; rc('text', usetex=True)

# ### The SIR model
#
# Below you will find the model-specification dictionary for the SIR model:

# +
model_spec = {
    "classes" : ["S", "I"],

    "S" : {
        "linear"    : [],
        "infection" : [ ["I", "-beta"] ]
    },

    "I" : {
        "linear"    : [ ["I", "-gamma"] ],
        "infection" : [ ["I", "beta"] ]
    }
}
# -

# This corresponds to
#
# $$
# \begin{aligned}
# \dot{S}_i & = - \beta \sum_j C_{ij} \frac{I_j}{N_j} S_i \\
# \dot{I}_i & = \beta \sum_j C_{ij} \frac{I_j}{N_j} S_i - \gamma I_i \\
# \dot{R}_i & = \gamma I_i
# \end{aligned}
# $$
#
# Let's go through each component of the model specification step-by-step:
#
# - The list `"classes" : ["S", "I"]` defines the epidemiological
# classes of the model (the recovered class $R$ is not listed explicitly —
# it is available via `model_class_data('R', ...)`, see below). <i>The order
# in which they are written is important</i>, as this ordering will have to
# be maintained if giving the initial conditions of the simulation as an
# array. Each model requires the presence of a susceptible class. This class
# will always be the first element of the list `classes`, regardless of
# whether it is labelled as `S` or not.
# - The dynamics of each class is defined by a key-value pair. Consider
#
# <br>
#
# ```json
# "E" : {
#     "linear"    : [ ["E", "-gammaE"] ],
#     "infection" : [ ["I", "betaI"], ["A", "betaA"] ]
# },
# ```
#
# <br>
#
# - This reads out as:
# $$\dot{E}_i = -\gamma_E E_i + \beta_I \sum_j C^I_{ij} \frac{I_j}{N_j} S_i + \beta_A \sum_j C^A_{ij} \frac{A_j}{N_j} S_i.$$
# - The linear terms for each epidemic class are defined by the lists of lists:
#
# <br>
#
# ```json
# "linear" : [ ["E", "-gammaE"] ]
# ```
#
# <br>
#
# Each pair in `linear` corresponds to the linear coupling
# with the class and the coupling constant respectively. So
# `["E", "-gammaE"]` corresponds to the term $-\gamma_E E$ in
# the equation for $\dot{E}$. The minus sign in front of `gammaE`
# signifies that the negative of the coefficient should be used.
# - The infection terms are defined in a similar manner. Each pair
# in `infection` corresponds to the non-linear coupling with $S$
# and the coupling constant respectively. So `["I", "betaI"]`
# corresponds to the term $\beta_I \sum_j C^I_{ij} \frac{I_j}{N_j} S$.

# Next, we define the parameter values:

parameters = {
    'beta'  : 0.3,
    'gamma' : 0.1
}

# The initial conditions can be defined in either of two ways. They can
# either be defined using a dictionary, where for each model class we have a
# corresponding 1D array of length $M$ (where $M$ is the number of
# age-groups), or a numpy array. The numpy array must have dimensions
# $M \times (d-1)$, where $d$ is the number of model classes (so 3 for SIR,
# for example).
#
# If the initial conditions are provided as a dictionary, we are free to
# leave out one of the classes. In which case the initial conditions of the
# left out class will be inferred from the others.

# +
# Initial conditions as an array
x0 = np.array([
    999, 1000, 1000,   # S
    1,   0,    0,      # I
])

# Initial conditions as a dictionary
x0 = {
    # S will be inferred from I and R
    'I' : [1, 0, 0],
    'R' : [0, 0, 0]
}

# +
M  = 3
Ni = 1000*np.ones(M)
N  = np.sum(Ni)

CM = np.array([
    [1,   0.5, 0.1],
    [0.5, 1,   0.5],
    [0.1, 0.5, 1  ]
], dtype=float)

def contactMatrix(t):
    # time-independent contact structure between the three age groups
    return CM

# duration of simulation and data file
Tf = 160;  Nt = 160;

model = pyross.deterministic.Spp(model_spec, parameters, M, Ni)

# simulate model
data = model.simulate(x0, contactMatrix, Tf, Nt)

# +
# plot the data and obtain the epidemic curve
t = data['t']

fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})

for label, color in (('S', "#348ABD"), ('I', '#A60628'), ('R', "dimgrey")):
    series = np.sum(model.model_class_data(label, data), axis=1)
    plt.fill_between(t, 0, series/N, color=color, alpha=0.3)
    plt.plot(t, series/N, '-', color=color, label='$%s$' % label, lw=4)

plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Fraction of compartment value')
plt.xlabel('Days');
# -

# We can use `pyross.deterministic.Spp.update_model_parameters` to change
# the parameters from what we set them initially:

# +
parameters = {
    'beta'  : 0.3,
    'gamma' : 0.01
}
model.update_model_parameters(parameters)

# simulate model
data = model.simulate(x0, contactMatrix, Tf, Nt)

# +
# plot the data and obtain the epidemic curve
t = data['t']

fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})

for label, color in (('S', "#348ABD"), ('I', '#A60628'), ('R', "dimgrey")):
    series = np.sum(model.model_class_data(label, data), axis=1)
    plt.fill_between(t, 0, series/N, color=color, alpha=0.3)
    plt.plot(t, series/N, '-', color=color, label='$%s$' % label, lw=4)

plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Fraction of compartment value')
plt.xlabel('Days');
# -

# ### SEAIR model
#
# Let us now look at a more complicated example.

# +
model_spec = {
    "classes" : ["S", "E", "A", "Ia", "Is"],

    "S" : {
        "linear"    : [],
        "infection" : [ ["A", "-beta"], ["Ia", "-beta"], ["Is", "-beta"] ]
    },
    "E" : {
        "linear"    : [ ["E", "-gammaE"] ],
        "infection" : [ ["A", "beta"], ["Ia", "beta"], ["Is", "beta"] ]
    },
    "A" : {
        "linear"    : [ ["E", "gammaE"], ["A", "-alphabar*gammaA"], ["A", "-alpha*gammaA"] ],
        "infection" : [ ]
    },
    "Ia" : {
        "linear"    : [ ["A", "alpha*gammaA"], ["Ia", "-gammaIa"] ],
        "infection" : [ ]
    },
    "Is" : {
        "linear"    : [ ["A", "alphabar*gammaA"], ["Is", "-gammaIs"] ],
        "infection" : [ ]
    }
}

gammaA = 0.2
alpha  = 0.4   # fraction of asymptomatic infectives

parameters = {
    'beta'            : 0.2,
    'gammaE'          : 0.04,
    'alpha*gammaA'    : alpha*gammaA,
    'alphabar*gammaA' : (1 - alpha)*gammaA,
    'gammaIa'         : 0.1,
    'gammaIs'         : [0.1, 0.1, 0.1],
}
# -

# This corresponds to
#
# $$
# \begin{aligned}
# \dot{S}_i & = - \lambda_i(t) S_i \\
# \dot{E}_i & = \lambda_i(t) S_i - \gamma_E E_i \\
# \dot{A}_i & = \gamma_E E_i - \gamma_A A_i \\
# \dot{I^a}_i & = \alpha \gamma_A A_i - \gamma_{I^a} I^a_i \\
# \dot{I^s}_i & = (1 - \alpha)\gamma_A A_i - \gamma_{I^s} I^s_i \\
# \dot{R}_i & = \gamma_{I^a} I^a_i + \gamma_{I^s} I^s_i
# \end{aligned}
# $$
#
# where
#
# $$
# \lambda_i(t) = \sum_j C_{ij} \left( \beta_{I^a} \frac{I^a_j}{N_j} + \beta_{I^s} \frac{I^s_j}{N_j} + \beta_A \frac{A_j}{N_j}\right).
# $$
#
# $$
# C_{ij} \beta_I^a \frac{I^a_j}{N_j}
# $$

# +
M  = 3
Ni = 10000*np.ones(M)
N  = np.sum(Ni)

# Seed the epidemic with one asymptomatic individual in the first age group;
# S is inferred by Spp from the classes given here.
x0 = {
    'E'  : np.array([0, 0, 0]),
    'A'  : np.array([1, 0, 0]),
    'Ia' : np.array([0, 0, 0]),
    'Is' : np.array([0, 0, 0]),
    'R'  : np.array([0, 0, 0])
}

CM = np.array([
    [1,   0.5, 0.1],
    [0.5, 1,   0.5],
    [0.1, 0.5, 1  ]
], dtype=float)

def contactMatrix(t):
    # time-independent contact matrix
    return CM

# duration of simulation and number of output points
Tf = 300;  Nt = 161;

model = pyross.deterministic.Spp(model_spec, parameters, M, Ni)

# simulate model
data = model.simulate(x0, contactMatrix, Tf, Nt)

# +
# plot the data and obtain the epidemic curve
t = data['t']

fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})

for model_class in (model_spec['classes'] + ['R']):
    Os = np.sum(model.model_class_data(model_class, data), axis=1)
    plt.plot(t, Os/N, '-', label='$%s$' % model_class, lw=4)

plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Fraction of compartment value')
plt.xlabel('Days');
# -

# ### SEAI8R

# +
# Flow structure:
#   E  -> Ia (fraction a) or Is (fraction 1-a)
#   Is -> Is' (recovering at home, 1-h) or Ih (hospitalised, h)
#   Ih -> Ih' (recovering, 1-c) or Ic (critical, c)
#   Ic -> Ic' (recovering, 1-m) or Im (deceased, m)
model_spec = {
    "classes" : ["S", "E", "Ia", "Is", "Is'", "Ih", "Ih'", "Ic", "Ic'", "Im"],

    "S" : {
        "linear"    : [ ],
        "infection" : [ ["Ia", "-beta"], ["Is", "-beta"] ]
    },
    "E" : {
        "linear"    : [ ["E", "-gammaE"] ],
        "infection" : [ ["Ia", "beta"], ["Is", "beta"] ]
    },
    "Ia" : {
        "linear"    : [ ["E", "a*gammaE"], ["Ia", "-gammaIa"] ],
        "infection" : [ ]
    },
    "Is" : {
        "linear"    : [ ["E", "abar*gammaE"], ["Is", "-gammaIs"] ],
        "infection" : [ ]
    },
    "Is'" : {
        "linear"    : [ ["Is", "hbar*gammaIs"], ["Is'", "-gammaIs'"] ],
        "infection" : [ ]
    },
    "Ih" : {
        "linear"    : [ ["Is", "h*gammaIs"], ["Ih", "-gammaIh"] ],
        "infection" : [ ]
    },
    "Ih'" : {
        "linear"    : [ ["Ih", "cbar*gammaIh"], ["Ih'", "-gammaIh'"] ],
        "infection" : [ ]
    },
    "Ic" : {
        # BUG FIX: the inflow into the critical class comes from the
        # hospitalised class Ih at rate c*gammaIh (mirroring Ih' above, which
        # receives cbar*gammaIh from Ih). The original spec coupled this term
        # to Ic itself, so Ic — and therefore Ic' and Im — stayed identically
        # zero for the given initial conditions.
        "linear"    : [ ["Ih", "c*gammaIh"], ["Ic", "-gammaIc"] ],
        "infection" : [ ]
    },
    "Ic'" : {
        "linear"    : [ ["Ic", "mbar*gammaIc"], ["Ic'", "-gammaIc'"] ],
        "infection" : [ ]
    },
    "Im" : {
        "linear"    : [ ["Ic", "m*gammaIc"] ],
        "infection" : [ ]
    }
}

# branching fractions
a = 0.4    # asymptomatic
h = 0.1    # hospitalised
c = 0.12   # critical
m = 0.13   # deceased

beta    = 0.2
gammaE  = np.array([0.3, 0.5, 0.6])*0.1
gammaIa = np.array([0.3, 0.5, 0.6])*0.1
gammaIs = np.array([0.3, 0.5, 0.6])*0.1
gammaIh = np.array([0.3, 0.5, 0.6])*0.1
gammaIc = np.array([0.3, 0.5, 0.6])*0.1

parameters = {
    'beta'         : beta,
    'gammaE'       : gammaE,
    'a*gammaE'     : a*gammaE,
    'abar*gammaE'  : (1-a)*gammaE,
    'gammaIa'      : gammaIa,
    'gammaIs'      : gammaIs,
    'h*gammaIs'    : h*gammaIs,
    'hbar*gammaIs' : (1-h)*gammaIs,
    'gammaIh'      : gammaIh,
    'c*gammaIh'    : c*gammaIh,
    'cbar*gammaIh' : (1-c)*gammaIh,
    'gammaIc'      : gammaIc,
    'm*gammaIc'    : m*gammaIc,
    'mbar*gammaIc' : (1-m)*gammaIc,
    "gammaIh'"     : 0.1,
    "gammaIc'"     : 0.1,
    "gammaIs'"     : 0.1
}
# -

# +
M  = 3
Ni = 10000*np.ones(M)
N  = np.sum(Ni)

# Seed with one exposed individual in the first age group.
x0 = {
    'E'   : np.array([1, 0, 0]),
    'Ia'  : np.array([0, 0, 0]),
    'Is'  : np.array([0, 0, 0]),
    'Ih'  : np.array([0, 0, 0]),
    'Ic'  : np.array([0, 0, 0]),
    "Is'" : np.array([0, 0, 0]),
    "Ih'" : np.array([0, 0, 0]),
    "Ic'" : np.array([0, 0, 0]),
    'Im'  : np.array([0, 0, 0]),
    'R'   : np.array([0, 0, 0])
}

CM = np.array([
    [1,   0.5, 0.1],
    [0.5, 1,   0.5],
    [0.1, 0.5, 1  ]
], dtype=float)

def contactMatrix(t):
    # time-independent contact matrix
    return CM

# duration of simulation and number of output points
Tf = 300;  Nt = 161;

model = pyross.deterministic.Spp(model_spec, parameters, M, Ni)

# simulate model
data = model.simulate(x0, contactMatrix, Tf, Nt)

# +
# plot the data and obtain the epidemic curve
t = data['t']

fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})

for model_class in (model_spec['classes'] + ['R']):
    Os = np.sum(model.model_class_data(model_class, data), axis=1)
    plt.plot(t, Os/N, '-', label='$%s$' % model_class, lw=4)

plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Fraction of compartment value')
plt.xlabel('Days');
examples/deterministic/ex16-Spp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import pandas as pd
import tensorflow as tf

# Load the diabetes dataset
diabetes_df = pd.read_csv('./resources/diabetes.csv')
diabetes_df.head()

# +
# Split the diabetes outcome target off from the feature columns
y = diabetes_df.Outcome
X = diabetes_df.drop(columns="Outcome")

# Stratified train/test split so both sets keep the outcome ratio
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)

# +
# Standardise the numerical features (fit the scaler on the training
# split only, then apply it to both splits)
X_scaler = StandardScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)

# +
# Baseline: a logistic regression classifier on the raw features
log_classifier = LogisticRegression(solver="lbfgs", max_iter=200)
log_classifier.fit(X_train, y_train)

# Score it on the held-out split
y_pred = log_classifier.predict(X_test)
print(f" Logistic regression model accuracy: {accuracy_score(y_test,y_pred):.3f}")

# +
# A small neural network: one 16-unit hidden layer over the 8 inputs,
# and a sigmoid output for the binary outcome
nn_model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units=16, activation="relu", input_dim=8),
    tf.keras.layers.Dense(units=1, activation="sigmoid"),
])

# Compile with binary cross-entropy and track accuracy
nn_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# Train on the scaled features
fit_model = nn_model.fit(X_train_scaled, y_train, epochs=100)

# Evaluate on the scaled test split
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled, y_test, verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
module/diabetes/LogisticRegression_NeuralNet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# Sample one full period [0, 2*pi] with 1000 points
x = np.linspace(0, 2*np.pi, 1000)

# Three curves: a shifted cosine, a slow exponential, and a wiggly parabola
cosine_curve = 5.5*(np.cos(2*x)) + 5.5
exp_curve    = 0.02*np.exp(x)
wiggle_curve = 0.25*(x**2) + 0.1*np.sin(10*x)

# Draw the curves in the same order as before
for curve in (cosine_curve, exp_curve, wiggle_curve):
    plt.plot(x, curve)

plt.xlim([0, 2*np.pi])
plt.ylim([-1, 10])
plt.xlabel('time in Earth 119')
plt.ylabel('python wizardry')
plt.show()
hw-2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# DeepFaceLab on Google Colab: install the tooling, mount Google Drive,
# copy a prepared workspace onto the VM, apply/remove XSeg masks, and train
# a SAEHD face-swap model, saving checkpoints back to Drive.

# + cellView="form" colab={"base_uri": "https://localhost:8080/"} id="sdJ7nZcubQDf" outputId="7d6a2af6-92ee-4892-9cf4-0694c1d81f3d"
#@title 檢視 GPU
# (View the GPU assigned to this Colab VM)
# ! nvidia-smi

# + cellView="form" colab={"base_uri": "https://localhost:8080/"} id="QNtYV2KjZ_DS" outputId="6bec9bd3-fefa-40b3-e5d3-889ffa406099"
#@title 安裝 colorama, ffmpeg
# (Install the colorama and ffmpeg-python dependencies)
# ! pip install colorama
# ! pip install ffmpeg-python

# + cellView="form" colab={"base_uri": "https://localhost:8080/"} id="g-0BaNcladl4" outputId="4eb46726-8a33-403f-c62f-b8443f9e4827"
#@title 安裝 DeepFaceLab
# (Clone the DeepFaceLab repository)
# !git clone https://github.com/iperov/DeepFaceLab.git

# + cellView="form" colab={"base_uri": "https://localhost:8080/"} id="KwlV62qxcY4v" outputId="1433f760-da45-438c-a587-6d8b1959a079"
#@title Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 35} id="7HbURJTmgM1t" outputId="1c89d1c9-3210-41d2-c151-f068355ffb16"
#@title 防止 Google Colab 斷開連線
# (Keep the Colab session alive: click the reconnect button every 60 s)
import IPython
from google.colab import output

display(IPython.display.Javascript('''
function ClickConnect(){
  btn = document.querySelector("colab-connect-button")
  if (btn != null){
    console.log("Click colab-connect-button");
    btn.click()
  }
  btn = document.getElementById('ok')
  if (btn != null){
    console.log("Click reconnect");
    btn.click()
  }
}
setInterval(ClickConnect,60000)
'''))
print("Done.")

# + cellView="form" id="Zqb6s37kqzrq"
#@title 將素材從 Google Drive 搬回 VM
# (Copy the zipped workspace from Google Drive onto the VM)
workspace_file = '/content/drive/MyDrive/aidataset/deepfacelab/deepfacelab_ws2.zip' #@param {type:"string"}
cmd = f'cp {workspace_file} ./'
# ! $cmd

# + cellView="form" id="Scqv3ILhtLF2"
#@title 解壓縮Workspace
# (Unzip the workspace archive)
zip_file = 'deepfacelab_ws2.zip' #@param {type:"string"}
cmd = f'unzip {zip_file}'
# ! $cmd

# + [markdown] id="_3LlY9XxqX9L"
# ## 可以在 https://dfldata.xyz/ 搜尋萬用 XSeg 模型
# (General-purpose XSeg models can be found at https://dfldata.xyz/)

# + id="Y-wi-zIS1bzs" cellView="form"
#@title 套用或移除 XSeg 遮罩
# (Apply or remove the XSeg segmentation mask on the aligned face set)
Mode = "Apply mask" #@param ["Apply mask", "Remove mask"]
Data = "data_src" #@param ["data_src", "data_dst"]

# Build the "python main.py xseg apply|remove --input-dir ..." command line;
# the trailing spaces in these fragments act as argument separators.
main_path = "/content/DeepFaceLab/main.py "
data_path = "/content/workspace/"+Data+"/aligned "
mode_arg = "apply " if Mode == "Apply mask" else "remove "
cmd = main_path+"xseg "+mode_arg+"--input-dir "+data_path
# The model directory is only needed when applying a mask.
cmd += "--model-dir /content/workspace/model" if mode_arg == "apply " else ""
# !python $cmd

# + cellView="form" id="AQsRjacktdrJ"
#@title 訓練 DeepFakes 模型
# (Train the SAEHD model; checkpoints go to the Drive folder below)
import os

model_path = '/content/drive/MyDrive/aidataset/deepfacelab/model_20220312' #@param {type:"string"}
if not os.path.exists(model_path):
  os.mkdir(model_path)

# %cd /content/DeepFaceLab/
cmd = f'python main.py train --training-data-src-dir /content/workspace/data_src/aligned --training-data-dst-dir /content/workspace/data_dst/aligned --model-dir {model_path} --model SAEHD'
# ! $cmd
deepfacelab_xseg_apply.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.10 64-bit (conda) # language: python # name: python_defaultSpec_1595974702083 # --- # # Deploy IoT Edge Modules # # In this section, we will deploy LVA & Inference Server modules (IoT Edge Runtime specific Docker container images) to an IoT Edge device. # ## Setting Up the Deployment Manifest File from env_variables import * set_key(envPath, "INPUT_VIDEO_FOLDER_ON_DEVICE", "/lvafolders/input") tempVar = set_key(envPath, "OUTPUT_VIDEO_FOLDER_ON_DEVICE", "/lvafolders/output") # ## Setting Up Local Folders on the IoT Edge Device # Next, we will set up local folders on our IoT Edge device. First, connect to the Iot Edge device in a terminal window and create the two folders below on the IoT Edge device. You can give these folders any name and create them under any desired root folder. However, the full path must be same in the other parts of this section. Both folders must have read and write access for all users. # # Run the commands below **on the IoT Edge device** through a terminal window. # # ``` # sudo mkdir -p /lvafolders/input # sudo mkdir /lvafolders/output # ``` # # Next, set the access permissions for these folders. # ``` # sudo chmod 777 -R /lvafolders # ``` # # Afterwards, in your IoT Edge device's terminal window, run the following command to download the video sample named `lots_284.mkv` into your /lvafolders/input folder. This video sample is a minute-long clip of a parking lot. If you wish to run another video clip, simply move it into the /lvafolders/input folder. # # ``` # curl https://lvamedia.blob.core.windows.net/public/lots_284.mkv --output /lvafolders/input/lots_284.mkv # ``` # # Finally, set the access permissions again for safe measure. 
# ``` # sudo chmod 777 -R /lvafolders # ``` # ## Reset the IoT Edge Device and Deploy the LVA & Inference Server Modules # # Next, we will reset the IoT Edge device by uploading a deployment manifest that consists of only two system modules: IoTHub and IoTEdge. # # 1. Right click on the "deployment.reset.json" file # # ![reset deployment](../../../../images/_reset_deployment.jpg) # # 2. Click on "Create Deployment for Single Device" # a. If it is the first time using the IoT Hub service that was created in the previous sections, VSCode will ask you to enter the "IoT Hub Connection String". You can use the value of the "IOT_HUB_CONN_STRING" key, which can be located in the .env file. This command will open a window on top edge of the VSCode and will ask for the IoT Edge device name that you want to use make this deployment. Since you entered the IoT Hub connection string, it will automatically find names of applicable devices and list them for you. For this section, we only have one Edge device, which we named "lva-sample-device" when we defined the variables. Select this device in the drop-down list. # # ![select edge device](../../../../images/_select_edge_device.png) # # b. If you already have a connected IoT Hub in VS Code Azure IoT Hub service, and would like to switch to a different IoT Hub to deploy LVA, you can click "More Actions" on the "AZURE IOT HUB" panel, and enter your "IOT_HUB_CONN_STRING" key. # # ![deployment set iothub](../../../../images/_deployment_set_iothub.png) # # 3. Locate the deployment manifest that you created in previous sections. Note that the name of this file may be different from the one listed below, depending on the sample you are running. Right click on the deployment manifest file. In the pop-up menu, click on "Generate IoT Edge Deployment Manifest". In this step, VSCode will auto-read the contents of the .env file and use the values of the variables to write over some placeholder variables inside the file. 
After these replacements, a new version of the file will be placed under the newly created "config" folder in the same working directory. # # ![deployment modules](../../../../images/_deployment_modules_3.jpg) # # 4. Right click on the file in the config folder. Note that its name may be different from the one below, depending on the sample you are running. In the pop-up menu, click on "Create Deployment for Single Device". # # ![deployment modules](../../../../images/_deployment_modules_4.jpg) # # 5. Like in step 2, VSCode will ask the device name to where the deployment will be made. Again, select the appropriate device name. # # 6. Depending on your Internet speed, the modules will be pulled from the Cloud and deployed into your Edge device; this process can range from seconds to minutes. In VSCode, refresh the "AZURE IOT HUB" panel. You can see the modules that are uploaded on to your Edge device once the refresh is complete. # # ![View iotedge device](../../../../images/_iothub_view.jpg) # # As in the screen shot above, you should see a status of "running" next to each module name (see region 3 in the above screenshot). # # Alternatively, you can run the following command in a terminal window on the Iot Edge device: # # ``` # sudo iotedge list # ``` # # The result of the command above will look something similar to the snippet below, with all module statuses saying "running". 
#
# ```
# user@iotedgedevice:~/Desktop/subfolder$ sudo iotedge list
# [sudo] password for user:
# NAME             STATUS           DESCRIPTION      CONFIG
# lvaExtension     running          Up 6 minutes     namecontreg.azurecr.io/nameaimodule:latest
# edgeAgent        running          Up 7 minutes     mcr.microsoft.com/azureiotedge-agent:1.0
# edgeHub          running          Up 6 minutes     mcr.microsoft.com/azureiotedge-hub:1.0
# lvaEdge          running          Up 6 minutes     mcr.microsoft.com/media/live-video-analytics:1
# rtspsim          running          Up 6 minutes     mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2
# ```

# ## Troubleshooting
#
# If any of the modules are not running after a few minutes, you can try the following commands:
#
# 1) Check IoT Edge
#
# ```
# sudo iotedge check
# ```
#
# 2) Restart IoT Edge and wait a few minutes
#
# ```
# sudo systemctl restart iotedge
# ```
#
# 3) Check the logs of your devices (e.g., lvaExtension, lvaEdge)
#
# ```
# sudo iotedge logs <NAME_OF_DEVICE>
# ```
#
# These commands are relevant for the following sections as well. If you need more troubleshooting solutions, click [here](https://docs.microsoft.com/en-us/azure/iot-edge/troubleshoot).
#
# If all the modules are running, you have successfully deployed the modules into the IoT Edge device.

# ## Next Steps
# If all the code cells above have successfully finished running, return to the Readme page to continue.
utilities/video-analysis/notebooks/common/deploy_iotedge_modules.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ringity
#     language: python
#     name: ringity
# ---

import ringity as rng

# ## Create network

# +
n_nodes       = 2**10
density       = 0.05   # expected density
randomness    = 0.1    # 'randomness parameter': eta=1 results in an Erdos-Renyi graph.
concentration = 0      # 'concentration parameter': higher values of kappa result in an increasing accumulation of hubs.
local_wiring  = 0.5    # 'local wiring parameter': higher values of a result in a lower average clustering coefficient

G = rng.network_model(N=n_nodes, rho=density, eta=randomness,
                      kappa=concentration, a=local_wiring)
# -

# ##### Visualization

rng.plot_nx(G)

# ## Diagram properties

# ### Persistence diagram

dgm = rng.diagram(G)
print(dgm)

# ##### Visualization

rng.plot_dgm(dgm)

# ### Persistence sequence (a.k.a. "Hole-Size Distribution")

sequence = dgm.sequence
print(sequence)

# ##### Visualization

# Plot the first 30 values of the persistence sequence.
rng.plot_seq(dgm, 30)

# ### Ring score

dgm.score
doc/notebooks/ringity_user_guide.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
# %matplotlib inline

# Load the training frame from the HDF5 store, ordered by instrument and time.
with pd.HDFStore('train.h5') as train:
    df = train.get('train').sort_values(by=['id','timestamp'])

df.head()

# Quick look at one derived feature.
# BUG FIX: DataFrame.plot's first positional argument is `x`; the column to
# draw has to be passed as `y`.
df.plot(y=['derived_1'])

from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso

cols = ['derived_0', 'derived_1', 'derived_2', 'derived_3', 'derived_4',
        'fundamental_0', 'fundamental_1', 'fundamental_2', 'fundamental_3',
        'fundamental_5', 'fundamental_6', 'fundamental_7', 'fundamental_8',
        'fundamental_9', 'fundamental_10', 'fundamental_11', 'fundamental_12',
        'fundamental_13', 'fundamental_14', 'fundamental_15', 'fundamental_16',
        'fundamental_17', 'fundamental_18', 'fundamental_19', 'fundamental_20',
        'fundamental_21', 'fundamental_22', 'fundamental_23', 'fundamental_24',
        'fundamental_25', 'fundamental_26', 'fundamental_27', 'fundamental_28',
        'fundamental_29', 'fundamental_30', 'fundamental_31', 'fundamental_32',
        'fundamental_33', 'fundamental_34', 'fundamental_35', 'fundamental_36',
        'fundamental_37', 'fundamental_38', 'fundamental_39', 'fundamental_40',
        'fundamental_41', 'fundamental_42', 'fundamental_43', 'fundamental_44',
        'fundamental_45']

# Feature matrix (first five columns) and target.
# .copy() so the in-place fillna below operates on an independent frame
# rather than on a view of `df`.
X = df.loc[:, cols[:5]].copy()
y = df.y
X.fillna(X.median(), inplace=True);

# BUG FIX: this Lasso fit originally ran *before* X and y were defined,
# which raised a NameError. Fit it only once the data exists.
model = Lasso()
model.fit(X, y)

from sklearn.linear_model import ElasticNet
enet = ElasticNet(l1_ratio=0.1, alpha=0.1, fit_intercept=True, normalize=True)

from sklearn.model_selection import validation_curve

# Sweep the regularisation strength `alpha` over five decades.
param_range = np.logspace(-3, 1, 5)
train_scores, test_scores = validation_curve(
    enet, X, y, param_name="alpha", param_range=param_range,
    cv=3, n_jobs=3, verbose=2)

# +
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std  = np.std(train_scores, axis=1)
test_scores_mean  = np.mean(test_scores, axis=1)
test_scores_std   = np.std(test_scores, axis=1)

# BUG FIX: the title said "SVM" and the x-axis said gamma; the curve sweeps
# ElasticNet's alpha.
plt.title("Validation Curve with ElasticNet")
plt.xlabel(r"$\alpha$")
plt.ylabel("Score")
plt.ylim(-0.1, 0.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
             color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2,
                 color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2,
                 color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
# -

train_scores_mean

test_scores_mean

enet.fit(X, y)

enet.score(X, y)

df.corrwith(df.y)
2Sigma/.ipynb_checkpoints/ElasticNet-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Predict men's shoe prices from brand and parsed product features, then
# inspect feature importance with eli5's permutation importance.

# +
# #!pip install eli5

# +
import pandas as pd
import numpy as np

from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

from ast import literal_eval
from tqdm import tqdm_notebook

# +
# cd '/content/drive/My Drive/Colab Notebooks/dw_matrix'

# +
# ls data

# +
df = pd.read_csv('data/men_shoes.csv', low_memory=False)

# +
def run_model(feats, model=None):
    """Cross-validate `model` on the given feature columns.

    Returns (mean, std) of the negative mean-absolute-error scores for
    predicting `prices_amountmin`.

    BUG FIX: the default model used to be a mutable default argument — a
    single DecisionTreeRegressor instance created once at definition time
    and shared between calls. Build a fresh estimator per call instead.
    """
    if model is None:
        model = DecisionTreeRegressor(max_depth=5)
    X = df[feats].values
    y = df['prices_amountmin'].values

    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)

# +
# Integer-encode the (lower-cased) brand.
df['brand_cat'] = df.brand.map(lambda x: str(x).lower()).factorize()[0]

# +
run_model(['brand_cat'])

# +
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)

# +
df.features.values

# +
def parse_features(x):
    """Parse the `features` string (a list of {key, value} dicts) into a
    flat {key: first value} dict; NaN rows yield an empty dict."""
    output_dict = {}
    if str(x) == 'nan':
        return output_dict

    features = literal_eval(x.replace('\\"', '"'))
    for item in features:
        key = item['key'].lower().strip()
        value = item['value'][0].lower().strip()
        output_dict[key] = value
    return output_dict

df['features_parsed'] = df['features'].map(parse_features)

# +
# Collect the set of all feature keys that appear anywhere in the data.
keys = set()
df['features_parsed'].map(lambda x: keys.update(x.keys()))
len(keys)

# +
def get_name_feat(key):
    # Column name used for a parsed feature.
    return 'feat_' + key

# One column per feature key; NaN where a row lacks that key.
for key in tqdm_notebook(keys):
    df[get_name_feat(key)] = df.features_parsed.map(
        lambda feats: feats[key] if key in feats else np.nan)

# +
df.columns

# +
# Percentage of rows in which each parsed feature is present.
keys_stat = {}
for key in keys:
    keys_stat[key] = df[get_name_feat(key)].notnull().mean() * 100

# +
{k: v for k, v in keys_stat.items() if v > 30}

# +
keys_stat

# +
# Integer-encode every parsed-feature column.
for key in keys_stat:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]
    print(get_name_feat(key))

# +
feats = ['brand_cat']

# +
model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(['brand_cat'], model=model)

# +
feats = ['brand_cat', 'feat_brand_cat', 'feat_color_cat', 'feat_gender_cat',
         'feat_manufacturer part number_cat', 'feat_material_cat']
run_model(feats, model)

# +
feats = ['brand_cat', 'feat_brand_cat', 'feat_gender_cat',
         'feat_material_cat', 'feat_style_cat']
run_model(feats, model)

# +
# All encoded columns. The substring filter also catches the raw
# 'feat_catalog' column (it contains '_cat'), hence the remove() below.
feats_cat = [x for x in df.columns if '_cat' in x]

# +
feats_cat.remove('feat_catalog')

# +
feats += feats_cat
feats = list(set(feats))
# NOTE(review): this evaluates feats_cat, while the merged `feats` list is
# what the permutation-importance cell below uses — confirm intended.
run_model(feats_cat, model)

# +
# Fit on the full merged feature list and rank features by permutation
# importance.
X = df[feats].values
y = df['prices_amountmin'].values
m = model.fit(X, y)

perm = PermutationImportance(m, random_state=0).fit(X, y);
eli5.show_weights(perm, feature_names=feats)
matrix_one/day5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="_qsi8wF8Zu4c" colab_type="text" # https://colab.research.google.com/drive/1BvH_StLUVEUn2kWlw4O6wDVP0A_Zi-Ci # + id="oHqVQkc2fLKB" colab_type="code" outputId="e9b9a1eb-e3bf-4754-ed97-bebb374c718a" colab={"base_uri": "https://localhost:8080/", "height": 445} # !wget https://www.dropbox.com/s/0pigmmmynbf9xwq/dataset1.zip # + id="9-8liePXfOR-" colab_type="code" colab={} # !unzip dataset1.zip # + id="gPuPwXSUfQAi" colab_type="code" colab={} dir_data = "/content/dataset1" dir_seg = dir_data + "/annotations_prepped_train/" dir_img = dir_data + "/images_prepped_train/" # + id="EKD6_34kfUpW" colab_type="code" outputId="16951017-434c-466a-e65b-a218e09db294" colab={"base_uri": "https://localhost:8080/", "height": 102} import glob, os all_img_paths = glob.glob(os.path.join(dir_img, '*.png')) all_img_paths[:5] # + id="k5jRpdY6fvjx" colab_type="code" outputId="a31c8564-62fe-413a-8da8-c7858e73af21" colab={"base_uri": "https://localhost:8080/", "height": 102} import glob, os all_mask_paths = glob.glob(os.path.join(dir_seg, '*.png')) all_mask_paths[:5] # + id="dv4ijunUgSsN" colab_type="code" outputId="50625947-882b-4902-8d6d-62e8c6d10344" colab={"base_uri": "https://localhost:8080/", "height": 34} all_img_paths[0].split('/')[4] # + id="LWjtjeSUfzGz" colab_type="code" colab={} x = [] y = [] count = 0 # + id="7w3FtHgYf3XI" colab_type="code" outputId="29b327cd-856d-4c32-b642-e5083faac43b" colab={"base_uri": "https://localhost:8080/", "height": 85} import cv2 from scipy import ndimage from skimage import io for i in range(len(all_img_paths)): img = cv2.imread(all_img_paths[i]) img = cv2.resize(img,(224,224)) mask_path = dir_seg+all_img_paths[i].split('/')[4] img_mask = io.imread(mask_path) # Changed to skimage read img_mask = cv2.resize(img_mask,(224,224)) 
x.append(img) y.append(img_mask) if(i%100==0): print(i) # + id="FFcyj7uIgf49" colab_type="code" outputId="3ad490fe-c281-461c-af4d-52370c229069" colab={"base_uri": "https://localhost:8080/", "height": 34} import numpy as np np.array(y).shape # + id="gCO37rWDCLJH" colab_type="code" outputId="6987c55b-6bc1-4cf1-a54f-f0878fc7eaff" colab={"base_uri": "https://localhost:8080/", "height": 34} np.array(x).shape # + id="PCCXv1ShylWT" colab_type="code" outputId="03e23b73-bf6a-4e03-8643-9a719a063afa" colab={"base_uri": "https://localhost:8080/", "height": 332} import matplotlib.pyplot as plt # %matplotlib inline plt.subplot(221) plt.imshow(x[0]) plt.axis('off') plt.title('Original image') plt.grid('off') plt.subplot(222) plt.imshow(y[0]) plt.axis('off') plt.title('Masked image') plt.grid('off') plt.subplot(223) plt.imshow(x[1]) plt.axis('off') plt.grid('off') plt.subplot(224) plt.imshow(y[1]) plt.axis('off') plt.grid('off') plt.show() # + id="pWSR_bf0gm4Y" colab_type="code" outputId="8256a143-c34a-4b1b-c428-cd5921136eb3" colab={"base_uri": "https://localhost:8080/", "height": 320} import matplotlib.pyplot as plt # %matplotlib inline plt.imshow(img_mask) plt.grid('off') # + id="0oOvtatDgweJ" colab_type="code" outputId="263b6f99-4370-48a7-b63e-012360bd4cd9" colab={"base_uri": "https://localhost:8080/", "height": 34} set(np.array(y).flatten()) # + id="HmwhFpYIg4zz" colab_type="code" outputId="e4b1fb88-7710-4513-e2c2-2f2c4f2ace61" colab={"base_uri": "https://localhost:8080/", "height": 34} np.array(y).shape # + id="pXX0IMtzhWCS" colab_type="code" outputId="b1d961a1-5149-48b2-8773-81abc773ea57" colab={"base_uri": "https://localhost:8080/", "height": 34} n_classes = len(set(np.array(y).flatten())) n_classes # + id="wNmsxFFHjTY_" colab_type="code" colab={} def getSegmentationArr(img): seg_labels = np.zeros(( 224 , 224 , 12 )) for c in range(12): seg_labels[: , : , c ] = (img == c ).astype(int) return seg_labels # + id="4Uy4f80qjkxT" colab_type="code" colab={} y2 = [] for i in 
range(len(y)): y2.append(getSegmentationArr(y[i])) # + id="xEJoVLFVjr1w" colab_type="code" outputId="9bdf7027-a0ae-44fb-f803-7abe3f1eb5a1" colab={"base_uri": "https://localhost:8080/", "height": 34} x = np.array(x) y2 = np.array(y2) print(x.shape, y2.shape) # + id="-ULZzNm5j3bm" colab_type="code" outputId="3b9da586-5436-4d39-fed9-13514be9909a" colab={"base_uri": "https://localhost:8080/", "height": 34} x = x/255 print(np.max(x)) # + id="4mLQYWIPkB8J" colab_type="code" outputId="a2de3bda-a809-44ae-b036-4aaa164266a8" colab={"base_uri": "https://localhost:8080/", "height": 1000} from keras.applications.vgg16 import VGG16 as PTModel base_pretrained_model = PTModel(input_shape = (224,224,3), include_top = False, weights = 'imagenet') base_pretrained_model.trainable = False base_pretrained_model.summary() # + id="8l6m3UKzkEeK" colab_type="code" colab={} from keras.layers import Input, Conv2D, concatenate, UpSampling2D, BatchNormalization, Activation, Cropping2D, ZeroPadding2D # + id="t9kuB24OkJPu" colab_type="code" colab={} from keras.layers import Input, merge, Conv2D, MaxPooling2D,UpSampling2D, Dropout, Cropping2D, merge, concatenate from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras import backend as K from keras.models import Model # + id="VPb0szQXkGNh" colab_type="code" outputId="8f630fde-8f19-49f5-ca35-c5329afdc155" colab={"base_uri": "https://localhost:8080/", "height": 88} conv1 = Model(inputs=base_pretrained_model.input,outputs=base_pretrained_model.get_layer('block1_conv2').output).output conv2 = Model(inputs=base_pretrained_model.input,outputs=base_pretrained_model.get_layer('block2_conv2').output).output conv3 = Model(inputs=base_pretrained_model.input,outputs=base_pretrained_model.get_layer('block3_conv3').output).output conv4 = Model(inputs=base_pretrained_model.input,outputs=base_pretrained_model.get_layer('block4_conv3').output).output drop4 = Dropout(0.5)(conv4) conv5 = 
Model(inputs=base_pretrained_model.input,outputs=base_pretrained_model.get_layer('block5_conv3').output).output drop5 = Dropout(0.5)(conv5) # + id="aZaaxUmjkHn2" colab_type="code" outputId="a6bf01f9-d2ff-4cbe-f25e-0b68bdf6d5b1" colab={"base_uri": "https://localhost:8080/", "height": 105} up6 = Conv2D(512, 2, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(UpSampling2D(size =(2,2))(drop5)) merge6 = concatenate([drop4,up6], axis = 3) conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(merge6) conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(conv6) conv6 = BatchNormalization()(conv6) up7 = Conv2D(256, 2, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(UpSampling2D(size =(2,2))(conv6)) merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(merge7) conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(conv7) conv7 = BatchNormalization()(conv7) up8 = Conv2D(128, 2, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(UpSampling2D(size =(2,2))(conv7)) merge8 = concatenate([conv2,up8],axis = 3) conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(merge8) conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(conv8) conv8 = BatchNormalization()(conv8) up9 = Conv2D(64, 2, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(UpSampling2D(size =(2,2))(conv8)) merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(merge9) conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(conv9) conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same',kernel_initializer = 'he_normal')(conv9) 
conv9 = BatchNormalization()(conv9) conv10 = Conv2D(12, 1, activation = 'softmax')(conv9) # + id="0EjRCdG4kORH" colab_type="code" outputId="f138b55e-d888-4f8f-9a25-d27608571f2a" colab={"base_uri": "https://localhost:8080/", "height": 1000} model = Model(input = base_pretrained_model.input, output = conv10) model.summary() # + id="uhKfgElukPqM" colab_type="code" colab={} for layer in model.layers[:18]: layer.trainable = False # + id="eofGIBfglz-V" colab_type="code" outputId="05e8395f-05ab-45f1-cc4f-a54a24d85435" colab={"base_uri": "https://localhost:8080/", "height": 51} model.compile(optimizer=Adam(1e-3, decay = 1e-6), loss='categorical_crossentropy', metrics = ['accuracy']) # + id="ly9u4LB1uyO1" colab_type="code" outputId="fb941a4f-e6fb-413f-d9a1-9ac0e3c3eb3e" colab={"base_uri": "https://localhost:8080/", "height": 34} np.max(x) # + id="prYx0SaukSwk" colab_type="code" outputId="028dbd2c-6927-4e79-fc8f-582c8a26bb62" colab={"base_uri": "https://localhost:8080/", "height": 615} history = model.fit(x,y2,epochs=15,batch_size=1,validation_split=0.1) # + id="qYqWM6KB6uVm" colab_type="code" outputId="c6ab37d7-199f-4369-d5fb-0ee0f7178bb6" colab={"base_uri": "https://localhost:8080/", "height": 388} history_dict = history.history loss_values = history_dict['loss'] val_loss_values = history_dict['val_loss'] acc_values = history_dict['acc'] val_acc_values = history_dict['val_acc'] epochs = range(1, len(val_loss_values) + 1) plt.subplot(211) plt.plot(epochs, history.history['loss'], 'r', label='Training loss') plt.plot(epochs, val_loss_values, 'b', label='Test loss') plt.title('Training and test loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.grid('off') plt.show() plt.subplot(212) plt.plot(epochs, history.history['acc'], 'r', label='Training accuracy') plt.plot(epochs, val_acc_values, 'b', label='Test accuracy') plt.title('Training and test accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.gca().set_yticklabels(['{:.0f}%'.format(x*100) for x in 
plt.gca().get_yticks()]) plt.legend() plt.grid('off') plt.show() # + id="C47Epm8ErU7p" colab_type="code" colab={} y_pred = model.predict(x[-2:].reshape(2,224,224,3)) y_predi = np.argmax(y_pred, axis=3) y_testi = np.argmax(y2[-2:].reshape(2,224,224,12), axis=3) #np.mean(y_predi == y_testi) # + id="CaiOIdX4rp_5" colab_type="code" outputId="4949b8d8-7206-4f16-a4b2-f3fe00dd642f" colab={"base_uri": "https://localhost:8080/", "height": 34} np.mean(y_predi == y_testi) # + id="CrokcxCxsdtU" colab_type="code" colab={} import tensorflow as tf from keras.backend.tensorflow_backend import set_session import keras, sys, time, warnings from keras.models import * from keras.layers import * import pandas as pd # + id="1YWO0HP1wXmW" colab_type="code" outputId="4d31cbf1-d6ee-4b1a-ca4e-41d86e033f93" colab={"base_uri": "https://localhost:8080/", "height": 332} import matplotlib.pyplot as plt # %matplotlib inline plt.subplot(231) plt.imshow(x[-1]) plt.axis('off') plt.title('Original image') plt.grid('off') plt.subplot(232) plt.imshow(y[-1]) plt.axis('off') plt.title('Masked image') plt.grid('off') plt.subplot(233) plt.imshow(y_predi[-1]) plt.axis('off') plt.title('Predicted masked image') plt.grid('off') plt.subplot(234) plt.imshow(x[-2]) plt.axis('off') plt.grid('off') plt.subplot(235) plt.imshow(y[-2]) plt.axis('off') plt.grid('off') plt.subplot(236) plt.imshow(y_predi[-2]) plt.axis('off') plt.grid('off') plt.show() # + id="vb1qtnoA2hF-" colab_type="code" colab={} # + id="Bl4Dqfrq2hNz" colab_type="code" colab={} from keras.utils import plot_model # + id="7gFUB5Q72hTX" colab_type="code" outputId="a954c68c-50e2-4b74-cedd-bc4b40e1205b" colab={"base_uri": "https://localhost:8080/", "height": 1000} plot_model(model, show_shapes=True, show_layer_names=True, to_file='model.png') from IPython.display import Image Image(retina=True, filename='model.png') # + id="t38oatH02hRl" colab_type="code" colab={} # + id="Nv-0Eyvd2hLT" colab_type="code" colab={} # + id="Qzx1jP192hJk" colab_type="code" 
colab={}
Chapter07/Semantic_segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.5 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # CNTK 103 Part A: MNIST Data Loader # # This tutorial is targeted to individuals who are new to CNTK and to machine learning. We assume you have completed or are familiar with CNTK 101 and 102. In this tutorial, you will train a feed forward network based simple model to recognize handwritten digits. This is the first example, where we will train and evaluate a neural network based model on read real world data. # # CNTK 103 tutorial is divided into two parts: # - Part A: Familiarize with the [MNIST][] database that will be used later in the tutorial # - [Part B](https://github.com/Microsoft/CNTK/blob/v2.0.rc2/Tutorials/CNTK_103B_MNIST_FeedForwardNetwork.ipynb): We will use the feedforward classifier used in CNTK 102 to classify digits in MNIST data set. # # [MNIST]: http://yann.lecun.com/exdb/mnist/ # # # + deletable=true editable=true # Import the relevant modules to be used later from __future__ import print_function import gzip import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import os import shutil import struct import sys try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve # Config matplotlib for inline plotting # %matplotlib inline # + [markdown] deletable=true editable=true # ## Data download # # We will download the data into local machine. The MNIST database is a standard handwritten digits that has been widely used for training and testing of machine learning algorithms. It has a training set of 60,000 images and a test set of 10,000 images with each image being 28 x 28 pixels. This set is easy to use visualize and train on any computer. 
# + deletable=true editable=true # Functions to load MNIST images and unpack into train and test set. # - loadData reads image data and formats into a 28x28 long array # - loadLabels reads the corresponding labels data, 1 for each image # - load packs the downloaded image and labels data into a combined format to be read later by # CNTK text reader def loadData(src, cimg): print ('Downloading ' + src) gzfname, h = urlretrieve(src, './delete.me') print ('Done.') try: with gzip.open(gzfname) as gz: n = struct.unpack('I', gz.read(4)) # Read magic number. if n[0] != 0x3080000: raise Exception('Invalid file: unexpected magic number.') # Read number of entries. n = struct.unpack('>I', gz.read(4))[0] if n != cimg: raise Exception('Invalid file: expected {0} entries.'.format(cimg)) crow = struct.unpack('>I', gz.read(4))[0] ccol = struct.unpack('>I', gz.read(4))[0] if crow != 28 or ccol != 28: raise Exception('Invalid file: expected 28 rows/cols per image.') # Read data. res = np.fromstring(gz.read(cimg * crow * ccol), dtype = np.uint8) finally: os.remove(gzfname) return res.reshape((cimg, crow * ccol)) def loadLabels(src, cimg): print ('Downloading ' + src) gzfname, h = urlretrieve(src, './delete.me') print ('Done.') try: with gzip.open(gzfname) as gz: n = struct.unpack('I', gz.read(4)) # Read magic number. if n[0] != 0x1080000: raise Exception('Invalid file: unexpected magic number.') # Read number of entries. n = struct.unpack('>I', gz.read(4)) if n[0] != cimg: raise Exception('Invalid file: expected {0} rows.'.format(cimg)) # Read labels. res = np.fromstring(gz.read(cimg), dtype = np.uint8) finally: os.remove(gzfname) return res.reshape((cimg, 1)) def try_download(dataSrc, labelsSrc, cimg): data = loadData(dataSrc, cimg) labels = loadLabels(labelsSrc, cimg) return np.hstack((data, labels)) # + [markdown] deletable=true editable=true # # Download the data # # The MNIST data is provided as train and test set. 
Training set has 60000 images while the test set has 10000 images. Let us download the data. # + deletable=true editable=true # URLs for the train image and labels data url_train_image = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz' url_train_labels = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz' num_train_samples = 60000 print("Downloading train data") train = try_download(url_train_image, url_train_labels, num_train_samples) url_test_image = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz' url_test_labels = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz' num_test_samples = 10000 print("Downloading test data") test = try_download(url_test_image, url_test_labels, num_test_samples) # + [markdown] deletable=true editable=true # # Visualize the data # + deletable=true editable=true # Plot a random image sample_number = 5001 plt.imshow(train[sample_number,:-1].reshape(28,28), cmap="gray_r") plt.axis('off') print("Image Label: ", train[sample_number,-1]) # + [markdown] deletable=true editable=true # # Save the images # # Save the images in a local directory. While saving the data we flatten the images to a vector (28x28 image pixels becomes an array of length 784 data points) and the labels are encoded as [1-hot][] encoding (label of 3 with 10 digits becomes `0010000000`. 
# # [1-hot]: https://en.wikipedia.org/wiki/One-hot # + deletable=true editable=true # Save the data files into a format compatible with CNTK text reader def savetxt(filename, ndarray): dir = os.path.dirname(filename) if not os.path.exists(dir): os.makedirs(dir) if not os.path.isfile(filename): print("Saving", filename ) with open(filename, 'w') as f: labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str))) for row in ndarray: row_str = row.astype(str) label_str = labels[row[-1]] feature_str = ' '.join(row_str[:-1]) f.write('|labels {} |features {}\n'.format(label_str, feature_str)) else: print("File already exists", filename) # + deletable=true editable=true # Save the train and test files (prefer our default path for the data) data_dir = os.path.join("..", "Examples", "Image", "DataSets", "MNIST") if not os.path.exists(data_dir): data_dir = os.path.join("data", "MNIST") print ('Writing train text file...') savetxt(os.path.join(data_dir, "Train-28x28_cntk_text.txt"), train) print ('Writing test text file...') savetxt(os.path.join(data_dir, "Test-28x28_cntk_text.txt"), test) print('Done') # + [markdown] deletable=true editable=true # **Suggested Explorations** # # One can do data manipulations to improve the performance of a machine learning system. I suggest you first use the data generated so far and run the classifier in CNTK 103 Part B. Once you have a baseline with classifying the data in its original form, now use the different data manipulation techniques to further improve the model. # # There are several ways data alterations can be performed. CNTK readers automate a lot of these actions for you. However, to get a feel for how these transforms can impact training and test accuracies, I strongly encourage individuals to try one or more of data perturbation. # # - Shuffle the training data (rows to create a different). Hint: Use `permute_indices = np.random.permutation(train.shape[0])`. Then run Part B of the tutorial with this newly permuted data. 
# - Adding noise to the data can often improves [generalization error][]. You can augment the training set by adding noise (generated with numpy, hint: use `numpy.random`) to the training images. # - Distort the images with [affine transformation][] (translations or rotations) # # [generalization error]: https://en.wikipedia.org/wiki/Generalization_error # [affine transformation]: https://en.wikipedia.org/wiki/Affine_transformation # # + deletable=true editable=true
Deep Learning and the Microsoft Cognitive Toolkit/Introduction to Deep Learning & CNTK Hands-on/CNTK_103A_MNIST_DataLoader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME> # # 06/05/2020 # # # Graph # # The graph is a representation of a population and its relations. On a given graph, a node represents a person and an edge represents some kind of realtion, such as, lives with, studies with, buys from etc. # # # Spread of Infection # # At each time step of the pandemic, a time step representing a single day, the chance of someone becoming infected is given by: # # ### $1 - (1-\lambda)*(1-p_r)^2$ # # Where $\lambda$ is the leak probability and $p_r$ is the probability that an infected individual infects a susceptible individual that are connected through relation r. # # ![imagem1](figures\\infection_graph.png) # # # In the code, we model this behavior as follows: # ```python # if G.nodes[node]['status'] == 'susceptible': # # Infection through leak # if np.random.random() < lambda_leak: # newly_infected.append(node) # else: # for contact in adjacencies[1].keys(): # # Infection through infected neighbour # if G.nodes[contact]['status'] == 'infected' and np.random.random() < p_r: # newly_infected.append(node) # G.nodes[contact]['contacts_infected'] += 1 # # Here we model that once infected, additional infections have no effect # break # ``` # # # Graph Types # # The graphs will be randomnly generated and its type and parameters will be used to model different kinds of societal realtionships. # # All graphs were generated so that the population size was equal to 5000 people(nodes). # # ## Relaxed Caveman # # The caveman graph was used to model people that cohabit in the same house or living space. # The parameters used were k(size of cliques) = 4, l(number of groups) = 5000/4 and p(probabilty of rewiring each edge) = 25%. With these parameters we expect to model an average of 4 people per residence. 
# # ![imagem1](figures\\relaxed_caveman.png) # # # ![imagem1](figures\\relaxed_caveman_hist.png) # # ## Scale Free # # The scale free graph was used to model the buy and sell realtion within the population. It was constructed with the default paramenters of the scale free graph from networkx. Here we have "Hubs" or nodes that have a high number of connections compared to the rest of the population. Theses nodes represent someone that has a high number of customers. # # ![imagem1](figures\\scale_free.png) # # # ![imagem1](figures\\scale_free_hist.png) # # # # Simulating the pandemic # # We ran simulations for both the Scale Free and the Relaxed caveman graphs independently to validated their behaviour in contrats to the standanrd SIR model simulation. # # ## SIR Simulation # # ### Parameters: B = 3.2 and v = 0.23 # # ![imagem1](figures\\SIR_simulation.png) # # # ## Caveman simulation # # ### Parameters : p_r = 0.5, lambda_leak=.01, pop_size=5000, initial_infection = 1/5000) # # ![imagem1](figures\\relaxed_caveman_simulation.png) # # # ## Scale Free simulation # # ### Parameters : p_r = 0.5, lambda_leak=.01, pop_size=5000, initial_infection = 1/5000) # # ![imagem1](figures\\scale_free_simulation.png) # #
reports/may_13/Report on Covid Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="etUt8t6gv8HK" # # Sparse Variational Dropout # + [markdown] colab={} colab_type="code" id="xBi5MWAsv8HL" # <img src="https://ars-ashuha.github.io/images/ss_vd1.png", width=960> # <img src="https://ars-ashuha.github.io/images/ss_vd2.png", width=960> # + [markdown] colab_type="text" id="eVnbfF7pwbeH" # # Install # + colab={"base_uri": "https://localhost:8080/", "height": 138} colab_type="code" id="8Rb2JA_YwRVY" outputId="9a774a99-2a02-4a69-b293-7ad1e84a2b8b" # !pip3 install http://download.pytorch.org/whl/cpu/torch-0.4.1-cp36-cp36m-linux_x86_64.whl # !pip3 install torchvision # + colab={"base_uri": "https://localhost:8080/", "height": 248, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" id="v2hTKqOQwTkB" outputId="56335e7f-c6d2-40a9-9eb2-971bfcf745b9" # Logger # !pip install tabulate -q from google.colab import files src = list(files.upload().values())[0] open('logger.py','wb').write(src) # - from logger import Logger # + [markdown] colab_type="text" id="iiCiVLaJv8HV" # # Implementation # + colab={} colab_type="code" id="ICMEDWnov8HW" import math import torch import numpy as np import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from logger import Logger from torch.nn import Parameter from torchvision import datasets, transforms # + colab={} colab_type="code" id="KcEIF5_kv8HY" class LinearSVDO(nn.Module): def __init__(self, in_features, out_features, threshold, bias=True): super(LinearSVDO, self).__init__() self.in_features = in_features self.out_features = out_features self.threshold = threshold self.W = 
Parameter(torch.Tensor(out_features, in_features)) self.log_sigma = Parameter(torch.Tensor(out_features, in_features)) self.bias = Parameter(torch.Tensor(1, out_features)) self.reset_parameters() def reset_parameters(self): self.bias.data.zero_() self.W.data.normal_(0, 0.02) self.log_sigma.data.fill_(-5) def forward(self, x): self.log_alpha = self.log_sigma * 2.0 - 2.0 * torch.log(1e-16 + torch.abs(self.W)) self.log_alpha = torch.clamp(self.log_alpha, -10, 10) if self.training: lrt_mean = F.linear(x, self.W) + self.bias lrt_std = torch.sqrt(F.linear(x * x, torch.exp(self.log_sigma * 2.0)) + 1e-8) eps = lrt_std.data.new(lrt_std.size()).normal_() return lrt_mean + lrt_std * eps return F.linear(x, self.W * (self.log_alpha < 3).float()) + self.bias def kl_reg(self): # Return KL here -- a scalar k1, k2, k3 = torch.Tensor([0.63576]), torch.Tensor([1.8732]), torch.Tensor([1.48695]) kl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha) - 0.5 * torch.log1p(torch.exp(-self.log_alpha)) a = - torch.sum(kl) return a # + colab={} colab_type="code" id="7mGfHQ4Nv8Ha" # Define a simple 2 layer Network class Net(nn.Module): def __init__(self, threshold): super(Net, self).__init__() self.fc1 = LinearSVDO(28*28, 300, threshold) self.fc2 = LinearSVDO(300, 10, threshold) self.threshold = threshold def forward(self, x): x = F.relu(self.fc1(x)) x = F.log_softmax(self.fc2(x), dim=1) return x # + colab={} colab_type="code" id="3fi-O-SFv8Hc" # Load a dataset def get_mnist(batch_size): trsnform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=trsnform), batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, download=True, transform=trsnform), batch_size=batch_size, shuffle=True) return train_loader, test_loader # + colab={} colab_type="code" id="4oGOpuEsv8He" # Define New Loss Function -- 
SGVLB class SGVLB(nn.Module): def __init__(self, net, train_size): super(SGVLB, self).__init__() self.train_size = train_size self.net = net def forward(self, input, target, kl_weight=1.0): assert not target.requires_grad kl = 0.0 for module in self.net.children(): if hasattr(module, 'kl_reg'): kl = kl + module.kl_reg() return F.cross_entropy(input, target) * self.train_size + kl_weight * kl # + colab={} colab_type="code" id="S7HkpvRVv8Hh" model = Net(threshold=3) optimizer = optim.Adam(model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,60,70,80], gamma=0.2) fmt = {'tr_los': '3.1e', 'te_loss': '3.1e', 'sp_0': '.3f', 'sp_1': '.3f', 'lr': '3.1e', 'kl': '.2f'} logger = Logger('sparse_vd', fmt=fmt) train_loader, test_loader = get_mnist(batch_size=100) sgvlb = SGVLB(model, len(train_loader.dataset)) # + colab={"base_uri": "https://localhost:8080/", "height": 1730} colab_type="code" id="v6mf7WjhJIqA" outputId="20da9957-22f0-45d9-faa8-705ca4b58a20" kl_weight = 0.02 epochs = 100 for epoch in range(1, epochs + 1): scheduler.step() model.train() train_loss, train_acc = 0, 0 kl_weight = min(kl_weight+0.02, 1) logger.add_scalar(epoch, 'kl', kl_weight) logger.add_scalar(epoch, 'lr', scheduler.get_lr()[0]) for batch_idx, (data, target) in enumerate(train_loader): data = data.view(-1, 28*28) optimizer.zero_grad() output = model(data) pred = output.data.max(1)[1] loss = sgvlb(output, target, kl_weight) loss.backward() optimizer.step() train_loss += loss train_acc += np.sum(pred.numpy() == target.data.numpy()) logger.add_scalar(epoch, 'tr_los', train_loss / len(train_loader.dataset)) logger.add_scalar(epoch, 'tr_acc', train_acc / len(train_loader.dataset) * 100) model.eval() test_loss, test_acc = 0, 0 for batch_idx, (data, target) in enumerate(test_loader): data = data.view(-1, 28*28) output = model(data) test_loss += float(sgvlb(output, target, kl_weight)) pred = output.data.max(1)[1] test_acc += np.sum(pred.numpy() == 
target.data.numpy()) logger.add_scalar(epoch, 'te_loss', test_loss / len(test_loader.dataset)) logger.add_scalar(epoch, 'te_acc', test_acc / len(test_loader.dataset) * 100) for i, c in enumerate(model.children()): if hasattr(c, 'kl_reg'): logger.add_scalar(epoch, 'sp_%s' % i, (c.log_alpha.data.numpy() > model.threshold).mean()) logger.iter_info() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UQVizuLov8Hu" outputId="2af8aae0-dae3-4a80-9c97-349316e19ad4" all_w, kep_w = 0, 0 for c in model.children(): kep_w += (c.log_alpha.data.numpy() < model.threshold).sum() all_w += c.log_alpha.data.numpy().size print('keept weight ratio =', all_w/kep_w) # + [markdown] colab_type="text" id="FNMUWDjtJgd-" # # Good result should be like # # epoch kl lr tr_los tr_acc te_loss te_acc sp_0 sp_1 # # ------- ---- ------- -------- -------- --------- -------- ------ ------ # # 100 1 1.6e-06 -1.4e+03 98.0 -1.4e+03 98.3 0.969 0.760 # # keept weight ratio = 30.109973454683352 # - # # Visualization # + import matplotlib.pyplot as plt # %matplotlib inline from matplotlib import rcParams rcParams['figure.figsize'] = 16, 3 rcParams['figure.dpi'] = 300 log_alpha = (model.fc1.log_alpha.detach().numpy() < 3).astype(np.float) W = model.fc1.W.detach().numpy() plt.imshow(log_alpha * W, cmap='hot', interpolation=None) plt.colorbar() # + s = 0 from matplotlib import rcParams rcParams['figure.figsize'] = 8, 5 z = np.zeros((28*15, 28*15)) for i in range(15): for j in range(15): s += 1 z[i*28:(i+1)*28, j*28:(j+1)*28] = np.abs((log_alpha * W)[s].reshape(28, 28)) plt.imshow(z, cmap='hot_r') plt.colorbar() plt.axis('off') # - # # Compression with Sparse Matrixes # + colab={} colab_type="code" id="3wnL7Hp9v8Hy" import scipy import numpy as np from scipy.sparse import csc_matrix, csc_matrix, coo_matrix, dok_matrix row, col, data = [], [], [] M = list(model.children())[0].W.data.numpy() LA = list(model.children())[0].log_alpha.data.numpy() for i in range(300): for j in 
range(28*28): if LA[i, j] < 3: row += [i] col += [j] data += [M[i, j]] Mcsr = csc_matrix((data, (row, col)), shape=(300, 28*28)) Mcsc = csc_matrix((data, (row, col)), shape=(300, 28*28)) Mcoo = coo_matrix((data, (row, col)), shape=(300, 28*28)) # + colab={} colab_type="code" id="e4T8E4Miv8H0" np.savez_compressed('M_w', M) scipy.sparse.save_npz('Mcsr_w', Mcsr) scipy.sparse.save_npz('Mcsc_w', Mcsc) scipy.sparse.save_npz('Mcoo_w', Mcoo) # + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" id="QebP7_Emv8H2" outputId="c9c70b01-9c69-40d8-879e-fd93bd4cda7c" # ls -lah | grep _w # -
svdo-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sos # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SoS # language: sos # name: sos # --- # + [markdown] kernel="SoS" tags=[] # # SoS Actions and common action options # + [markdown] kernel="SoS" tags=[] # * **Difficulty level**: intermediate # * **Time need to lean**: 10 minutes or less # * **Key points**: # * SoS actions are Python functions that usually starts an interpreter to execute a script # * Parameters of actions allow you to execute actions with additional parameter, control input and output, and execute in containers # + [markdown] kernel="SoS" tags=[] # ## SoS Actions # + [markdown] kernel="SoS" tags=[] # Although arbitrary python functions can be used in SoS step process, SoS defines many special functions called **`actions`** that accepts some shared parameters, and can behave differently in different running modes of SoS. # # For example, command `sleep 5` would be executed in run mode, # + kernel="SoS" tags=[] # %run -v0 import time start_time = time.time() sh: sleep 5 echo "I am awake" print(f'It took {time.time() - start_time :.1f}s to execute shell script') # + [markdown] kernel="SoS" tags=[] # However, if the action is executed in dryrun mode (option `-n`), it will just print the script it is intended to execute. # + kernel="SoS" tags=[] # %run -n -v0 import time start_time = time.time() sh: sleep 5 echo "I am awake" print(f'It took {time.time() - start_time :.1f}s to execute shell script') # + [markdown] kernel="SoS" tags=[] # ## Action options # + [markdown] kernel="SoS" tags=[] # Actions can have their own parameters but they all accept common set of options that define how they interact with SoS. # + [markdown] kernel="SoS" tags=[] # ### Option `active` # + [markdown] kernel="SoS" tags=[] # Action option `active` is used to activate or inactivate an action. 
It accept either a condition that returns a boolean variable (`True` or `False`), or one or more integers, or slices that corresponds to indexes of active substeps. # # The first usage allows you to execute an action only if certain condition is met, so # # ```sos # if cond: # action(script) # ``` # # is equivalent to # # ```sos # action(script, active=cond) # ``` # or # ```sos # action: active=cond # script # ``` # in script format. For example, the following action will only be executed if `a.txt` exists # + kernel="SoS" tags=[] !echo "something" > a.txt sh: active=file_target('a.txt').exists() wc a.txt # + [markdown] kernel="SoS" tags=[] # For the second usage, when a loop is defined by `for_each` or `group_by` options of `input:` statement, an action after input would be repeated for each substep. The `active` parameter accepts an integer, either a non-negative number, a negative number (counting backward), a sequence of indexes, or a slice object, for which the action would be active. # # For example, for an input loop that loops through a sequence of numbers, the first action `run` is executed for all groups, the second action is executed for even number of groups, the last action is executed for the last step. # + kernel="SoS" tags=[] input: for_each={'seq': range(5)} sh: expand=True echo A at substep {_index} sh: active=slice(None, None, 2), expand=True echo B at substep {_index} sh: active=-1, expand=True echo C at substep {_index} # + [markdown] kernel="SoS" output_cache="[]" tags=[] # ### Option `allow_error` # + [markdown] kernel="SoS" output_cache="[]" tags=[] # Option `allow_error` tells SoS that the action might fail but this should not stop the workflow from executing. This option essentially turns an error to a warning message and change the return value of action to `None`. # # For example, in the following example, the wrong shell script would stop the execution of the step so the following action is not executed. 
# + kernel="SoS" output_cache="[{\"output_type\":\"stream\",\"text\":\"/<KEY>: line 1: This: command not found\\nFailed to process statement run(r\\\"\\\"\\\"This is not shell\\\\n\\\"\\\"\\\")...fter run'): Failed to execute script (ret=127). \\nPlease use command\\n /bin/bash /<KEY>/.sos/interactive_0_0\\nunder /private/var/folders/ys/gnzk0qbx5wbdgm531v82xxljv5yqy8/T/tmpzn3zpjx3 to test it.\\n\",\"name\":\"stderr\"}]" tags=[] # %env --expect-error run: This is not shell print('Step after run') # + [markdown] kernel="SoS" output_cache="[]" tags=[] # With option `allow_error=True`, the error from the `sh` action would turn to a warning and the rest of the step would continue to execute: # + kernel="SoS" output_cache="[{\"output_type\":\"stream\",\"text\":\"/<KEY>: line 1: This: command not found\\n\\u001b[95mWARNING\\u001b[0m: \\u001b[95mFailed to execute script (ret=127). \\nPlease use command\\n /bin/bash /var/<KEY>T/tmp557fin4d/.sos/interactive_0_0\\nunder /Users/bpeng1/SOS/docs/src/documentation to test it.\\u001b[0m\\n\",\"name\":\"stderr\"}]" tags=[] sh: allow_error=True The This is not shell print('Step after run') # + [markdown] kernel="SoS" tags=[] # ### Option `args` # + [markdown] kernel="SoS" tags=[] # All script-executing actions accept an option `args`, which changes how the script is executed. # # By default, such an action has an `interpreter` (e.g. `bash`), a default `args='{filename:q}'`, and the script would be executed as `interpreter args`, which is # ``` # bash {filename:q} # ``` # where `{filename:q}` would be replaced by the script file created from the body of the action. # + [markdown] kernel="SoS" tags=[] # If you would like to change the command line with additional parameters, or different format of filename, you can specify an alternative `args`, with variables `filename` (filename of temporary script) and `script` (actual content of the script). 
# # For example, you can pass command line options to a bash script using `args` as follows # + kernel="SoS" tags=[] bash: args='{filename:q} ARG1 ARG2' echo $1 $2 # + [markdown] kernel="SoS" tags=[] # and you can actually execute a command without `filename`, and instead executing the script directly from command line # + kernel="SoS" output_cache="[{\"output_type\":\"stream\",\"name\":\"stdout\",\"text\":\"10000 loops, best of 3: 31.2 usec per loop\\n\"}]" tags=[] python: args='-m timeit {script}' '"-".join(str(n) for n in range(100))' # + [markdown] kernel="SoS" tags=[] # ### Options `container` and `engine` # + [markdown] kernel="SoS" tags=[] # Parameter `container` and `engine` specify name or URL and execution engine of the container used to execute the action. Parameter `engine` is usually derived from `container` but can be specified explicitly as one of # # * `engine='docker'`: Execute the script in specified container using [docker](https://www.docker.com/) # * `engine='singularity'`: Execute the script with [singularity](https://www.sylabs.io/) # * `engine='local'`: Execute the script locally, this is the default mode. 
# # Parameters `container` and `engine` accept the following values: # # | `container` | `engine` | execute by | example | comment | # | -- | -- | -- | -- | -- | # | `tag` | ` ` | docker | `container='ubuntu'` | docker is the default container engine | # | `name` | `docker` | docker | `container='ubuntu', engine='docker'` | treat `name` as docker tag | # | `docker://tag` | ` ` | docker | `container='docker://ubuntu'` | | # | `filename.simg` | ` ` | singularity | `container='ubuntu.simg'` | | # | `shub://tag` | ` ` | singularity | `container='shub://GodloveD/lolcow'` | Image will be pulled to a local image | # | `library://tag` | ` ` | singularity | `container='library://GodloveD/lolcow'` | Image will be pulled to a local image | # | `name` | `singularity` | singularity | `container='a_dir', engine='singularity'` | treat `name` as singularity image file or directory | # | `docker://tag` | `singularity` | singularity | `container='docker://godlovdc/lolcow', engine='singularity'` | | # | `file://filename` | ` ` | singularity | `container='file://ubuntu.simg'` | | # | `local://name` | ` ` | local | `container='local:any_tag'` | `local://any_tag` is equivalent to `engine='local'` | # | `name` | `local` | local | `engine=engine` with `parameter: engine='docker'` | Usually used to override parameter `container` | # # Basically, # * `container='tag'` pulls and uses docker image `tag` # * `container='filename.simg` uses an existing singularity image # * `container='shub://tag'` pulls and uses singularity image `shub://tag`, which will generate a local `tag.simg` file # + [markdown] kernel="SoS" tags=[] # If a docker image is specified, the action is assumed to be executed in the specified docker container. The image will be automatically downloaded (pulled) if it is not available locally. 
# # For example, executing the following script # + [markdown] kernel="SoS" tags=[] # ``` # [10] # python3: container='python' # set = {'a', 'b'} # print(set) # ``` # + [markdown] kernel="SoS" tags=[] # under a docker terminal (that is connected to the docker daemon) will # # 1. Pull docker image `python`, which is the official docker image for Python 2 and 3. # 2. Create a python script with the specified content # 3. Run the docker container `python` and make the script available inside the container # 4. Use the `python3` command inside the container to execute the script. # # Additional `docker_run` parameters can be passed to actions when the action # is executed in a docker image. These options include # # * `name`: name of the container (option `--name`) # * `tty`: if a tty is attached (default to `True`, option `-t`) # * `stdin_open`: if stdin should be open (default to `False`, option `-i`) # * `user`: username (default o `root`, option `-u`) # * `environment`: Can be a string, a list of string or dictinary of environment variables for docker (option `-e`) # * `volumes`: shared volumes as a string or list of strings, in the format of `hostdir` (for `hostdir:hostdir`) or `hostdir:mnt_dir`, in addition to current working directory which will always be shared. # * `volumes_from`: container names or Ids to get volumes from # * `port`: port opened (option `-p`) # * `extra_args`: If there is any extra arguments you would like to pass to the `docker run` process (after you check the actual command of `docker run` of SoS # # Because of the different configurations of docker images, use of docker in SoS can be complicated. Please refer to http://vatlab.github.io/doc/user_guide/docker.html for details. # # + [markdown] kernel="SoS" tags=[] # ### Option `default_env` # + [markdown] kernel="SoS" tags=[] # Option `default_env` set environment variables **if they do not exist in the system**. The value of this option should be a dictionary with string keys and values. 
# + [markdown] kernel="SoS" tags=[] # For example, if we have a process that depends on an environmental variable `DEBUG`, you can set a default value for it # + kernel="SoS" tags=[] sh: default_env={'DEBUG': 'ON'} if [ "$DEBUG" == 'ON' ] then echo "Working in DEBUG mode" else echo "Working in production mode" fi # + [markdown] kernel="SoS" tags=[] # If users actually set `DEBUG` to something else, the option will not be applied and the shell script will run in production mode. # + [markdown] kernel="SoS" tags=[] # ### Option `env` # + [markdown] kernel="SoS" tags=[] # Option `env` sets environment variables **that override system variables defined in `os.environ`**. This option can be used to define `PATH` and other environmental variables for the action. Note that the effect of this option is limited to this action. # + kernel="SoS" tags=[] sh: env={'DEBUG': 'ON'} if [ "$DEBUG" == 'ON' ] then echo "Working in DEBUG mode" else echo "Working in production mode" fi # + [markdown] kernel="SoS" tags=[] # ### Option `input` # + [markdown] kernel="SoS" tags=[] # Although all actions accept parameter `input`, its usage varies among actions. Roughly speaking, **script-executing actions such as `run`, `bash` and `python` prepend the content of all input files to the script**; **report-generation actions `report`, `pandoc` and `RMarkdown` append the content of input files after the specified script**, and other actions usually ignore this parameter.
# + [markdown] kernel="SoS" tags=[] # For example, if you have defined a few utility functions that will be used by multiple scripts, you can define it in a separate file # + kernel="SoS" tags=[] # %save myfunc.py -f def myfunc(): print('Hello') # + [markdown] kernel="SoS" tags=[] # and include it in `python` actions as follows: # + kernel="SoS" tags=[] python: input='myfunc.py' myfunc() # + [markdown] kernel="SoS" tags=[] # Note that although SoS would check the existence of `input` files before executing the action, this option does not define any variable (such as `_input`) to be used in the script. # + [markdown] kernel="SoS" tags=[] # ### Option `output` # + [markdown] kernel="SoS" tags=[] # Similar to `input`, parameter `output` defines the output of an action, which can be a single name (or target) or a list of files or targets. SoS would check the existence of output target after the completion of the action. For example, # + kernel="SoS" tags=[] # %env --expect-error # %run [10] bash: output='non_existing.txt' # + [markdown] kernel="SoS" tags=[] # ### Option `stdout` # + [markdown] kernel="SoS" tags=[] # Option `stdout` is applicable to script-executing actions such as `bash` and `R` and redirect the standard out of the action to specified file. The value of the option should be a path-like object (`str`, `path`, etc), or `False`. The file will be opened in `append` mode so you will have to remove or truncate the file if the file already exists. If `stdout=False`, the output will be suppressed (redirect to `/dev/null` under linux). # + [markdown] kernel="SoS" tags=[] # For example, # + kernel="SoS" tags=[] !rm -f test.log sh: stdout='test.log' ls *.ipynb # + kernel="SoS" tags=[] !head -2 test.log # + [markdown] kernel="SoS" tags=[] # ### Option `stderr` # + [markdown] kernel="SoS" tags=[] # Option `stderr` is similar to `stdout` but redirects the standard error output of actions. `stderr=False` also suppresses stderr. 
# + [markdown] kernel="SoS" tags=[] # ### Option `tracked` # + [markdown] kernel="SoS" tags=[] # If an action takes a long time to execute and the step it resides in tends to be changed (for example, during the development of a workflow step), you might want to keep action-level signatures so that the action could be skipped if it has been executed before. # # Action-level signature is controlled by parameter `tracked`, which can be `None` (no signature), `True` (record signature), `False` (do not record signature), a string (filename), or a list of filenames. When this parameter is `True` or one or more filenames, SoS will # # 1. if specified, collect targets specified by parameter `input` # 2. if specified, collect targets specified by parameter `output` # 3. if one or more files are specified, collect targets from parameter `tracked` # # These files, together with the content of the first parameter (usually a script), will be used to create a step signature and allow the actions with the same signature to be skipped. # + [markdown] kernel="SoS" tags=[] # For example, suppose a time-consuming action `sh` produces output `test.txt` # + kernel="SoS" tags=[] # %run -s force [10] import time, os time.sleep(2) sh: output='test.txt', tracked=True touch test.txt print(os.path.getmtime('test.txt')) # + [markdown] kernel="SoS" tags=[] # Because of the `tracked=True` parameter, a signature will be created with `output` and it will not be re-executed even when the step itself is changed (from `sleep(2)` to `sleep(1)`). # + kernel="SoS" tags=[] # %run -s default [10] import time, os time.sleep(1) sh: output='test.txt', tracked=True touch test.txt print(os.path.getmtime('test.txt')) # + [markdown] kernel="SoS" tags=[] # Note that the signature can only be saved and used with an appropriate signature mode (`force`, `default` etc).
# + [markdown] kernel="SoS" tags=[] # ### Option `workdir` # + [markdown] kernel="SoS" tags=[] # Option `workdir` changes the current working directory for the action, and change back once the action is executed. The directory will be created if it does not exist. # + kernel="SoS" tags=[] bash: workdir='tmp' touch a.txt bash: ls tmp # + [markdown] kernel="SoS" tags=[] # ## Further reading # * [Script format of function calls](script_format.html)
src/user_guide/sos_actions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spatial joins # + [markdown] slideshow={"slide_type": "fragment"} # Goals of this notebook: # # - Based on the `countries` and `cities` dataframes, determine for each city the country in which it is located. # - To solve this problem, we will use the concept of a 'spatial join' operation: combining information of geospatial datasets based on their spatial relationship. # + # %matplotlib inline import pandas as pd import geopandas pd.options.display.max_rows = 10 # - countries = geopandas.read_file("zip://./data/ne_110m_admin_0_countries.zip") cities = geopandas.read_file("zip://./data/ne_110m_populated_places.zip") rivers = geopandas.read_file("zip://./data/ne_50m_rivers_lake_centerlines.zip") # ## Recap - joining dataframes # # Pandas provides functionality to join or merge dataframes in different ways, see https://chrisalbon.com/python/data_wrangling/pandas_join_merge_dataframe/ for an overview and https://pandas.pydata.org/pandas-docs/stable/merging.html for the full documentation. # To illustrate the concept of joining the information of two dataframes with pandas, let's take a small subset of our `cities` and `countries` datasets: cities2 = cities[cities['name'].isin(['Bern', 'Brussels', 'London', 'Paris'])].copy() cities2['iso_a3'] = ['CHE', 'BEL', 'GBR', 'FRA'] cities2 countries2 = countries[['iso_a3', 'name', 'continent']] countries2.head() # We added an 'iso_a3' column to the `cities` dataset, indicating a code of the country of the city. This country code is also present in the `countries` dataset, which allows us to merge those two dataframes based on the common column.
# # Joining the `cities` dataframe with `countries` will transfer extra information about the countries (the full name, the continent) to the `cities` dataframe, based on a common key: cities2.merge(countries2, on='iso_a3') # **But**, for this illustrative example, we added the common column manually, it is not present in the original dataset. However, we can still know how to join those two datasets based on their spatial coordinates. # ## Recap - spatial relationships between objects # # In the previous notebook [02-spatial-relationships.ipynb](./02-spatial-relationships-operations.ipynb), we have seen the notion of spatial relationships between geometry objects: within, contains, intersects, ... # # In this case, we know that each of the cities is located *within* one of the countries, or the other way around that each country can *contain* multiple cities. # # We can test such relationships using the methods we have seen in the previous notebook: france = countries.loc[countries['name'] == 'France', 'geometry'].squeeze() cities.within(france) # The above gives us a boolean series, indicating for each point in our `cities` dataframe whether it is located within the area of France or not. # Because this is a boolean series as result, we can use it to filter the original dataframe to only show those cities that are actually within France: cities[cities.within(france)] # We could now repeat the above analysis for each of the countries, and add a column to the `cities` dataframe indicating this country. However, that would be tedious to do manually, and is also exactly what the spatial join operation provides us. 
# # *(note: the above result is incorrect, but this is just because of the coarse-ness of the countries dataset)* # + [markdown] slideshow={"slide_type": "slide"} # ## Spatial join operation # # <div class="alert alert-info" style="font-size:120%"> # <b>SPATIAL JOIN</b> = *transferring attributes from one layer to another based on their spatial relationship* <br><br> # # # Different parts of this operations: # # <ul> # <li>The GeoDataFrame to which we want add information</li> # <li>The GeoDataFrame that contains the information we want to add </li> # <li>The spatial relationship we want to use to match both datasets ('intersects', 'contains', 'within')</li> # <li>The type of join: left or inner join</li> # </ul> # # </div> # + [markdown] slideshow={"slide_type": "-"} # In this case, we want to join the `cities` dataframe with the information of the `countries` dataframe, based on the spatial relationship between both datasets. # # We use the [`geopandas.sjoin`](http://geopandas.readthedocs.io/en/latest/reference/geopandas.sjoin.html) function: # - joined = geopandas.sjoin(cities, countries, op='within', how='left') joined joined['continent'].value_counts() # ## The overlay operation # # In the spatial join operation above, we are not changing the geometries itself. We are not joining geometries, but joining attributes based on a spatial relationship between the geometries. This also means that the geometries need to at least overlap partially. # # If you want to create new geometries based on joining (combining) geometries of different dataframes into one new dataframe (eg by taking the intersection of the geometries), you want an **overlay** operation. 
africa = countries[countries['continent'] == 'Africa'] africa.plot() cities['geometry'] = cities.buffer(2) geopandas.overlay(africa, cities, how='difference').plot() # <div class="alert alert-info" style="font-size:120%"> # <b>REMEMBER</b> <br> # # <ul> # <li>**Spatial join**: transfer attributes from one dataframe to another based on the spatial relationship</li> # <li>**Spatial overlay**: construct new geometries based on spatial operation between both dataframes (and combining attributes of both dataframes)</li> # </ul> # # </div>
03-spatial-joins.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# +
# %%writefile laminarflow/_cruisecontrol.py
import tensorflow as tf
import pickle as pkl


class _tf_temp():
    # Pickleable stand-in for a TensorFlow object; keeps only its
    # (UUID-stripped) name so the live object can be looked up on unpickle.
    def __init__(self, name):
        self.name = name


class _method_temp():
    # Pickleable stand-in for a bound method: the owner's name plus the
    # method's name.
    def __init__(self, name, method_name):
        self.name = name
        self.method_name = method_name


class CruiseControl():
    """
    Laminar Flow's Cruise Control method automates several time saving tasks
    for Tensorflow, allowing for quicker prototyping and testing. Among these
    abilities is automatic saving in an encapsulated tensorflow session. This
    makes it so you don't need to keep open a session to keep the values.
    However, it does require you to use `tf.get_variable` to define variables
    instead of `tf.Variable` directly. On top of that, automatic initialization
    of uninitialized variables allows for this structure to be dynamically
    updated and have no cat and mouse hunt for errors.

    Pickling and unpickling has been implemented in this class under the strict
    conditions that all non-variable tensors used in args to the `add` function
    are the result of previous functions input to the `add` function and all
    `function`s used have to either be functions or methods bound to results of
    previous calls to add. That is, when calling `add`, only use direct
    attributes of that CruiseControl instance or variables controlled by it and
    only use functions or methods controlled by that instance as well.

    Since TensorFlow objects are not pickleable directly, args and kwargs to
    `add` have to be easily sanitized, unless you want to do it yourself. More
    complex sanitization is possible, if somewhat difficult. To sanitize
    yourself, currently you must create the magic functions in whatever you
    pass to `add` if it isn't automatically taken care of already.

    More documentation to follow.

    This all needs to be reworded to make more sense.
    """

    #Constructor
    def __init__(self, save_file_name, unique_identifier = None):
        # save_file_name: path used by save()/load() for pickled variable values.
        # unique_identifier: optional variable-scope name for this instance;
        # defaults to this object's id() in hex.
        #collect_variables()
        self._vars = set()       # variables created through add()
        self._var_pkl = list()   # pickleable [name, func, args, kwargs] records, in add() order
        self._file_name = save_file_name
        self._uuid = unique_identifier if unique_identifier else hex(id(self))[2:]
        self._g = tf.Graph()
        with self._g.as_default():
            # Probe the scope by creating a dummy "initialized" variable; a
            # name clash raises, and the uuid is retried with "0" appended.
            # An explicitly supplied identifier is never retried.
            while True:
                try:
                    with tf.variable_scope(self._uuid):
                        with tf.variable_scope(self._uuid):
                            tf.get_variable("initialized",shape=[1])
                    break
                except:
                    if unique_identifier is not None:
                        raise NameError("Error: {} is an invalid unique identifier.".format(self._uuid))
                    self._uuid += "0"
        self._sess = None
        self._opened = 0    # nesting depth of `with self:` blocks

    #Structure
    def add(self, name, function, *args, **kwargs):
        """Build `function(*args, **kwargs)` inside this instance's graph and
        bind the result to `self.<name>`. The call is also round-tripped through
        sanitize() and recorded so the whole construction sequence is
        pickleable. Returns self, so calls can be chained."""
        self._sess = None    # graph is about to change; cached session is stale
        if hasattr(self, name):
            raise AttributeError("Error: {} already defined.".format(name))
        current = set(self._g.get_collection("variables"))
        #sanitize
        sanitized_args = [self.sanitize(arg) for arg in args]
        sanitized_kwargs = {key:self.sanitize(value) for key,value in kwargs.items()}
        sanitized_func = self.sanitize(function)
        #Test.
        try:
            pkl.dumps([sanitized_func, sanitized_args, sanitized_kwargs])
        except:
            raise ValueError("Error: Unable to sanitize.")
        #unsanitize
        unsanitized_func = self.unsanitize(function)
        unsanitized_args = [self.unsanitize(arg) for arg in args]
        unsanitized_kwargs = {key:self.unsanitize(value) for key,value in kwargs.items()}
        with self._g.as_default():
            with tf.variable_scope(self._uuid):
                with tf.variable_scope(name):
                    setattr(self, name, unsanitized_func(*unsanitized_args, **unsanitized_kwargs))
        # Record only the variables this particular call created.
        self._vars |= set(self._g.get_collection("variables")) - current
        self._var_pkl.append([name, sanitized_func, sanitized_args, sanitized_kwargs])
        if isinstance(getattr(self, name), tf.train.Optimizer):
            #Initialize slots?
            pass
        return self

    def last_added(self):
        # Return the most recently add()ed attribute, or None if nothing was
        # added yet.
        try:
            return getattr(self, self._var_pkl[-1][0])
        except:
            return None

    #Sanitization
    def removeUUIDandColon(self, name):
        # Strip the leading uuid scope and the trailing ':N' suffix from a TF
        # name; any other uuid occurrence becomes the '{0}' placeholder so it
        # can be re-substituted by unsanitize() on a (possibly new) instance.
        #start = 3 #len("{0}")
        name = name[len(self._uuid):].replace(self._uuid, "{0}")
        #restart = var.name[start:].find('/') + start + 1
        #restart = name.find('/') + 1
        #return name[restart:name.rfind(":")]
        return name[name.find('/') + 1:name.rfind(":")]

    def sanitize(self, obj):
        """Replace TF objects and bound methods with pickleable placeholders;
        anything already pickleable passes through unchanged."""
        if hasattr(obj, "__self__"):
            # Bound method: identify its owner by add()-name first, then by
            # managed-variable identity.
            method_self = getattr(obj, "__self__")
            for name,_,_,_ in self._var_pkl:
                if getattr(self, name) is method_self:
                    return _method_temp(name, obj.__name__)
            # NOTE(review): self._var is never defined (the constructor sets
            # self._vars) -- this loop raises AttributeError if reached; confirm.
            for var in self._var:
                if var is method_self:
                    return _method_temp(self.removeUUIDandColon(var.name), obj.__name__)
        try:
            pkl.dumps(obj)
            return obj
        except:
            return _tf_temp(self.removeUUIDandColon(obj.name))

    def unsanitize(self, obj):
        """Inverse of sanitize(): resolve placeholders back to live objects --
        first as graph variables via tf.get_variable, then as attributes of
        this instance."""
        if isinstance(obj, _tf_temp):
            try:
                return tf.get_variable(obj.name.format(self._uuid))
            except:
                end = obj.name.find('/')
                return getattr(self, obj.name[:end])
        if isinstance(obj, _method_temp):
            try:
                return getattr(tf.get_variable(obj.name.format(self._uuid)), obj.method_name)
            except:
                return getattr(getattr(self, obj.name.format(self._uuid)), obj.method_name)
        return obj

    #Features
    def set_file(self, save_file_name):
        # Change the pickle file used by save()/load().
        self._file_name = save_file_name
    setFile = set_file   # camelCase alias

    #Values
    def save(self, save_file_name = None):
        """Pickle (name, value) pairs of all initialized managed variables.
        Relies on .eval(), so it must run inside an active session."""
        variables = []
        for i in self._vars:
            if tf.is_variable_initialized(i).eval():
                try:
                    variables.append((self.removeUUIDandColon(i.name),i.value().eval()))
                except:
                    #TODO: Don't do this. Limit exceptions to known expected ones.
                    pass
        if save_file_name is None:
            save_file_name = self._file_name
        # NOTE(review): opens self._file_name, so an explicit save_file_name
        # argument is computed but never used -- confirm intent.
        with open(self._file_name, "wb") as file:
            pkl.dump(variables, file)

    def load(self, save_file_name = None):
        """
        Must be done from within a with block.

        Restores pickled values into same-named variables; a missing or
        unreadable pickle file, and any variable that no longer matches, are
        skipped silently.
        """
        try:
            if save_file_name is None:
                save_file_name = self._file_name
            # NOTE(review): like save(), this opens self._file_name and ignores
            # an explicit save_file_name; transfer_from() depends on the
            # parameter, so this looks like a bug -- confirm.
            with open(self._file_name, "rb") as file:
                variables = pkl.load(file)
        except:
            #TODO: Don't do this. Limit exceptions to known expected ones.
            #print("Unable to load pkl file.")
            return
        with tf.variable_scope(self._uuid, reuse=True):
            for i in variables:
                try:
                    tf.get_variable(i[0].format(self._uuid)).assign(i[1]).eval(session=self._sess)
                except ValueError as msg:
                    #print(str(msg))
                    pass

    def transfer_from(self, save_file_name):
        self.load(save_file_name) #yes, this is just an alias for readability sake, and to force a file name.

    #Serialization
    def __reduce__(self):
        # Rebuild from the save-file name, then replay the add() log as state.
        return (CruiseControl, (self._file_name,), self._var_pkl)

    def __setstate__(self, state):
        # Replay every recorded add() call to reconstruct the graph.
        for i in state:
            self.add(i[0],i[1],*i[2],**i[3])
        return self

    '''
    #Initialization
    def initialize_variables(self, specifically=None):
        uninitialized = []
        start = len(self._uuid)
        with tf.variable_scope(self._uuid, reuse=True):
            for name in self._sess.run(tf.report_uninitialized_variables(self._vars)):
                name = name.decode("utf-8")
                restart = name[start:].find('/') + start + 1
                end = name.rfind(":")
                if end == -1:
                    end = None
                print(name)
                print(restart)
                print(end)
                print(name[restart:end])
                uninitialized.append(tf.get_variable(name[restart:end]))
        self._sess.run(tf.initialize_variables(uninitialized))
    '''

    #Functionality
    @property
    def sess(self):
        # Lazily (re)create the Session for this graph; also refreshes the
        # global-initializer op consumed by __enter__.
        if self._opened:
            return self._sess
        with self._g.as_default():
            self._init = tf.global_variables_initializer()
            self._sess = tf.Session(graph=self._g)
        return self._sess

    def __enter__(self):
        # Re-entrant: only the outermost `with` opens the session, runs the
        # initializer, and loads saved values back in.
        sess = self.sess
        self._opened += 1
        if self._opened == 1:
            sess.__enter__()
            #self.initialize_variables() # trying to be smart about initialization seems
            # to be a bad idea for some reason?
            self._sess.run(self._init) # We're just going to load values back anyway.
            self.load()
            return self
        else:
            return self

    def __exit__(self, *args):
        # Save on every exit; only the outermost `with` closes the session.
        self.save()
        self._opened -= 1
        if self._opened == 0:
            return self._sess.__exit__(*args)

    def run(self, *args, **kwargs):
        """
        This provides Session.run access to the CruiseControlled session.
        """
        with self:
            return self._sess.run(*args, **kwargs)

# +
# %%writefile laminarflow/_filedescriptors.py
from laminarflow import CruiseControl

# These will basically just read the file as said type,
# and the result will be dictlike already, so we can
# just pass it in appropriately.
layer_types = {}


def loadDictlike(dct):
    """
    Given a network description as a list of dictionaries, constructs a
    CruiseControl wrapped network with the given definition.

    The values are translated using function pointers in layer_types, which can
    be modified directly to give new layer support. Helper functions will be
    provided in future versions to speed up the process.

    The naming scheme of layers and description parameters is inspired by
    Caffe's prototxt files in hopes of being able to directly load Caffe's
    Modelzoo files, with trained parameters and all.
    """
    pass


def loadJSON(filename):
    # Placeholder: will read JSON into a dictlike description.
    pass


def loadYAML(filename):
    # Placeholder: will read YAML into a dictlike description.
    pass
# -

# %%writefile laminarflow/__init__.py
from laminarflow._cruisecontrol import CruiseControl
#from laminarflow._filedescriptors import loadDictlike
#from laminarflow._filedescriptors import loadJSON
#from laminarflow._filedescriptors import loadYAML
#from laminarflow._filedescriptors import layer_types

# +
# %%writefile setup.py
import os
from setuptools import setup

# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name = "LaminarFlow", version = "1.3.0.0", author = "<NAME>", author_email = "<EMAIL>", description = ("A meta class to wrap and automate TensorFlow."), license = read("LICENSE"), keywords = "TensorFlow", #url = "http://laminarflow.rtfd.org/", packages=['laminarflow'], long_description=read('README.md'), install_requires=['pyyaml'], classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", #Hopefully. "Programming Language :: Python", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Utilities", ], ) # + import tensorflow as tf def clu(x, outdim, indim=None): if indim: try: indim = list(indim) except: indim = [indim] else: indim = x.get_shape()[1:].as_list() try: outdim = list(outdim) except: outdim = [outdim] C = tf.get_variable("C", initializer=tf.truncated_normal(indim+outdim, stddev=0.1)) b = tf.get_variable("b", initializer=tf.truncated_normal(outdim, stddev=0.1)) cluKernel = -tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(x,len(indim)+1), tf.expand_dims(C,0))), 1) + b cluKernel._C = C cluKernel._b = b return cluKernel def lin(x, shape): W = tf.get_variable("W", initializer=tf.truncated_normal(shape, stddev=0.1)) b = tf.get_variable("b", initializer=tf.truncated_normal(shape[1:], stddev=0.1)) linKernel = tf.matmul(x,W) + b linKernel._W = W linKernel._b = b return linKernel #Convolution and Maxpool shortcuts def conv2d(x, shape): W = tf.get_variable("W", initializer=tf.truncated_normal(shape, stddev=0.1)) b = tf.get_variable("b", initializer=tf.constant(0.1, shape=shape[-1:])) convKernel = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + b convKernel._W = W convKernel._b = b return convKernel def max_pool_2x2(x): return 
tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def cross_entropy(y,yhat): return -tf.reduce_sum(y*tf.log(yhat + 1e-9)) #Without epsilon, it crashes. def accuracy_test(y,yhat): correct_prediction = tf.equal(tf.argmax(yhat,1), tf.argmax(y,1)) return tf.reduce_mean(tf.cast(correct_prediction, "float")) # + from laminarflow import CruiseControl test = CruiseControl("test") test.add('input',tf.placeholder, "float", shape=[None, 784]) #test.add('expected_output', tf.placeholder, "float", shape=[None, 10]) test.add('h', lin, test.input,[784,10]) test.add('output', tf.nn.softmax, test.h) #test.add('optim', tf.train.AdadeltaOptimizer, 1e-4) #test.add('loss', cross_entropy, test.expected_output, test.output) #test.add('train_step', test.optim.minimize, test.loss) # + import numpy as np with test as sess: print(test.output.eval({test.input:np.ones([1,784])})) print(test.output.eval({test.input:np.ones([1,784])})) with test as sess: print(test.output.eval({test.input:np.ones([1,784])})) # + import pickle as pkl test2 = pkl.loads(pkl.dumps(test)) with test2 as sess: print(test2.output.eval({test2.input:np.ones([1,784])})) # - test = CruiseControl("test") test.add('input',tf.placeholder, "float", shape=[None, 784]) test.add('expected_output', tf.placeholder, "float", shape=[None, 10]) test.add('h', lin, test.input,[784,10]) test.add('output', tf.nn.softmax, test.h) test.add('optim', tf.train.AdadeltaOptimizer, 1e-4) test.add('loss', cross_entropy, test.expected_output, test.output) test.add('train_step', test.optim.minimize, test.loss) #testing test._initialized = set(x for x in test._vars) with test as sess: print(test.output.eval({test.input:np.ones([1,784])})) print(test.run(test.output,feed_dict={test.input:np.ones([1,784])})) print(test.optim._zeros_slot(test.loss, 'accum_update', test.optim._name)) test.optim.get_slot(test.loss, 'accum_update').name print([i.name for i in tf.trainable_variables()]) test._var_pkl[6][1].method_name # + import 
pickle as pkl test2 = pkl.loads(pkl.dumps(test)) with test2 as sess: print(test2.output.eval({test2.input:np.ones([1,784])})) # - test.add('relu', tf.nn.relu, test.h) test.add('h2', lin, test.relu, [10,10]) test.add('output2', tf.nn.softmax, test.h2) test.add('loss2', cross_entropy, test.expected_output, test.output2) test.add('train_step2', test.optim.minimize, test.loss2) with test as sess: print(test.output.eval({test.input:np.ones([1,784])})) with test as sess: print(test.output2.eval({test.input:np.ones([1,784])})) test.optim._get_or_create_slot(test.loss2,'accum') tf.is_variable_initialized(t).eval() import tensorflow as tf with tf.variable_scope("test2"): test = {} test['in'] = tf.placeholder("float", shape=[1,2]) test['out'] = tf.placeholder("float", shape=[1,3]) test['W'] = tf.get_variable("W", shape=[2,3]) test['loss'] = tf.nn.l2_loss(tf.matmul(test['in'], test['W']) - test['out']) test['optim'] = tf.train.AdadeltaOptimizer() test['train_step'] = test['optim'].minimize(test['loss']) test['train_step'].name tf.global_variables() [i.name for i in tf.global_variables()] sess = tf.Session() sess.run(tf.global_variables_initializer()) with tf.variable_scope("test2", reuse=True): W = tf.get_variable("W") #WAda = tf.get_variable("test2/W/Adadelta:0") with tf.variable_scope("test2", reuse=True): with tf.variable_scope("W", reuse=True): WAda1 = tf.get_variable("Adadelta_1:0") #Variable test2/test2/W/Adadelta_1:0 [...] 
was not created with tf.get_variable() "{0} is {0}".format("this") len("{0}") test.loss._shape_as_list() type(test.output) set(dir(test.input)) [x for x in test.output.op.inputs] dir(test.output.graph) test.output.graph.get_tensor_by_name(test.input.name) test.output.graph.is_feedable(test.output) [i.name for i in test._vars] tf.is_variable_initialized(test.input) test._sess.graph sess = tf.Session() with sess: test.expected_output.eval(feed_dict={test.expected_output:np.ones([1,10])}) tf.Graph().get_tensor_by_name(test.input.name) tf.get_default_graph().get_tensor_by_name(test.h.name) g = tf.get_default_graph() g.unique_name(test.input) g = tf.Graph() g.as_default() with g.as_default(): a = tf.placeholder('float',shape=[None,10]) b = tf.placeholder('float',shape=[10,1]) out = a*b with tf.Session(graph=g): print(out.eval(feed_dict={a:np.ones([10,10]),b:[[1],[1],[1],[1],[1],[1],[1],[1],[1],[1]]}))
LaminarFlow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Global TF Kernel (Python 3)
#     language: python
#     name: global-tf-python-3
# ---

# Logistic-regression-style classifier (a single sigmoid unit) on the
# Copenhagen housing-conditions survey, predicting whether a respondent
# has contact with their neighbours.

# +
import keras
import numpy as np
import pandas as pd
from keras.layers import Input, Dense
from keras.models import Model, Sequential
# -

# Whitespace-separated table; raw string avoids the invalid "\s" escape warning.
df = pd.read_csv("../housing-conditions-in-copenhagen/data.csv", sep=r"\s+")

df.head()

df.describe()

# One-hot encode the three categorical columns, dropping the originals.
df_new = pd.concat([df, pd.get_dummies(df["housing"], prefix="housing")], axis=1).drop(["housing"], axis=1)
df_new = pd.concat([df_new, pd.get_dummies(df["influence"], prefix="influence")], axis=1).drop(["influence"], axis=1)
df_new = pd.concat([df_new, pd.get_dummies(df["satisfaction"], prefix="satisfaction")], axis=1).drop(["satisfaction"], axis=1)

# Remap the target to 0/1: original value 1 becomes 0, everything else 1.
df_new["contact_with_neighbours"] = df_new["contact_with_neighbours"].map(lambda v: 0 if v == 1 else 1)
df_new.head()

y = df_new["contact_with_neighbours"].values.reshape(-1, 1)
X = df_new.drop(["contact_with_neighbours"], axis=1).values
y.shape, X.shape

# +
# Shuffle, then hold out the last 20% of rows for validation.
# Generalized: derive the sample count from the data instead of the
# hard-coded 72 (the dataset's current row count), so the split still
# works if the input file changes.
n_samples = X.shape[0]
indices = np.arange(n_samples)
np.random.shuffle(indices)
num_validation_samples = int(0.2 * n_samples)
y = y[indices]
X = X[indices]
X_train = X[:-num_validation_samples]
y_train = y[:-num_validation_samples]
X_val = X[-num_validation_samples:]
y_val = y[-num_validation_samples:]
# -

X_train.shape, y_train.shape, X_val.shape, y_val.shape

# +
model = Sequential()
model.add(Dense(1, activation='sigmoid', input_dim=X.shape[1]))

rmsprop = keras.optimizers.RMSprop(lr=0.01)
model.compile(optimizer=rmsprop, loss='mse', metrics=['mse'])

early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                               patience=5, verbose=0, mode='auto')

# BUG FIX: the original called model.fit(X, y, ...), training on the FULL
# dataset — including the rows held out in (X_val, y_val) — so the
# validation loss that drives early stopping was computed on data the
# model had already seen (leakage). Train only on the training split.
model.fit(X_train, y_train,
          epochs=300,
          validation_data=(X_val, y_val),
          callbacks=[early_stopping])
# -
python3/notebooks/meetup/.ipynb_checkpoints/logistic-regression-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Clean and explore FEC Data and Census Bureau data # ## Clean and explore FEC Data import pandas as pd import numpy as np import matplotlib.pyplot as plt import datetime import sqlite3 from sklearn.preprocessing import LabelEncoder # + df = pd.read_csv('raw_data/fec-pol-contr.csv', index_col=False, na_values=[99999]) print('Num of candidate ids:',len(df['cand_id'].unique()),'\n', 'Num of committee ids:', len(df['cmte_id'].unique()),'\n', 'Num of candidate names:', len(df['cand_nm'].unique()),'\n', 'Shape:', df.shape) # - cand_list = ['<NAME> ', '<NAME>', '<NAME>', '<NAME>'] df = df.loc[df.cand_nm.isin(cand_list)] print('Num of candidates:', len(df.cand_nm.unique())) print(df.form_tp.value_counts()) # drop the refunds (SB28A) df = df[df.form_tp != 'SB28A'] print(df.form_tp.value_counts()) print(df.contbr_city.value_counts()) # change bolling afb to washington indx = df.loc[(df.contbr_city == 'BOLLING AFB')].index for ind in list(indx): df.contbr_city[ind] = 'WASHINGTON' print(df.contbr_city.value_counts()) df.loc[(df.contbr_zip.isna() != False)] df.loc[df.contbr_nm == 'THOMPSON, <NAME>', 'contbr_zip'] = 20001 df['contbr_zip'] = df.contbr_zip.astype('str') df.contbr_zip = df['contbr_zip'].apply(lambda x: x[0:5]) print('Unique zip codes:', len(df.contbr_zip.unique())) print('Null zip codes:', df.contbr_zip.isna().sum()) print(df.contbr_zip.unique()) # (df.contbr_zip == '11217')].index) df = df[df.contbr_zip !='11217'] len(df.contbr_zip.unique()) # filter out negative values df = df.loc[df.contb_receipt_amt > 0] print('Min contribution: $', df.contb_receipt_amt.min()) # filter out values over legal limit of $2800 df = df.loc[df.contb_receipt_amt <= 2800] print('Max contribution: $', df.contb_receipt_amt.max()) df.contb_receipt_amt.describe() # 
convert receipt date to datetime format and then add second column called converted_date df.contb_receipt_dt = pd.to_datetime(df.contb_receipt_dt) df.contb_receipt_dt.describe() df['converted_date'] = df.contb_receipt_dt.map(lambda x: 100*x.year + x.month) df['payment_yr'] = df.contb_receipt_dt.map(lambda x: x.year) df.columns df.head(3) df.sort_values(by=['payment_yr']).head() df_grouped_yr = df.groupby(['cand_nm','payment_yr']).mean() df_grouped_yr[['contb_receipt_amt']] df.shape # filter out 2016, 2017, and 2018 df = df[df.payment_yr >= 2019] df.shape # tran_id is not a unique value df[df.duplicated(subset = 'tran_id', keep=False)] df = df.drop(['cand_id', 'file_num', 'contbr_nm', 'contb_receipt_dt', 'form_tp', 'election_tp', 'contbr_city', 'memo_cd', 'receipt_desc', 'contbr_st', 'memo_text', 'cmte_id', 'tran_id', 'contbr_employer', 'contbr_occupation', 'payment_yr' ], axis=1) df.shape # graph average contribution by candidate plt.scatter(df.contb_receipt_amt, df.cand_nm) plt.title('2019 Individual Average Contribution') plt.xlabel('Dollars') plt.show() df_grouped_cand = df.groupby('cand_nm').mean() df_grouped_cand['contb_receipt_amt'] df.cand_nm.value_counts() df.groupby('cand_nm').min()['converted_date'] # contribution amount by month for each candidate df.groupby(['cand_nm','converted_date']).mean()['contb_receipt_amt'] df.head() # ## Join US Census data to FEC data conn = sqlite3.connect('') cur = conn.cursor() census_data = pd.read_csv('raw_data/census-bureau-acs.csv', header =0, index_col=0, usecols=['GEO.id', 'GEO.id2','HC02_EST_VC02'], skiprows=[1,2],na_values='-' ) census_data.to_sql('INCOME', conn, if_exists = 'append') df_census = pd.DataFrame(census_data) df_census = df_census.rename(columns={'GEO.id2': 'zip', 'HC02_EST_VC02': 'income'}) len(df_census.income.value_counts()) print('Census data types:', '\n',df_census.dtypes, '\n') print('FEC data types:','\n', df.dtypes) df.contbr_zip = df.contbr_zip.astype(int) df_merged = df.merge(df_census, 
how='left', left_on='contbr_zip', right_on='zip', suffixes=('_left','_right') ) print(df_merged.isna().sum()) print(df_merged.dtypes) pd.to_numeric(df_merged.income); print(df_merged.shape) print(df_merged.isna().sum()) df_merged.head() # how many income values are each candidate missing? print(len(df_merged.loc[(df_merged.cand_nm == '<NAME> ') & (df_merged.zip.isna() == True) ])) print(len(df_merged.loc[(df_merged.cand_nm == '<NAME>') & (df_merged.zip.isna() == True) ])) print(len(df_merged.loc[(df_merged.cand_nm == '<NAME>') & (df_merged.zip.isna() == True) ])) print(len(df_merged.loc[(df_merged.cand_nm == '<NAME>') & (df_merged.zip.isna() == True) ])) nulls = df_merged.loc[df_merged.income.isna() == True].index len(nulls) # drop null income values, most of them went to <NAME> df_merged = df_merged.drop(index=nulls) len(df_merged) print(df_merged.shape) df_merged.cand_nm.value_counts() df_merged.drop(columns=['zip'], inplace=True) print(df_merged.shape) df_merged.head() le = LabelEncoder() df_merged['target'] = None df_merged.target = le.fit_transform(df_merged.cand_nm) df_merged.head() df_merged.dtypes df_merged.to_csv('clean_data/2019-fec-contr-census.csv')
ntbk-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Using the PSQL Database for Obiwan import psycopg2 import sys import json with open('config.json') as f: conf = json.load(f) conn_str = "host={} dbname={} user={} password={}".format(conf['host'], conf['database'], conf['user'], conf['passw']) conn = psycopg2.connect(conn_str)
doc/nb/db_tools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logist Regression(Application) # **Gradient Descent algorithm:** # # 输入:样本数据$T=\{(x_1,y_1),(x_2,y_2),...,(x_N,y_N)\},y_i\in\{0,1\}$,梯度步长$\alpha$,迭代停止阈值$\epsilon$ # # 输出:样本预测labels. # # (1) 初始化参数$W,b$,两者都是向量形式,一般定义为一个很小的数字. # # (2) 计算线性值:$Z=W\cdot X+b$ # # (3) sigma函数作用:$A=sigmoid(Z)$ # # (4) 计算损失函数:$loss(w,b) = -\frac{1}{N}\cdot \sum_{i=1}^{N}[y_ilog(\hat{y_i})+(1-y_i)log(1-\hat{y})]$ # # (4.1) 计算$dW,db$:$dW=\frac{1}{m}\cdot X(A-Y),db=\frac{1}{m}\cdot \sum{(A-Y)}$ # # (5) 更新参数值:$W=W - \alpha \cdot dW,b=b-\alpha \cdot db$ # # (5.1) L1:$W=W - \alpha \cdot (dW+\lambda sign(W)),b=b-\alpha \cdot db$ # # (5.2) L2:$W=W - \alpha \cdot (dW+\lambda W),b=b-\alpha \cdot db$ # # (6) 重复(2)-(5)直到损失函数在阈值$\epsilon$内.退出迭代 import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import h5py # ### 1 Load data set # # 这里的数据集使用iris数据集. def loadData_iris(): """ Returns: -------- X:have two dimensions (sepal length and width). Y:labels. """ iris = datasets.load_iris() X = iris.data[:100, :2] Y = iris.target[:100] return X,Y X,Y = loadData_iris() # #### 1.1 绘制原图查看 plt.scatter(X[:,0],X[:,1],c=Y) # #### 1.2 Split data set # # 我们将数据集划分为训练样本和测试样本,比例为: # # 训练样本:测试样本 = 0.8:0.2 X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42) # ### 2 Build Logistic Regression Model # # 在构建模型之前,我们需要注意,用Python实现算法的过程中,一定要避免多次for-loop,因为Python的效率比较低,多次使用for-loop会使得代码执行效率非常低下.所以要养成在实现算法的时候避免多次for-loop.所以我们能使用向量的形式就不要使用for-loop或者while-loop. # # #### 2.1 Sigmoid function # # $\sigma(z)=\frac{1}{1+e^{-z}}$ def sigmoid(Z): return 1./(1. 
+ np.exp(-Z)) # #### 2.2 initialization parameters # # 这里在初始化参数时需要有几个注意的地方: # # (1) 参数$w,b$的初始值不可过大,这样会使得参数初始值处于sigmoid函数的平缓地带.所以我们会使用 # # ```python # W = np.random.rand(1,n)``` # # (2) 我们需要设置随机种子,这样可以保证每次运行的初始结果是一样的.这样做的好处是: # # 我们在进行不同超级参数$\alpha,\lambda,Iter$调节的时候,不会因为初始值的改变而使得结果不可信. def initial(n): """ initialization parameters Returns: ------- W:weights b:bias """ np.random.seed(1) W = np.random.rand(1,n) b = np.zeros((1,1)) return W,b # #### 2.3 Build LR # # 构建基本的Logistics Regression的时候,我们需要看看各个参数的维度以及损失函数 # # **参数维度:** # # 假设我们的训练样本$X$的维度是(m,n),标签的维度是(1,m)其中m:样本数量,n:特征数量 # # 那么我们经过参数$W,b$预测出来的值记为$A$则: # # $A$:(1,m) # # $Z$:(1,m),因为$sigmoid(Z)$并不改变矩阵维度 # # $Z=WX+b$:所以我们令$W$的维度为(1,n),这样$(1,n)X(m,n)^{T} $可以得到(1,m).$b$为常数. # # # **Ps:** # # # - 其实这里的初始$W,b$维度可以不用这样令.无论是$W$转置还是$X$只要最后能得到(1,m)的形式就行,我这里这样令维度是对接神经网络,方便记忆. # # - 特别地:在python中形状是(m,)就相当于维度(1,m) # # - 因为Numpy的广播机制存在,所以$WX$得到的结果可以和一个常数相加 # def LR(X_train,y_train,alpha,Iter,is_print=False): """ Implementation Logistics Regression. Parameters: ---------- X_train: training set y_train: labels alpha: learning rate Iter: Iter is_print: is print loss value. Return: W: Best weights b: Best bias cost: loss value. """ m,n = X_train.shape W,b = initial(n) cost = [] for iter_ in range(Iter): Z = np.dot(W,X_train.T) + b A = sigmoid(Z) loss = - np.sum((y_train*np.log(A)+(1-y_train)*np.log(1-A)))/m cost.append(loss) if is_print and iter_ % 10==0: print("Iter:{},loss:{}".format(iter_,loss)) dZ = A - y_train dW = np.dot(dZ,X_train)/m assert dW.shape == W.shape # Make sure the shape is rigth. db = np.sum(dZ,keepdims=True) /m assert db.shape == b.shape # Update parameters. W = W - alpha * dW b = b - alpha * db return W,b,cost W,b,cost = LR(X_train,y_train,0.1,100,is_print=True) plt.plot(cost);plt.title('Cost value'); # 可以看出在迭代过程中损失是在下降的,这是符合正常逻辑的.一般如果参数设置的比较好,你会发先你的损失函数图会呈现类似于上图的情况 # # #### 2.4 Build Predict function # # 现在构建预测函数,并且返回正确率. # # 另外我们说过,在sigmoid函数中如果值大于0.5,我们将其分为第一类,小于0.5我们可以将其分为第二类. 
# # 所以我们可以使用四舍五入函数 # # ```python # # np.round(A)``` def Predict(X_test,y_test,W,b): """ Predict labels. Prameters: ---------- X_test:testing set y_test:labels W:weights b:bias Return: ------ correct_rate: correct rate. """ Z = np.dot(W,X_test.T) + b A = sigmoid(Z) predict_y = np.round(A) correct_rate = np.sum((predict_y==y_test))/y_test.shape[0] return correct_rate correct_rate = Predict(X_test,y_test,W,b) print('The test set correct rate is:',correct_rate) correct_rate = Predict(X_train,y_train,W,b) print('The train set correct rate is:',correct_rate) # 可以看出无论是测试样本还是训练样本的正确率都可以接受.所以该模型现阶段是可行的.也没有发生过拟合或者欠拟合的现象. # # **Ps:** # # (1) 实际上在寻找最优参数$W,b$的过程中,我们是不知道该如何调节学习率和迭代次数来保证找到的参数是优秀的. # # (2) 通常学习率不可以过高,学习率没有其他的办法去预测,只有靠经验来预估. # # (3) 一个比较好的方法就是交叉验证,对于不同的学习率和迭代次数所参数的测试、训练样本正确率进行比对,选择较好的学习率和迭代次数. # ### 3 Scikit-learn LogisticRegression # # 同样我们也可以使用scikit-learn中的LogisticRegression来拟合数据 # # **Ps:** # # scikit-learn中对于LR有两个函数: # # (1) [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression):即我们普通的LR # # (2) [LogisticRegressionCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html):带交叉验证的LR # # logreg = LogisticRegression() logreg.fit(X_train,y_train) logreg_predict = logreg.predict(X_test) # + accurate = (logreg_predict == y_test).sum() / y_test.shape[0] print('the test set accurate is:',accurate) # - # ### 4 Regularization # # 如果你发现在拟合过程中,无论怎样调节超级参数$learning rate,Iter,C$都无法出现很好的情况,那么除了[特征工程](https://www.zhihu.com/question/29316149)之外,你就需要考虑正则化. # # 其中C是惩罚项,就是我们正则化式子中的$\lambda$ # # # (1) $L_1$ Regular:$loss(W) = -[Ylog(\hat{Y})+(1-Y)log(1-\hat{Y})] + \lambda {||W||}_1$ # # (2) $L_1$ Regular:$loss(W) = -[Ylog(\hat{Y})+(1-Y)log(1-\hat{Y})] + \frac{\lambda}{2} {||W||}_2$ def LR_Regular(X_train,y_train,alpha,Iter,C,is_print,Regular_method='l2'): """ Implementation Regularization of LR Prameters: --------- X_train: training set. 
y_train: labels alpha: learning Iter:number of iterative C: regularization strength is_print: is print loss value Regular_method: l1 or l2,default l2 Return: ------ W:Weights b:bias cost: loss value """ m,n = X_train.shape W,b = initial(n) cost = [] for iter_ in range(Iter): Z = np.dot(W,X_train.T) + b A = sigmoid(Z) if Regular_method =="l2": loss = - (np.sum((y_train*np.log(A)+(1-y_train)*np.log(1-A))) + (C/2)*np.sum(W**2))/m elif Regular_method=="l1": loss = - (np.sum((y_train*np.log(A)+(1-y_train)*np.log(1-A))) + C*np.sum(np.abs(W)))/m cost.append(loss) if is_print and iter_ % 100 ==0: print("Iter:{},loss:{}".format(iter_,loss)) dZ = A - y_train if Regular_method =="l2": dW = (np.dot(dZ,X_train)+C*W)/m elif Regular_method =="l1": dW = (np.dot(dZ,X_train)+C*np.sign(W))/m assert dW.shape == W.shape # Make sure the shape is right. db = np.sum(dZ,keepdims=True) /m assert db.shape == b.shape W = W - alpha * dW b = b - alpha * db return W,b,cost W,b,cost = LR_Regular(X_train,y_train,0.1,90,0.1,True,'l2') plt.plot(cost) correct_rate = Predict(X_test,y_test,W,b) print('The test set correct rate is:',correct_rate) correct_rate = Predict(X_train,y_train,W,b) print('The test set correct rate is:',correct_rate) # 实际上这里的例子并不会因为正则化之后而变得特别好,因为数据过于简单.所以正则化无法很好的体现,但是正则化会加快最优值的收敛. # #### 4.2 Scikit-laern # # 同样我们也可以在scikit上使用正则化 # **Ps:** # # 在scikit中alpha是惩罚项,且惩罚项定义的是$\frac{1}{C}$,也就是说与我们定义的是为导数关系,所以scikit中C越小惩罚越大 # # ```python # """alpha : float # Regularization parameter. alpha is equal to 1 / C."""``` # # 附上其[源码](https://github.com/scikit-learn/scikit-learn/blob/7389dba/sklearn/linear_model/logistic.py#L998) logreg = LogisticRegression(C=0.1) logreg.fit(X_train,y_train) logreg_predict = logreg.predict(X_test) accurate = (logreg_predict == y_test).sum() / y_test.shape[0] print('the test set accurate is:',accurate) # ### 5 LR and Picture # # 现在我们来看看最后一个案例,将LR应用于图片分类中.构建"猫与非猫"的二元分类器 # #### 5.1 加载数据集 # # 这个数据集是放在h5文件中的,所以我们需要使用库h5py将图片数据读取出来. 
def load_data(): ''' create train set and test set make sure you have .h5 file in your dataset Returns: ------- train_set_x_orig: original train set shape is (209, 64, 64, 3) train_set_y_orig: original train label shape is (209,) test_set_x_orig: original test set shape is (50, 64, 64, 3) test_set_y_orig: original test label shape is (50,) classes: cat or non-cat. Note: ---- (209, 64, 64, 3): 209 picture,64 width,64 height,3 channel. ''' train_dataset = h5py.File('data_set/train_catvnoncat.h5', "r") train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels test_dataset = h5py.File('data_set/test_catvnoncat.h5', "r") test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels classes = np.array(test_dataset["list_classes"][:]) # the list of classes return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes train_x_orig, train_y, test_x_orig, test_y, classes = load_data() # #### 5.2 reshape picture # # 由于图片的原始数据是(m,width,height,channel)的形式,我们需要将其转换为(m,n)的形式,也就是说我们需要将channel的所有数字转换成一列.并除上255归一化.更多图片的学习,我们将在K-means算法中学习. train_x = train_x_orig.reshape(train_x_orig.shape[0],-1) / 255 test_x = test_x_orig.reshape(test_x_orig.shape[0],-1) / 255 print('Train_x\'s shape:{}'.format(train_x.shape)) print('Test_x\'s shape:{}'.format(test_x.shape)) print("Train_y's shape:{}".format(train_y.shape)) print("Test_y's shape:{}".format(test_y.shape)) # 取出一个图片查看 index = 0 plt.imshow(train_x_orig[index]) print ("y = " + str(train_y[index]) + ". It's a " + classes[train_y[index]].decode("utf-8") + " picture.") index = 13 plt.imshow(train_x_orig[index]) print ("y = " + str(train_y[index]) + ". 
It's a " + classes[train_y[index]].decode("utf-8") + " picture.") # ### 5.3 使用Scikit logreg_img = LogisticRegression(C=1,max_iter=500) logreg_img.fit(train_x,train_y) predict_img = logreg_img.predict(test_x) accurate = np.sum((predict_img==test_y)) / test_y.shape[0] print('The test predict is:',accurate) predict_img = logreg_img.predict(train_x) accurate = np.sum((predict_img==train_y)) / train_y.shape[0] print('The test predict is:',accurate) # #### 5.4 使用LR_Regular W_img,b_img,cost_img = LR_Regular(train_x,train_y,0.01,1000,0,True,'l2') plt.plot(cost_img) correct_rate = Predict(test_x,test_y,W_img,b_img) print('The test ste correct rate is:',correct_rate) correct_rate = Predict(train_x,train_y,W_img,b_img) print('The train ste correct rate is:',correct_rate) # 可以看出两个的效果都是类似的,实际上对于图片这种高纬度的数据而言,正确率有0.72已经是相对比较高了.当然正确的做法应该是使用神经网络. # # **Ps:** # # 使用LR_Regular刚开始迭代的时候,存在梯度爆炸的问题,这个实际上是以为数据维度高,而我们欠缺优化超级参数,这点将在神经玩网络中说明. # # 无论怎样,结果还是不错的. # # 另外这里出现了上下溢问题: # # - 数值上溢:大量级的数被近似为正无穷或负无穷时发生上溢,进一步运算导致无限值变为非数字. # - 数值下溢:接近零的数被四舍五入为0时发生下溢.被零除,取零的对数,进一步运算会变为非数字. # # Homework # # 使用data_set中的horseColicTest.txt和horseColicTraining.txt做LR # # Good Luck~
4-3 Logistic regression(Application).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# ### You are given two integer arrays, A and B of dimensions NXM.
# ### Your task is to perform the following operations:
# ### Add ( A + B)
# ### Subtract ( A - B)
# ### Multiply ( A * B)
# ### Integer Division (A / B)
# ### Mod (A % B)
# ### Power (A ** B)

# + pycharm={"name": "#%%\n"}
import numpy as np

# + pycharm={"name": "#%%\n"}
# First stdin line gives the matrix dimensions N and M.
n, m = map(int, input().split())

# The next n lines are the rows of A, and the n after that the rows of B.
a = np.array([input().split() for _ in range(n)], dtype=int)
b = np.array([input().split() for _ in range(n)], dtype=int)

# Print each element-wise result in the order the problem statement asks for.
for ufunc in (np.add, np.subtract, np.multiply, np.floor_divide, np.mod, np.power):
    print(ufunc(a, b))

# + pycharm={"name": "#%%\n"}
numpy/array_mathematics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tips # ### Introduction: # # This exercise was created based on the tutorial and documentation from [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/index.html) # The dataset being used is tips from Seaborn. # # ### Step 1. Import the necessary libraries: # + import pandas as pd # visualization libraries import matplotlib.pyplot as plt import seaborn as sns # print the graphs in the notebook % matplotlib inline # set seaborn style to white sns.set_style("white") # - # ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Tips/tips.csv). # ### Step 3. Assign it to a variable called tips # + url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Tips/tips.csv' tips = pd.read_csv(url) tips.head() # - # ### Step 4. Delete the Unnamed 0 column # + del tips['Unnamed: 0'] tips.head() # - # ### Step 5. Plot the total_bill column histogram # + # create histogram ttbill = sns.distplot(tips.total_bill); # set lables and titles ttbill.set(xlabel = 'Value', ylabel = 'Frequency', title = "Total Bill") # take out the right and upper borders sns.despine() # - # ### Step 6. Create a scatter plot presenting the relationship between total_bill and tip sns.jointplot(x ="total_bill", y ="tip", data = tips) # ### Step 7. Create one image with the relationship of total_bill, tip and size. # #### Hint: It is just one function. sns.pairplot(tips) # ### Step 8. Present the relationship between days and total_bill value sns.stripplot(x = "day", y = "total_bill", data = tips, jitter = True); # ### Step 9. 
Create a scatter plot with the day as the y-axis and tip as the x-axis, differ the dots by sex sns.stripplot(x = "tip", y = "day", hue = "sex", data = tips, jitter = True); # ### Step 10. Create a box plot presenting the total_bill per day differetiation the time (Dinner or Lunch) sns.boxplot(x = "day", y = "total_bill", hue = "time", data = tips); # ### Step 11. Create two histograms of the tip value based for Dinner and Lunch. They must be side by side. # + # better seaborn style sns.set(style = "ticks") # creates FacetGrid g = sns.FacetGrid(tips, col = "time") g.map(plt.hist, "tip"); # - # ### Step 12. Create two scatterplots graphs, one for Male and another for Female, presenting the total_bill value and tip relationship, differing by smoker or no smoker # ### They must be side by side. # + g = sns.FacetGrid(tips, col = "sex", hue = "smoker") g.map(plt.scatter, "total_bill", "tip", alpha =.7) g.add_legend(); # - # ### BONUS: Create your own question and answer it using a graph.
07_Visualization/Tips/Exercises_with_code_and_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dlmacedo/deep-learning-class/blob/master/notebooks/tensorflow/IMDB_Classification_using_Transformers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="EmeqA75kTGWT" colab_type="text" # # Imports and Setups # + [markdown] id="FQitJui_RLzx" colab_type="text" # https://wandb.ai/wandb/gallery/reports/SimpleTransformers-Transformers-Made-Easy--VmlldzoyNDQzNTg # + id="J-t5Fg3MAe7n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 673} outputId="653372df-0f86-4615-bfa4-4464c270ccdb" # !pip install transformers # + id="IqRSeDKpGKK9" colab_type="code" colab={} # %%capture # !pip install wandb # + id="tyOYjMWhGNtj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="213181f9-e3bd-49f8-8087-2bfe39800bcb" import wandb wandb.login() # setup wandb environment variables # %env WANDB_ENTITY=wandb # %env WANDB_PROJECT=gallery # + id="nYdfX_HyAznx" colab_type="code" colab={} from transformers import DistilBertTokenizerFast from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments import tensorflow as tf from pathlib import Path from sklearn.model_selection import train_test_split # + [markdown] id="-QmqT4BwTkqe" colab_type="text" # # Download and Prepare Dataset # + [markdown] id="JJWxCfEoT7O7" colab_type="text" # #### Download IMDB dataset # + id="674h_XuiA4-b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 223} outputId="72a548f2-9bb3-45ea-a7cb-c1fe0a2fc382" # !wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz # !tar -xf aclImdb_v1.tar.gz # + [markdown] 
# + [markdown] id="tBQs-oKZT_Jl" colab_type="text"
# #### Helper function to read and prepare dataset

# + id="H4G9mRW2A8Rg" colab_type="code" colab={}
def read_imdb_split(split_dir):
    """Load one aclImdb split (train/ or test/) from disk.

    Expects the standard aclImdb layout: one review per text file under
    the 'pos' and 'neg' subdirectories of *split_dir*.

    Returns:
        (texts, labels): parallel lists where labels[i] is 1 for a
        positive review and 0 for a negative one.
    """
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir / label_dir).iterdir():
            # NOTE(review): read_text() decodes with the platform default
            # encoding; the IMDB files are UTF-8 — consider
            # read_text(encoding="utf-8") for portability. Left unchanged.
            texts.append(text_file.read_text())
            # BUG FIX: the original wrote `label_dir is "neg"` — an identity
            # comparison against a string literal that only worked by
            # accident of CPython constant interning and raises a
            # SyntaxWarning on Python 3.8+. Compare by value instead.
            labels.append(0 if label_dir == "neg" else 1)
    return texts, labels

train_texts, train_labels = read_imdb_split('aclImdb/train')
test_texts, test_labels = read_imdb_split('aclImdb/test')

# + [markdown] id="FScnznLmUEco" colab_type="text"
# #### Prepare train and validation split

# + id="oywXRq_lBKLw" colab_type="code" colab={}
# Hold out 20% of the training documents for validation.
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)

# + [markdown] id="njP3ICL1UK5q" colab_type="text"
# #### Tokenize text data

# + id="1QkFqulABiSQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68, "referenced_widgets": ["6a77a81706414ca6b860459e634ac887", "f05d666ee5da4efbaba95eef81e4a0f3", "0b8f6267cdde455daafee3fea54ded89", "23fc82459787431a802b021688f2e31f", "415ba9825ef1447a8723a5c42833cf15", "c8917a16833045ae8f0d8fba845ed67a", "6d9e1eb8699848ee8c6c737906322a5d", "e01b3bac405e4a67ba3e143455bf9040"]} outputId="ba21c057-bbd8-4c12-bd17-0c7121c72472"
# Tokenize every split once, up front; truncation/padding give fixed-length
# batches suitable for tensor conversion below.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
test_encodings = tokenizer(test_texts, truncation=True, padding=True)

# + [markdown] id="J5eRLMYcUPOX" colab_type="text"
# #### `tf.data` input pipeline

# + id="MQA8-uZXBmIG" colab_type="code" colab={}
# Pair each encodings dict (input_ids, attention_mask, ...) with its labels.
train_dataset = tf.data.Dataset.from_tensor_slices((
    dict(train_encodings),
    train_labels
))
val_dataset = tf.data.Dataset.from_tensor_slices((
    dict(val_encodings),
    val_labels
))
test_dataset = tf.data.Dataset.from_tensor_slices((
    dict(test_encodings),
    test_labels
))
# + [markdown] id="kxSkEVEKUlKo" colab_type="text" # # Fine-tune transformer model for binary text classification # + [markdown] id="4BuzckVuUx7-" colab_type="text" # #### Define train configs and download pretrained transformer model. # + id="mvq0e951Un_X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 232, "referenced_widgets": ["1d9eea71d88640409be95b7cb36c5571", "b8b7e2a94ebf46b6867c45484cd7cb8c", "b544db17e99d49b89fd09fefc3408c3b", "c8889fdf13f3444cbbdd26000b62f121", "2c4aa35d8bfb4d5f996b1da0f5a77462", "62960623642741a7ad1fabfd546e9514", "ff7cba93e71748aea3679bf03f9bce32", "a4e211908a194a4589149cefe040ea71", "a977e48477ef43bfb864a3c6f7029648", "6d6ee6509dcc4f7bbf176e0a9311b01a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "24cfcfaf4be447618132d28e8789e389", "a94776d25798436f8781150487e31d0c"]} outputId="d2712ca1-2de5-48aa-982b-80e1f390ac23" training_args = TFTrainingArguments( output_dir='./results', num_train_epochs=3, per_device_train_batch_size=16, per_device_eval_batch_size=64, warmup_steps=500, weight_decay=0.01, logging_dir='./logs', logging_steps=10, ) with training_args.strategy.scope(): model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased") # + [markdown] id="wGf_miCaU-T7" colab_type="text" # #### Build trainer and train # + id="81qwJc3aB0Au" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 236} outputId="0f5d5c58-6115-4270-a947-dd30a3433cc3" trainer = TFTrainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset ) trainer.train()
content/courses/deeplearning/notebooks/tensorflow/IMDB_Classification_using_Transformers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/TerrenceAm22/DS-Unit-2-Kaggle-Challenge/blob/master/LS_DS_223_assignment_checkpoint2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jmBfJGtDokmb" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 2, Module 3* # # --- # + [markdown] id="nzcU5Y11okmc" colab_type="text" # # Cross-Validation # # # ## Assignment # - [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset. # - [ ] Continue to participate in our Kaggle challenge. # - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV. # - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # # **You can't just copy** from the lesson notebook to this assignment. # # - Because the lesson was **regression**, but the assignment is **classification.** # - Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification. # # So you will have to adapt the example, which is good real-world practice. # # 1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) # 2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...` # 3. 
Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values) # 4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)) # # # # ## Stretch Goals # # ### Reading # - <NAME>, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation # - <NAME>, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107) # - <NAME>, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation # - <NAME>, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb) # - <NAME>, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) # # ### Doing # - Add your own stretch goals! # - Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details. # - In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives. 
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
#
# > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
#
# The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?

# + [markdown] id="AegMJVryokmd" colab_type="text"
# ### BONUS: Stacking!
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```

# + id="CjRDBzGBokmd" colab_type="code" colab={}
# %%capture
import sys

# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*

# If you're working locally:
else:
    DATA_PATH = '../data/'

# + id="d-AL3H4vokmf" colab_type="code" colab={}
import pandas as pd

# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))

# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')

# + id="IV9ULxmRokmh" colab_type="code" colab={}
# Importing Necessary Libraries
import category_encoders as ce
import numpy as np
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from category_encoders import TargetEncoder
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
#from sklearn.model_selection import cross_validate

# + id="PpRjkxBnokmj" colab_type="code" outputId="075a32b7-f6d2-405e-b691-598795cfdd8b" colab={"base_uri": "https://localhost:8080/", "height": 411}
train.head()

# + id="7drSwvErokml" colab_type="code" outputId="b0e79ac0-ee8a-4caf-9517-6dd18061203e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Preforming Train/Test Split
train, val = train_test_split(train, random_state=42)
train.shape, test.shape, val.shape

# + id="T49bTJ3Hokmn" colab_type="code" outputId="e57f1a19-5443-4edc-dbe7-390b986dd9f3" colab={"base_uri": "https://localhost:8080/", "height": 85}
target = 'status_group'
y_train = train[target]
y_train.value_counts(normalize=True)

# + id="6Ij3H20Jokmq" colab_type="code" outputId="a18ab726-9cb6-4cdd-e730-c66bfbf3b644" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Assigning a dataframe with all train columns except the target & ID
train_features = train.drop(columns=['status_group', 'id'])

# Make a list of all numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()

#Making a list of all categorical features
cardinality = train_features.select_dtypes(exclude='number').nunique()

# Getting a list of all categorical features with cardinality <= 50
categorical_features = cardinality[cardinality <= 50].index.tolist()

features = numeric_features + categorical_features
print(features)

# + id="TKRb7g1Qokms" colab_type="code" colab={}
# Arranging data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]

# + id="G7jd9nX_okmv" colab_type="code" colab={}
# Using pipeline 2 for RandomizedSearchCV
#pipeline = make_pipeline(
#ce.OneHotEncoder(use_cat_names=True),
#SimpleImputer(strategy='mean'),
#DecisionTreeClassifier(random_state=42)
#)

# Fit on train
#pipeline.fit(X_train, y_train)

# Score on train, val
#print('Train Accuracy', pipeline.score(X_train, y_train))
#print('Validation Accuracy', pipeline.score(X_val, y_val))

# Predict on test
#y_pred = pipeline.predict(X_test)

# + id="ocHuNeoXokmx" colab_type="code" outputId="24790925-8390-4110-9c69-ac5b4e312cab" colab={"base_uri": "https://localhost:8080/", "height": 51}
pipeline2 = make_pipeline(
    ce.OrdinalEncoder(verbose=20),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=200, n_jobs=-1)
)

pipeline2.fit(X_train, y_train)
print('Train Accuracy', pipeline2.score(X_train, y_train))
print('Validation Accuracy', pipeline2.score(X_val, y_val))
y_pred = pipeline2.predict(X_test)

# + id="D8rKrrBwjuKP" colab_type="code" colab={}
#cross_val_score(pipeline2, X_train, y_train, cv=5)

# + id="9cOxbb3Ho1Gc" colab_type="code" outputId="9dd0b666-059a-418b-8116-e7b78bf199e4" colab={"base_uri": "https://localhost:8080/", "height": 816}
param_distributions = {
    'ordinalencoder__verbose':[20],
    'simpleimputer__strategy':['mean'],
    'randomforestclassifier__n_estimators':[100, 120, 140, 160, 180, 200],
    'randomforestclassifier__n_jobs':[-1],
    'randomforestclassifier__max_depth':[None, 20, 30, 40, 50],
    'randomforestclassifier__criterion':["entropy"],
    'randomforestclassifier__min_samples_split':[20, 40]
}

search = RandomizedSearchCV(
    pipeline2,
    param_distributions = param_distributions,
    n_iter=10,
    cv=5,
    verbose=10,
    scoring= 'accuracy',
    return_train_score=True,
    n_jobs=-1
)

search.fit(X_train, y_train)

# + id="v_Qchs1o5ZRM" colab_type="code" outputId="c5c3bd5f-5e9f-4ab9-f55f-9ecfceb420ec" colab={"base_uri": "https://localhost:8080/", "height": 71}
print('Best hyperparameters', search.best_params_)
# Fix: the search was built with scoring='accuracy', so best_score_ is a
# cross-validated accuracy -- the old label 'Cross-validation MAE' was wrong.
print('Cross-validation accuracy', search.best_score_)

# + id="SqWQGpIsuzU7" colab_type="code" colab={}
LS_DS_223_assignment_checkpoint2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} import torch from autograph.lib.mcts_aut import AutStats, UCBAnnealedAutStats from autograph.lib.automata import style_agraph from IPython.display import SVG, display # + pycharm={} checkpoint = "autograph/play/checkpoints/mine_big/simple_aut_no_cur_transplant_ucb_25" checkpoint2 = "autograph/play/checkpoints/simple_aut_no_cur_transplant_from" data = torch.load(checkpoint, map_location="cpu") data2 = torch.load(checkpoint2, map_location="cpu") # + pycharm={} aset = data["aut"] # - data["train_loop"]["global_step"] data["train_loop"]["num_rounds"] # + pycharm={} astats = data["aut_stats"] astats2 = data2["aut_stats"] # + pycharm={} repr(astats) # - repr(astats2) sobj1 = AutStats(len(aset.graph.network)) sobj1.load_state_dict(astats) sobj2 = AutStats(len(aset.graph.network)) sobj2.load_state_dict(astats2) sobj = UCBAnnealedAutStats(sobj2, sobj1, 25) sobj.synchronize() [sobj.v(i) for i in sobj.indices()] sobj.baseline() g = style_agraph(aset.graph.network, [1], False) for state in sobj.indices(): if g.has_edge(*state): #if sobj.local_n[state] == 0: # g.remove_edge(state) # pass # else: g.get_edge(*state).attr["label"] = "" g.get_edge(*state).attr["label"] += ("\n%.3f" % sobj.v(state)) pass g.layout(prog="dot") SVG(g.draw(format="svg"))
tests/aut_visuals/Explore Checkpoint-Transplant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Web Scraping with Python # MIT Political Methodology Lab Workshop Series # Spring 2022 # <NAME> # ## Scraping Overview # # Most news sites and similar web pages can be scraped through a three step process: # # 1. given a link to an article, extract and format all the needed info from the page # 2. given an archive-type page of links, finding all the links on the page. # 3. iterating through each page of archives, scraping all the pages from it, and saving to disk # # We'll write one function for each. # ## Libraries and setup # requests is for general HTTP loading import requests # BeautifulSoup is an HTML parser from bs4 import BeautifulSoup # JSON is a nice format for writing out # ujson can handle datetimes better and is a drop in replacement for the json module import json # Sometimes you'll need the regular expressions library and a date library import re import dateutil.parser # ## Page scraper # We'll write a function here that takes in the URL of an article or page, extracts the information we want from HTML, and structures the output. 
# # Python things to learn: # # - calling methods from objects # - what a dictionary looks like # - how to define a function # # HTML things to learn: # # - Chrome inspector # - what HTML tags look like # find the URL of an article to scrape url = "https://reliefweb.int/report/afghanistan/more-200-displaced-families-receive-cash-assistance-laghman-province" # download article page and get content page = requests.get(url) content = page.content content page.status_code # convert to BeautifulSoup soup = BeautifulSoup(content, "lxml") # quick aside to demonstrate "strip" txt = " <NAME> " print(txt) print(txt.strip()) # extract body text from page HTML (we'll do this together) body = soup.find("div", {"class" : "content"}) #body.text print(body.text.strip()) # extract title from page HTML title = soup.find("article").find("h2") print(title.text) title # extract author from page HTML author = soup.find("dd", {"class" : "source"}) author.text.strip() # extract date from page HTML raw_date = soup.find("dd", {"class" : "date published"}) print(raw_date.text) # convert from raw text into a standardized form date = dateutil.parser.parse(raw_date.text) # Put it into a standard ISO format date = date.strftime("%Y-%m-%d") print(date) # put it all together! 
def page_scraper(url): """Function to scrape a page""" # Code to download and soupify the page page = requests.get(url) content = page.content soup = BeautifulSoup(content, "lxml") # All the code to extract pieces from the HTML title = soup.find("article").find("h2") title = title.text.strip() body = soup.find("div", {"class" : "content"}) body = body.text.strip() raw_date = soup.find("dd", {"class" : "date published"}) date = dateutil.parser.parse(raw_date.text) date = date.strftime("%Y-%m-%d") author = soup.find("dd", {"class" : "source"}) author = author.text.strip() article = { "title" : title, "body" : body, "date" : date, "author" : author } return article # test it page_scraper(url) # test it on a different page page_scraper("https://reliefweb.int/report/iraq/reconstruction-needed-displaced-iraqis-continue-return-iom-iraq") # ## Link getter # Second, we need to get the URLS of all the pages we want to scrape. We can do this by finding the directory pages, where the links are on it, and how to get all available directory pages. # # Python things to learn here: # # - getting values from dictionaries # - for loops and list comprehensions # - regex with the `re` library # - basic string operations # + ### Function to get all the article links from a single directory page # - url = "https://reliefweb.int/updates?advanced-search=%28PC13%29_%28F10%29&page={0}".format(1) page = requests.get(url) content = page.content soup = BeautifulSoup(content, "lxml") links_raw = soup.find("div", {"class" : "articles"}).find_all("a") print(links_raw[0:5]) # pull out just the links links_raw = [i['href'] for i in links_raw] links_raw[0:5] # uh oh! `links` is full of all sorts of garbage. Is there a term # that we can search for to reliably pull out article links only? 
links = [i for i in links_raw if bool(re.search("/report", i))]
links[0:5]

# +
# If these weren't complete URLs we could use string operations plus a list comprehension to fix this:
#links = ["https://reliefweb.int" + i for i in links]
#links[0:5]
# -

# Put it all together into a function that takes in a "page number"
# and returns all the links to scrape from it.
def page_to_link(page_num):
    """Return the article links found on ReliefWeb archive page `page_num`."""
    # how to use .format()
    url = "https://reliefweb.int/updates?advanced-search=%28PC13%29_%28F10%29&page={0}".format(page_num)
    # download the page
    page = requests.get(url)
    # get its content
    content = page.content
    # soupify
    soup = BeautifulSoup(content, "lxml")
    # pull out links
    links_raw = soup.find("div", {"class" : "articles"}).find_all("a")
    links = [i['href'] for i in links_raw]
    # clean links
    links = [i for i in links if bool(re.search("/report", i))]
    return links

page_to_link(44)

# ## Putting it together
#
# Now we have a function that'll take a page number for the archive page and return all the links.
# We have another function that'll take in an article URL and give us the structured content from the page.
#
# Let's put them together and download a (small!) range of stories.
#
# Note: let's be nice to the UN and not all download the whole thing at once.

# +
# get all the links we want to scrape
all_links = []

for num in range(1, 3):
    lks = page_to_link(num)
    all_links.extend(lks)  # extend! not append.

len(all_links)

all_links[12]

# +
all_content = []

for link in all_links[10:20]: # be nice to reliefweb and only get some
    try:
        content = page_scraper(link)
        all_content.append(content) # back to append!
    except Exception as e:
        # if something goes wrong, keep trucking,
        # but print out the link so we can diagnose it.
        print(e)
        print(link)
# -

len(all_content)

all_content[4]

# ## Saving as CSV
#
# If you're going to work with your text next in R, a CSV is probably the most useful form to save your text in. To save it as a csv, we will convert into a `DataFrame` using `pandas`, a package for working with data in Python. `pandas` will then let us easily write it out to a CSV.

# +
import pandas as pd

content_df = pd.DataFrame(all_content)
#content_df
content_df.to_csv("all_content.csv")
# -

# ## Saving as JSON
#
# We can also store is as a JSON file. JSON and dictionaries are almost equivalent, so it's a natural form to save a dict as a JSON file.

import json

# +
FILENAME = "reliefweb.json"

with open(FILENAME, "w") as f:
    json.dump(all_content, f)
# -

# ### Read it back in
#
# If you want to load it back in later to analyze, you can do this:

# +
FILENAME = "reliefweb.json"

with open(FILENAME, "r") as f:
    loaded_content = json.load(f)
# -

# is it the same?
assert loaded_content[4] == all_content[4]

list(range(0, 3))
Python Notebooks/Completed/Scraper_Completed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Object Classification

# In this tutorial, we shall train a model for classification on dataset using our own network, and then we shall evaluate how good/bad it is compared to the the one trained using transfer learning.
#
# We use the famous CIFAR10 dataset readily avaliable from one of the frameworks.
#
# We shall follow the following steps:
# 1. Load the Data (already split into train and test)
# 2. Define a Convolutional Neural Network to train a Classification Problem for classifying into 10 classes from the image input
# 3. Train a model using the training set, and created validation set
# 4. Evluate the model on test set
# 5. Repeat a similar approach for Tranfer Learning using a pre-trained model

import tensorflow as tf
import keras
import numpy as np
import random
import matplotlib.pyplot as plt
# %matplotlib inline

tf.logging.set_verbosity(tf.logging.ERROR)

# ## Load the Data

# +
from keras.datasets import cifar10

# Load Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Mention the Class Name List
class_list = ['airplane','automobile','bird','cat','deer','dog', 'frog','horse','ship','truck']

# # Convert a one-hot vector for the test-labels
y_train = keras.utils.to_categorical(y_train, num_classes=len(class_list))

# # Split the test set to Validation & Test set
num_test_samples = x_test.shape[0]
x_val, y_val = x_test[0:num_test_samples//2,:,:], y_test[0:num_test_samples//2]
x_test, y_test = x_test[num_test_samples//2:,:,:], y_test[num_test_samples//2:]
y_val = keras.utils.to_categorical(y_val, num_classes=len(class_list))
# -

# ## Visualize Data

# Train Data
x_train.shape, y_train.shape

# Validation Data
x_val.shape, y_val.shape

# +
# Fix: random.randint is inclusive on BOTH endpoints, so randint(0, 50000)
# could return 50000 and index one past the end of the training set.
# Derive the upper bound from the data instead of hard-coding it.
num = random.randint(0, x_train.shape[0] - 1)
class_fig = class_list[np.argmax(y_train[num])]
plt.imshow(x_train[num].reshape(32,32,3))
print("Class Name :{}".format(class_fig))
# -

# # Define the Model

# +
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers import Input, Conv2D, Activation, Dense, Flatten, MaxPooling2D

# Writing the model
model = Sequential(name='CIFAR-Classifier')
# Fix: removed the unused `inputs = Input(shape=(32, 32, 3))` tensor --
# a Sequential model was never connected to it, so it was dead code.

# Layer 1
model.add(Conv2D(filters=32,kernel_size=3, strides=2,padding="same"))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), padding='valid'))

# Layer 2
model.add(Conv2D(filters=128,kernel_size=(3,3), strides=(2,2),padding="same"))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))

# Layer 3
model.add(Flatten())
model.add(Dense(10,activation='softmax',use_bias=True))

# Compile the Model with the Loss function, Optimizer and Accuracy Metric
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Check the Architecture
model.build(input_shape=(None, 32, 32, 3))
model.summary()
# -

# ## Train the Model

# +
# Train the model
model.fit(x_train, y_train, validation_data=(x_val,y_val), epochs=5)
# -

# ## Test the Model

# +
# Predict on Test Set
# Fix: reshape with -1 instead of the hard-coded 5000 so the comparison
# against y_test works for any test-set size.
y_pred = model.predict_classes(x_test).reshape(-1, 1)
acc = np.sum(y_test == y_pred) / y_test.shape[0]

# Print Accuracy
print("Test Accuracy :{}".format(acc*100))
Object Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (mooc)
#     language: python
#     name: mooc
# ---

# + [markdown] colab_type="text" id="jcyB6NFmQReL"
# # Tutorial: Describe Neuroscience Dataset using MINDS
# -

# ## Initialize and configure

# !pip install nexusforge==0.6.2
# !pip install allensdk
# !pip install neurom[plotly]==3.0.1
# !pip install --upgrade nest-asyncio==1.5.1

# ### Get an authentication token

# The [Nexus sandbox application](https://sandbox.bluebrainnexus.io/web) can be used to get a token:
#
# - Step 1: From the [web page](https://sandbox.bluebrainnexus.io/web), click on the login button in the top right corner and follow the instructions on screen.
#
# - Step 2: You will then see a `Copy token` button in the top right corner. Click on it to copy the token to the clipboard.
#
# Once a token is obtained, proceed to paste it as the value of the `TOKEN` variable below.
#
# __Important__: A Nexus token is valid for 8 hours, if your working session is open for more than 8 hours, you may need to refresh the value of the token and reinitialize the forge client in the _'Configure a forge client to store, manage and access datasets'_ section below.

import getpass

TOKEN = getpass.getpass()

# ### Configure a forge client to store, manage and access datasets

# +
import uuid
import base64
import requests
import json

from pathlib import Path

from kgforge.core import KnowledgeGraphForge
from kgforge.specializations.mappings import DictionaryMapping

from allensdk.api.queries.cell_types_api import CellTypesApi
from allensdk.core.cell_types_cache import CellTypesCache
# -

# Fetch the JSON-LD context used by the example forge configuration and
# store it locally so the client can resolve it.
r = requests.get('https://raw.githubusercontent.com/BlueBrain/nexus/ef830192d4e7bb95f9351c4bdab7b0114c27e2f0/docs/src/main/paradox/docs/getting-started/notebooks/rdfmodel/jsonldcontext.json')
dirpath = './rdfmodel'
Path(dirpath).mkdir(parents=True, exist_ok=True)
with open(f'{dirpath}/jsonldcontext.json', 'w') as outfile:
    json.dump(r.json(), outfile)

ORG = "github-users"
PROJECT = ""  # Provide here the automatically created project name created when you logged into the Nexus sandbox instance.

forge = KnowledgeGraphForge("https://raw.githubusercontent.com/BlueBrain/nexus/ef830192d4e7bb95f9351c4bdab7b0114c27e2f0/docs/src/main/paradox/docs/getting-started/notebooks/forge.yml",
                            bucket=f"{ORG}/{PROJECT}",
                            endpoint="https://sandbox.bluebrainnexus.io/v1",
                            token=TOKEN)

# + [markdown] colab_type="text" id="9W5M5Ck9Tq7q"
# ## Download datasets from Allen Cell Types Database and from MouseLight
# -

# ### Download mouse neuron morphology from the Allen Cell Types Database

# We will be downloading mouse neuron morphology data from the [Allen Cell Types Database](https://celltypes.brain-map.org/). The [AllenSDK](https://allensdk.readthedocs.io/en/latest/) can be used for data download.

ALLEN_DIR = "allen_cell_types_database"
ctc = CellTypesCache(manifest_file=f"{ALLEN_DIR}/manifest.json")

MAX_CELLS = 1
SPECIES = CellTypesApi.MOUSE

# Select the first MAX_CELLS mouse cells that have a reconstruction.
nm_allen_identifiers = [cell["id"] for cell in ctc.get_cells(species=[SPECIES], require_reconstruction = True)][:MAX_CELLS]
print(f"Selected a mouse neuron with identifier: {nm_allen_identifiers}")

with open(f"{ALLEN_DIR}/cells.json") as f:
    allen_cell_types_metadata = json.load(f)

nm_allen_metadata = [neuron for neuron in allen_cell_types_metadata if neuron["specimen__id"] in nm_allen_identifiers]
print(f"Metadata of the neuron {nm_allen_identifiers}:")
nm_allen_metadata

# + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true
# ### Download one mouse neuron morphology reconstructed from the selected neuron
# -

# We will be downloading one mouse neuron morphology from the [Allen Cell Types Database](https://celltypes.brain-map.org/) using the [AllenSDK](https://allensdk.readthedocs.io/en/latest/).

for identifier in nm_allen_identifiers:
    ctc.get_reconstruction(identifier)

# + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true
# ### Download one mouse neuron electrophysiology recording from the selected neuron
# -

# We will be downloading one mouse neuron electrophysiology from the [Allen Cell Types Database](https://celltypes.brain-map.org/) using the [AllenSDK](https://allensdk.readthedocs.io/en/latest/).

for identifier in nm_allen_identifiers:
    ctc.get_ephys_data(identifier)

# ## Transform Allen Cell Types Database Metadata to [Neuroshapes' MINDS](https://bbp-nexus.epfl.ch/datamodels/class-schemadataset.html) metadata

# ### Map the Allen Cell Types Database neuron morphologies metadata to Neuroshapes

allen_nm_mapping = DictionaryMapping.load("https://raw.githubusercontent.com/BlueBrain/nexus/ef830192d4e7bb95f9351c4bdab7b0114c27e2f0/docs/src/main/paradox/docs/getting-started/notebooks/mappings/allen_morphology_dataset.hjson")

nm_allen_resources = forge.map(nm_allen_metadata, allen_nm_mapping, na='')

# ### Map the Allen Cell Types Database neuron electrophysiology recording to Neuroshapes

allen_ephys_mapping = DictionaryMapping.load("https://raw.githubusercontent.com/BlueBrain/nexus/ef830192d4e7bb95f9351c4bdab7b0114c27e2f0/docs/src/main/paradox/docs/getting-started/notebooks/mappings/allen_ephys_dataset.hjson")

nephys_allen_resources = forge.map(nm_allen_metadata, allen_ephys_mapping, na='')

# ## Register
#
# If the registration fails, try refreshing the access token and reinitializing the forge client in the _'Configure a forge client to store, manage and access datasets'_ section.

# ### Register the Allen Cell Types Database neuron morphology

nm_allen_resources.id = forge.format("identifier", "neuronmorphologies", str(uuid.uuid4()))
forge.register(nm_allen_resources)

# ### Register the Allen Cell Types Database neuron electrophysiology recording

nephys_allen_resources.id = forge.format("identifier", "traces", str(uuid.uuid4()))
forge.register(nephys_allen_resources)

# ## Access

# ### Set filters

# +
_type = "NeuronMorphology"

filters = {"type": _type}
# -

# ### Run Query

# +
number_of_results = 10  # You can limit the number of results, pass `None` to fetch all the results

data = forge.search(filters, limit=number_of_results)
print(f"{str(len(data))} dataset(s) of type {_type} found")
# -

# ### Display the results as pandas dataframe

# +
property_to_display = ["id","name","subject","brainLocation.brainRegion.id","brainLocation.brainRegion.label","brainLocation.layer.id","brainLocation.layer.label", "contribution","brainLocation.layer.id","brainLocation.layer.label","distribution.name","distribution.contentUrl","distribution.encodingFormat"]
reshaped_data = forge.reshape(data, keep=property_to_display)

forge.as_dataframe(reshaped_data)
# -

# ### Download

dirpath = "./downloaded/"
forge.download(data, "distribution.contentUrl", dirpath, overwrite=True)

# ls ./downloaded/

# ### Display a result as 3D Neuron Morphology

from neurom import load_morphology
from neurom.view.plotly_impl import plot_morph3d
import IPython

neuron = load_morphology(f"{dirpath}/{data[0].distribution.name}")
plot_morph3d(neuron, inline=False)
IPython.display.HTML(filename='./morphology-3D.html')

# ## Version the dataset

# Tagging a dataset is equivalent to `git tag`. It allows to version a dataset.

forge.tag(data, value="releaseV112")

# +
# The version argument can be specified to retrieve the dataset at a given tag.
tagged_data = forge.retrieve(id=data[0].id, version="releaseV112")
# -

forge.as_dataframe(tagged_data)

data[0].description="Neuron Morphology from Allen"
forge.update(data[0])

non_tagged_data = forge.retrieve(id=data[0].id)

forge.as_dataframe(non_tagged_data)
docs/src/main/paradox/docs/getting-started/notebooks/one_cell_minds.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <img style="float: left; padding-right: 10px; width: 80px" src="https://raw.githubusercontent.com/trivikverma/researchgroup/master/static/media/resources/epa1316/TU_descriptor%20black.png"> EPA-1316 Introduction to *Urban* Data Science # # # ## Assignment 2: Geographic Visualisation # # **TU Delft**<br> # **Q1 2020**<br> # **Instructor:** <NAME> <br> # **TAs:** <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> <br> # **[Computational Urban Science & Policy Lab](https://research.trivikverma.com/)** <br> # # --- # # # ## 1. Introduction # # _Note:_ If you have not gone through **labs and homeworks 04-05**, kindly do so before starting this assignment, as those will help you with all the necessary knowledge for this assignment. This assignment will be useful for you when exploring and visualising your data, and discussing observations, outliers or relationships situated geographically, as part of the final project for this course or in the future. # # #### 1.1 Submission # # Please submit the results by Brightspace under **Assignment 02**, using a single file as example, # # ```text # firstname_secondname_thirdname_lastname_02.html # # ``` # # **If your file is not named in lowercase letters as mentioned above, your assignment will not be read by the script that works to compile 200 assignments and you will miss out on the grades. I don't want that, so be exceptionally careful that you name it properly. Don't worry if you spelled your name incorrectly. I want to avoid a situation where I have 200 assignments all called assignment_02.html** # # Please **do not** submit any data or files other than the ``html file``. # # #### 1.2 How do you convert to HTML? # # There are 2 ways, # # 1. 
from a running notebook, you can convert it into html by clicking on the file tab on the main menu of Jupyter Lab # * File &rightarrow; Export Notebooks as... &rightarrow; Export Notebook to HTML # 2. go to terminal or command line and type # * ``jupyter nbconvert --to html <notebook_name>.ipynb `` # # # #### 1.3 Learning Objectives # # This assignment is designed to support three different learning objectives. After completing this laboratory you will be able to: # # * Combine different datasets # * Work with Geographic data # * Plot important information about the data (graphs, scatter plots, choropleth, etc..) using the `principles of graphical excellence` and `guidelines of exploratory data analysis`. # # #### 1.4 Tasks # # `Problem Structure`: First formulate a hypothesis/RQ, identify two/three key variables, search for appropriate datasets (csv, excel, etc.), merge the csv and the given shape file, do a bit of EDA/spatial analysis and then report your result in a list of **5 neighbourhoods** that discuss the RQ. # # `What we expect`: Provide a list of **five neighbourhoods** in The Hague characterised as "safe" or "" (insert your measure based on what data you collected). Give at least two measurements that may be linked to each other (for example: your hypothesis is that neighbourhoods with no green space are more prone to populations with mental health issues). Be explicit about what definition you use for each measurement using markdown cells. I am not looking for mathematical equations as justification, but you are welcome to also form simple relations and show them in markdown. I shouldn't have to call you to find out what you mean by measurements of "Accessibility" or "Safety" or "Popularity" etc. Do not assume that there is one normative definition and skip your reasoning. # * Note that the measurement can also just be number of people living in a neighbourhood, their voting choice or availability of jobs. 
But it will not hurt to plot more than just a column from a csv. # # This assignment requires you to go through five tasks in eda and visualisation, and networks and spatial weights. # # 1. Use at least two datasets: merge at least 1 shapefile (already provided) and a csv file (you find and obtain). # 2. Justify your data collection process using markdown cells as you go through the notebook # 3. # 4. Justify your choice of the list on the basis of your analysis. # * Use at least **3 figures** to support your analysis. Think about exploratory data analysis (build data, clean data, explore global, explore group properties). # * These figures should have followed the principles of graphical excellence. Using markdown, write explicity under each figure at least **3 principles of excellence** you have used to make it. # * Create **choropleths** to display region-specific information (ex. population, voting choice or jobs availability) # * Be careful with the use of color (and try to be inclusive of color blind people) # * Use **one method** from the lectures to discuss what you observe for your variable(s). Examples below, # * local or global spatial autocorrelation # * network measures # * spatial weights / spatial lag # * binning # * feature scaling, normalisation or standardisation # 5. **[Optional]** Plot and Overlay Shapefiles to show Den Haag with some other elements like the sea, canals, centroids, amenities (try Open Street Maps data - using `osmnx`?), climate or land-use patterns, etc. # # Remember to always document your code! # ## 2. Download the Data # # For this assignment I am providing you with the shapefiles of The Hague. <NAME> has prepared these files with love and care so that you can connect it with either [<NAME>](https://denhaag.incijfers.nl/jive) or [CBS](https://www.cbs.nl/nl-nl/reeksen/kerncijfers-wijken-en-buurten-2004-2020) datasets without having to clean badly collected data. # # Note: For data from CBS, data is only complete upto 2017. 
You will have to subset the data on municipality using the variable name `gm_naam = 's-Gravenhage` and then subset on neighbourhood resolution using variable name `recs = Buurt` to get the data that can match the shapefiles we have provided. # # So after you unzip, we’ll work with the file ``neighborhoods.xxx``, which is in one of many geographic formats. Put the data in a convenient location on your computer or laptop, ideally in a folder called **data** which is next to this **jupyter notebook**. I recommend taking a look at the file with format `.json` in a text editor like _atom_ for any system or notepad++ for windows. These will also make your life easy for everything else on your computer. Make sure you’ve set your working directory in the correct manner – okay? # # It’s a big file and it may take a while to load onto your laptop and into Python (running on the jupyter labs environment). # # So, to summarise, you will use at least two datasets. # # 1. Download Shapefiles provided with the assignment # 2. Get a second dataset of your choice from The Hague city region using the links above (curate them as you like) # # #### More Data Sources # # You can find more data sources on Cities and Population, Climate indicators and Land-use in the following links in case you are attempting the **[Optional]** exercise. # # * http://citypopulation.de/ # * https://www.census.gov/programs-surveys/geography.html # * https://www.eea.europa.eu/data-and-maps # * http://download.geofabrik.de/ # # #### In case you get more data as shapefiles, and want to play with projections, a nice guide for it is [here](https://automating-gis-processes.github.io/CSC18/lessons/L2/projections.html) # ## 3. Start your analysis # your code here # use many cells if you like to structure your code well
static/epa1316-2020/assignments/assignment-02/.ipynb_checkpoints/assignment-02-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""Train a CNN+GRU (CGRU) regression network in tf.keras.

Loads pre-computed real/imaginary feature frames from a MATLAB v7.3 (HDF5)
file, builds a Conv2D -> GRU -> Dense model that maps a 514x2 input frame to
a 514-dim target vector, trains it with MSE loss, and saves it in HDF5 format.
"""
import tensorflow as tf
import sklearn
import numpy as np
from scipy import io
import os
import time
import h5py
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import math

# # DATA

# v7.3 .mat files are HDF5 containers, hence h5py. Open explicitly read-only;
# relying on the default mode is fragile across h5py versions.
snr_0 = h5py.File('/home/ssprl2/Documents/Gautam/Features/Traffic/T5_A_10/Sep_T_5_A_10_RI_1_OP.mat', 'r')

# FIX: `Dataset.value` was deprecated and removed in h5py 3.x; indexing with
# `[()]` reads the full dataset and is the documented equivalent.
data = np.transpose(snr_0['trainData'][()])
print(data.shape)

# Columns 0:514 and 514:1028 are the two input channels; 1028:1542 the target.
input_data_1 = data[:, 0:514]
print(input_data_1.shape)
input_data_2 = data[:, 514:1028]
print(input_data_2.shape)

# Stack the two channels depth-wise and add a trailing "image channel" axis,
# giving (samples, 514, 2, 1) as expected by Conv2D.
input_data = np.dstack((input_data_1, input_data_2))
# FIX: use -1 for the sample axis instead of the hard-coded 2155100 so the
# script works for any number of rows in the feature file.
input_data = np.reshape(input_data, (-1, 514, 2, 1))
print(input_data.shape)

# Free the large intermediates as soon as the stacked copy exists.
del input_data_1
del input_data_2

Labels = data[:, 1028:1542]
print(Labels.shape)

# # PLACEHOLDERS

# NOTE(review): TF1-style placeholders. They are NOT used by the Keras model
# below; they feed the legacy tf.Session training cell later in this file.
x = tf.placeholder(tf.float32, [None, 514, 2], name="x-input")
x_image = tf.reshape(x, [-1, 514, 2, 1], "x-image")
y_true = tf.placeholder(tf.float32, [None, 514], name="y-input")

# NOTE(review): standalone-keras import; the model below uses tf.keras layers
# exclusively, so these names are unused here. Kept in case later file
# sections rely on them.
from keras.layers import LSTM, Dense, Dropout, Flatten, GRU, Reshape

# # LAYERS

# +
# Two Conv2D feature extractors (truncated-normal weights, small constant
# bias), a pooling stage, then the feature map is reshaped to a
# (time, features) sequence for the recurrent layers.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(
    kernel_size=(5, 1), input_shape=(514, 2, 1), filters=256,
    activation='relu',
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None),
    bias_initializer=tf.keras.initializers.Constant(0.1),
    strides=(1, 1), padding='same'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(3, 3), padding='same'))
model.add(tf.keras.layers.Conv2D(
    kernel_size=(5, 1), filters=256, activation='relu',
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None),
    bias_initializer=tf.keras.initializers.Constant(0.1),
    strides=(1, 1), padding='same'))
model.add(tf.keras.layers.Reshape((172, 256)))

# Stacked GRUs; each returns the full sequence for the next layer.
model.add(tf.keras.layers.GRU(100, return_sequences=True, input_shape=(172, 129)))
model.add(tf.keras.layers.GRU(100, return_sequences=True))
model.add(tf.keras.layers.GRU(110, return_sequences=True))

# Dense head; the final layer is linear with one output per frequency bin.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
    512, activation='relu',
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None),
    bias_initializer=tf.keras.initializers.Constant(0.1)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(
    514,
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None),
    bias_initializer=tf.keras.initializers.Constant(0.1)))
model.summary()
# -

print(model.input_shape)
print(model.output_shape)

# +
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9,
                                       beta_2=0.999, amsgrad=False),
    loss='mean_squared_error')
model.fit(input_data, Labels, epochs=20, batch_size=500)

# Save tf.keras model in HDF5 format.
keras_file = "keras_CGRU.h5"
tf.keras.models.save_model(model, keras_file)

# # Convert to TensorFlow Lite model.
# converter = tf.lite.TFLiteConverter.from_keras_model_file(keras_file) # tflite_model = converter.convert() # open("converted_model.tflite", "wb").write(tflite_model) # - # # TRAINING # + checkpoint_dir = "CRNN/" tf.gfile.MakeDirs(checkpoint_dir) learningRates = np.hstack((1e-3*np.ones(9), 1e-4*np.ones(7), 1e-5*np.ones(5))) num_epochs = len(learningRates) with tf.Session() as sess: saver = tf.train.Saver() #save_relative_paths=True tf.train.write_graph(sess.graph_def, checkpoint_dir, "graph.pbtxt", True) init = tf.global_variables_initializer() sess.run(init) for epoch in np.arange(num_epochs): idx = np.arange(len(Labels)) np.random.shuffle(idx) #print("inside epoch loop = ", epoch) for i in np.arange(0, nDataSamples-1, trainBatchSize): x_batch = input_data[idx[i:i+trainBatchSize],:, :] y_batch = Labels[idx[i:i+trainBatchSize],:] feed = {x: x_batch, y_true: y_batch, learning_rate: learningRates[epoch], keep_prob: 0.80} sess.run(train_op,feed_dict=feed) if i%50 == 0: feed = {x: x_batch,y_true: y_batch, learning_rate: learningRates[epoch], keep_prob: 1.0} loss_value,prediction = sess.run([cross_entropy,y_pred], feed_dict=feed) rmse = mean_squared_error(y_batch[:,0:514], prediction[:,0:514])**0.5 mae = mean_absolute_error(y_batch[:,0:514],prediction[:,0:514]) print("epoch: %2d step: %6d RMSE: %3.2f MAE: %3.2f loss: %6.4f" % \ (epoch, i, rmse, mae, loss_value)) tf.gfile.MakeDirs(checkpoint_dir + '/model' + str(epoch)) checkpoint_file = os.path.join(checkpoint_dir + '/model' + str(epoch), "model") saver.save(sess, checkpoint_file) print("**** SAVED MODEL ****") print("**** COMPLETED ALL THE EPOCHS ****") # -
Separation/TrainingCodes/CGRU_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Business Understanding # # step 1 Pick a dataset and Import packages # import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px # %matplotlib inline from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_absolute_error from sklearn.linear_model import LinearRegression # In this notebook Iam using predict price value by Random Forest Regressor and Linear Regressor , visual tool of dataset and trying to answer important questions . # I am selecting Boston AirBnB listings as my dataset for the study. It is available on Kaggle. Listings dataset has various features/columns such as neighborhood, property_type, bedrooms, bathrooms, beds, price, reviews, ratings, etc. # # # Airbnb is an American vacation rental online marketplace company based in San Francisco, California. Airbnb maintains and hosts a marketplace, accessible to consumers on its website or app. Users can arrange lodging, primarily homestays, and tourism experiences or list their spare rooms, properties, or part of it for rental. On the other hand, users who are traveling are looking for stays search properties and rooms by neighborhood or location. Airbnb recommends the best price in the neighborhood and users book the best deal. # # Data Understanding # ### read Boston Airbnb datasets # + _listings_raw = pd.read_csv('listings.csv', index_col = "id") _listings = _listings_raw.copy() # - # # step 2 : three questions # ### we answering following questions: # + #What Features are affecting the price most? name the features that affect the price most. #Can we predict the price of a listing in Boston AirBnB? 
#What is the highest proportion in each cancellation_policy category ?
#What is the highest proportion in each review_scores_rating category ?
# -

# # step 3: Prepare data

# ### Exploring dataset

# +
# check of dataset shape
print(_listings.shape)
# -

# The Boston Airbnb listings dataset has 3585 rows and 94 columns.

#check dataset info
print(_listings.info())

# Some columns have very few non-null values,
# There are columns that are not useful,
# There are columns that are of type object.

# print dataset
_listings

# ### Data cleaning and transformations

# +
def clean_listing_dataset(df=None):
    """Clean an Airbnb listings frame in place and print the result's info.

    Steps:
      1. drop columns that are not useful for price prediction;
      2. drop columns with a very high percentage of NA values;
      3. convert price/rate string columns to numeric types and fill NAs.

    Parameters
    ----------
    df : pandas.DataFrame, optional
        Frame to clean; defaults to the module-level ``_listings`` so the
        original no-argument call keeps working. The frame is modified
        in place.
    """
    if df is None:
        df = _listings

    # Columns that carry no predictive value for price.
    # FIX: the original list contained 'picture_url' twice; the duplicate is
    # harmless to drop() but noisy, so it is removed here.
    _cols_to_drop = ['listing_url', 'thumbnail_url', 'medium_url', 'picture_url',
                     'xl_picture_url', 'host_url', 'host_thumbnail_url',
                     'host_picture_url', 'city', 'state', 'country',
                     'country_code', 'market', 'requires_license',
                     'experiences_offered', 'first_review', 'last_review',
                     'calendar_last_scraped', 'calendar_updated', 'scrape_id',
                     'last_scraped', 'space', 'host_neighbourhood',
                     'neighborhood_overview', 'host_listings_count', 'zipcode',
                     'is_location_exact', 'host_location',
                     'host_total_listings_count', 'neighbourhood',
                     'smart_location', 'host_id']
    df.drop(_cols_to_drop, axis=1, inplace=True)

    # Drop columns where fewer than 35% of the rows have a value.
    _thresh = len(df) * .35
    df.dropna(axis=1, thresh=_thresh, inplace=True)

    # Remove $ and comma from price cols, convert to float and fill NAs with
    # the column median.
    for _price_fe in ["cleaning_fee", "security_deposit", "price", "extra_people"]:
        # na_action='ignore' skips missing values so .replace never sees NaN.
        df[_price_fe] = df[_price_fe].map(
            lambda p: p.replace('$', '').replace(',', ''), na_action='ignore')
        df[_price_fe] = df[_price_fe].astype(float)
        # FIX: assign the result instead of the original chained
        # `df[col].fillna(..., inplace=True)`, which is unreliable chained
        # assignment and deprecated in pandas 2.x.
        df[_price_fe] = df[_price_fe].fillna(df[_price_fe].median())

    # Remove % from rate cols and convert to int.
    for _rate in ['host_response_rate', 'host_acceptance_rate']:
        df[_rate] = df[_rate].map(lambda r: r.replace('%', ''), na_action='ignore')
        # FIX: `fillna(method="backfill")` is deprecated; `.bfill()` is the
        # documented equivalent.
        df[_rate] = df[_rate].bfill()
        df[_rate] = df[_rate].astype(int)

    df['host_response_time'] = df['host_response_time'].bfill()

    # Drop rows missing any of bathrooms/bedrooms/beds: they are few, and
    # imputing them would add bias.
    df.dropna(axis=0, subset=["bathrooms", "bedrooms", "beds"], inplace=True)

    # Whole-number room counts make more sense.
    for _room in ["bathrooms", "bedrooms", "beds"]:
        df[_room] = df[_room].astype(int)

    # Review-score NAs are filled with the column mean.
    for _review in ["review_scores_rating", "review_scores_accuracy",
                    "review_scores_cleanliness", "review_scores_checkin",
                    "review_scores_communication", "review_scores_location",
                    "review_scores_value", "reviews_per_month"]:
        df[_review] = df[_review].fillna(df[_review].mean())

    print('data set after cleaning ::')
    print(df.info())


clean_listing_dataset()
# -

# drop columns that are not useful, drop columns having fewer values, fill na
# values and convert some object type columns to numeric columns.
# ### Numerical features analysis # # + # visualize numerical of dataset plt.figure(figsize=(10,8)) plt.title('Price distribution') sns.histplot(_listings['price'], kde=True, fill=True) plt.show() print(_listings['price'].describe()) # - # price is distributed # ### remove outliers # + #check outliers and remove _listings_new = _listings[(_listings['price'] > 20) & (_listings['price'] < 500)] fig, axs = plt.subplots(1, 2, figsize=(24, 8), dpi=80) axs[0].set_title('Price distribution') axs[1].set_title('Log base 2 Price distribution') sns.histplot(_listings_new['price'], kde=True, ax=axs[0]) sns.histplot(_listings_new['price'].astype(int), kde=True, log_scale=2, ax=axs[1]) plt.show() print(_listings['price'].describe()) # - # ### Correlation Matrix of all Numerical columns #corrlation and visualize dataset _corr = _listings_new.select_dtypes(include=['int64', 'float64']).corr() _mask = np.zeros_like(_corr) _mask[np.triu_indices_from(_mask)] = True plt.figure(figsize=(24,12)) plt.title('Heatmap of corr of features') sns.heatmap(_corr, mask = _mask, vmax=.3, square=True, annot=True, fmt='.2f', cmap='coolwarm') plt.show() # We can see that cleaning_fee, guests_included, security_deposit, beds, bedrooms, bathrooms, accommodates, longitude, latitude, etc are having a strong relationship with price. We must select these columns for our model. # Surprisingly the number of reviews and reviews per month has a negative relationship with price. We can omit such features. 
# ### Categorical Features Analysis #check type of dataset _listings_new.select_dtypes(include=['object']).info() # print info of new value after clean _listings_new.info() # # Data Modeling # ### Data preparation for model # + # select numeric cols _num_cols = ['price', 'latitude','longitude', 'accommodates', 'bedrooms', 'bathrooms', 'beds', 'security_deposit', 'cleaning_fee', 'guests_included', 'availability_30', 'availability_60', 'availability_90', 'availability_365', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_location', 'review_scores_value', 'calculated_host_listings_count'] _numeric = _listings_new.select_dtypes(include=['int32','int64' ,'float64'])[_num_cols] print(_numeric.info()) # transform categorical columns into numeric and prepare new data frame _cat_cols = ['host_response_time', 'host_is_superhost', 'room_type', 'bed_type', 'neighbourhood_cleansed', 'cancellation_policy', 'property_type', 'host_identity_verified', 'instant_bookable', 'host_has_profile_pic', 'require_guest_profile_picture', 'require_guest_phone_verification'] _numeric[_cat_cols] = _listings_new[_cat_cols] _num_copy = _numeric.copy() _num_copy = _num_copy.replace({ "host_is_superhost": {"t": 1, "f": 2}, "instant_bookable": {"t": 1, "f": 2}, "host_identity_verified": {"t": 1, "f": 2}, "require_guest_profile_picture": {"t": 1, "f": 2}, "room_type": {"Entire home/apt": 1, "Private room": 2, "Shared room": 3}, "host_has_profile_pic": {"t": 1, "f": 2}, "bed_type": {"Real Bed": 1, "Futon": 2, "Airbed": 3, "Pull-out Sofa": 4, "Couch": 5}, "require_guest_phone_verification": {"t": 1, "f": 2}, "cancellation_policy": {"moderate": 1, "flexible": 2, "strict": 3, "super_strict_30": 4}}) _dummies = pd.get_dummies(_num_copy) print(_dummies.info()) # - # Prepare final dataset containing selected numerical and categorical features. I will first select numerical columns. 
# Next, transform the categorical columns into numerical/binary vectors;
# get_dummies converts categorical columns into binary vector representations.
#
# to extract input (X) and output (y) features to train and evaluate a model.

# # Data Modeling

# +
# model to predict pricing: the target is log2(price), matching the
# "Log base 2 Price distribution" plot above.
y = np.log2(_dummies['price'].astype(int))
X = _dummies.drop('price', axis =1 )

# split test and train dataset from X and y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=45)


def evaluate_model(_model, x_train, y_train, x_test, y_test):
    """Fit ``_model``, plot predicted vs actual distributions, print test MAE.

    Note: the target is log2(price), so the reported mean absolute error is
    on the log2 scale, not in dollars.
    """
    _model.fit(x_train, y_train)
    _prediction = _model.predict(x_test)
    _mean_abs_err = mean_absolute_error(y_test, _prediction)

    fig, axs = plt.subplots(1, 2, figsize=(24, 8), dpi=80)
    axs[0].set_title('Distribution of predicted vs actual values')
    ax1 = sns.kdeplot(data=y_test, color="g", label='Actual values', ax=axs[0])
    ax2 = sns.kdeplot(data=_prediction, color="b", label='Predicted values', ax=ax1)
    # No explicit axes: regplot draws on the current axes (the most recently
    # created subplot).
    sns.regplot(x=y_test, y=_prediction)
    plt.title('Distribution of predicted vs actual values')
    plt.xlabel('Price')
    plt.legend()
    plt.show()
    plt.close()
    print("Mean absolute error of {0}: {1}".format(_model, _mean_abs_err))


# Create instance of Random Forest Regressor and evaluate model
_model_rf = RandomForestRegressor(n_estimators=76, random_state=47)
evaluate_model(_model_rf, X_train, y_train, X_test, y_test)

# Create instance of Linear Regressor and evaluate the same
_model_lr = LinearRegression()
evaluate_model(_model_lr, X_train, y_train, X_test, y_test)
# -

# # Evaluate the Result

# ### What Features are affecting the price most? name the features that
# affect the price most.
# # # Answer: Based on the study we can see that the following features are affecting the price: # # Selected Numerical Features: # # price, latitude,longitude, accommodates, bedrooms, bathrooms, beds, security_deposit, cleaning_fee, guests_included, availability_30, availability_60, availability_90, availability_365, review_score_rating, review_scores_accuracy, review_scores_cleanliness, review_scores_location, review-scores_value, calculated_host_listings_count. # # Selected Categorical Features: # # host_response_time, host_is_superhost, room_type, bed_type, neighbourhood_cleansed, cancellation_policy, property_type, host_identity_verified, instant_bookable, host_has_profile_pic, require_guest_profile_picture, require_guest_phone_verification. # ### Can we predict the price of a listing in Boston AirBnB? # # # Answer: RandomForestRegressor did a better job as compare to LinearRegressor. The absolute mean error for RFR is 0.31 whereas 0.35 for LR. # # # ### What is the highest proportion in each cancellation_policy category ? status_vals = _listings.cancellation_policy.value_counts() status_vals # + status_vals = _listings.cancellation_policy.value_counts()#Provide a pandas series of the counts for each Professional status # The below should be a bar chart of the proportion of individuals in each professional category if your status_vals # is set up correctly. (status_vals/_listings.shape[0]).plot(kind="bar"); plt.title("cancellation_policy"); # - # # Answer: Based on the graph we can see the highest proportion in each cancellation_policy is:strict # # ### What is the highest proportion in each review_scores_rating category ? review_vals = _listings.review_scores_rating.value_counts() review_vals # + review_vals = _listings.review_scores_rating.value_counts()#Provide a pandas series of the counts for each Professional status # The below should be a bar chart of the proportion of individuals in each professional category if your status_vals # is set up correctly. 
(review_vals/_listings.shape[0]).plot(kind="bar"); plt.title("review_scores_rating"); # - # # # Answer:Based on the graph we can see the highest proportion in each review_scores_rating is:91.95
project 1_DS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: classification # language: python # name: classification # --- # # Support Vector Machines # <img src='img/svm.png'> # # [Image Source](https://towardsdatascience.com/support-vector-machine-vs-logistic-regression-94cc2975433f) # + import pickle as pkl with open('../data/titanic_tansformed.pkl', 'rb') as f: df_data = pkl.load(f) # - df_data.head() df_data.shape data = df_data.drop("Survived",axis=1) label = df_data["Survived"] from sklearn.model_selection import train_test_split data_train, data_test, label_train, label_test = train_test_split(data, label, test_size = 0.2, random_state = 101) # + from sklearn.svm import SVC import time # Run Logistic Regression tic = time.time() svm_cla = SVC(kernel='linear') svm_cla.fit(data_train, label_train) print('Time taken for training SVM ', (time.time()-tic), 'secs') predictions = svm_cla.predict(data_test) print('Accuracy', svm_cla.score(data_test, label_test)) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(label_test, predictions)) print(classification_report(label_test, predictions)) # - # ### SVM Parameters print(svm_cla.coef_) # ## Hyper parameters # ### a. Changing kernel # + from sklearn.svm import SVC import time # Run Logistic Regression tic = time.time() svm_cla = SVC(kernel='poly', degree=3) svm_cla.fit(data_train, label_train) print('Time taken for training SVM ', (time.time()-tic), 'secs') predictions = svm_cla.predict(data_test) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(label_test, predictions)) print(classification_report(label_test, predictions)) # - # ### b. C value # <img src='img/svm_hyperparameter.gif'>
classification/notebooks/.ipynb_checkpoints/07 - SVM-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Science Projects # # This document is there to help you structure your data science project. The flow of this guide will help you craft a data science project quickly and pay attention to some of the key elements of problem solving. # # When you are working on a data science project, these are some of the key parts that you need to work on: # # - Problem Statement # - Data Acquisition # - Data Dictionary # - Feature extraction # - Data Cleaning # - EDA and Data Visualization # - Deriving Key insights from EDA # - Model building # - Evaluation # - Deriving Key Insights from model # - Exporting the model # # ## Step 1: Problem statement # # You need to clearly define the problem that you are solving. Typically when you are working with simple datasets like the UCI machine learning repository dataset then your problem statement is decided by your dataset. However the entire process of problem definition involves translating business goals to analysis goals. # # Ideally you should choose a specific domain, choose a problem to solve then find data to solve the problem. In most cases exact data may not be available, then you can look at various data sources and combine data to get the required dataset. In practice however, you may have decided on what problem to solve on the basis of what data is available and accessible to you. So you may have to go in the opposite direction where you need to decide on a dataset and then decide what questions you can ask of it and then attack the problem. # # Either way, keep in mind that in the process you need to learn about different domains. # # **--provide examples here--** # # - [ ] Clearly state your data source. # - [ ] What type of data do you have? # - Structured/Unstructured. 
# - [ ] What are you predicting? # - [ ] What are your features? # - [ ] What is your target? # - [ ] What type of problem is it? # - [ ] Supervised/Unsupervised? # - [ ] Classification/Regression? # - [ ] If you have to combine features to define the target, discuss that here. # - [ ] Do you need to combine multiple data sources? # ## Step 2: Data Acquisition # ## Step 3: Data Dictionary # ## Step 4: Feature Extraction # # - If you have unstructured data then in this step you need to extract features from the data to generate a dataset # ## Step 5: Data cleaning # # Some points to keep in mind: # # - [ ] Find missing values. # - [ ] Find NaN and 0 values. # - [ ] Do all columns have the same dtypes? # - [ ] Convert dates to datetime types. # - [ ] You can use the python package arrow or datetime. # - [ ] Convert categorical variables to type 'category' if working with pandas. # - [ ] Convert strings to ints or floats if they represent numbers. # - [ ] Standardize strings # - [ ] Convert them to lower case if possible. # - [ ] Replace spaces with underscores or dashes. # - [ ] Remove white spaces around the string **this is very critical**. # - [ ] Check for inconsistent spellings *typically done manually*. # - [ ] Look for duplicate rows or columns. # - [ ] Look for preprocessed columns; example: A categorical column that has been duplicated # with categorical labels. # # A list of data cleaning libraries: https://mode.com/blog/python-data-cleaning-libraries/ # # ## Step 6: Data preparation # # - [ ] Convert categorical features to dummy indices if you are doing regression or assign numerical labels if you are doing classification # - [ ] Do test train split to generate a test set. Further do a train validation split, you will need to run the test train split function from sklearn twice for this purpose # # ## Step 7: Exploratory Data Analysis and Data Visualization # # There are multiple steps that you need to take here: # # - [ ] Identify outliers in the datasets. 
Keep track of them, we want to train the model with the outliers and without them to see their effect. # - [ ] Check for imbalance in the target variable. Quantify the imbalance. # - [ ] Pairplot if possible to check the relationship between all the features and the target. # - [ ] Look at the histogram for each variable, try to identify if you have a symmetric or normal distribution. # - [ ] If possible plot a QQ plot to check the normality of the data. If you want more information, refer to [this](https://refactored.ai/learn/normality-tests/24c311b1936a4037b29ef78d629f1320/). # - [ ] If it's a classification problem, run a chi-square test between each categorical feature and the target to check for correlation and run ANOVA between the continuous/discrete features and the target to check for correlations. # - [ ] If it's a regression problem get pearson correlations between the continuous features and target and run ANOVA between each categorical variable and target. # - Check for correlations between individual features; use similar approaches as you did with the target. # ## Step 8: Key Insights from EDA # ## Step 9: Model building # ## Step 10: Model evaluation # ## Step 11: Key Insights from Predictive analysis # ## Step 12: Exporting the model
{{ cookiecutter.repo_name }}/notebooks/Projects_instructions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Corrupt known signal with point spread
# ======================================
#
# The aim of this tutorial is to demonstrate how to put a known signal at a
# desired location(s) in a :class:`mne.SourceEstimate` and then corrupt the
# signal with point-spread by applying a forward and inverse solution.

# +
import os.path as op

import numpy as np

import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
# -

# First, we set some parameters.

# +
seed = 42

# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2  # regularization weight = 1 / SNR^2

# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100  # number of time samples over the 1-second window
times = np.linspace(0, 1, T)
dt = times[1] - times[0]

# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
                       'sample_audvis-ave.fif')
# -

# Load the MEG data
# -----------------

# +
fwd = mne.read_forward_solution(fname_fwd)
# Fixed (surface-normal) source orientations, matching the "fixed" inverse
# operator file loaded below.
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
                                   use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)

raw = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
                                  'sample_audvis_raw.fif'))
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()

labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [l.name for l in labels]
n_labels = len(labels)
# -

# Estimate the background noise covariance from the baseline period
# -----------------------------------------------------------------

cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)

# Generate sinusoids in two spatially distant labels
# --------------------------------------------------

# The known signal is all zero-s off of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)  # 5 cycles over 1 s
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)  # 7 cycles over 1 s

# Find the center vertices in source space of each label
# ------------------------------------------------------
#
# We want the known signal in each label to only be active at the center. We
# create a mask for each label that is 1 at the center vertex and 0 at all
# other vertices in the label. This mask is then used when simulating
# source-space data.

hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
    # The `center_of_mass` function needs labels to have values.
    labels[i].values.fill(1.)

    # Restrict the eligible vertices to be those on the surface under
    # consideration and within the label.
    surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
    restrict_verts = np.intersect1d(surf_vertices, label.vertices)
    com = labels[i].center_of_mass(subject='sample',
                                   subjects_dir=subjects_dir,
                                   restrict_vertices=restrict_verts,
                                   surf='white')

    # Convert the center of vertex index from surface vertex list to Label's
    # vertex list.
    cent_idx = np.where(label.vertices == com)[0][0]

    # Create a mask with 1 at center vertex and zeros elsewhere.
    labels[i].values.fill(0.)
    labels[i].values[cent_idx] = 1.

# Create source-space data with known signals
# -------------------------------------------
#
# Put known signals onto surface vertices using the array of signals and
# the label masks (stored in labels[i].values).

stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
                       value_fun=lambda x: x)

# Plot original signals
# ---------------------
#
# Note that the original signals are highly concentrated (point) sources.

kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
              time_unit='s', initial_time=0.05, size=1200,
              views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
brain_gen = stc_gen.plot(clim=clim, **kwargs)

# Simulate sensor-space signals
# -----------------------------
#
# Use the forward solution and add Gaussian noise to simulate sensor-space
# (evoked) data from the known source-space signals. The amount of noise is
# controlled by `nave` (higher values imply less noise).

# +
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
                             random_state=seed)

# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
# -

# Plot the point-spread of corrupted signal
# -----------------------------------------
#
# Notice that after applying the forward- and inverse-operators to the known
# point sources that the point sources have spread across the source-space.
# This spread is due to the minimum norm solution so that the signal leaks to
# nearby vertices with similar orientations so that signal ends up crossing the
# sulci and gyri.

brain_inv = stc_inv.plot(**kwargs)

# Exercises
# ---------
# - Change the `method` parameter to either `dSPM` or `MNE` to explore the
#   effect of the inverse method.
# - Try setting `evoked_snr` to a small, finite value, e.g. 3., to see the
#   effect of noise.
dev/_downloads/804ea48504b27f5f04fd03d517675af5/plot_point_spread.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Open Quantum Dynamics with QuTiP
# ================================
#
# Use QuTiP's solvers to study the open dynamics of a quantum system:
# - Lindblad master equation (time evolution and steady state)
# - Liouvillian structure and spectrum
# - Stochastic dynamics: quantum trajectories with Monte Carlo
#
# NOTE(review): reconstructed from a whitespace-collapsed notebook dump;
# all statements, literals and call arguments preserved, only line
# structure and indentation restored.

import numpy as np
import matplotlib.pyplot as plt
from qutip import *

# Define the operators and Hamiltonian for the spin-boson (cQED) model:
#   H = wc a†a + (w0/2) sz + (g/2) sx (a + a†) + (wx/2) sx

# spins
sx_reduced = sigmax()
sy_reduced = sigmay()
sz_reduced = sigmaz()
sp_reduced = sigmap()
sm_reduced = sigmam()

# photons
nph = 4
a_reduced = destroy(nph)

# tensor space
sz = tensor(sz_reduced, qeye(nph))
sx = tensor(sx_reduced, qeye(nph))
sm = tensor(sm_reduced, qeye(nph))
sp = sm.dag()
a = tensor(qeye(2), a_reduced)

# hamiltonians
wc = 1
w0 = 0.5 * wc
g = 0.1 * w0
wx = 0.2 * w0
Hcav = wc * a.dag() * a
Hspin = w0 * sz  # + wx*sx
Hint = g * sx * (a + a.dag())
HintCheck = g * tensor(sx_reduced, a_reduced + a_reduced.dag())
H = Hcav + Hspin + Hint
# sanity check: interaction built in the tensor space equals the one built
# directly from the reduced operators
np.testing.assert_(Hint == HintCheck)

# Define the initial state: rho = rho_spin (x) rho_phot (pure product state)
psi0_spin = basis(2, 0)
psi0_phot = basis(nph, nph - int(nph / 2))
psi0 = tensor(psi0_spin, psi0_phot)
rho0 = ket2dm(psi0)

# times at which to calculate the variables
tlist = np.linspace(0, 50, 2000)

# Lindblad master equation: mesolve
# d/dt rho = -i[H, rho] + gamma D[sm] rho + kappa D[a] rho
kappa = 0.3
gamma = 0.3
my_options = Options(average_states=True, store_states=True)
results = mesolve(H, psi0, tlist,
                  c_ops=[np.sqrt(kappa) * a, np.sqrt(gamma) * sz],
                  e_ops=[a.dag() * a, sz],
                  options=my_options,
                  progress_bar=True)

# store time evoluted variables
nph_t = results.expect[0]
sz_t = results.expect[1]
rho_t = results.states

fs = 20
plt.figure(figsize=(14, 4))
plt.subplot(121)
plt.plot(tlist, sz_t)
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle \sigma_z \rangle$", fontsize=fs)
plt.subplot(122)
plt.plot(tlist, nph_t / nph / 0.5)
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle a^\dagger a \rangle$", fontsize=fs)
plt.suptitle("Time evolution for spin and photonic excitations of a driven cavity with single two level system, with dissipation")
plt.show()
plt.close()

# Steady state solver: steadystate
rhoss = steadystate(H, [np.sqrt(kappa) * a, np.sqrt(gamma) * sz])
nph_ss = expect(a.dag() * a, rhoss)
sz_ss = expect(sz, rhoss)

plt.figure()
plt.imshow(abs(rhoss.full()))
plt.show()
plt.close()

plt.figure()
plt.subplot(121)
plt.imshow(abs(ptrace(rhoss, 0).full()))
plt.xlabel("atom", fontsize=fs)
plt.subplot(122)
plt.imshow(abs(ptrace(rhoss, 1).full()))
plt.xlabel("photon", fontsize=fs)
plt.show()
plt.close()

# Liouvillian structure: thermodynamical properties of the out-of-equilibrium
# system can be derived from the spectrum of the Liouvillian.
L = liouvillian(H, [np.sqrt(kappa) * a, np.sqrt(gamma) * sz])

# represent the Liouvillian
plt.figure(figsize=(14, 6))
plt.subplot(121)
plt.imshow(np.real(L.full()))
plt.title("Real values of the Liouvillian", fontsize=fs)
plt.subplot(122)
plt.imshow(np.imag(L.full()))
plt.title("Imaginary values of the Liouvillian", fontsize=fs)
plt.show()
plt.close()

# Liouvillian spectrum: eigenstates
L = liouvillian(H, [np.sqrt(kappa) * a, np.sqrt(gamma) * sz])

# Plot the Liouvillian spectrum in the complex plane
eigenvalues_L, eigenvectors_L = L.eigenstates()
real_eigenvalues = np.real(eigenvalues_L)
imag_eigenvalues = np.imag(eigenvalues_L)
plt.figure(figsize=(10, 10))
plt.plot(real_eigenvalues, imag_eigenvalues, "o")
plt.title("Real values of the Liouvillian", fontsize=fs)
plt.ylabel("Im($\lambda_i$)", fontsize=fs)
plt.xlabel("Re($\lambda_i$)", fontsize=fs)
plt.show()
plt.close()

# Decipher the spectrum: symmetries with respect to the real/imaginary axes
plt.figure(figsize=(10, 10))
plt.plot(real_eigenvalues, imag_eigenvalues, "o")
plt.title("Real values of the Liouvillian")
plt.axvline(x=0, color="red")
plt.axhline(y=0, color="black", linestyle="dashed")
plt.ylabel("Im($\lambda_i$)", fontsize=fs)
plt.xlabel("Re($\lambda_i$)", fontsize=fs)
plt.show()
plt.close()

# Zoom on the slowest-decaying eigenvalues (lambda_0, lambda_1, lambda_2)
plt.figure(figsize=(6, 6))
plt.plot(real_eigenvalues[-1], imag_eigenvalues[-1], "o", markersize=20, label="$\lambda_0$")
plt.plot(real_eigenvalues[-2], imag_eigenvalues[-2], "*", markersize=20, label="$\lambda_1$")
plt.plot(real_eigenvalues[-3], imag_eigenvalues[-3], "s", markersize=20, label="$\lambda_2$")
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.legend(fontsize=20)
plt.ylabel("Im($\lambda_i$)", fontsize=fs)
plt.xlabel("Re($\lambda_i$)", fontsize=fs)
plt.show()
plt.close()

# Photon counting statistics: mcsolve
# The default options for mcsolve imply solving the dynamics for 500
# trajectories; control it with Options(ntraj=...).

# set dynamics options
my_options = Options(average_states=False, store_states=True)
# solve dynamics
results_mc = mcsolve(H, psi0, tlist,
                     c_ops=[np.sqrt(kappa) * a, np.sqrt(gamma) * sz],
                     e_ops=[a.dag() * a, sz],
                     options=my_options,
                     progress_bar=True)

# store time evoluted variables
nph_mc_t = results_mc.expect[0]
sz_mc_t = results_mc.expect[1]

rho_mc_t = results_mc.states
len(rho_mc_t)

# help(expect)
sz_stoch_t = []
nph_stoch_t = []
for i in range(len(rho_mc_t)):
    sz_stoch_t.append(expect(sz, rho_mc_t[i]))
    nph_stoch_t.append(expect(a.dag() * a, rho_mc_t[i]))

plt.figure(figsize=(14, 4))
plt.subplot(121)
plt.plot(tlist, sz_t, label="me solve")
plt.plot(tlist, sz_mc_t, label="mc solve (500 trajectories)")
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle \sigma_z \rangle$", fontsize=fs)
plt.legend(fontsize=15)
plt.subplot(122)
plt.plot(tlist, nph_t / nph / 0.5, label="me solve")
plt.plot(tlist, nph_mc_t / nph / 0.5, label="mc solve (500 trajectories)")
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle a^\dagger a \rangle$", fontsize=fs)
plt.legend(fontsize=15)
plt.show()
plt.close()

# Lindblad master equation as the limit of the stochastic evolution:
# mesolve vs mcsolve with increasing trajectory counts.

# 50 trajectories
my_options50 = Options(ntraj=50)
results_mc50 = mcsolve(H, psi0, tlist,
                       c_ops=[np.sqrt(kappa) * a, np.sqrt(gamma) * sz],
                       e_ops=[a.dag() * a, sz],
                       options=my_options50,
                       progress_bar=True)
nph_mc_t50 = results_mc50.expect[0]
sz_mc_t50 = results_mc50.expect[1]

# 100 trajectories
my_options100 = Options(ntraj=100)
results_mc100 = mcsolve(H, psi0, tlist,
                        c_ops=[np.sqrt(kappa) * a, np.sqrt(gamma) * sz],
                        e_ops=[a.dag() * a, sz],
                        options=my_options100,
                        progress_bar=True)
nph_mc_t100 = results_mc100.expect[0]
sz_mc_t100 = results_mc100.expect[1]

# 200 trajectories
my_options200 = Options(ntraj=200)
results_mc200 = mcsolve(H, psi0, tlist,
                        c_ops=[np.sqrt(kappa) * a, np.sqrt(gamma) * sz],
                        e_ops=[a.dag() * a, sz],
                        options=my_options200,
                        progress_bar=True)
nph_mc_t200 = results_mc200.expect[0]
sz_mc_t200 = results_mc200.expect[1]

plt.figure(figsize=(16, 6))

# spin excitation
plt.subplot(121)
plt.plot(tlist, sz_mc_t50, label="mc solve (50 trajectories)")
plt.plot(tlist, sz_mc_t100, label="mc solve (100 trajectories)")
plt.plot(tlist, sz_mc_t200, label="mc solve (200 trajectories)")
plt.plot(tlist, sz_mc_t, label="mc solve (500 trajectories)")
plt.plot(tlist, sz_t, label="me solve")
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle \sigma_z \rangle$", fontsize=fs)
plt.legend(fontsize=15)

# photonic excitation
plt.subplot(122)
plt.plot(tlist, nph_mc_t50 / nph / 0.5, label="mc solve (50 trajectories)")
plt.plot(tlist, nph_mc_t100 / nph / 0.5, label="mc solve (100 trajectories)")
plt.plot(tlist, nph_mc_t200 / nph / 0.5, label="mc solve (200 trajectories)")
plt.plot(tlist, nph_mc_t / nph / 0.5, label="mc solve (500 trajectories)")
plt.plot(tlist, nph_t / nph / 0.5, label="me solve")
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle a^\dagger a \rangle$", fontsize=fs)
plt.legend(fontsize=15)
plt.show()
plt.close()

# As the number of trajectories increases, the trajectory average converges
# to the master-equation value.
plt.figure(figsize=(12, 4))
plt.subplot(121)
plt.plot(tlist, sz_t)
plt.plot(tlist, sz_mc_t)
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle s_z \rangle$", fontsize=fs)
plt.subplot(122)
plt.plot(tlist, nph_t / nph / 0.5)
plt.plot(tlist, nph_mc_t / nph / 0.5)
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle n_{ph} \rangle$", fontsize=fs)
plt.show()
plt.close()

# Plotting single quantum trajectories
# With Options(average_states=False, store_states=True) we can inspect
# individual trajectories; with a phase transition, single trajectories jump
# between degenerate steady-state choices.
plt.figure(figsize=(14, 5))
plt.subplot(121)
for i in range(len(sz_stoch_t)):
    plt.plot(tlist, sz_stoch_t[i], alpha=0.1)
plt.plot(tlist, sz_t, "b", linewidth=3)
plt.plot(tlist, sz_mc_t, "y", linewidth=2)
plt.plot(tlist, sz_stoch_t[4] / nph / 0.5, "g", linewidth=5)
plt.plot(tlist, sz_stoch_t[6] / nph / 0.5, "r", linewidth=5)
plt.subplot(122)
for i in range(len(sz_stoch_t)):
    plt.plot(tlist, nph_stoch_t[i] / nph / 0.5, alpha=0.1)
plt.plot(tlist, nph_t / nph / 0.5, "b", linewidth=3)
plt.plot(tlist, nph_mc_t / nph / 0.5, "y", linewidth=2)
plt.plot(tlist, nph_stoch_t[4] / nph / 0.5, "g", linewidth=5)
plt.plot(tlist, nph_stoch_t[6] / nph / 0.5, "r", linewidth=5)
plt.xlabel(r"$t$", fontsize=fs)
plt.ylabel(r"$\langle X \rangle$", fontsize=fs)
plt.show()
plt.close()

# NOTE(review): `qutip` is only star-imported above; `qutip.about()` relies
# on the module name being bound (works in some environments via sys.modules
# side effects) — confirm, or use `about()` directly.
qutip.about()
sqd-run.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Histogram of synapse counts per connection for the grc->pc synapse
# database (gen_201224 setup01), plotted both as probability and raw count.
# NOTE(review): reconstructed from a whitespace-collapsed notebook dump;
# statements and literals preserved, only line structure restored.

import collections
from collections import defaultdict
import sys
import json
import random
from jsmin import jsmin
from io import StringIO
import numpy as np
import copy
import importlib
from functools import partial
import math
import os

# script_n = os.path.basename(__file__).split('.')[0]
script_n = 'multi_syn_201224'

sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
import my_plot
importlib.reload(my_plot)
from my_plot import MyPlotData
from weight_database import WeightDatabase

weightdb = WeightDatabase()
weightdb.load_syn_db('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/gen_db/grc_axons/gen_201224_setup01_syndb_threshold_10_coalesced_filtered_10.gz')

mpd = MyPlotData()
mpd_raw = MyPlotData()
hist = defaultdict(int)
weights_db = weightdb.get_weights()
# Tally synapse counts for every (neuron, pc) connection.
for neuron, pc_weights in weights_db.items():
    # print(n)
    for pc, weights in pc_weights.items():
        mpd_raw.add_data_point(
            num_syns=len(weights))
        hist[len(weights)] += 1

# print(hist)
for k in sorted([k for k in hist.keys()]):
    print(f'{k}: {hist[k]}')
    mpd.add_data_point(
        count=hist[k],
        num_syns=k)

# mpd = mpd.to_pdf('count', cumulative=False)
mpd_cdf = mpd.to_pdf('count', cumulative=False)

# Frequency (probability) histogram of synapse count per connection.
importlib.reload(my_plot)
my_plot.my_displot(
    mpd_raw,
    x="num_syns",
    kind='hist',
    stat='probability',
    discrete=True,
    context='paper',
    height=4,
    y_axis_label='Frequency',
    x_axis_label='Synapse count per Connection',
    show=True,
    save_filename=f'{script_n}_hist.svg',
)

# Same data as raw counts.
importlib.reload(my_plot)
my_plot.my_displot(
    mpd_raw,
    x="num_syns",
    kind='hist',
    stat='count',
    discrete=True,
    context='paper',
    height=4,
    y_axis_label='Count',
    x_axis_label='Synapse count per Connection',
    show=True,
    save_filename=f'{script_n}_count.svg',
)
analysis/grc_pc/multi_syn_201224.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Run relevance backout here # + import pickle import re import os import random import numpy as np import torch from random import shuffle import argparse import pickle import collections import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import sys sys.path.append("..") from torch.utils.data import DataLoader, TensorDataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, SequentialSampler from tqdm import tqdm, trange from util.optimization import BERTAdam from util.processor import * from util.tokenization import * from util.evaluation import * from util.train_helper import * import logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) from sklearn.metrics import classification_report # this imports most of the helpers needed to eval the model from run_classifier import * sys.path.append("..") import operator import matplotlib.pyplot as plt import seaborn as sns; sns.set() RETRAIN = False vocab_data_dir = "../../models/Transformer/vocab.txt" # - # #### Set-ups # + # Note that this notebook only supports single GPU evaluation # which is sufficient for most of tasks by using lower batch size. IS_CUDA = False if IS_CUDA: CUDA_DEVICE = "cuda:0" device = torch.device(CUDA_DEVICE) n_gpu = torch.cuda.device_count() logger.info("device %s in total n_gpu %d distributed training", device, n_gpu) else: # bad luck, we are on CPU now! 
logger.info("gpu is out of the picture, let us use CPU") device = torch.device("cpu") def inverse_mapping(vocab_dict): inverse_vocab_dict = {} for k, v in vocab_dict.items(): inverse_vocab_dict[v] = k return inverse_vocab_dict def translate(token_ids, vocab): tokens = [] for _id in token_ids.tolist(): tokens.append(vocab[_id]) return tokens def heatmap_viz(token_grad, vmin=0, cmap="Blues"): scores = [tu[1] for tu in token_grad] tokens = [tu[0] for tu in token_grad] fig, ax = plt.subplots(figsize=(10,1)) ax = sns.heatmap([scores], cmap=cmap, xticklabels=tokens, yticklabels=False, cbar_kws=dict(shrink=1, aspect=4, ), linewidths=0.8) ax.set_xticklabels(tokens, size = 18) cbar = ax.collections[0].colorbar # here set the labelsize by 20 cbar.ax.tick_params(labelsize=20) plt.show() def evaluate_with_hooks(test_dataloader, model, device, label_list): # we did not exclude gradients, for attribution methods model.eval() # this line will deactivate dropouts test_loss, test_accuracy = 0, 0 nb_test_steps, nb_test_examples = 0, 0 pred_logits = [] actual = [] gs_scores = [] gi_scores = [] lrp_scores = [] lat_scores = [] inputs_ids = [] seqs_lens = [] # we don't need gradient in this case. 
for _, batch in enumerate(tqdm(test_dataloader, desc="Iteration")): input_ids, input_mask, label_ids, seq_lens = batch # truncate to save space and computing resource max_seq_lens = max(seq_lens)[0] input_ids = input_ids[:,:max_seq_lens] input_mask = input_mask[:,:max_seq_lens] input_ids = input_ids.to(device) input_mask = input_mask.to(device) label_ids = label_ids.to(device) seq_lens = seq_lens.to(device) # intentially with gradient tmp_test_loss, logits_raw, ctx_attn = \ model(input_ids, input_mask, seq_lens, labels=label_ids) logits_t = F.softmax(logits_raw, dim=-1) logits = logits_t.detach().cpu().numpy() pred_logits.append(logits) label_ids = label_ids.to('cpu').numpy() actual.append(label_ids) outputs = np.argmax(logits, axis=1) tmp_test_accuracy=np.sum(outputs == label_ids) sensitivity_class = len(label_list) - 1 # GS gs_score = torch.zeros(logits_t.shape) gs_score[:, sensitivity_class] = 1.0 gs_score = logits_raw*gs_score gs_score = model.backward_gradient(gs_score) gs_score = torch.norm(gs_score, dim=-1)*torch.norm(gs_score, dim=-1) gs_scores.append(gs_score) # GI # gi_score = torch.zeros(logits_t.shape) # gi_score[:, sensitivity_class] = 1.0 # gi_score = logits_raw*gi_score # gi_score = model.backward_gradient_input(gi_score) # gi_score = torch.norm(gi_score, dim=-1)*torch.norm(gi_score, dim=-1) # gi_scores.append(gi_score) # lrp # Rout_mask = torch.zeros((input_ids.shape[0], len(label_list))).to(device) # Rout_mask[:, sensitivity_class] = 1.0 # relevance_score = logits_raw*Rout_mask # lrp_score = model.backward_lrp(relevance_score) # lrp_score = lrp_score.cpu().detach().data # lrp_score = torch.abs(lrp_score).sum(dim=-1) # lrp_scores.append(lrp_score) # lat attention_scores = model.backward_lat(input_ids, ctx_attn) lat_scores.append(attention_scores.sum(dim=-1)) # other meta-data input_ids = input_ids.cpu().data seq_lens = seq_lens.cpu().data inputs_ids.append(input_ids) seqs_lens.append(seq_lens) test_loss += tmp_test_loss.mean().item() test_accuracy 
+= tmp_test_accuracy nb_test_examples += input_ids.size(0) nb_test_steps += 1 test_loss = test_loss / nb_test_steps test_accuracy = test_accuracy / nb_test_examples result = collections.OrderedDict() result = {'test_loss': test_loss, str(len(label_list))+ '-class test_accuracy': test_accuracy} logger.info("***** Eval results *****") for key in result.keys(): logger.info(" %s = %s\n", key, str(result[key])) # get predictions needed for evaluation pred_logits = np.concatenate(pred_logits, axis=0) actual = np.concatenate(actual, axis=0) pred_label = np.argmax(pred_logits, axis=-1) attribution_scores_state_dict = dict() attribution_scores_state_dict["inputs_ids"] = inputs_ids attribution_scores_state_dict["seqs_lens"] = seqs_lens attribution_scores_state_dict["gs_scores"] = gs_scores attribution_scores_state_dict["gi_scores"] = None # TODO: enable this for transformer as well! attribution_scores_state_dict["lrp_scores"] = None # TODO: enable this for transformer as well! attribution_scores_state_dict["lat_scores"] = lat_scores logger.info("***** Finish Attribution Backouts *****") return attribution_scores_state_dict def analysis_task(task_name, device, sentence_limit=5000): """ We need to set a limit otherwise it takes too long! """ task_name = task_name model_type = "Transformer" TASK_NAME = task_name lrp_data_dir = "../../results" vocab_data_dir = "../../models/Transformer/vocab.txt" DATA_DIR = "../../datasets/" + TASK_NAME + "/" # "../../data/uncased_L-12_H-768_A-12/" is for the default BERT-base pretrain MODEL_ROOT_PATH = "../../models/Transformer/" MODEL_PATH = "../../results/" + TASK_NAME + "/best_checkpoint.bin" EVAL_BATCH_SIZE = 24 # you can tune this down depends on GPU you have. # This loads the task processor for you. 
processors = { "SST5": SST5_Processor, "SemEval" : SemEval_Processor, "IMDb" : IMDb_Processor, "Yelp5" : Yelp5_Processor } processor = processors[TASK_NAME]() label_list = processor.get_labels() model, tokenizer, optimizer = \ load_model_setups(vocab_file=MODEL_ROOT_PATH + "vocab.txt", bert_config_file=None, init_checkpoint=MODEL_PATH, label_list=label_list, num_train_steps=20, do_lower_case=True, # below is not required for eval learning_rate=2e-5, warmup_proportion=0.1, init_lrp=True, model_type=model_type) model = model.to(device) # send the model to device test_examples = processor.get_test_examples(DATA_DIR, sentence_limit=sentence_limit) test_features = \ convert_examples_to_features( test_examples, label_list, 128, tokenizer, model_type) all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long) all_seq_len = torch.tensor([[f.seq_len] for f in test_features], dtype=torch.long) test_data = TensorDataset(all_input_ids, all_input_mask, all_label_ids, all_seq_len) test_dataloader = DataLoader(test_data, batch_size=EVAL_BATCH_SIZE, shuffle=False) score_dict = evaluate_with_hooks(test_dataloader, model, device, label_list) return score_dict def load_attribution_scores(vocab_data_dir, inputs_ids, seqs_lens, raw_attribution_scores, min_freq=0, consider_speicial_tokens=False, normalized=True, min_length=0): vocab = inverse_mapping(load_vocab(vocab_data_dir, pretrain=False)) word_lrp = {} word_lrp_list = [] sentence_lrp = [] for batch_idx in range(len(inputs_ids)): for seq_idx in range(inputs_ids[batch_idx].shape[0]): seq_len = seqs_lens[batch_idx][seq_idx].tolist()[0] tokens = translate(inputs_ids[batch_idx][seq_idx], vocab)[:seq_len] attribution_scores = raw_attribution_scores[batch_idx][seq_idx][:seq_len] if normalized: # sentence_attribution_scores = 
F.softmax(torch.abs(attribution_scores), dim=-1).tolist() sentence_max = torch.max(torch.abs(attribution_scores), dim=-1)[0] sentence_attribution_scores = \ (torch.abs(attribution_scores)/sentence_max).tolist() else: sentence_attribution_scores = attribution_scores.tolist() if len(tokens) >= min_length: assert(len(tokens) == len(sentence_attribution_scores)) s_lrp = list(zip(tokens, sentence_attribution_scores)) sentence_lrp.append(s_lrp) for i in range(len(s_lrp)): token = s_lrp[i][0] score = s_lrp[i][1] word_lrp_list.append((token, score)) if token in word_lrp.keys(): word_lrp[token].append(score) else: word_lrp[token] = [score] filter_word_lrp = {} for k, v in word_lrp.items(): if len(v) > min_freq: filter_word_lrp[k] = sum(v)*1.0/len(v) filter_word_lrp = [(k, v) for k, v in filter_word_lrp.items()] filter_word_lrp.sort(key = lambda x: x[1], reverse=True) word_lrp_list.sort(key = lambda x: x[1], reverse=True) return filter_word_lrp, word_lrp_list, sentence_lrp def load_attribution_meta(vocab_data_dir, dataset_dict): attribution_meta = {} for item in ["gs_scores", "lat_scores"]: filtered_word_rank, raw_word_rank, sentence_revelance_score = \ load_attribution_scores(vocab_data_dir, dataset_dict["inputs_ids"], dataset_dict["seqs_lens"], dataset_dict[item]) attribution_meta[item] = {"filtered_word_rank": filtered_word_rank, "raw_word_rank": raw_word_rank, "sentence_revelance_score": sentence_revelance_score} return attribution_meta def plot_sentence_heatmaps(attribution_meta, n_sample=1): total_n = len(attribution_meta["gs_scores"]["sentence_revelance_score"]) random_n = random.randint(0, total_n) sentence_heatmap_dict = dict() for item in ["gs_scores", "lat_scores"]: sentence_heatmap_dict[item] = attribution_meta[item]["sentence_revelance_score"][random_n] heatmap_viz(sentence_heatmap_dict[item], vmin=0) def print_topk_words(attribution_meta, k=30, filtered=True): """ print top k words for a dataset """ from tabulate import tabulate words = [] words_neg = [] index 
= 0 for i in range(0, k): item_words = [] item_words_neg = [] for item in ["gs_scores", "lat_scores"]: word_rank = None if filtered: word_rank = attribution_meta[item]["filtered_word_rank"] else: word_rank = attribution_meta[item]["raw_word_rank"] item_words.append((word_rank[i][0], round(word_rank[i][1],5) ) ) item_words_neg.append(( word_rank[-(i+1)][0], round(word_rank[-(i+1)][1],5) )) words.append(item_words) words_neg.append(item_words_neg) # reversed ranking print(tabulate(words, headers=["gs_scores", "lat_scores"])) print("***") print(tabulate(words_neg, headers=["gs_scores", "lat_scores"])) # - sst5_dict = analysis_task("SST5", device, sentence_limit=2000) semeval_dict = analysis_task("SemEval", device, sentence_limit=2000) imdb_dict = analysis_task("IMDb", device, sentence_limit=2000) yelp5_dict = analysis_task("Yelp5", device, sentence_limit=2000) # ### Experiment 3.2.1 SST-5 Word Rank sst5_dict = analysis_task("SST5", device, sentence_limit=2000) sst5_attribution_meta = load_attribution_meta(vocab_data_dir, sst5_dict) print_topk_words(sst5_attribution_meta) plot_sentence_heatmaps(sst5_attribution_meta) # ### Exp. 3.2.2 Word deletion experiments # + from random import randrange def random_drop(input_ids_to_copy, seq_lens, k=1): input_ids = input_ids_to_copy.clone() for b in range(input_ids.shape[0]): if k > seq_lens[b][0]: input_ids[b] = 0. # zero out all of them else: zero_out_idx = random.sample(range(0, seq_lens[b][0]), k) for idx in zero_out_idx: input_ids[b][idx] = 0. return input_ids def topk_drop(input_ids_to_copy, scores, new_seq_lens, k=1): input_ids = input_ids_to_copy.clone() for b in range(input_ids.shape[0]): if k > new_seq_lens[b][0]: input_ids[b] = 0. else: _, zero_out_idx = torch.topk(scores[b][:new_seq_lens[b]], k, dim=-1) for idx in zero_out_idx: input_ids[b][idx] = 0. 
# (tail of a function whose definition starts above this chunk)
    return input_ids


def sentence_filter(seq_lens, min_len=0):
    """Return batch indices whose sequence length is at least ``min_len``.

    seq_lens is indexed as seq_lens[b][0], i.e. one length per batch row.
    """
    sel_idx = []
    for b in range(seq_lens.shape[0]):
        if seq_lens[b][0] >= min_len:
            sel_idx.append(b)
    return sel_idx


def evaluate_with_word_deletion(test_dataloader, model, device, label_list,
                                k=0, del_type="gi", original_correct=True,
                                min_len=10):
    """Evaluate accuracy while deleting up to ``k`` words per sentence.

    For each deletion budget 1..k, words are removed according to
    ``del_type`` ("random", "gs", "gi", "lrp" or "lat") and accuracy is
    re-measured on the examples the model originally classified
    correctly (or incorrectly, if ``original_correct`` is False).
    Prints per-budget accuracy; returns nothing.
    """
    # we did not exclude gradients, for attribution methods
    model.eval()  # this line will deactivate dropouts
    test_loss, test_accuracy = 0, 0
    nb_test_steps, nb_test_examples = 0, 0
    # NOTE(review): the four lists below are never appended to in this
    # function — presumably kept for interface parity with a sibling
    # evaluate function; confirm before removing.
    pred_logits = []
    actual = []
    inputs_ids = []
    seqs_lens = []
    k_test_accuracy = [0.0]*k  # one accuracy accumulator per deletion budget
    # we don't need gradient in this case.
    for _, batch in enumerate(tqdm(test_dataloader, desc="Iteration")):
        k_logits = []
        input_ids, input_mask, label_ids, seq_lens = batch
        # truncate to save space and computing resource
        max_seq_lens = max(seq_lens)[0]
        input_ids = input_ids[:,:max_seq_lens]
        input_mask = input_mask[:,:max_seq_lens]
        # keep only sentences long enough to delete k words from
        sel_idx = sentence_filter(seq_lens, min_len=min_len)
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        label_ids = label_ids.to(device)
        seq_lens = seq_lens.to(device)
        input_ids = input_ids[sel_idx]
        input_mask = input_mask[sel_idx]
        label_ids = label_ids[sel_idx]
        seq_lens = seq_lens[sel_idx]
        # attribution is taken w.r.t. the last label (highest class index)
        sensitivity_class = len(label_list) - 1
        # first pass: original (undeleted) predictions
        tmp_test_loss, logits, ctx_attn = \
            model(input_ids, input_mask, seq_lens, labels=label_ids)
        logits_raw = F.softmax(logits, dim=-1)
        logits = logits_raw.detach().cpu().numpy()
        label_ids_raw = label_ids.to('cpu').numpy()
        outputs = np.argmax(logits, axis=1)
        tmp_idx_correct = outputs == label_ids_raw
        tmp_idx_correct = tmp_idx_correct.nonzero()[0]
        tmp_idx_wrong = outputs != label_ids_raw
        tmp_idx_wrong = tmp_idx_wrong.nonzero()[0]
        if original_correct:
            # select only those that correct
            new_input_ids = input_ids[tmp_idx_correct]
            new_input_mask = input_mask[tmp_idx_correct]
            new_seq_lens = seq_lens[tmp_idx_correct]
            new_label_ids = label_ids[tmp_idx_correct]
        else:
            # select only those that are wrong
            new_input_ids = input_ids[tmp_idx_wrong]
            new_input_mask = input_mask[tmp_idx_wrong]
            new_seq_lens = seq_lens[tmp_idx_wrong]
            new_label_ids = label_ids[tmp_idx_wrong]
        # corner case handling, if this batch contains no examples, we bypass
        if new_input_ids.shape[0] == 0:
            continue
        if k == 0:
            # no need to drop
            tmp_test_loss, logits, _ = \
                model(new_input_ids, new_input_mask, new_seq_lens,
                      labels=new_label_ids)
        else:
            if del_type == "random":
                # Random dropouts
                for k_i in range(0, k):
                    new_input_ids_curr = random_drop(new_input_ids,
                                                     new_seq_lens, k=k_i+1)
                    tmp_test_loss, logits, _ = \
                        model(new_input_ids_curr, new_input_mask,
                              new_seq_lens, labels=new_label_ids)
                    k_logits.append(logits)
            elif del_type == "gs":
                # GS dropouts (gradient sensitivity: squared grad norm per token)
                gs_score = torch.zeros(logits.shape)
                gs_score[:, sensitivity_class] = 1.0
                gs_score = model.backward_gradient(gs_score)
                gs_score = torch.norm(gs_score, dim=-1)*torch.norm(gs_score, dim=-1)
                if original_correct:
                    new_gs_score = gs_score[tmp_idx_correct]
                else:
                    new_gs_score = gs_score[tmp_idx_wrong]
                # rerun
                for k_i in range(0, k):
                    new_input_ids_curr = topk_drop(new_input_ids, new_gs_score,
                                                   new_seq_lens, k=k_i+1)
                    tmp_test_loss, logits, _ = \
                        model(new_input_ids_curr, new_input_mask,
                              new_seq_lens, labels=new_label_ids)
                    k_logits.append(logits)
            elif del_type == "gi":
                # GI dropouts (gradient x input attribution)
                gi_score = torch.zeros(logits.shape)
                gi_score[:, sensitivity_class] = 1.0
                gi_score = model.backward_gradient_input(gi_score)
                gi_score = torch.norm(gi_score, dim=-1)*torch.norm(gi_score, dim=-1)
                if original_correct:
                    new_gi_score = gi_score[tmp_idx_correct]
                else:
                    new_gi_score = gi_score[tmp_idx_wrong]
                # rerun
                for k_i in range(0, k):
                    new_input_ids_curr = topk_drop(new_input_ids, new_gi_score,
                                                   new_seq_lens, k=k_i+1)
                    tmp_test_loss, logits, _ = \
                        model(new_input_ids_curr, new_input_mask,
                              new_seq_lens, labels=new_label_ids)
                    k_logits.append(logits)
            elif del_type == "lrp":
                # lrp dropouts (layer-wise relevance propagation)
                Rout_mask = torch.zeros((input_ids.shape[0],
                                         len(label_list))).to(device)
                Rout_mask[:, sensitivity_class] = 1.0
                relevance_score = logits_raw*Rout_mask
                lrp_score = model.backward_lrp(relevance_score)
                lrp_score = lrp_score.cpu().detach().data
                lrp_score = torch.abs(lrp_score).sum(dim=-1)
                if original_correct:
                    new_lrp_score = lrp_score[tmp_idx_correct]
                else:
                    new_lrp_score = lrp_score[tmp_idx_wrong]
                # rerun
                for k_i in range(0, k):
                    new_input_ids_curr = topk_drop(new_input_ids, new_lrp_score,
                                                   new_seq_lens, k=k_i+1)
                    tmp_test_loss, logits, _ = \
                        model(new_input_ids_curr, new_input_mask,
                              new_seq_lens, labels=new_label_ids)
                    k_logits.append(logits)
            elif del_type == "lat":
                # lat dropouts (attention-based attribution)
                attention_scores = model.backward_lat(input_ids, ctx_attn)
                attention_scores = attention_scores.sum(dim=-1)
                if original_correct:
                    new_attention_scores = attention_scores[tmp_idx_correct]
                else:
                    new_attention_scores = attention_scores[tmp_idx_wrong]
                # rerun
                for k_i in range(0, k):
                    new_input_ids_curr = topk_drop(new_input_ids,
                                                   new_attention_scores,
                                                   new_seq_lens, k=k_i+1)
                    tmp_test_loss, logits, _ = \
                        model(new_input_ids_curr, new_input_mask,
                              new_seq_lens, labels=new_label_ids)
                    k_logits.append(logits)
        # accumulate per-budget accuracy for this batch
        new_label_ids = new_label_ids.to('cpu').numpy()
        for k_i in range(0, k):
            logits = k_logits[k_i]
            logits_raw = F.softmax(logits, dim=-1)
            logits = logits_raw.detach().cpu().numpy()
            outputs = np.argmax(logits, axis=1)
            tmp_test_accuracy=np.sum(outputs == new_label_ids)
            k_test_accuracy[k_i] = k_test_accuracy[k_i] + tmp_test_accuracy
        nb_test_examples += new_input_ids.size(0)  # same for all the ks
        nb_test_steps += 1
    # report: verbose then bare numbers (easy to copy into a table)
    for k_i in range(0, k):
        test_accuracy = k_test_accuracy[k_i]
        test_accuracy = test_accuracy / nb_test_examples
        print("Drop words = %s, Accuracy = %.2f"%(k_i+1, test_accuracy))
    for k_i in range(0, k):
        test_accuracy = k_test_accuracy[k_i]
        test_accuracy = test_accuracy / nb_test_examples
        print("%.2f"%(test_accuracy))


def word_deletion_task(task_name, device, sentence_limit=2000, k=0,
                       del_type="random", original_correct=True):
    """
    We need to set a limit otherwise it takes too long!

    Loads the trained Transformer checkpoint for ``task_name``, builds the
    test dataloader and runs evaluate_with_word_deletion on it.
    """
    model_type = "Transformer"
    TASK_NAME = task_name
    lrp_data_dir = "../../results"
    vocab_data_dir = "../../models/" + model_type + "/vocab.txt"
    DATA_DIR = "../../datasets/" + TASK_NAME + "/"
    # "../../data/uncased_L-12_H-768_A-12/" is for the default BERT-base pretrain
    MODEL_ROOT_PATH = "../../models/" + model_type +"/"
    MODEL_PATH = "../../results/" + TASK_NAME + "/best_checkpoint.bin"
    EVAL_BATCH_SIZE = 24  # you can tune this down depends on GPU you have.
    # This loads the task processor for you.
    processors = {
        "SST5": SST5_Processor,
        "SemEval" : SemEval_Processor,
        "IMDb" : IMDb_Processor,
        "Yelp5" : Yelp5_Processor
    }
    processor = processors[TASK_NAME]()
    label_list = processor.get_labels()
    model, tokenizer, optimizer = \
        load_model_setups(vocab_file=MODEL_ROOT_PATH + "vocab.txt",
                          bert_config_file=MODEL_ROOT_PATH + "bert_config.json",
                          init_checkpoint=MODEL_PATH,
                          label_list=label_list,
                          num_train_steps=20,
                          do_lower_case=True,
                          # below is not required for eval
                          learning_rate=2e-5,
                          warmup_proportion=0.1,
                          init_lrp=True,
                          model_type=model_type)
    model = model.to(device)  # send the model to device
    test_examples = processor.get_test_examples(DATA_DIR,
                                                sentence_limit=sentence_limit)
    test_features = \
        convert_examples_to_features(
            test_examples, label_list, 128, tokenizer, model_type=model_type)
    all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    all_seq_len = torch.tensor([[f.seq_len] for f in test_features], dtype=torch.long)
    test_data = TensorDataset(all_input_ids, all_input_mask, all_label_ids, all_seq_len)
    # shuffle=False keeps example order deterministic across del_types
    test_dataloader = DataLoader(test_data, batch_size=EVAL_BATCH_SIZE, shuffle=False)
    evaluate_with_word_deletion(test_dataloader, model, device, label_list,
                                k=k, del_type=del_type,
                                original_correct=original_correct)
# -

# processors = {
#     "SST5": SST5_Processor,
#     "SemEval" :
# SemEval_Processor,
#     "IMDb" : IMDb_Processor,
#     "Yelp5" : Yelp5_Processor
# }

# Run the word-deletion evaluation once per attribution method, with the
# same deletion budget each time.  Identical to five copy-pasted cells:
# same calls, same order, same printed banners.
i = 10
for method in ["random", "gs", "gi", "lrp", "lat"]:
    print("===== Word Deletion with Max K=%s ====="%(i))
    word_deletion_task("SST5", device, sentence_limit=1000,
                       k=i, del_type=method, original_correct=True)

# ### Exp. 3.4 Correlations across datasets
# Due to the memory limitation and cache limitations, we want to run these
# analysis functions one at a time to avoid failure.
# Run the attribution analysis once per dataset (memory-heavy; one at a time).
sst5_dict = analysis_task("SST5", device, sentence_limit=2000)
semeval_dict = analysis_task("SemEval", device, sentence_limit=2000)
imdb_dict = analysis_task("IMDb", device, sentence_limit=2000)
yelp5_dict = analysis_task("Yelp5", device, sentence_limit=2000)

# save it to disk to avoid repetitive runs
# FIX: previously saved to "./sst5_dict.pt" while the reload below read
# "./sst5.pt", so the cached SST-5 results could never be loaded back.
torch.save(sst5_dict, "./sst5.pt")
torch.save(semeval_dict, "./semeval.pt")
torch.save(imdb_dict, "./imdb.pt")
torch.save(yelp5_dict, "./yelp5.pt")

# you can just load this for your second time
sst5_dict = torch.load("./sst5.pt")
semeval_dict = torch.load("./semeval.pt")
imdb_dict = torch.load("./imdb.pt")
yelp5_dict = torch.load("./yelp5.pt")


# +
def find_common_vocab(dict_list):
    """Return the set of words present in every per-dataset score dict."""
    assert len(dict_list) > 0
    common_vocab = set(dict_list[0].keys())
    for i in range(1, len(dict_list)):
        common_vocab = common_vocab.intersection(set(dict_list[i].keys()))
    return common_vocab


def subset_score(dict_list):
    """Stack per-dataset scores for the shared vocabulary.

    Returns an array of shape (len(dict_list), n_common_words): row d holds
    the scores of dataset d for each common word, in a fixed word order.
    """
    common_vocab = find_common_vocab(dict_list)
    per_word_score = []
    for word in common_vocab:
        word_score = []
        for d in dict_list:
            word_score.append(d[word])
        per_word_score.append(word_score)
    return np.transpose(np.array(per_word_score))


def common_word_scores(attribution_method, vocab_data_dir,
                       sst5_dict, semeval_dict, imdb_dict, yelp5_dict):
    """Build a DataFrame of per-word attribution scores shared by all datasets.

    ``attribution_method`` is one of "gs", "gi", "lrp", "lat"; the
    corresponding "<method>_scores" entry of each analysis dict is used.
    """
    attribution_method = attribution_method + "_scores"
    sst_filtered_word_rank, sst_raw_word_rank, sst_sentence_revelance_score = \
        load_attribution_scores(vocab_data_dir,
                                sst5_dict["inputs_ids"],
                                sst5_dict["seqs_lens"],
                                sst5_dict[attribution_method])
    semeval_filtered_word_rank, semeval_raw_word_rank, semeval_sentence_revelance_score = \
        load_attribution_scores(vocab_data_dir,
                                semeval_dict["inputs_ids"],
                                semeval_dict["seqs_lens"],
                                semeval_dict[attribution_method])
    imdb_filtered_word_rank, imdb_raw_word_rank, imdb_sentence_revelance_score = \
        load_attribution_scores(vocab_data_dir,
                                imdb_dict["inputs_ids"],
                                imdb_dict["seqs_lens"],
                                imdb_dict[attribution_method])
    yelp5_filtered_word_rank, yelp5_raw_word_rank, yelp5_sentence_revelance_score = \
        load_attribution_scores(vocab_data_dir,
                                yelp5_dict["inputs_ids"],
                                yelp5_dict["seqs_lens"],
                                yelp5_dict[attribution_method])
    # word -> score maps (locals shadow the parameter names on purpose,
    # matching the original notebook; the parameters are consumed above)
    sst_dict = dict()
    semeval_dict = dict()
    imdb_dict = dict()
    yelp5_dict = dict()
    for e in sst_filtered_word_rank:
        sst_dict[e[0]] = e[1]
    for e in semeval_filtered_word_rank:
        semeval_dict[e[0]] = e[1]
    for e in imdb_filtered_word_rank:
        imdb_dict[e[0]] = e[1]
    for e in yelp5_filtered_word_rank:
        yelp5_dict[e[0]] = e[1]
    score_list = subset_score([sst_dict, semeval_dict, imdb_dict, yelp5_dict])
    score_df = pd.DataFrame({"SST-5": score_list[0],
                             "SemEval": score_list[1],
                             "IMDb": score_list[2],
                             "Yelp-5": score_list[3]})
    return score_df
# -

gs_score_df = common_word_scores("gs", vocab_data_dir,
                                 sst5_dict, semeval_dict, imdb_dict, yelp5_dict)
# FIX: these two were commented out while corr_plot() below still used
# gi_score_df and lrp_score_df, which raised NameError.
gi_score_df = common_word_scores("gi", vocab_data_dir,
                                 sst5_dict, semeval_dict, imdb_dict, yelp5_dict)
lrp_score_df = common_word_scores("lrp", vocab_data_dir,
                                  sst5_dict, semeval_dict, imdb_dict, yelp5_dict)
lat_score_df = common_word_scores("lat", vocab_data_dir,
                                  sst5_dict, semeval_dict, imdb_dict, yelp5_dict)


def corr_plot(score_df, color="red"):
    """Pairwise correlation plot across datasets for one attribution method.

    Lower triangle: regression scatter; diagonal: distribution; upper
    triangle: Pearson r annotation.
    """
    import matplotlib.pyplot as plt
    plt.rcParams['axes.facecolor'] = 'white'
    plt.rcParams['axes.edgecolor'] = "black"
    plt.rcParams["font.family"] = "Times New Roman"
    plt.rcParams['axes.labelsize'] = 30
    import matplotlib
    matplotlib.rc('xtick', labelsize=15)
    matplotlib.rc('ytick', labelsize=15)
    from scipy.stats import pearsonr

    def reg_coef(x, y, label=None, color=None, **kwargs):
        # annotate the cell with the Pearson correlation coefficient only
        ax = plt.gca()
        r, p = pearsonr(x, y)
        ax.annotate('r = {:.2f}'.format(r), xy=(0.5,0.5),
                    xycoords='axes fraction', ha='center', size=30)
        ax.set_axis_off()

    g = sns.PairGrid(score_df)
    g.map_diag(sns.distplot, color=color)
    g.map_lower(sns.regplot, marker="+", line_kws={"color": "black"}, color=color)
    g.map_upper(reg_coef)


corr_plot(gs_score_df, "orange")
corr_plot(gi_score_df, "green")
corr_plot(lrp_score_df, "red")
corr_plot(lat_score_df, "green")
code/notebook/lrp_visualize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import azureml.core print("SDK version:", azureml.core.VERSION) from azureml.core import Datastore, Experiment, Workspace # - ws = Workspace.from_config() print('Name: {0}'.format(ws.name), 'Resource Group: {0}'.format(ws.resource_group), 'Location: {0}'.format(ws.location), 'Subscription Id: {0}'.format(ws.subscription_id), sep = '\n') Experiment.list(ws) experiment = Experiment(ws, 'extract_frames') runs = experiment.get_runs() for run in runs: print(run) experiment = Experiment(ws, 'extract_frames') runs = experiment.get_runs() for run in runs: if run.id == 'd43f791c-0e08-4734-bd40-d6b46001adda': print(run) run.cancel()
v1/notebooks/Administration Functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ruchirpandey/test1/blob/master/Tarun_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="-3bLqNu43xU_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 491} outputId="f87c2df9-d0db-4344-94d7-988584c9239e"
def argument_test_natural_number(f):
    """Decorator: only forward the call when the argument is a positive int.

    Raises Exception otherwise (note: message also fires for non-positive
    integers, matching the original notebook's demonstrated behavior).
    """
    def tarun(xy):
        if type(xy) == int and xy > 0:
            return f(xy)
        else:
            raise Exception("Argument is not an integer")
    return tarun


@argument_test_natural_number
def factorial(n):
    """Recursive factorial, guarded by the decorator above."""
    if n == 1:
        return 1
    else:
        return n * factorial(n-1)


for i in range(1,10):
    print(i, factorial(i))

# Deliberately triggers the decorator's Exception (demo of the guard).
print(factorial(-1))


# + id="gxHysXKe3xVR" colab_type="code" colab={} outputId="ac79b09d-113e-44f3-bdbb-9a0f54fefdb7"
def factorial(k, l=3):
    """Redefined factorial with an inline validity check (no decorator)."""
    print(k,"-",l)
    if type(k) == int and k > 0:
        # FIX: base case previously tested `n == 1`, but `n` is not defined
        # in this scope — any positive argument raised NameError.
        if k == 1:
            return 1
        else:
            return k * factorial(k-1)
    else:
        print("incorrect input")


print(factorial(-6,7))


# + id="pxAenS4R3xVc" colab_type="code" colab={} outputId="865bc7b0-0f44-4637-e153-94b33c50e623"
def call_counter(func):
    """Decorator that counts calls on the wrapper's `.calls` attribute."""
    def helper(*args, **kwargs):
        helper.calls += 1
        return func(*args, **kwargs)
    helper.calls = 0
    return helper


@call_counter
def succ(x):
    return x + 1


@call_counter
def mul1(x, y=1):
    return x*y + 1


@call_counter
def tarun(x, z, l, y):
    return x*y + z + l +2


print(tarun.calls)
print(succ.calls)
for i in range(10):
    succ(i)
mul1(3, 4)
mul1(4)
mul1(y=3, x=2)
print(succ.calls)
print(mul1.calls)
print("-------------------")
for j in range(5):
    print(tarun(j,j*2,j+5,j*j))
print("count is -",tarun.calls)

# + id="nrZvMZ0K3xVq" colab_type="code" colab={} outputId="a7fc36d1-25a8-40b8-88fe-ab93dfd08b0b"
range(10)

# + id="9YSzoWpn3xVy" colab_type="code" colab={} outputId="881a6db4-cc61-4c06-ac63-fd396f97fa3c"
import pandas

# + id="R2aQkBCp3xV6" colab_type="code" colab={}
Tarun_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Automobile price prediction: data cleaning, then Linear / Lasso / Ridge
# regression with R^2 and RMSE comparison.

import pandas as pd
import numpy as np

# Whitespace-tolerant comma separator; file has no header row.
auto_data = pd.read_csv('../../Data/automobile.data',
                        sep=r'\s*,\s*', header=None, engine='python')
auto_data.shape
auto_data.head()

columns = ["symboling","normalized-losses","make","fuel-type","aspiration",
           "num-of-doors","body-style", "drive-wheels","engine-location",
           "wheel-base", "length", "width", "height", "curb-weight",
           "engine-type", "num-of-cylinders", "engine-size", "fuel-system",
           "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm",
           "city-mpg", "highway-mpg", "price"]
auto_data.columns = columns
# '?' marks missing values in this dataset.
auto_data = auto_data.replace('?', np.nan)
auto_data.head()
auto_data.describe()
auto_data.describe(include='all')
auto_data.price.describe()
auto_data.price = pd.to_numeric(auto_data.price, errors='coerce')
auto_data.price.describe()
auto_data = auto_data.drop('normalized-losses', axis=1)
auto_data.head()
auto_data.horsepower.describe()
auto_data.horsepower = pd.to_numeric(auto_data.horsepower, errors='coerce')
auto_data.horsepower.describe()
auto_data["num-of-cylinders"].unique()
# Map the spelled-out cylinder counts to integers.
cylinder_dict = {'four':4, 'six':6, 'five':5, 'three':3,
                 'twelve':12, 'two':2, 'eight':8}
auto_data["num-of-cylinders"].replace(cylinder_dict, inplace=True)
auto_data["num-of-cylinders"].describe()
auto_data.head()
# One-hot encode the remaining categorical columns.
auto_data = pd.get_dummies(auto_data,
                           columns=["make","fuel-type","aspiration",
                                    "num-of-doors", "body-style",
                                    "drive-wheels","engine-location",
                                    "engine-type", "fuel-system"])
auto_data.head()
auto_data = auto_data.dropna()
auto_data.shape
auto_data[auto_data.isnull().any(axis=1)]

# +
from sklearn.model_selection import train_test_split

x = auto_data.drop('price', axis=1)
y = auto_data.price
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2)
# +
# # ?lm.score
# +
from sklearn.linear_model import LinearRegression

lm = LinearRegression()
lm.fit(x_train, y_train)
# -
lm.score(x_train, y_train) #R^2 -> residual error/total error
# Our regression model captured above 95% of the varience of the given data set
# Its a low bias model since r^2 of train is 9.6 which is close to 1.
pd.Series(lm.coef_, x.columns).sort_values()
y_predict = lm.predict(x_test)

import matplotlib.pyplot as plt


def plot_variance(y_predict, y_test, y_label='Price'):
    """Plot predicted vs. actual target values on one axis."""
    plt.figure(figsize=(10,5))
    plt.plot(y_predict, label='Predicted')
    plt.plot(y_test.values, label='Actual')
    plt.ylabel(y_label)
    plt.legend()
    plt.show()


plot_variance(y_predict, y_test)
r_squared = lm.score(x_test, y_test)
r_squared
# Its a low variance model since r^2 of test and r^2 of train are very close.
from sklearn.metrics import mean_squared_error
#objective function of the (ols) ordinary least squared regression
mean_squared_error(y_predict, y_test)
from math import sqrt
#RMSE Root mean squared error
sqrt(mean_squared_error(y_predict, y_test))
#Tells how much our model prediction might differ/vary from the actual output

# ## Lasso Regression
from sklearn.linear_model import Lasso

lam = Lasso(alpha=5, normalize=True)
lam.fit(x_train, y_train)
pd.Series(lam.coef_, x.columns).sort_values()
lam.score(x_train, y_train)
y_predict = lam.predict(x_test)
plot_variance(y_predict, y_test)
lam.score(x_test, y_test)
sqrt(mean_squared_error(y_predict, y_test))

# ## Ridge Regression
from sklearn.linear_model import Ridge

rlm = Ridge(alpha=.5, normalize=True)
rlm.fit(x_train, y_train)
pd.Series(rlm.coef_, x.columns).sort_values()
rlm.score(x_train, y_train)
y_pred = rlm.predict(x_test)
plot_variance(y_pred, y_test)
rlm.score(x_test, y_test)
# FIX: this RMSE previously used `y_predict` (the Lasso predictions),
# silently reporting the Lasso error as the Ridge error.
sqrt(mean_squared_error(y_pred, y_test))
MachineLearning/Regression/Automobile Price - Lasso.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] editable=true # # Part I. ETL Pipeline for Pre-Processing the Files # + [markdown] editable=true # #### Import Python packages # + editable=true # Import Python packages import pandas as pd import cassandra import re import os import glob import numpy as np import json import csv # + [markdown] editable=true # #### Creating list of filepaths to process original event csv data files # + editable=true # checking the current working directory print(os.getcwd()) # Get the current folder and subfolder event data filepath = os.getcwd() + '/event_data' # Create a for loop to create a list of files and collect each filepath for root, dirs, files in os.walk(filepath): # join the file path and roots with the subdirectories using glob file_path_list = glob.glob(os.path.join(root,'*')) #print(file_path_list) # + [markdown] editable=true # #### Processing the files to create the data file csv that will be used for Apache Casssandra tables # + editable=true # initiating an empty list of rows that will be generated from each file full_data_rows_list = [] # for every filepath in the file path list for f in file_path_list: # reading csv file with open(f, 'r', encoding = 'utf8', newline='') as csvfile: # creating a csv reader object csvreader = csv.reader(csvfile) # skipping the header next(csvreader) # extracting each data row one by one and append it for line in csvreader: #print(line) full_data_rows_list.append(line) #get total number of rows print(len(full_data_rows_list)) # uncomment to check to see what the list of event data rows will look like # print(full_data_rows_list) # creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \ # Apache Cassandra tables 
# Write the consolidated event_datafile_new.csv (only rows with an artist),
# then model three Cassandra tables, one per query, per Cassandra's
# query-first design.
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)

with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
                'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # skip rows with no artist (empty first column)
        if (row[0] == ''):
            continue
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))

# + editable=true
# check the number of rows in the csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
    print(sum(1 for line in f))

# + [markdown] editable=true
# # Part II. Use Apache Cassandra to Model the Database Tables.
#
# ## The event_datafile_new.csv contains the following columns:
# - artist, firstName, gender, itemInSession, lastName, length,
#   level (paid or free song), location, sessionId, song title, userId
#
# <img src="images/image_event_datafile_new.jpg">

# + [markdown] editable=true
# #### Creating a Cluster

# + editable=true
# make a connection to a Cassandra instance the local machine (127.0.0.1)
from cassandra.cluster import Cluster
cluster = Cluster()
# To establish connection and begin executing queries, need a session
session = cluster.connect()

# + [markdown] editable=true
# #### Create Keyspace

# + editable=true
# Create a Keyspace
try:
    session.execute("""
    CREATE KEYSPACE IF NOT EXISTS udacity
    WITH REPLICATION =
    {'class':'SimpleStrategy', 'replication_factor':1}
    """)
except Exception as e:
    print(e)

# + [markdown] editable=true
# #### Set Keyspace

# + editable=true
# Set KEYSPACE to the keyspace specified above
try:
    session.set_keyspace('udacity')
except Exception as e:
    print(e)

# + [markdown] editable=true
# ## Create tables to complete the following three tasks.

# + [markdown] editable=true
# ### Task1: Getting the artist and song title and song's length in the music
# app history that was heard during sessionId = 338, and itemInSession = 4
#
# sessionId and itemInSession form the composite key because together they
# uniquely select the rows this query filters on.

# + editable=true
# Create table song_detail_session
# NOTE(review): the queries below concatenate ")" directly against "VALUES"
# with no space — Cassandra's CQL tokenizer accepts this, but adding a
# space would be clearer; left unchanged here.
query1 = "CREATE TABLE IF NOT EXISTS song_detail_session"
query1 = query1 + "(sessionId int, itemInSession int, artist text, length float, song text,PRIMARY KEY (sessionId, ItemInSession))"
try:
    session.execute(query1)
except Exception as e:
    print(e)

# + [markdown] editable=true
# #### Insert data

# + editable=true
# read data from the CSV file
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    # insert data into table song_detail_session
    for line in csvreader:
        query = "INSERT INTO song_detail_session (sessionId, itemInSession, artist, length, song)"
        query = query + "VALUES (%s, %s, %s, %s, %s)"
        session.execute(query, (int(line[8]), int(line[3]),line[0],float(line[5]),line[9]))

# + [markdown] editable=true
# #### Check the table created

# + editable=true
# Use a SELECT statement to verify the data was entered into the table
query = "SELECT artist,length,song from song_detail_session WHERE sessionId = 338 AND itemInSession = 4"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

for row in rows:
    print (row.artist,row.length,row.song)

# + [markdown] editable=true
# ### TASK 2: Getting the artist, song (sorted by itemInSession) and user
# (first and last name) for userid = 10, sessionid = 182
#
# (sessionId, userId) is the partition key and itemInSession the clustering
# key, so rows come back sorted by itemInSession within the partition.

# + editable=true
# create table song_playlist_session
query2 = "CREATE TABLE IF NOT EXISTS song_playlist_session"
query2 = query2 + """(sessionId int, itemInSession int, userid int, artist text, song text, firstName text, lastName text,\
 PRIMARY KEY ((sessionId,userId), ItemInSession))"""
try:
    session.execute(query2)
except Exception as e:
    print(e)

# + [markdown] editable=true
# #### Insert data

# + editable=true
# read data from the CSV file
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    # insert the data into table song_playlist_session
    for line in csvreader:
        query = "INSERT INTO song_playlist_session (sessionId, itemInSession, userid, artist, song, firstName, lastName)"
        query = query + "VALUES (%s, %s, %s, %s, %s,%s,%s)"
        session.execute(query, (int(line[8]), int(line[3]),int(line[10]),line[0],line[9],line[1],line[4]))

# + [markdown] editable=true
# #### Check the table created

# + editable=true
# Use a SELECT statement to verify the data was entered into the table
query = "SELECT artist,song,firstname,lastname from song_playlist_session WHERE sessionId = 182 AND userid =10"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

for row in rows:
    print (row.artist, row.song, row.firstname,row.lastname)

# + [markdown] editable=true
# ### TASK3: Getting user names (first and last) in my music app history who
# listened to the song 'All Hands Against His Own'
#
# (song, userId) is the composite key: userId disambiguates different users
# who share a name and listened to the same song.

# + editable=true
# create table song_user_name
query3 = "CREATE TABLE IF NOT EXISTS song_user_name"
query3 = query3 + "(song text,userId int, firstName text, lastName text, PRIMARY KEY (song,userId))"
try:
    session.execute(query3)
except Exception as e:
    print(e)

# + [markdown] editable=true
# #### Insert data

# + editable=true
# read data from the CSV file
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    # insert the data into table song_user_name
    for line in csvreader:
        query = "INSERT INTO song_user_name (song,userId,firstName, lastName)"
        query = query + "VALUES (%s, %s, %s,%s)"
        session.execute(query, (line[9],int(line[10]),line[1],line[4]))

# + [markdown] editable=true
# #### Check the table created

# + editable=true
# Use a SELECT statement to verify the data was entered into the table
query = "SELECT firstName, lastName from song_user_name WHERE song='All Hands Against His Own' "
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

for row in rows:
    print (row.firstname,row.lastname)

# + [markdown] editable=true
# ### Drop the tables before closing out the sessions

# + editable=true
# Drop the table before closing out the sessions
query = "drop table song_detail_session"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

# + editable=true
# Drop the table before closing out the sessions
query = "drop table song_user_name"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

# + editable=true
# Drop the table before closing out the sessions
query = "drop table song_playlist_session"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

# + [markdown] editable=true
# ### Close the session and cluster connection¶

# + editable=true
session.shutdown()
cluster.shutdown()
Apache_Cassandra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Expected return values for User class that implements Credentials class # This notebook give the expected output and input of every method in Users class # Import and create an instance of User Class from User import User c=User() # + ### Create New user Account # - c.new_user('john','<PASSWORD>') # **Test if user can create an account** # # **Create a new account** # # 1.The method accepts acount_name, username, and password as input. # # 2.If password is not given, the function will generate its a new password c.add_account('ig','if','qwertyuio') # If argument for password is not specified, the system will generate a password for you c.add_account('facebook','facebook_user',) # **Get Account details** c.get_account_details('facebook')
Test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import Libraries
# Physics-Informed Neural Network (PINN) solving the 1D viscous Burgers
# equation u_t + u*u_x = nu*u_xx on x in [-1,1], t in [0,1].

# +
import torch
import torch.autograd as autograd         # computation graph
from torch import Tensor                  # tensor node in the computation graph
import torch.nn as nn                     # neural networks
import torch.optim as optim               # optimizers e.g. gradient descent, ADAM, etc.

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker

import numpy as np
import time
from pyDOE import lhs         #Latin Hypercube Sampling
import scipy.io

#Set default dtype to float32
torch.set_default_dtype(torch.float)

#PyTorch random number generator
torch.manual_seed(1234)

# Random number generators in other libraries
np.random.seed(1234)

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
if device == 'cuda':
    print(torch.cuda.get_device_name())
# -

# # *Data Prep*
#
# Training and Testing data is prepared from the solution file

# +
data = scipy.io.loadmat('Data/burgers_shock_mu_01_pi.mat')  # Load data from file
x = data['x']       # 256 points between -1 and 1 [256x1]
t = data['t']       # 100 time points between 0 and 1 [100x1]
usol = data['usol'] # solution of 256x100 grid points
# makes 2 arrays X and T such that u(X[i],T[j])=usol[i][j] are a tuple
X, T = np.meshgrid(x,t)
# -

# # Test Data
#
# We prepare the test data to compare against the solution produced by the PINN.

# + colab={} colab_type="code" id="yddknKA2Xohp"
''' X_u_test = [X[i],T[i]] [25600,2] for interpolation'''
X_u_test = np.hstack((X.flatten()[:,None], T.flatten()[:,None]))

# Domain bounds
lb = X_u_test[0]   # [-1. 0.]
ub = X_u_test[-1]  # [1.  0.99]

'''
   Fortran Style ('F') flatten,stacked column wise!
   u = [c1
        c2
        .
        .
        cn]
   u = [25600x1]
'''
u_true = usol.flatten('F')[:,None]
# -

# # Training Data

# + colab={} colab_type="code" id="8UVJmvZbXjXb"
def trainingdata(N_u,N_f):
    """Sample N_u boundary/initial points and N_f collocation points.

    Returns (X_f_train, X_u_train, u_train). NOTE(review): the (x=-1) edge
    pairs with usol[-1,:] and the (x=+1) edge with usol[0,:]; for this
    benchmark both boundaries are zero so it is harmless, but confirm the
    orientation before reusing on a problem with non-zero boundaries.
    """
    '''Boundary Conditions'''
    #Initial Condition -1 =< x =<1 and t = 0
    leftedge_x = np.hstack((X[0,:][:,None], T[0,:][:,None])) #L1
    leftedge_u = usol[:,0][:,None]

    #Boundary Condition x = -1 and 0 =< t =<1
    bottomedge_x = np.hstack((X[:,0][:,None], T[:,0][:,None])) #L2
    bottomedge_u = usol[-1,:][:,None]

    #Boundary Condition x = 1 and 0 =< t =<1
    topedge_x = np.hstack((X[:,-1][:,None], T[:,0][:,None])) #L3
    topedge_u = usol[0,:][:,None]

    all_X_u_train = np.vstack([leftedge_x, bottomedge_x, topedge_x]) # X_u_train [456,2] (456 = 256(L1)+100(L2)+100(L3))
    all_u_train = np.vstack([leftedge_u, bottomedge_u, topedge_u])   #corresponding u [456x1]

    #choose random N_u points for training
    idx = np.random.choice(all_X_u_train.shape[0], N_u, replace=False)

    X_u_train = all_X_u_train[idx, :] #choose indices from set 'idx' (x,t)
    u_train = all_u_train[idx,:]      #choose corresponding u

    '''Collocation Points'''
    # Latin Hypercube sampling for collocation points
    # N_f sets of tuples(x,t)
    X_f_train = lb + (ub-lb)*lhs(2,N_f)
    X_f_train = np.vstack((X_f_train, X_u_train)) # append training points to collocation points

    return X_f_train, X_u_train, u_train
# -

# # Physics Informed Neural Network
class Sequentialmodel(nn.Module):
    """Fully-connected tanh network trained on boundary data + PDE residual.

    NOTE(review): forward/loss/closure/test read the module-level globals
    layers, ub, lb, device, X_u_train, u_train, X_f_train, X_u_test_tensor,
    u, f_hat, optimizer and PINN defined in the Main cell below — this class
    is not self-contained by design (notebook style).
    """

    def __init__(self,layers):
        super().__init__() #call __init__ from parent class

        'activation function'
        self.activation = nn.Tanh()

        'loss function'
        self.loss_function = nn.MSELoss(reduction ='mean')

        'Initialise neural network as a list using nn.Modulelist'
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])

        self.iter = 0  # counts optimizer closure invocations

        '''
        Alternatively:
        *all layers are callable

        Simple linear Layers
        self.fc1 = nn.Linear(2,50)
        self.fc2 = nn.Linear(50,50)
        self.fc3 = nn.Linear(50,50)
        self.fc4 = nn.Linear(50,1)
        '''

        'Xavier Normal Initialization'
        # std = gain * sqrt(2/(input_dim+output_dim))
        for i in range(len(layers)-1):
            # weights from a normal distribution with
            # Recommended gain value for tanh = 5/3?
            nn.init.xavier_normal_(self.linears[i].weight.data, gain=1.0)
            # set biases to zero
            nn.init.zeros_(self.linears[i].bias.data)

    'foward pass'
    def forward(self,x):
        # Accept numpy input for convenience when evaluating.
        if torch.is_tensor(x) != True:
            x = torch.from_numpy(x)

        u_b = torch.from_numpy(ub).float().to(device)
        l_b = torch.from_numpy(lb).float().to(device)

        #preprocessing input
        x = (x - l_b)/(u_b - l_b) #feature scaling

        #convert to float
        a = x.float()

        '''
        Alternatively:
        a = self.activation(self.fc1(a))
        a = self.activation(self.fc2(a))
        a = self.activation(self.fc3(a))
        a = self.fc4(a)
        '''
        for i in range(len(layers)-2):
            z = self.linears[i](a)
            a = self.activation(z)
        # final layer is linear (no activation)
        a = self.linears[-1](a)
        return a

    def loss_BC(self,x,y):
        """MSE between network output and boundary/initial data."""
        loss_u = self.loss_function(self.forward(x), y)
        return loss_u

    def loss_PDE(self, x_to_train_f):
        """MSE of the Burgers PDE residual at the collocation points."""
        nu = 0.01/np.pi

        x_1_f = x_to_train_f[:,[0]]
        x_2_f = x_to_train_f[:,[1]]

        g = x_to_train_f.clone()
        g.requires_grad = True  # enable autograd w.r.t. the inputs (x,t)

        u = self.forward(g)

        # first derivatives [u_x, u_t], then second derivatives via autograd
        u_x_t = autograd.grad(u,g,torch.ones([x_to_train_f.shape[0], 1]).to(device), retain_graph=True, create_graph=True)[0]
        u_xx_tt = autograd.grad(u_x_t,g,torch.ones(x_to_train_f.shape).to(device), create_graph=True)[0]

        u_x = u_x_t[:,[0]]
        u_t = u_x_t[:,[1]]
        u_xx = u_xx_tt[:,[0]]

        # Burgers residual: u_t + u*u_x - nu*u_xx = 0
        f = u_t + (self.forward(g))*(u_x) - (nu)*u_xx

        loss_f = self.loss_function(f,f_hat)
        return loss_f

    def loss(self,x,y,x_to_train_f):
        """Total loss = data-fit loss + PDE residual loss."""
        loss_u = self.loss_BC(x,y)
        loss_f = self.loss_PDE(x_to_train_f)
        loss_val = loss_u + loss_f
        return loss_val

    'callable for optimizer'
    def closure(self):
        # L-BFGS re-evaluates the loss via this closure.
        optimizer.zero_grad()
        loss = self.loss(X_u_train, u_train, X_f_train)
        loss.backward()
        self.iter += 1
        if self.iter % 100 == 0:
            error_vec, _ = PINN.test()
            print(loss,error_vec)
        return loss

    'test neural network'
    def test(self):
        """Return (relative L2 error, prediction grid reshaped to 256x100)."""
        u_pred = self.forward(X_u_test_tensor)
        # Relative L2 Norm of the error (Vector)
        error_vec = torch.linalg.norm((u-u_pred),2)/torch.linalg.norm(u,2)
        u_pred = u_pred.cpu().detach().numpy()
        u_pred = np.reshape(u_pred,(256,100),order='F')
        return error_vec, u_pred


# + [markdown] colab_type="text" id="bOjuHdzAhib-"
# # *Solution Plot*

# + colab={} colab_type="code" id="UWqNuRMLhg4m"
def solutionplot(u_pred,X_u_train,u_train):
    """Plot the predicted u(x,t) field plus exact-vs-predicted slices.

    Saves the figure to Burgers.png.
    """
    fig, ax = plt.subplots()
    ax.axis('off')

    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])

    h = ax.imshow(u_pred, interpolation='nearest', cmap='rainbow',
                  extent=[T.min(), T.max(), X.min(), X.max()],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)

    ax.plot(X_u_train[:,1], X_u_train[:,0], 'kx',
            label = 'Data (%d points)' % (u_train.shape[0]),
            markersize = 4, clip_on = False)

    # white vertical lines marking the slice times below
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[25]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[50]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[75]*np.ones((2,1)), line, 'w-', linewidth = 1)

    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.legend(frameon=False, loc = 'best')
    ax.set_title('$u(x,t)$', fontsize = 10)

    '''
    Slices of the solution at points t = 0.25, t = 0.50 and t = 0.75
    '''
    ####### Row 1: u(t,x) slices ##################
    gs1 = gridspec.GridSpec(1, 3)
    gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5)

    ax = plt.subplot(gs1[0, 0])
    ax.plot(x,usol.T[25,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,u_pred.T[25,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(x,t)$')
    ax.set_title('$t = 0.25s$', fontsize = 10)
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])

    ax = plt.subplot(gs1[0, 1])
    ax.plot(x,usol.T[50,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,u_pred.T[50,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(x,t)$')
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])
    ax.set_title('$t = 0.50s$', fontsize = 10)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.35), ncol=5, frameon=False)

    ax = plt.subplot(gs1[0, 2])
    ax.plot(x,usol.T[75,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,u_pred.T[75,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(x,t)$')
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])
    ax.set_title('$t = 0.75s$', fontsize = 10)

    plt.savefig('Burgers.png',dpi = 500)
# -

# # Main
# (This cell is truncated in the source chunk; it continues past the end
# of the visible text.)

# +
'Generate Training data'
N_u = 100    #Total number of data points for 'u'
N_f = 10000  #Total number of collocation points
X_f_train_np_array, X_u_train_np_array, u_train_np_array = trainingdata(N_u,N_f)

'Convert to tensor and send to GPU'
X_f_train = torch.from_numpy(X_f_train_np_array).float().to(device)
X_u_train = torch.from_numpy(X_u_train_np_array).float().to(device)
u_train = torch.from_numpy(u_train_np_array).float().to(device)
X_u_test_tensor = torch.from_numpy(X_u_test).float().to(device)
u = torch.from_numpy(u_true).float().to(device)
f_hat = torch.zeros(X_f_train.shape[0],1).to(device)

layers = np.array([2,20,20,20,20,20,20,20,20,1]) #8 hidden layers

PINN = Sequentialmodel(layers)
PINN.to(device)

'Neural Network Summary'
print(PINN)

params = list(PINN.parameters())

'''Optimization'''
'L-BFGS Optimizer'
optimizer = torch.optim.LBFGS(PINN.parameters(), lr=0.1,
                              max_iter = 250,
                              max_eval = None,
                              tolerance_grad = 1e-05,
                              tolerance_change = 1e-09,
                              history_size = 100,
                              line_search_fn = 'strong_wolfe')

start_time = time.time()

optimizer.step(PINN.closure)

'Adam Optimizer'
# optimizer = optim.Adam(PINN.parameters(), lr=0.001,betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
# max_iter = 20000
# start_time = time.time()
# for i in range(max_iter):
#     loss = PINN.loss(X_u_train, u_train, X_f_train)
#     optimizer.zero_grad()     # zeroes the gradient buffers of all parameters
#     loss.backward() #backprop
#     optimizer.step()
#     if i % (max_iter/10) == 0:
#         error_vec, _ = PINN.test()
#         print(loss,error_vec)

elapsed = time.time() - start_time

print('Training 
time: %.2f' % (elapsed)) ''' Model Accuracy ''' error_vec, u_pred = PINN.test() print('Test Error: %.5f' % (error_vec)) ''' Solution Plot ''' # solutionplot(u_pred,X_u_train.cpu().detach().numpy(),u_train.cpu().detach().numpy())
PyTorch/Burgers' Equation/Burgers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import random
import string
import secrets


def randomPassword(stringLength=8):
    """Return a random alphanumeric password of length *stringLength*.

    Uses ``secrets.choice`` rather than ``random.choice`` because the
    output is a password (``random`` is not cryptographically secure).
    """
    lettersAndDigits = string.ascii_letters + string.digits
    return ''.join(secrets.choice(lettersAndDigits) for _ in range(stringLength))


def main():
    """Read a spreadsheet of countries and write one password per country.

    Prompts for an input Excel file with a ``country`` column and an
    output file name; writes a two-column (country, password) sheet.
    """
    path_to_excel_file = input("Enter path to source file : ")
    output_file = input("Enter output file name : ")

    password_object = {'country': [], 'password': []}

    df = pd.read_excel(path_to_excel_file)
    for d in df.get("country"):
        # BUG FIX: original had the placeholder `password = <PASSWORD>()`
        # (a redacted call) — restore the intended randomPassword() call.
        password = randomPassword()
        # strip spaces so the country name can be used as a key/identifier
        password_object["country"].append(d.replace(" ", ""))
        password_object["password"].append(password)

    result_df = pd.DataFrame(password_object)
    # context manager saves and closes the writer (ExcelWriter.save() is
    # deprecated in modern pandas)
    with pd.ExcelWriter(output_file) as writer:
        result_df.to_excel(writer, 'Sheet1', index=False)


# Guard so importing this module does not prompt for input; in a notebook
# __name__ is "__main__", so interactive behavior is unchanged.
if __name__ == "__main__":
    main()
Password Generator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Chapter 1 Exercises

# ## Exercise 1.1

# ## Subtitle
# ### subsubtitle
#
# **bold**
# *italics*
#
# * bullet1
# * bullet2

# +
import numpy as np
import matplotlib.pyplot as plt
import math

# log-fold gene expressions for 20 people
y = [4.87, 3.40, 3.02, 6.76, 5.55, 17.59, 2.77, 10.14, 6.70, 3.69, 4.89,
     4.80, 0.75, 14.58, 3.33, 7.86, 2.16, 4.72, 4.42, 4.11]

min_value = np.amin(y)
max_value = np.amax(y)

print("The values: ", end='')
print(y)
print("The mean: ", end='')
print(np.mean(y).round(2))
print("The median: ", end='')
print(np.median(y))
# -

# Unit-width bins spanning the data.
# BUG FIX: the bin edges originally stopped at ceil(max)-1
# (np.arange excludes its stop value), so the largest observation fell
# outside the last bin; extend the stop by 1 so the maximum is included.
plt.hist(x=y, bins=np.arange(math.floor(min_value), math.ceil(max_value) + 1, 1),
         color='darkgrey', alpha=1, rwidth=1, label='Experimental')
plt.grid(axis='y', alpha=0.5)
plt.xlabel('log-fold gene expressions')
plt.ylabel('Frequency')
plt.title('the expression of a specific gene for 20 people')
plt.legend(loc='best')
plt.show()

# ## Exercise 1.7
#
# ![title](exercise1_7.png)

# +
# Moments of the continuous uniform distribution on [a, b].
a = 0
b = 1
mean = ((a+b)/2)
variance = (1/12)*(b-a)**2  # wikipedia https://en.wikipedia.org/wiki/Continuous_uniform_distribution
print("The mean/first moment is: ")
print(mean)
print("The variance/second moment is: ")
print(round(variance, 4))
print("The skewness/third moment is: ")
print(0)  # wikipedia: the uniform distribution is symmetric, skewness = 0
print("The second central moment: ")
print("The third central moment: ")
# -

# ## 26-11
# Homework: Read Chapter 2 (except 2.3); make Exercises 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, (2.7), 2.8
# Homework: Read Chapter 2 up to section 2.1.3; make Exercises 1.4, 1.5, 1.7, 1.8, 1.9, 1.10, 2.6, 2.8

# +
# chestnut weights in grams
y = [16,18,5,12,20,17,10,22,17,8,16,19,21,20,19,19,12,12,8,19,
     17,10,9,8,19,15,12,18,12,17,11,12,13,15,13,10,12,16,11,15,13,13]

# BUG FIX: the original reused min_value/max_value computed from the FIRST
# dataset (range ~0.75..17.59), so this histogram's bins (0..17) excluded
# the chestnut weights above 17 g; recompute the range for the new data.
min_value = np.amin(y)
max_value = np.amax(y)

plt.hist(x=y, bins=np.arange(math.floor(min_value), math.ceil(max_value) + 1, 1),
         color='darkgrey', alpha=1, rwidth=1, label='Experimental')
plt.grid(axis='y', alpha=0.5)
plt.xlabel('gram')
plt.ylabel('Frequency')
plt.title('chest nuts weights')
plt.legend(loc='best')
plt.show()
ds1/chapter_exercises.ipynb
# ---
# title: "Drilling Down With Beautiful Soup"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "Drilling Down With Beautiful Soup."
# type: technical_note
# draft: false
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Preliminaries

# Import required modules
import requests
from bs4 import BeautifulSoup
import pandas as pd

# ## Download the HTML and create a Beautiful Soup object

# +
# Create a variable with the URL to this tutorial
url = 'http://en.wikipedia.org/wiki/List_of_A_Song_of_Ice_and_Fire_characters'

# Scrape the HTML at the url
r = requests.get(url)

# Turn the HTML into a Beautiful Soup object
# (parsed with the "lxml" parser; requires lxml to be installed)
soup = BeautifulSoup(r.text, "lxml")
# -

# If we looked at the soup object, we'd see that the names we want are in a
# hierarchical list. In pseudo-code, it looks like:
#
# - class=toclevel-1 span=toctext
#     - class=toclevel-2 span=toctext CHARACTER NAMES
#     - class=toclevel-2 span=toctext CHARACTER NAMES
#     - class=toclevel-2 span=toctext CHARACTER NAMES
#     - class=toclevel-2 span=toctext CHARACTER NAMES
#     - class=toclevel-2 span=toctext CHARACTER NAMES
#
# To get the CHARACTER NAMES, we are going to need to drill down into
# toclevel-2 and grab the toctext

# ## Setting up where to put the results

# Create a variable to store the scraped data in
character_name = []

# ## Drilling down with a forloop

# for each item in all the toclevel-2 li items
# (except the last three because they are not character names —
#  assumes the page's table of contents ends with three non-character
#  entries; verify against the live page),
for item in soup.find_all('li',{'class':'toclevel-2'})[:-3]:
    # find each span with class=toctext,
    for post in item.find_all('span',{'class':'toctext'}):
        # add the stripped string of each to character_name, one by one
        character_name.append(post.string.strip())

# ## Results

# View all the character names
character_name

# ## Quick analysis: Which house has the most main characters?

# Create a list object where to store the for loop results
houses = []

# For each element in the character_name list,
for name in character_name:
    # split up the names by a blank space and select the last element
    # this works because it is the last name if they are a house,
    # but the first name if they only have one name,
    # Then append each last name to the houses list
    houses.append(name.split(' ')[-1])

# +
# Convert houses into a pandas series (so we can use value_counts())
houses = pd.Series(houses)

# Count the number of times each name/house name appears
houses.value_counts()
docs/python/web_scraping/beautiful_soup_drill_down.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.3 64-bit (''ProgramData'': virtualenv)'
#     language: python
#     name: python37364bitprogramdatavirtualenv99403c2e8abd4ba0909557516bfee9d9
# ---

import xmltodict
import math
import urllib.request
import itertools
import pandas as pd
import pickle
from sklearn.utils import shuffle
import time
import numpy as np
import traceback

# + tags=[]
import sqlite3
from sqlite3 import Error


def create_con(db_file):
    """Open (or create) the SQLite database at *db_file*; return the connection or None."""
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except Error as e:
        print(e)


def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement, printing (not raising) any SQLite error."""
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
        conn.commit()
    except Error as e:
        print(e)


def create_game(conn, game):
    """Insert one game row (a 1-tuple with the name); return its rowid."""
    sql = """ INSERT INTO games(name) VALUES(?) """
    c = conn.cursor()
    c.execute(sql, game)
    conn.commit()
    return c.lastrowid


def create_reviews(conn, reviews):
    """Insert a batch of review tuples; return the last rowid.

    BUG FIX: the original executed the INSERT twice per review
    (`cur.execute(sql,review)` appeared on two consecutive lines), which
    duplicated every review — the reason the notebook later needs
    drop_duplicates(). Each review is now inserted exactly once.

    NOTE(review): the tuples built in process_result() are
    (ID, user, rating, comment) while the column list here is
    (user, rating, comment, ID) — the columns end up shifted. This is
    deliberately compensated downstream by the df.rename() in the
    "Import DB" section, so it is left unchanged here.
    """
    sql = """ INSERT INTO reviews(user, rating, comment, ID) VALUES(?,?,?,?) """
    cur = conn.cursor()
    for review in reviews:
        cur.execute(sql, review)
    conn.commit()
    return cur.lastrowid


def do_sql(conn, sql):
    """Run an arbitrary query and print every returned row."""
    cur = conn.cursor()
    cur.execute(sql)
    rows = cur.fetchall()
    for row in rows:
        print(row)


def count_reviews(conn):
    """Print the total number of rows in the reviews table."""
    cur = conn.cursor()
    print(cur.execute("SELECT COUNT (*) FROM reviews;").fetchall()[0])


def count_distinct_reviews(conn):
    """Print the number of fully-distinct rows in the reviews table."""
    cur = conn.cursor()
    print(cur.execute("SELECT COUNT(*) FROM (SELECT DISTINCT * FROM reviews);").fetchall()[0])


def del_all_records(conn):
    """Drop both tables (reviews first, then games) to reset the database."""
    cur = conn.cursor()
    cur.execute("DROP TABLE IF EXISTS reviews")
    cur.execute("DROP TABLE IF EXISTS games")
    conn.commit()
# -

from urllib.request import urlopen

# the original csv from https://raw.githubusercontent.com/beefsack/bgg-ranking-historicals/master/2017-02-21.csv
# The ID's where used in API calls to retrieve the game reviews
link = "https://raw.githubusercontent.com/beefsack/bgg-ranking-historicals/master/2020-08-19.csv"
f = urlopen(link)
games = pd.read_csv(f)

games.describe()

games.sort_values('Users rated', ascending=False, inplace=True)
games[:5]

# + tags=[]
from sqlalchemy import create_engine

sql_create_table_reviews = """ CREATE TABLE IF NOT EXISTS reviews(
    review_id integer PRIMARY KEY,
    user text NOT NULL,
    rating NOT NULL,
    comment text,
    ID integer,
    FOREIGN KEY(ID) REFERENCES games(ID));"""

conn = create_con('test.db')
del_all_records(conn)  # empty database (optional)

if conn:
    engine = create_engine('sqlite:///%s' % 'test.db', echo=True)
    # write the (ID, Name) lookup table straight from the ranking csv
    games[['ID', 'Name']].to_sql('games', con=engine)
    # c = conn.cursor()
    # c.execute(""" DROP TABLE IF EXISTS games""")
    # create_table(conn, sql_create_table_games)
    # c.execute(""" DROP TABLE IF EXISTS reviews""")
    create_table(conn, sql_create_table_reviews)

count_reviews(conn)
# -


def gen_batches():
    """Yield slices that partition the global `games` frame into blocks of 100.

    BUG FIX: the final `yield slice((b+1)*100, ...)` used the loop variable
    `b` after the loop, which raised NameError whenever len(games) < 100
    (the loop body never ran). Using num_batches directly is equivalent for
    the normal case and correct for the short one.
    """
    num_batches = len(games)//100
    for b in range(num_batches):
        result = slice(b*100, (b+1)*100)
        if b*100 > -1:  # always true — kept from the original; effectively a no-op guard
            yield result
    yield slice(num_batches*100, len(games)+1)


def number_rating_pages(game_id):
    """Ask the BGG XML API how many 100-comment pages *game_id* has."""
    url = 'https://www.boardgamegeek.com/xmlapi2/thing?id='+str(game_id)+'&ratingcomments=1'
    u = urllib.request.urlopen(url).read()  # The url you want to open
    doc = xmltodict.parse(u)
    return math.ceil(int(doc['items']['item']['comments']['@totalitems'])/100)


# +
def get_url_result(ids, p):
    # given a url of an object and a page, return list of dicts with the comments
    url = 'https://www.boardgamegeek.com/xmlapi2/thing?id='+ids+'&ratingcomments=1'
    url += '&&page='+str(p)
    u = urllib.request.urlopen(url).read()  # The url you want to open
    doc = xmltodict.parse(u)
    return doc


# + tags=[]
def process_result(url_result):
    """Store every comment found in *url_result*; return ids with no comments left.

    Games whose current page has no 'comment' key are exhausted and can be
    dropped from subsequent page queries.
    """
    remove_list = []
    for g in url_result['items']['item']:
        if not isinstance(g, str):
            if 'comment' in g['comments'].keys():
                try:
                    # tuple layout: (game id, user, rating, comment value)
                    review_tup = [(g['@id'],) + tuple(review.values()) for review in g['comments']['comment']]
                    create_reviews(conn, review_tup)
                except Exception as e:
                    print(e, traceback.format_exc())
            else:
                remove_list.append(int(g['@id']))
        else:
            print('crazy str', g)
    return remove_list


# + tags=[]
conn = create_con('test.db')
game_generator = gen_batches()

# get all games, but do this per batch of 100 games
for batch in game_generator:
    print('new batch,', batch)
    # list of games
    games_in_batch = list(games['ID'][batch])
    # get maximum number of pages from game on top of the batch
    # (games are sorted by number of ratings, so the first has the most pages)
    num_pages = number_rating_pages(games_in_batch[0])+1
    if num_pages > 0:
        # get all the pages, quering as little games as possible and store the results
        for p in range(0, num_pages):
            for tryout in range(5):  # up to 5 retries per page
                try:
                    print('next query for page {} for {} games'.format(p, len(games_in_batch)))
                    # print('games still in batch are \n',games_in_batch)
                    if not games_in_batch:
                        print('gamelist empty!')
                        break
                    # convert list of current games to url input
                    ids = ','.join(str(g) for g in games_in_batch)
                    # query API and parse to dictionary
                    url_result = get_url_result(ids, p)
                    print('data retrieved')
                    # remove games that have no comment, they dont have to be queried again
                    removelist = process_result(url_result)
                    # print('to remove',removelist)
                    games_in_batch = [g for g in games_in_batch if g not in removelist]
                    time.sleep(8)  # be polite to the API
                    # if p%1000==0:
                    #     with open('ratings.pickle', 'wb') as handle:
                    #         pickle.dump(rating_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
                    break
                except Exception as e:
                    print('error', tryout, p, e)
                    time.sleep(30)

print('all done')
conn.close()
# -

# + tags=[]

# + tags=[]
# get tables from db
con = sqlite3.connect('test.db')
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(cursor.fetchall())

# + tags=[]
# distinct counts per column.
# BUG FIX: the original used `cur`, a name only defined inside the helper
# functions, so this cell raised NameError in a fresh run; `cursor` from the
# cell above is the intended handle.
for val in ['user', 'rating', 'ID']:
    print(val, cursor.execute("SELECT COUNT(*) FROM (SELECT DISTINCT " + val + " FROM reviews);").fetchall())
# -

# # Import DB

# + tags=[]
import pandas as pd
import sqlite3

# Create your connection.
cnx = sqlite3.connect('test.db')

from aoc import timeit
with timeit():
    df = pd.read_sql_query("SELECT * FROM reviews", cnx)
# -

df.to_csv('17mreviews.csv')

# + tags=[]
# -

# undo the column shift introduced at insert time (see create_reviews note)
df.rename(columns={'user': 'ID', 'rating': 'user', 'comment': 'rating', 'ID': 'comment'}, inplace=True)
df = df[['user', 'rating', 'comment', 'ID']]
df['ID'] = df['ID'].astype('int64')
df = df.merge(games[['ID', 'Name']], on='ID', how='left')
df.head()

len(df)

df.drop_duplicates(subset=None, keep='first', inplace=True, ignore_index=True)
len(df)

df.drop_duplicates(subset=['user', 'ID'], keep='first', inplace=True, ignore_index=True)
len(df)

df['user'].nunique()

filename = 'bgg-20m-reviews.csv'
df.rename(columns={'Name': 'name'}, inplace=True)
df.to_csv(filename)

import pandas as pd
import sqlite3
import aoc

filename = 'bgg-20m-reviews.csv'
with aoc.timeit():
    df = pd.read_csv(filename, index_col=0)
bgg_ratings_get_data_through_API.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.11 64-bit (''Spark'': conda)'
#     name: python3
# ---

# # NYC Taxi Project: Getting the Data

# +
## NOTES:
## TODO: dotenv configuration
# TODO: Commenting and Formatting
## Alternative Methods:
# [] url read into pandas and then export to parquet (pandas to pyarrow export? or pandas export?)
# [] incorporate gpu or parallelization?
# -

# import packages
import os
import requests
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.csv as csv  # NOTE: shadows the stdlib csv module in this script
from pyarrow import fs
from hdfs import InsecureClient
from tqdm.notebook import tqdm

# create local data directories.
# BUG FIX: the original iterated `for i in dir:` (the builtin dir function,
# not the `dirs` list) and called the nonexistent `os.mkdirs`; also the
# nested staging/nyc-taxi directory used below was never created.
root = './data'
dirs = ['staging', 'nyc-taxi', 'geo_files', os.path.join('staging', 'nyc-taxi')]
for d in dirs:
    os.makedirs(os.path.join(root, d), exist_ok=True)

# all year-month dates (2017-01 .. 2020-12) for the nyc-taxi file names
dates = [f'{year}-{month:02d}' for year in range(2017, 2021) for month in range(1, 13)]

# + 
# loop over urls to download files
# nyc-taxi files are stored on s3 with a standard filename convention
failed_links = []
csv_file_path = os.path.join('./data', 'staging', 'nyc-taxi')

for i, date in enumerate(tqdm(dates)):
    filename = f'yellow_taxi_{date}.csv'
    if filename in os.listdir(csv_file_path):
        print(f'File already exists: {os.path.join(csv_file_path, filename)}')
        continue
    else:
        url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_{date}.csv"
        print(f'fetching yellow taxi {date} data from: \n{url}')
        try:
            file_req = requests.get(url)
            url_content = file_req.content
            # `with` guarantees the handle is closed even if the write fails
            with open(f'{csv_file_path}/yellow_taxi_{date}.csv', 'wb') as csv_file:
                csv_file.write(url_content)
        # BUG FIX: the builtin ConnectionError does not catch requests'
        # failures — requests raises requests.exceptions.ConnectionError,
        # which is NOT a subclass of the builtin.
        except requests.exceptions.ConnectionError:
            failed_links.append(url)

print(f'{i+1 - len(failed_links)}/{i+1} files successfully fetched')
# -

# get lookup table for taxi location ids
url = 'https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv'
print(f'fetching yellow taxi locations table...')
file_req = requests.get(url)
url_content = file_req.content
with open(f'./data/taxi_location_lookup_table.csv', 'wb') as csv_file:
    csv_file.write(url_content)

# get taxi zone shapefiles
zipurl = 'https://s3.amazonaws.com/nyc-tlc/misc/taxi_zones.zip'
with urlopen(zipurl) as zipresp:
    with ZipFile(BytesIO(zipresp.read())) as zfile:
        zfile.extractall('./data/geo_files')

# convert csv files to parquet files
parquet_file_path = os.path.join('./data', 'nyc-taxi')
tq_csv_files = tqdm([file for file in os.listdir(csv_file_path)])
for csv_file in tq_csv_files:
    # NOTE: str.replace swaps every 'csv' substring; safe here because the
    # generated names only contain 'csv' as the extension
    parquet_file = csv_file.replace('csv', 'parquet')
    if parquet_file in os.listdir(parquet_file_path):
        print(f'{parquet_file} already exists')
        continue
    tq_csv_files.set_description(f'Converting {csv_file} to parquet...')
    try:
        table = csv.read_csv(f'{csv_file_path}/{csv_file}')
    except pa.lib.ArrowInvalid:
        # fall back to pandas for files pyarrow's fast csv reader rejects
        df = pd.read_csv(f'{csv_file_path}/{csv_file}', low_memory=False)
        table = pa.Table.from_pandas(df)
    pq.write_table(table, f'{parquet_file_path}/{parquet_file}', compression='snappy')

# check total file size of parquet data files
prqt_size = round(sum(os.path.getsize(os.path.join(parquet_file_path, f))
                      for f in os.listdir(parquet_file_path)) / 1e9, 2)
print(f'Total parquet data size: {prqt_size} GB')

# +
# load data into hdfs
namenode_URI = 'http://localhost:9870'
hadoop_user = 'hadoop'

hdfs = InsecureClient(namenode_URI, user=hadoop_user)
hdfs_path = '/user/hadoop/input/'

for parquet in os.listdir(parquet_file_path):
    local_path = os.path.join('./data', 'nyc-taxi', parquet)
    hdfs.upload(hdfs_path, local_path, n_threads=4)

hdfs.list(hdfs_path)
GetData.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Neural Network : Linear Regression ( Large Dataset + Noise )
#
# Fits y = m*x + b by mini-batch gradient descent on 100k noisy points.
# NOTE(review): uses the TensorFlow 1.x API (placeholders/Sessions); under
# TF2 this requires tf.compat.v1 with eager execution disabled.

# +
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -

# 100k x-values in [0, 10] with unit Gaussian noise
x_data = np.linspace(0.0, 10.0, 100000) + np.random.randn(100000)

# ground truth: y = mx + b with m = 0.5, b = 5, plus noise
y_true = (0.5*x_data) + 5 + np.random.randn(100000)

x_df = pd.DataFrame(data=x_data, columns=['XData'])
print(x_df.head(5))

y_df = pd.DataFrame(data=y_true, columns=['YLabel'])
print(y_df.head(5))

data = pd.concat([x_df, y_df], axis=1)
data.head(5)

# plot a small random sample — plotting all 100k points would be unreadable
data.sample(n=250).plot(kind='scatter', x="XData", y="YLabel")

batch_size = 8

np.random.randn(2)

# trainable slope and intercept, arbitrary initial values
m = tf.Variable(0.39)
b = tf.Variable(0.23)

# placeholders for one mini-batch of inputs/labels
xph = tf.placeholder(tf.float32, [batch_size])
yph = tf.placeholder(tf.float32, [batch_size])

y = m * xph + b

# Loss Function: sum of squared errors over the batch.
error = tf.reduce_sum(tf.square(yph - y))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(error)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    batches = 1000
    for i in range(batches):
        # sample a random mini-batch (with replacement) each step
        rand_ind = np.random.randint(low=0, high=len(x_data), size=batch_size)
        # BUG FIX: removed a stray debug `print(x_data)` here that dumped the
        # entire 100k-element array once per step (1000 times).
        feed = {xph: x_data[rand_ind], yph: y_true[rand_ind]}
        sess.run(train, feed_dict=feed)
    model_m, model_b = sess.run([m, b])
    # print(sess.run(error))

print(model_m)
print(model_b)

# fitted line over the whole dataset
y_hat = x_data*model_m + model_b

data.sample(n=250).plot(kind='scatter', x="XData", y="YLabel")
plt.plot(x_data, y_hat, 'r')
03_NN_LR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from collections import deque
from IPython.display import display, HTML

# notebook shell line (runs via IPython's automagic, not valid plain Python)
pip install ipysheet

import pandas as pd
from collections import Counter
from collections import defaultdict
import math
import numpy as np
from pprint import pprint
# from tqdm import tqdm

# padding character used to pre-fill the dictionary and pad the input tail
FILL_CHAR = '~'

s = '<NAME>'
s_2 = '<NAME>, описка.'

# variant-18 parameters: dictionary size 13, look-ahead buffer size 6
var_18_s = 13
var_18_b = 6

from IPython.display import display_html
def display_side_by_side(*args):
    # Render several DataFrames next to each other in one notebook output cell.
    html_str=''
    for df in args:
        html_str+=df.to_html()
    display_html(html_str.replace('table','table style="display:inline"'),raw=True)

def encode_LZ77(string:str, s:int, b:int, display = True, save = False, save_path:str = None, save_name:str = None)->list:
    """Encodes string LZ77, s - size of dictionary, b - size of buffer.

    Returns a list of (offset, length, next_char) triples. When `display`
    is true, shows the per-step dictionary/buffer/code tables; when `save`
    is true, writes them to Excel under save_path+save_name (save_path
    must therefore be set whenever save=True).
    """
    if display:
        print(string)
    # sliding dictionary window, pre-filled with FILL_CHAR
    dictionary = deque()
    for i in range(s):
        dictionary.append(FILL_CHAR)
    # pad the tail so the buffer can always be refilled past the end
    string += ''.join([FILL_CHAR for i in range(b)])
    code = []
    buffer = deque()
    for c in string[:b]:
        buffer.append(c)
    size_buffer = b
    # per-iteration snapshots for the step-by-step tables
    df_dict = pd.DataFrame(columns = [i for i in range(s)])
    df_buffer = pd.DataFrame(columns = [i for i in range(b)])
    iteration = 0
    # `b` doubles as the index of the next unread character in `string`
    while b<len(string):
        df_dict.loc[iteration] = list(dictionary)
        df_buffer.loc[iteration] = list(buffer)
        flag = False
        # try the longest dictionary match first
        for sub_str_len in range(size_buffer,0,-1):
            for i in range(s-sub_str_len):
                #print('1',''.join(list(dictionary)[i:i+sub_str_len]))
                #print('2',''.join(list(buffer))[:sub_str_len])
                if ''.join(list(dictionary)[i:i+sub_str_len])==''.join(list(buffer))[:sub_str_len]:
                    # emit (offset, match length, first non-matching char)
                    code.append((i,sub_str_len,buffer[sub_str_len]))
                    # slide match length + 1 characters through dict/buffer.
                    # NOTE: the inner `i` deliberately shadows the match
                    # offset `i`; the offset is not needed afterwards.
                    for i in range(sub_str_len+1):
                        buffer.append(string[b])
                        c = buffer.popleft()
                        dictionary.append(c)
                        dictionary.popleft()
                        b+=1
                    flag = True
                    break
            if flag:
                break
        if not flag:
            # no match: emit a literal (0, 0, char) and slide by one
            buffer.append(string[b])
            c = buffer.popleft()
            dictionary.append(c)
            dictionary.popleft()
            code.append((0,0,c))
            b+=1
        iteration+=1
    df_code = pd.DataFrame(code)
    if display:
        display_side_by_side(df_dict,df_buffer,df_code)
    if save:
        df_dict.to_excel(save_path+save_name+'_dict.xlsx')
        df_buffer.to_excel(save_path+save_name+'_buffer.xlsx')
        df_code.to_excel(save_path+save_name+'_code.xlsx')
    return code

code_s = encode_LZ77(s,13,6)

pd.DataFrame([1])

code_s = encode_LZ77(s,13,6)

def decode_LZ77(code:list, s:int, b:int, display = True, save = False, save_path:str = None, save_name:str = None)->str:
    """Decodes code LZ77, s - size of dictionary, b - size of buffer.

    Inverse of encode_LZ77: replays each (offset, length, next_char)
    triple against a sliding dictionary and returns the decoded string
    (including the trailing FILL_CHAR padding added by the encoder).
    """
    res = ''
    outputs = []
    dictionary = deque()
    dictionaries = []
    for _ in range(s):
        dictionary.append(FILL_CHAR)
    for tuple_ in code:
        output = ''
        # copy the matched substring out of the dictionary window
        for i in range(tuple_[1]):
            dictionary.append(dictionary[tuple_[0]+i])
            output += dictionary[tuple_[0]+i]
            res += dictionary[tuple_[0]+i]
        # print(output)
        # outputs.append(output)
        for i in range(tuple_[1]):
            dictionary.popleft()
        # then append the literal next character
        dictionary.append(tuple_[2])
        dictionary.popleft()
        # display-only: spaces are shown as '?' in the step table;
        # the real character still goes into `res` below
        if tuple_[2] == ' ':
            output += '?'
        else:
            output += tuple_[2]
        res += tuple_[2]
        outputs.append(output)
        dictionaries.append(list(dictionary))
    df_outputs = pd.DataFrame(outputs)
    df_dictionaries = pd.DataFrame(dictionaries)
    df_code = pd.DataFrame(code)
    if display:
        display_side_by_side(df_code,df_outputs,df_dictionaries)
    if save:
        df_outputs.to_excel(save_path+save_name+'_outputs.xlsx')
        df_dictionaries.to_excel(save_path+save_name+'_dictionaries.xlsx')
        #df_code.to_excel(save_path+save_name+'_code.xlsx')
    return res

decode_LZ77(code_s,13,6)

# # main

s_2
code_vyraz_2 = encode_LZ77(s_2,var_18_s,var_18_b,display= False,save = True,save_path='./results/',save_name='vyraz_2_dot')
decoded_vyraz_2 = decode_LZ77(code_vyraz_2,var_18_s,var_18_b,display= False,save = True,save_path='./results/',save_name='vyraz_2_dot_decoded')
code_vyraz = encode_LZ77(s,var_18_s,var_18_b,display= False,save = True,save_path='./results/',save_name='vyraz')
decoded_vyraz = decode_LZ77(code_vyraz,var_18_s,var_18_b,display= False,save = True,save_path='./results/',save_name='vyraz_decoded')

code = encode_LZ77(s_2,var_18_s,var_18_b)

decode_LZ77(code,var_18_s, var_18_b)

# character frequencies of s, for the entropy/Huffman comparison below
freq_dict = Counter(s)

len(s)

sorted(freq_dict.items(),key = lambda item:item[1],reverse = True)

normalized_freq_dict = {c:round(freq/sum(freq_dict.values()),4) for c,freq in freq_dict.items()}

normalized_freq_dict

sorted_normalized_freq_dict = sorted(normalized_freq_dict.items(),key = lambda item: item[1] ,reverse = True)

sorted_normalized_freq_dict

def entropy(freq:dict)->float:
    # Shannon entropy of a probability table, in nats (math.log is natural
    # log; divide by log 2 for bits). NOTE: the loop variable deliberately
    # reuses the name `freq` — after the loop the parameter is shadowed.
    res = 0
    for freq in freq.values():
        res+=-math.log(freq)*freq
    return res

entropy(normalized_freq_dict)

# hand-built Huffman code table for the characters of s
huffman = {'A':'01',
'L':'100',
'S':'101',
'P':'1100',
'U':'1101',
'C':'1111',
" ":'1110',
'M':'000',
'I':'001'
}

s

huffman_coded = ''
for c in s:
    huffman_coded+=huffman[c]
huffman_coded

# # draft

# Earlier draft of the encoder, kept commented out for reference.
# + 
# def encode_LZ77(string,s,b):
#     dictionary = deque()
#     for i in range(s):
#         dictionary.append('^')
#     string += ''.join(['^' for i in range(b)])
#     print(string)
#     code = []
#     buffer = deque()
#     for c in string[:b]:
#         buffer.append(c)
#     size_buffer = b
#     df_dict = pd.DataFrame(columns = [i for i in range(s)])
#     df_buffer = pd.DataFrame(columns = [i for i in range(b)])
#     iteration = 0
#     print(string)
#     while buffer:
#         try:
#             # print(buffer)
#             # print(dictionary)
#             df_dict.loc[iteration] = list(dictionary)
#             df_buffer.loc[iteration] = list(buffer)
#             flag = False
#             for sub_str_len in range(size_buffer,0,-1):
#                 for i in range(s-sub_str_len):
#                     #print('1',''.join(list(dictionary)[i:i+sub_str_len]))
#                     #print('2',''.join(list(buffer))[:sub_str_len])
#                     if ''.join(list(dictionary)[i:i+sub_str_len])==''.join(list(buffer))[:sub_str_len]:
#                         code.append((i,sub_str_len,buffer[sub_str_len]))
#                         for i in range(sub_str_len+1):
#                             buffer.append(string[b])
#                             c = buffer.popleft()
#                             dictionary.append(c)
#                             dictionary.popleft()
#                             b+=1
#                         print('flag')
#                         print(buffer)
#                         print(dictionary)
#                         print('code',code)
#                         flag = True
#                         print('flag')
#                         break
#                 if flag:
#                     break
#             if not flag:
#                 buffer.append(string[b])
#                 c = buffer.popleft()
#                 dictionary.append(c)
#                 dictionary.popleft()
#                 code.append((0,0,c))
#                 b+=1
#             iteration+=1
#         except:
#             print('log')
#             break
#     print(code)
#     display_side_by_side(df_dict,df_buffer)
#     # df_dict.to_csv('./df_dict.csv')
#     # df_buffer.to_csv('./df_buffer.csv')

# encode_LZ77(s_2,13,6)
# -

# Second draft encoder: redefines encode_LZ77 above (notebook-style
# iteration); uses '.' as the fill character and a bare except to stop at
# the end of input instead of an explicit bound check.
def encode_LZ77(string,s,b):
    dictionary = deque()
    for i in range(s):
        dictionary.append('.')
    string += ''.join(['.' for i in range(b)])
    print(string)
    code = []
    buffer = deque()
    for c in string[:b]:
        buffer.append(c)
    size_buffer = b
    df_dict = pd.DataFrame(columns = [i for i in range(s)])
    df_buffer = pd.DataFrame(columns = [i for i in range(b)])
    iteration = 0
    while buffer:
        try:
            print(buffer)
            print(dictionary)
            #b+=1
            #buffer.append(string[b])
            #c = buffer.popleft()
            #dictionary.append(c)
            #dictionary.popleft()
            flag = False
            for sub_str_len in range(size_buffer,-1,-1):
                for i in range(s-sub_str_len):
                    #print('1',''.join(list(dictionary)[i:i+sub_str_len]))
                    #print('2',''.join(list(buffer))[:sub_str_len])
                    if ''.join(list(dictionary)[i:i+sub_str_len])==''.join(list(buffer))[:sub_str_len]:
                        code.append((i,sub_str_len,buffer[sub_str_len]))
                        for i in range(sub_str_len):
                            b+=1
                            buffer.append(string[b])
                            c = buffer.popleft()
                            dictionary.append(c)
                            dictionary.popleft()
                        """print('code',code)"""
                        print('flag')
                        flag = True
                        print('flag')
                        break
                if flag:
                    break
            if not flag:
                b+=1
                buffer.append(string[b])
                c = buffer.popleft()
                dictionary.append(c)
                dictionary.popleft()
                code.append((0,0,c))
            iteration+=1
            df_dict.loc[iteration] = list(dictionary)
            df_buffer.loc[iteration] = list(buffer)
        except:
            break
    print(code)
    display(df_dict)
    display(df_buffer)
    df_dict.to_csv('./df_dict.csv')
    df_buffer.to_csv('./df_buffer.csv')

encode_LZ77(s_2,13,6)

# Draft decoder: redefines decode_LZ77 above; builds only the display
# tables (returns nothing) and also masks matched spaces as '?'.
# +
def decode_LZ77(code,s,b):
    res = ''
    outputs = []
    dictionary = deque()
    dictionaries = []
    for _ in range(s):
        dictionary.append(FILL_CHAR)
    for tuple_ in code:
        output = ''
        for i in range(tuple_[1]):
            dictionary.append(dictionary[tuple_[0]+i])
            if tuple_[2] == ' ':
                output += '?'
            else:
                output += dictionary[tuple_[0]+i]
        # print(output)
        # outputs.append(output)
        for i in range(tuple_[1]):
            dictionary.popleft()
        dictionary.append(tuple_[2])
        dictionary.popleft()
        if tuple_[2] == ' ':
            output += '?'
        else:
            output += tuple_[2]
        outputs.append(output)
        dictionaries.append(list(dictionary))
        # if tuple_[1] == 0:
        #     dictionary.popleft()
        #     dictionary.append(tuple_[2])
        #     output = tuple_[2]
        #     outputs.append(output)
        # else:
        #     for i in range(tuple_[1]):
        #         print(tuple_[0]+i)
        #         print(dictionary)
        #         print(tuple_)
        #         print(dictionary[tuple_[0]])
        #         dictionary.append(dictionary[tuple_[0]+i])
        #         output += dictionary[tuple_[0]+i]
        #     outputs.append(output)
        #     for i in range(tuple_[1]):
        #         dictionary.popleft()
        # print(dictionary)
    # print(code)
    df_outputs = pd.DataFrame(outputs)
    df_dictionaries = pd.DataFrame(dictionaries)
    df_code = pd.DataFrame(code)
    display_side_by_side(df_code,df_outputs,df_dictionaries)
# -
main_3.ipynb