code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import cv2 import numpy as np import matplotlib.pyplot as plt from os.path import join from unet_src.fast_inference import UnetV2FastApplier # %matplotlib inline # - # inference params device_id = 0 # GPU device id images_path = '/sly_task_data/img/' # it could be one image or directory with several images settings = {"device_id": 0} applier = UnetV2FastApplier(settings) def get_imgs_list(path): if os.path.isdir(path): imgs_list = [join(path, fp) for fp in os.listdir(path)] else: imgs_list = [path] return imgs_list # Get images list# Get i imgs_list = get_imgs_list(images_path) for img_fp in imgs_list: image = cv2.imread(img_fp) if image is None: print('Something wrong with image: {}'.format(img_fp)) break image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) mask = applier.inference(image) blank_mask = np.zeros((image.shape[0], image.shape[1], 3)).astype('uint8') for obj in mask["objects"]: if obj.class_title == 'road': obj.draw(blank_mask, [255, 0, 0]) res = cv2.addWeighted(image, 1, blank_mask, 0.7, 0) plt.figure(figsize=(10, 10)) plt.imshow(res) plt.show()
superviselySDK/help/tutorials_legacy/02_unet_inference/src/02_unet_inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="PXzooPBq7ICh" # ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) # + [markdown] id="sQFJ6aT4nfhw" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/7.Clinical_NER_Chunk_Merger.ipynb) # + [markdown] id="8zIZE0c_7ICk" # # 7. Clinical NER Chunk Merger # + colab={"base_uri": "https://localhost:8080/", "height": 73, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} executionInfo={"elapsed": 14877, "status": "ok", "timestamp": 1639427983364, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="MdE588BiY3z1" outputId="9fa825e7-72e1-4aa1-bd00-6e0305d30a1c" import json from google.colab import files license_keys = files.upload() with open(list(license_keys.keys())[0]) as f: license_keys = json.load(f) # Defining license key-value pairs as local variables locals().update(license_keys) # Adding license key-value pairs to environment variables import os os.environ.update(license_keys) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47154, "status": "ok", "timestamp": 1639428050489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="RQAa_8kUXEvh" outputId="b06a3e51-a482-400b-c3de-077b337fcbc7" # Installing pyspark and spark-nlp # ! 
pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION # Installing Spark NLP Healthcare # ! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22425, "status": "ok", "timestamp": 1639428098916, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="TTVtJh6oW1EY" outputId="00e49d9c-ada8-449f-aaea-64365dbd9719" import json import os from pyspark.ml import Pipeline,PipelineModel from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl import sparknlp import pandas as pd params = {"spark.driver.memory":"16G", "spark.kryoserializer.buffer.max":"2000M", "spark.driver.maxResultSize":"2000M"} spark = sparknlp_jsl.start(license_keys['SECRET'],params=params) print (sparknlp.version()) print (sparknlp_jsl.version()) # + id="hx2jxxCaVlOV" # if you want to start the session with custom params as in start function above def start(secret): builder = SparkSession.builder \ .appName("Spark NLP Licensed") \ .master("local[*]") \ .config("spark.driver.memory", "16G") \ .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \ .config("spark.kryoserializer.buffer.max", "2000M") \ .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.11:"+version) \ .config("spark.jars", "https://pypi.johnsnowlabs.com/"+secret+"/spark-nlp-jsl-"+jsl_version+".jar") return builder.getOrCreate() #spark = start(secret) # + colab={"base_uri": "https://localhost:8080/", "height": 216} id="7zP-9FcXVzx7" outputId="6d355502-fa28-4247-9dc2-6b104f85abf4" spark # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3196, "status": "ok", "timestamp": 1639428901762, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="1zgsiTxjaiMd" outputId="c8581e51-6cbb-47a2-c170-80de276b3acd" # Sample data data_chunk_merge = spark.createDataFrame([ (1,"""A 63 years old man presents to the hospital with a history of recurrent infections that include cellulitis, pneumonias, and upper respiratory tract infections. He reports subjective fevers at home along with unintentional weight loss and occasional night sweats. The patient has a remote history of arthritis, which was diagnosed approximately 20 years ago and treated intermittently with methotrexate (MTX) and prednisone. On physical exam, he is found to be febrile at 102°F, rather cachectic, pale, and have hepatosplenomegaly. Several swollen joints that are tender to palpation and have decreased range of motion are also present. His laboratory values show pancytopenia with the most severe deficiency in neutrophils. """)]).toDF("id","text") data_chunk_merge.show(truncate=50) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28749, "status": "ok", "timestamp": 1639428890127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="r2Yr96wrWPUH" outputId="00bb6e6b-8b82-4264-b176-3921cecc80b6" # Annotator that transforms a text column from dataframe into an Annotation ready for NLP documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") # Sentence Detector annotator, processes various sentences per line sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") # Tokenizer splits words in a relevant format for NLP tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token") # Clinical word embeddings trained on PubMED dataset word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ 
.setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") # NER model trained on i2b2 (sampled from MIMIC) dataset clinical_ner = MedicalNerModel.pretrained("ner_deid_large", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("clinical_ner") clinical_ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "clinical_ner"]) \ .setOutputCol("clinical_ner_chunk") # internal clinical NER (general terms) jsl_ner = MedicalNerModel.pretrained("ner_jsl", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("jsl_ner") jsl_ner_converter = NerConverter() \ .setInputCols(["sentence", "token", "jsl_ner"]) \ .setOutputCol("jsl_ner_chunk") # merge ner_chunks by prioritizing the overlapping indices (chunks with longer lengths and highest information will be kept from each ner model) chunk_merger = ChunkMergeApproach()\ .setInputCols('clinical_ner_chunk', "jsl_ner_chunk")\ .setOutputCol('merged_ner_chunk') # merge ner_chunks regardess of overlapping indices # only works with 2.7 and later chunk_merger_NonOverlapped = ChunkMergeApproach()\ .setInputCols('clinical_ner_chunk', "jsl_ner_chunk")\ .setOutputCol('nonOverlapped_ner_chunk')\ .setMergeOverlapping(False) nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, clinical_ner, clinical_ner_converter, jsl_ner, jsl_ner_converter, chunk_merger, chunk_merger_NonOverlapped]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) # + executionInfo={"elapsed": 2039, "status": "ok", "timestamp": 1639428910499, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="JMI6QDcunfjI" merged_data = model.transform(data_chunk_merge).cache() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8385, "status": "ok", "timestamp": 1639428955044, 
"user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="h3zcTo_GgHoO" outputId="7d8bb5b5-596d-4fde-c90f-390e1b1a0118" from pyspark.sql import functions as F result_df = merged_data.select('id',F.explode('merged_ner_chunk').alias("cols")) \ .select('id',F.expr("cols.begin").alias("begin"), F.expr("cols.end").alias("end"), F.expr("cols.result").alias("chunk"), F.expr("cols.metadata.entity").alias("entity")) result_df.show(50, truncate=100) # + [markdown] id="6YwHxwQnIqgF" # ## NonOverlapped Chunk # # all the entities form each ner model will be returned one by one # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 777, "status": "ok", "timestamp": 1639429003755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="-GLlnbuUIuY4" outputId="9c6004ae-7bbd-4305-ff41-23fdb158b5c6" from pyspark.sql import functions as F result_df2 = merged_data.select('id',F.explode('nonOverlapped_ner_chunk').alias("cols")) \ .select('id',F.expr("cols.begin").alias("begin"), F.expr("cols.end").alias("end"), F.expr("cols.result").alias("chunk"), F.expr("cols.metadata.entity").alias("entity")) result_df2.show(50, truncate=100) # + [markdown] id="RRHkcM3xEkUo" # ### ChunkMergeApproach to admit N input cols # We can feed the ChunkMergerApproach more than 2 chunks, also, we can filter out the entities that we don't want to get from the ChunkMergeApproach using `setBlackList` parameter. 
# + id="3V33H6xhEo2C" import json # !mkdir data # + id="OxamRim-hwFZ" sample_text = """A 28 year old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation , associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting. Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation . Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . 
The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Twenty days ago. Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . At birth the typical boy is growing slightly faster than the typical girl, but the velocities become equal at about seven months, and then the girl grows faster until four years. From then until adolescence no differences in velocity can be detected. 21-02-2020 21/04/2020 """ # + id="yyQ67FlTEpal" # Defining ContextualParser for feeding ChunkMergerApproach #defining rules date = { "entity": "Parser_Date", "ruleScope": "sentence", "regex": "\\d{1,2}[\\/\\-\\:]{1}(\\d{1,2}[\\/\\-\\:]{1}){0,1}\\d{2,4}", "valuesDefinition":[], "prefix": [], "suffix": [], "contextLength": 150, "context": [] } with open('data/date.json', 'w') as f: json.dump(date, f) age = { "entity": "Parser_Age", "ruleScope": "sentence", "matchScope":"token", "regex" : "^[1][0-9][0-9]|[1-9][0-9]|[1-9]$", "prefix":["age of", "age"], "suffix": ["-years-old", "years-old", "-year-old", "-months-old", "-month-old", "-months-old", "-day-old", "-days-old", "month old", "days old", "year old", "years old", "years", "year", "months", "old" ], "contextLength": 25, "context": [], "contextException": ["ago"], "exceptionDistance": 10 } with open("data/age.json", 'w') as f: json.dump(age, f) # + [markdown] id="_o_ycAgYf46P" # Using two ContextualParserApproach models and NER model in the same pipeline and merging by ChunkMergeApproach # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11661, "status": "ok", "timestamp": 1638987886967, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="747Y7h-2EpX_" outputId="c4025273-7832-452d-d7ed-768d58c5e841" documentAssembler = DocumentAssembler()\ 
.setInputCol("text")\ .setOutputCol("document") # Sentence Detector annotator, processes various sentences per line sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") # Tokenizer splits words in a relevant format for NLP tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token") # Contextual parser for age age_contextual_parser = ContextualParserApproach() \ .setInputCols(["sentence", "token"]) \ .setOutputCol("entity_age") \ .setJsonPath("data/age.json") \ .setCaseSensitive(False) \ .setContextMatch(False)\ .setPrefixAndSuffixMatch(False) chunks_age= ChunkConverter()\ .setInputCols("entity_age")\ .setOutputCol("chunk_age") # Contextual parser for date date_contextual_parser = ContextualParserApproach() \ .setInputCols(["sentence", "token"]) \ .setOutputCol("entity_date") \ .setJsonPath("data/date.json") \ .setCaseSensitive(False) \ .setContextMatch(False)\ .setPrefixAndSuffixMatch(False) chunks_date = ChunkConverter().setInputCols("entity_date").setOutputCol("chunk_date") # Clinical word embeddings word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") # Extracting entities by ner_deid_large ner_model = MedicalNerModel.pretrained("ner_deid_large","en","clinical/models") \ .setInputCols("sentence","token","embeddings") \ .setOutputCol("ner") ner_converter= NerConverter()\ .setInputCols(["sentence", "token", "ner"])\ .setOutputCol("ner_chunk")\ .setWhiteList(["DATE", "AGE"]) # Chunkmerger; prioritize age_contextual_parser parser_based_merge= ChunkMergeApproach()\ .setInputCols(["chunk_age", "chunk_date", "ner_chunk"])\ .setOutputCol("merged_chunks") # Chunkmerger; prioritize ner_chunk ner_based_merge= ChunkMergeApproach()\ .setInputCols(["ner_chunk", "chunk_age", "chunk_date"])\ .setOutputCol("merged_chunks_2") # Using black list for limiting the entity types that will be extracted limited_merge= 
ChunkMergeApproach()\ .setInputCols(["ner_chunk", "chunk_age", "chunk_date"])\ .setOutputCol("merged_chunks_black_list")\ .setBlackList(["DATE", "Parser_Date"]) # this will block the dates. pipeline= Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, age_contextual_parser, chunks_age, date_contextual_parser, chunks_date, word_embeddings, ner_model, ner_converter, parser_based_merge, ner_based_merge, limited_merge ]) empty_df= spark.createDataFrame([[""]]).toDF("text") model= pipeline.fit(empty_df) lmodel= LightPipeline(model) lresult= lmodel.fullAnnotate(sample_text)[0] # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 381, "status": "ok", "timestamp": 1638987889793, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="gVAEetRLfwO7" outputId="c7ef7286-0fb4-4e80-a1de-1ab0c57ac5bd" lresult.keys() # + [markdown] id="hJSfWfcliXdw" # If there is an overlap among the input entity types, ChunkMergerApproach model prioritizes the leftmost input. <br/> # # At the 'parser_based_merge', we gave the contextual parser's chunks firstly. Therefore, 'parser_based_merge' prioritized the "Parser_Age" and "Parser_Date" entities over the "AGE" and "DATE" entity types that comes from NER model. <br/> # # At the 'ner_based_merge', we gave the Ner model's inputs firstly, thus 'ner_based_merge' prioritized the "AGE" and "DATE" entities over the "Parser_Age" and "Parser_Date". <br/> # # At the limited_merge, we excluded "DATE" and "Parser_Date" entity types. 
# # Let's compare the results of these ChunkMergeApproach below: # + colab={"base_uri": "https://localhost:8080/", "height": 142} executionInfo={"elapsed": 493, "status": "ok", "timestamp": 1638987926600, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="GFxiZuPpEpVe" outputId="32f6d983-0b37-4dd1-ab75-85d1152dd84a" chunk= [] parser_based_merge= [] ner_based_merge= [] for i, k in list(zip(lresult["merged_chunks"], list(lresult["merged_chunks_2"],))): parser_based_merge.append(i.metadata["entity"]) ner_based_merge.append(k.metadata["entity"]) chunk.append(i.result) df= pd.DataFrame({"chunk": chunk,"parser_based_merged_entity": parser_based_merge, "ner_based_merged_entity": ner_based_merge}) df.head() # + [markdown] id="JojNm1cujUl0" # `.setBlackList()` applied results: # + colab={"base_uri": "https://localhost:8080/", "height": 80} executionInfo={"elapsed": 6, "status": "ok", "timestamp": 1638987930142, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14855809472179427810"}, "user_tz": -180} id="h5V3OqFoEpSm" outputId="0fdcd80d-6643-47e0-f87e-af9bee327461" chunk= [] limited_merge_entity= [] for i in list(lresult["merged_chunks_black_list"]): chunk.append(i.result) limited_merge_entity.append(i.metadata["entity"]) df= pd.DataFrame({"chunk": chunk, "limited_entity": limited_merge_entity }) df.head()
tutorials/Certification_Trainings/Healthcare/7.Clinical_NER_Chunk_Merger.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Basic GAN # # * GAN-training: as per Goodfellow et al. (2014) # * Architecture: # * Discriminator: two CNN layers, two FC layers, outputs logits. # * Generator: four-layered DeConv Net. # * Comments: # * GAN is notoriously difficult to train in vanilla mode, very careful tuning of params, regularization and the architecture of the two nets is essential. # + import tensorflow as tf import random import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from tensorflow.examples.tutorials.mnist import input_data # - # ### Prepare data mnist = input_data.read_data_sets("MNIST_data/") x_train = mnist.train.images[:55000,:] print("Image count: {} | Image pixel size: {}".format(*x_train.shape)) print("Image sample:") plt.imshow(x_train[random.randint(0,len(x_train))].reshape([28,28]), cmap='gray_r') plt.show() # ### CNN # + def conv2d(x, W): return tf.nn.conv2d(input=x, filter=W, strides=[1,1,1,1], padding='SAME') def avg_pool2d(x): return tf.nn.avg_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') # ksize: <batch,height,width,channels> # - # ### Discriminator def D(x_image, keep_prob=1.0, reuse=False): # x-image: <batch,height,width,in-channels> with tf.variable_scope('discriminator') as scope: if reuse: tf.get_variable_scope().reuse_variables() # Conv-avgpool 1 W_conv1 = tf.get_variable('d_wconv1', [5,5,1,8], initializer=tf.truncated_normal_initializer(stddev=0.02)) # shape: <filter-height/batch,filter-width/num-pixels,in-channels,out-channels> # in-channels = input #channels, usually set to 1. # out-channels = output #channels, however many params you want. 
b_conv1 = tf.get_variable('d_bconv1', [8], initializer=tf.constant_initializer(0)) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # <batch,height,width,out-chn> = <?,28,28,8> (NB: 'SAME' conv) h_pool1 = tf.nn.dropout(avg_pool2d(h_conv1),keep_prob) # <batch,height,width,out-chn> = <?,14,14,8> # 2x2 maxpooling, reducing height & width by half. # Conv-avgpool 2 W_conv2 = tf.get_variable('d_wconv2', [5,5,8,16], initializer=tf.truncated_normal_initializer(stddev=0.02)) b_conv2 = tf.get_variable('d_bconv2', [16], initializer=tf.constant_initializer(0)) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # <batch,height,width,out-chn> = <?,14,14,16> h_pool2 = tf.nn.dropout(avg_pool2d(h_conv2),keep_prob) # <batch,height,width,out-chn> = <?,7,7,16> # FC-1 W_fc1 = tf.get_variable('d_wfc1', [7*7*16,32], initializer=tf.truncated_normal_initializer(stddev=0.02)) # <height*width*out-chn,hid-fc1=32> b_fc1 = tf.get_variable('d_bfc1', [32], initializer=tf.constant_initializer(0)) h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*16]) # <batch,height,width,out-chn> -> <batch,height*width*out-chn> h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # affine: <batch,height*width*out-chn> * <height*width*out-chn,hid-fc1=32> = <batch,hid-fc1=32> # FC-2 W_fc2 = tf.get_variable('d_wfc2', [32,1], initializer=tf.truncated_normal_initializer(stddev=0.02)) # <hid-fc1=32,hid-fc2=1> b_fc2 = tf.get_variable('d_bfc2', [1], initializer=tf.constant_initializer(0)) y_conv= tf.add(tf.matmul(h_fc1, W_fc2), b_fc2) # affine: <batch,hid-fc1=32> * <hid-fc1=32,hid-fc2=1> = <batch,1> # NB: no softmax transformation, operating with logits. return y_conv # ### Generator def G(z, batch_size, z_dim, keep_prob=1.0, reuse=False): # z: random noise input: <batch,z-dim> # batch-size,z-dim: explicitly put for debugging, # but can be extracted at runtime with tf.unstack(tf.reshape(z)). 
with tf.variable_scope('generator') as scope: if reuse: tf.get_variable_scope().reuse_variables() g_dim = 64 # num-filters of first layer of generator c_dim = 1 # Color dimension of output (MNIST is grayscale, so c_dim = 1) s = 28 # Output size of the image s2, s4, s8, s16 = s//2, s//4, s//8, s//16 # Exponential image upscaling # ^1 -> ^4 for gradual transition. h0 = tf.reshape(z, [batch_size, s16+1, s16+1, 25]) # h0: <batch,height=2,width=2,in-chn=25> # s16=28//16 = 1 h0 = tf.nn.relu(h0) # DeConv-1 output1_shape = [batch_size,s8,s8,g_dim*4] # out-shape: <batch,height=3,width=3,out-chn=64*4> # s8=28//8 = 3 W_conv1 = tf.get_variable('g_wconv1', [5,5,output1_shape[-1],int(h0.get_shape()[-1])], initializer=tf.truncated_normal_initializer(stddev=0.1)) # shape: <filter-height/batch,filter-width/num-pixels,out-channels,in-channels> # NB: for deconv, in & out-channels switch place. b_conv1 = tf.get_variable('g_bconv1', [output1_shape[-1]], initializer=tf.constant_initializer(.1)) H_conv1 = tf.nn.conv2d_transpose(h0, W_conv1, output_shape=output1_shape, strides=[1,2,2,1], padding='SAME') + b_conv1 H_conv1 = tf.contrib.layers.batch_norm(inputs = H_conv1, center=True, scale=True, is_training=True, scope="g_bn1") # center,scale: (input-beta)/gamma # beta,gamma initialized as mean and std. 
H_conv1 = tf.nn.dropout(tf.nn.relu(H_conv1),keep_prob) # <batch,height=3,width=3,out-chn=256> # DeConv-2 output2_shape = [batch_size,s4-1,s4-1,g_dim*2] # out-shape: <batch,height=6,width=6,out-chn=64*2> # s4=28//4 = 7 W_conv2 = tf.get_variable('g_wconv2', [5,5,output2_shape[-1],int(H_conv1.get_shape()[-1])], initializer=tf.truncated_normal_initializer(stddev=0.1)) b_conv2 = tf.get_variable('g_bconv2', [output2_shape[-1]], initializer=tf.constant_initializer(.1)) H_conv2 = tf.nn.conv2d_transpose(H_conv1, W_conv2, output_shape=output2_shape, strides=[1,2,2,1], padding='SAME') + b_conv2 H_conv2 = tf.contrib.layers.batch_norm(inputs = H_conv2, center=True, scale=True, is_training=True, scope="g_bn2") H_conv2 = tf.nn.dropout(tf.nn.relu(H_conv2),keep_prob) # <batch,height=6,width=6,out-chn=128> # DeConv-3 output3_shape = [batch_size,s2-2,s2-2,g_dim*1] # out-shape: <batch,height=12,width=12,out-chn=64*1> # s2=28//2 = 14 W_conv3 = tf.get_variable('g_wconv3', [5,5,output3_shape[-1],int(H_conv2.get_shape()[-1])], initializer=tf.truncated_normal_initializer(stddev=0.1)) b_conv3 = tf.get_variable('g_bconv3', [output3_shape[-1]], initializer=tf.constant_initializer(.1)) H_conv3 = tf.nn.conv2d_transpose(H_conv2, W_conv3, output_shape=output3_shape, strides=[1,2,2,1], padding='SAME') + b_conv3 H_conv3 = tf.contrib.layers.batch_norm(inputs = H_conv3, center=True, scale=True, is_training=True, scope="g_bn3") H_conv3 = tf.nn.dropout(tf.nn.relu(H_conv3),keep_prob) # <batch,height=12,width=12,out-chn=64> # DeConv-final output4_shape = [batch_size,s,s,c_dim] # out-shape: <batch,height=28,width=28,out-chn=1> # s=28 # original data shape restored now. 
W_conv4 = tf.get_variable('g_wconv4', [5,5,output4_shape[-1],int(H_conv3.get_shape()[-1])], initializer=tf.truncated_normal_initializer(stddev=0.1)) b_conv4 = tf.get_variable('g_bconv4', [output4_shape[-1]], initializer=tf.constant_initializer(.1)) H_conv4 = tf.nn.conv2d_transpose(H_conv3, W_conv4, output_shape=output4_shape, strides=[1,2,2,1], padding='VALID') + b_conv4 H_conv4 = tf.nn.dropout(tf.nn.tanh(H_conv4),keep_prob) # <batch,height=28,width=28,out-chn=1> return H_conv4 # + # Results from generator prior to training tf.reset_default_graph() sess = tf.Session() z_dimensions = 100 # 100D random noise. z_test_placeholder = tf.placeholder(tf.float32, [None, z_dimensions]) sample_image = G(z_test_placeholder, 1, z_dimensions) test_z = np.random.normal(-1, 1, [1,z_dimensions]) sess.run(tf.global_variables_initializer()) temp = (sess.run(sample_image, feed_dict={z_test_placeholder: test_z})) # <batch-size=1,height=28,width=28,chn=1> my_i = temp.squeeze() # get rid of all dim-1's. -> <28,28> plt.imshow(my_i, cmap='gray_r') plt.show() # - # ### GAN # + # Build graph tf.reset_default_graph() sess = tf.Session() BATCH_SIZE = 32 x_placeholder = tf.placeholder(tf.float32, shape = [None,28,28,1]) # input images to the discriminator z_placeholder = tf.placeholder(tf.float32, [None, z_dimensions]) # input noise vectors to the generator Dx = D(x_placeholder) # <batch,1> # predicted (unscale) probabilities for the real images (i.e. logits). Gz = G(z_placeholder, BATCH_SIZE, z_dimensions) # <batch,height=28,width=28,chn=1> # the generated images. Dg = D(Gz, reuse=True) # i.e. D(Gz) # <batch,1> # discriminator prediction probabilities for generated images. # >> Dx, Gz, Dg # (<tf.Tensor 'discriminator/Add:0' shape=(?, 1) dtype=float32>, # <tf.Tensor 'generator/Tanh:0' shape=(16, 28, 28, 1) dtype=float32>, # <tf.Tensor 'discriminator_1/Add:0' shape=(16, 1) dtype=float32>) # Discriminator loss # J(D,G) = E[log(Dx)] + E[log(1-D(Gz))] # NB: only update D's params in training. 
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dx, labels = tf.ones_like(Dx))) # maximizes E[log(Dx)] by pushing Dx to label=1 (recognizing real image). d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dg, labels = tf.zeros_like(Dg))) # maximizes E[log(1-D(Gz))] by pushing D(Gz) to label=0 (recognizing fake image). d_loss = d_loss_real + d_loss_fake # Generator loss # J(D,G) = E[log(1-D(Gz))] # NB: only update G's params in training. g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dg, labels = tf.ones_like(Dg))) # minimizes E[log(1-D(Gz))] by pushing D(G(z)) to label=1 (fool discriminator). # Separate training params # Ensure the correct params are updated in training. tvars = tf.trainable_variables() d_vars = [var for var in tvars if 'd_' in var.name] g_vars = [var for var in tvars if 'g_' in var.name] # Create optimizers for D & G adam = tf.train.AdamOptimizer(2e-4) trainer_D = adam.minimize(d_loss, var_list=d_vars) trainer_G = adam.minimize(g_loss, var_list=g_vars) sess.run(tf.global_variables_initializer()) # + # Training # D-G alternation set to 1-1 # i.e. one update of D or G and switch. 
NUM_EPOCH = 5 NUM_ITERS = 3000 VERBOSE = 1000 try: for e in range(NUM_EPOCH): print("Epoch {}:\n".format(e+1)) d_loss_track, g_loss_track = [],[] for i in range(NUM_ITERS): z_batch = np.random.normal(-1, 1, size=[BATCH_SIZE, z_dimensions]) real_image_batch = np.reshape(mnist.train.next_batch(BATCH_SIZE)[0], [BATCH_SIZE,28,28,1]) _,d_loss_ = sess.run([trainer_D, d_loss],feed_dict={z_placeholder:z_batch, x_placeholder:real_image_batch}) _,g_loss_ = sess.run([trainer_G, g_loss],feed_dict={z_placeholder:z_batch}) d_loss_track.append(d_loss_) g_loss_track.append(g_loss_) if i%VERBOSE==0: print("Loss at step {}: D-loss = {} | G-loss = {}".format(i,d_loss_,g_loss_)) print("\nEpoch average: D-loss = {} | G-loss = {}\n".format(np.mean(d_loss_track), np.mean(g_loss_track))) except KeyboardInterrupt: print("Stopped!") # - # Results from generator after training sample_image = G(z_placeholder, 1, z_dimensions, reuse=True) z_batch = np.random.normal(-1, 1, size=[1, z_dimensions]) temp = (sess.run(sample_image, feed_dict={z_placeholder: z_batch})) my_i = temp.squeeze() print("Image sample:") plt.imshow(my_i, cmap='gray_r') plt.show()
Basic GAN (MNIST demo).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp core # - # # nbdev tutorial 01 # # > some API details. #hide import nbdev from nbdev.showdoc import * from fastcore.test import * # + #export def say_hi(to): "Say hi to someone" return f'Hello {to} :)' say_hi('nbdev') # - # ### test example test_eq(say_hi('nbdev'), 'Hello nbdev :)') #export class HelloSayer: "say hello to `to` using `say_hi` func" def __init__(self, to): self.to = to def say(self): "do the saying" return say_hi(self.to) # ### show doc example nbdev.showdoc.show_doc(HelloSayer.say) o = HelloSayer('nbdev') print(o.say()) #hide from nbdev.export import notebook2script notebook2script()
00_core.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ***Support Vector Machine***
# - Used for both classification and regression
# - mostly used for classification
#
# Hard margin
# - will try to avoid even a single misclassification
# - overfitting, performs poorly on new data
# - affected by outliers
#
# Soft margin
# - allows some misclassification
# - generalizes the model (overcomes the problem of overfitting)
# - not affected by outliers
#
# Support vectors
# - the datapoints which are used in creating the maximum margin are called support vectors
#
# Kernel types (a kernel transforms a low-dimensional space to a higher dimension)
# - linear
# - polynomial
# - rbf
# - sigmoid
#
# One dimension --- line
# two dimensions --- plane
# more than two dimensions --- hyperplanes

# Load the wine classification dataset (3 classes, 13 numeric features).
from sklearn.datasets import load_wine
wine=load_wine()
wine.keys()

print(wine.DESCR)

import pandas as pd
df=pd.DataFrame(wine.data,columns=wine.feature_names)
df["Target"]=wine.target
df.head()

df["Target"].unique()

df.isnull().sum()

import matplotlib.pyplot as plt
import seaborn as sns

y=wine.target
x=wine.data
features=wine.feature_names
# One boxplot per feature, grouped by class, to eyeball separability.
# (Generalized from a hard-coded range(13) to the actual feature count.)
for i in range(len(features)):
    sns.boxplot(x=y,y=x[:,i])
    plt.ylabel(features[i])
    plt.show()

from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=42)

# Baseline SVM with default hyperparameters (RBF kernel, C=1).
from sklearn.svm import SVC
model=SVC()
model.fit(x_train,y_train)
y_pred=model.predict(x_test)

df["Target"].value_counts()

from sklearn.metrics import accuracy_score,confusion_matrix,precision_score,recall_score
# FIX: sklearn's metric signature is (y_true, y_pred).  accuracy_score is
# symmetric, but confusion_matrix is NOT -- the original call passed
# (y_pred, y_test), which transposes the matrix (rows must be true labels).
print(accuracy_score(y_test,y_pred))
print(confusion_matrix(y_test,y_pred))
# print(precision_score(y_test,y_pred))
# print(recall_score(y_test,y_pred))

help(SVC())

# Compare the four kernel families on the same split.
kernel=["linear","poly","rbf","sigmoid"]
for i in kernel:
    model=SVC(kernel=i)
    model.fit(x_train,y_train)
    y_pred=model.predict(x_test)
    print("for kernel:",i)
    print("accuracy is:",accuracy_score(y_test,y_pred))

from sklearn.model_selection import GridSearchCV
help(GridSearchCV)

# Exhaustive search over C, kernel and polynomial degree with 3-fold CV.
parameters={"C":[0.1,1,100,1000],"kernel":["linear","poly","rbf","sigmoid"],"degree":[1,2,3,4,5,6]}
grid=GridSearchCV(SVC(), param_grid=parameters,cv=3)
grid.fit(x_train,y_train)

dir(grid)

grid.best_params_

# Refit with the best hyperparameters found above.
from sklearn.svm import SVC
model=SVC(C= 0.1, degree=1, kernel='linear')
model.fit(x_train,y_train)
y_pred=model.predict(x_test)

from sklearn.metrics import accuracy_score,confusion_matrix,precision_score,recall_score
# FIX: same (y_true, y_pred) argument-order correction as above.
print(accuracy_score(y_test,y_pred))
print(confusion_matrix(y_test,y_pred))

#try to apply load_breast_cancer dataset to svm
Day11_SVM/Support_vector_machine.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # name: python3 # --- # # Nigerian Music scraped from Spotify - an analysis import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df=pd.read_csv('../data/nigerian-songs.csv') df.head() df.info() df.isnull().sum() df.describe() top=df['artist_top_genre'].value_counts() plt.figure(figsize=(10,7)) sns.barplot(x=top[:10].index,y=top[:10].values) plt.xticks(rotation=45) plt.title('Top genres',color='blue') #Filtering out the entries with 'Missing' genre df=df[df['artist_top_genre']!= 'Missing'] top=df['artist_top_genre'].value_counts() plt.figure(figsize=(10,7)) sns.barplot(x=top[:10].index,y=top[:10].values) plt.xticks(rotation=45) plt.title('Top genres',color='blue') #Focussing on the top 3 genres, since they dominate the data by far df=df[(df['artist_top_genre']=='afro dancehall')| (df['artist_top_genre']=='afropop')| (df['artist_top_genre']=='nigerian pop')] df=df[df['popularity']>0] top=df['artist_top_genre'].value_counts() plt.figure(figsize=(10,7)) sns.barplot(x=top.index,y=top.values) plt.xticks(rotation=45) plt.title('Top genres',color='blue') corrmat=df.corr() f,ax=plt.subplots(figsize=(12,9)) sns.heatmap(corrmat,vmax=.8,square=True) # + sns.set_theme(style='ticks') g=sns.jointplot(data=df,x='popularity',y='danceability',hue='artist_top_genre',kind='kde') # - sns.FacetGrid(df,hue='artist_top_genre',size=5)\ .map(plt.scatter,'popularity','danceability')\ .add_legend()
5-Clustering/1-Visualize/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import os # use CPU or GPU os.environ['KERAS_BACKEND'] = 'theano' #os.environ['CUDA_VISIBLE_DEVICES'] = '0' os.environ['THEANO_FLAGS'] = 'device=cuda0' import keras import numpy as np # ### Data and weight loaders # + from glob import glob from random import Random import json rng = Random(42) # + import kenlm import beamsearch reload(beamsearch) from utils import argmax_decode, word_error_rate, for_tf_or_th from beamsearch import beam_decode, beam_decode_u lm = kenlm.Model('data/lm/lm.binary') def iterate_weights(model_path): """Iterate over saved model weights""" for model_weight in glob(os.path.join(model_path, '') + '*.h5'): yield model_weight def pick_sample_files(desc_file, count, min_duration, max_duration): metadata = [] with open(desc_file) as f: for line in f: metadata.append(json.loads(line)) legitimates = [ sample for sample in metadata if min_duration <= sample['duration'] <= max_duration ] rng.shuffle(legitimates) return legitimates[:count] def test_generator(datagen, test_samples, batch_size=64, normalize=True): global in_ texts = [s['text'] for s in test_samples] durations = [s['duration'] for s in test_samples] paths = [s['key'] for s in test_samples] features = [datagen.featurize(p) for p in paths] if normalize: features = [datagen.normalize(f) for f in features] for i in range( np.ceil(len(features) / float(batch_size)).astype(int) ): batch_durations = durations[i*batch_size: (i+1)*batch_size] batch_features = features[i*batch_size: (i+1)*batch_size] batch_texts = texts[i*batch_size: (i+1)*batch_size] batch_paths = paths[i*batch_size: (i+1)*batch_size] max_length = max([f.shape[0] for f in batch_features]) batch_array = np.zeros((len(batch_features), max_length, features[0].shape[1]), dtype='float32') for fi in 
range(len(batch_features)): batch_array[fi, :batch_features[fi].shape[0], :] = batch_features[fi] yield {'x': batch_array, 'y': batch_texts, 'path': batch_paths, 'duration': batch_durations} def best_lm_alternative(true_sentence, wer, predictions, verbose=False): """ predictions is a list of tuples which first denote sentence and next is It's probablity """ best, best_score = None, np.finfo('float32').min for s, p in predictions: lm_score = lm.score(s) if lm_score > best_score: best, best_score = s, lm_score if best == predictions[0][0]: if verbose: print "language model didn't change prediction" best_wer = wer else: best_wer = word_error_rate([true_sentence], [best], decoded=True)[0] if verbose: print "language model changed prediction, WER changed from {old_wer} to {new_wer}".format( old_wer = wer, new_wer = best_wer ) return best, best_wer def evaluate(batch_generator, output_fn, learning_phase=False, use_lm=False, beam_width=12): all_nolm_wers, all_lm_wers = [], [] for batch in batch_generator: net_out = output_fn([batch['x'], learning_phase])[0] mtp_net_out = for_tf_or_th(net_out, net_out.swapaxes(0, 1)) pred_texts = [argmax_decode(o) for o in mtp_net_out] nolm_wers = word_error_rate(batch['y'], pred_texts, True) all_nolm_wers.append(nolm_wers) if use_lm: alt_beam_preds = lambda i: zip(*beam_decode_u(mtp_net_out[i, :, :], beam_width, normalize=True)) pred_texts, lm_wers = zip(*[best_lm_alternative(batch['y'][i], nolm_wers[i], alt_beam_preds(i)) for i in range(mtp_net_out.shape[0])]) all_lm_wers.append(np.array(lm_wers)) all_wers = all_lm_wers else: all_wers = all_nolm_wers for i, y in enumerate(batch['y']): print 'r:{}\np:{}\n{}: WER: {}, DURATION: {}, PATH: {}'.format(y, pred_texts[i], i, all_wers[-1][i], batch['duration'][i], batch['path'][i]) print 'batch mean WER: {}'.format(all_wers[-1].mean()) if use_lm: print 'LM WER: {} No LM WER: {}'.format(np.concatenate(all_lm_wers).mean(), np.concatenate(all_nolm_wers).mean()) else: 'whole mean WER: 
{}'.format(np.concatenate(all_wers).mean()) return mtp_net_out, pred_texts, all_wers, batch['y'] # - # ### Customize data generator test_desc = '/home/reith/deepspeech/ba-dls-deepspeech/descs/test-clean.json' #test_desc = '/home/reith/deepspeech/ba-dls-deepspeech/descs/test-other.json' #test_desc = '/home/reith/deepspeech/ba-dls-deepspeech/descs/dev-clean.json' from data_generator import DataGenerator datagen = DataGenerator() test_samples = pick_sample_files(test_desc, 1024, 0, 30) # Normalize by input data train_desc = '/home/reith/deepspeech/ba-dls-deepspeech/descs/train-clean-360.json' datagen.load_train_data(train_desc, 15) datagen.fit_train(100) # Or load them datagen.reload_norm('860-1000') # ### Load model # #### Theano mode # Load and test weights of a half-phoneme model #model_dir = '/home/reith/deepspeech/ba-dls-deepspeech/models/22-cont-23-i9696-lr1e-4-train-360-dur15/' #model_dir = '/home/reith/deepspeech/ba-dls-deepspeech/models/23-cont-i2494-joingrus-dur15-nobn-lr5e-5/' model_dir = '/home/reith/deepspeech/ba-dls-deepspeech/models/24-cont-train-860' # A summary of training procedure: # - 7 Epochs of dual phoneme-text on train-100 (20) # - 3 Epochs on train-500 for phoenme fine-tuning (21) # - 3 Epochs on train-500 for text fine-tuning (22) # - 2 Epochs on train-360 (23) # - 2 Epochs on train-360 dropping phoneme branch and and batch normalization (24) # make half phoneme model from model_wrp import HalfPhonemeModelWrapper model_wrp = HalfPhonemeModelWrapper() model = model_wrp.compile(nodes=1000, conv_context=5, recur_layers=5) output_fn = model_wrp.compile_output_fn() # or gru model from model_wrp import GruModel model_wrp = GruModel() model = model_wrp.compile(nodes=1000, conv_context=5, recur_layers=5, batch_norm=False) output_fn = model_wrp.compile_output_fn() # model.load_weights(os.path.join(model_dir, 'best-val-weights.h5')) model.load_weights(os.path.join(model_dir, 'model_19336_weights.h5')) # #### Tensorflow model # A summary of training 
procedure: # - 3 Epochs of dual phoneme-text on train-100 by dropout of 0.3 and leaky relu factor of 0.05 (40) # - 5 Epochs on train-100 for phoenme fine-tuning (41) # - 5 Epochs on train-100 for text fine-tuning (42) # - 5 Epochs on train-360 (43) # - 5 Epochs on train-860 dropping phoneme branch and and batch normalization and reduced dropout to 0.1 (44) # - 20 Epochs on train-860 reduced learning rate down to 5e-5 and for samples up to 20 seconds long (45) model_dir = '/home/reith/deepspeech/ba-dls-deepspeech/models/44-cont-45-i14490-dur20-lr5e-5' from model_wrp import GruModel model_wrp = GruModel() model = model_wrp.compile(nodes=1000, conv_context=5, recur_layers=5, dropout=.1, lirelu_alpha=.05, batch_norm=False) output_fn = model_wrp.compile_output_fn() model.load_weights(os.path.join(model_dir, 'best-val-weights.h5')) model.summary() # ### Evaluate model res = evaluate(test_generator(datagen, test_samples, normalize=True), output_fn, use_lm=False) res = evaluate(test_generator(datagen, test_samples, normalize=True), output_fn, beam_width=27, use_lm=True) # ### test... 
# + # thus idleness is the mother # thus i don't lissisthe mother def edits(word): letters = ''.join([chr(i) for i in range(ord('a'), ord('z') + 1)]) splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [l + r[1:] for l, r in splits if r] transposes = [l + r[1] + r[0] + r[2:] for l, r in splits if len(r) >1] replaces = [l + c + r[1:] for c in letters for l, r in splits if r] inserts = [l + c + r for c in letters for l, r in splits if r] return set(deletes + transposes + replaces + inserts) def edits_n(word, n): es = set([word]) for i in range(n): es = reduce(lambda a, b: a.union(b), (edits(w) for w in es)) return es def words(text): return text.split() def known_words(words): return {word for word in words if word in WORDS} def candidate_words(word): return (known_words([word]) or known_words(edits_n(word, 1)) or known_words(edits_n(word, 2)) or [word]) list(candidate_words("swam")) # - with open('./data/lm/words.txt') as f: WORDS = set(words(f.read())) r:a ring of amethyst i could not wear here plainer to my sight than that first kiss p:a ring of amathyst i could not wear here plainer two my sight then that first kits best_lm_alternative(res[3][3], res[2][3], zip(*beam_decode_u(res[0][:, 3, :], 12, normalize=True))) print best_lm_alternative(res[3][46], res[2][46], zip(*beam_decode_u(res[0][:, 46, :], 12, normalize=False))) print res[1][46] import edit_distance ref = 'there is no danger of the modern commentators on the timaeus falling into the absurdities of the neo platonists' pre = 'there is old danger of the madern commontychers un ther to meas falling into dubsurdities of the newo platinists' pre = 'there is old danger of the madern commontychers un ther to mes falling into dubsurdities of the newo platinists' #print edit_distance.SequenceMatcher(ref, pre).ratio() word_error_rate([ref], [pre], decoded=True)[0] # #### custom samples samples = [ {"duration": 4.905, "text": "he began a confused complaint against the wizard who had vanished 
behind the curtain on the left", "key": "/mnt/ml-data/LibriSpeech/test-clean/61/70968/61-70968-0000.wav"}, {"duration": 3.61, "text": "give not so earnest a mind to these mummeries child", "key": "/mnt/ml-data/LibriSpeech/test-clean/61/70968/61-70968-0001.wav"} ] evaluate(test_generator(datagen, samples, normalize=True), output_fn)
models-evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Table of Contents](./table_of_contents.ipynb) # # Smoothing # %matplotlib inline #format the book import book_format book_format.set_style() # ## Introduction # The performance of the Kalman filter is not optimal when you consider future data. For example, suppose we are tracking an aircraft, and the latest measurement deviates far from the current track, like so (I'll only consider 1 dimension for simplicity): # + import matplotlib.pyplot as plt data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3, 10.1, 9.9, 10.2, 10.0, 9.9, 11.4] plt.plot(data) plt.xlabel('time') plt.ylabel('position'); # - # After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether # # * The aircraft started a turn awhile ago, but the previous measurements were noisy and didn't show the change. # # * The aircraft is turning, and this measurement is very noisy # # * The measurement is very noisy and the aircraft has not turned # # * The aircraft is turning in the opposite direction, and the measurement is extremely noisy # # # Now, suppose the following measurements are: # # 11.3 12.1 13.3 13.9 14.5 15.2 # data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2] plt.plot(data + data2); # Given these future measurements we can infer that yes, the aircraft initiated a turn. # # On the other hand, suppose these are the following measurements. 
data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1] plt.plot(data + data3); # In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy. # ## An Overview of How Smoothers Work # # The Kalman filter is a *recursive* filter with the Markov property - it's estimate at step `k` is based only on the estimate from step `k-1` and the measurement at step `k`. But this means that the estimate from step `k-1` is based on step `k-2`, and so on back to the first epoch. Hence, the estimate at step `k` depends on all of the previous measurements, though to varying degrees. `k-1` has the most influence, `k-2` has the next most, and so on. # # Smoothing filters incorporate future measurements into the estimate for step `k`. The measurement from `k+1` will have the most effect, `k+2` will have less effect, `k+3` less yet, and so on. # # This topic is called *smoothing*, but I think that is a misleading name. I could smooth the data above by passing it through a low pass filter. The result would be smooth, but not necessarily accurate because a low pass filter will remove real variations just as much as it removes noise. In contrast, Kalman smoothers are *optimal* - they incorporate all available information to make the best estimate that is mathematically achievable. # ## Types of Smoothers # # There are three classes of Kalman smoothers that produce better tracking in these situations. # # * Fixed-Interval Smoothing # # This is a batch processing based filter. This filter waits for all of the data to be collected before making any estimates. For example, you may be a scientist collecting data for an experiment, and don't need to know the result until the experiment is complete. A fixed-interval smoother will collect all the data, then estimate the state at each measurement using all available previous and future measurements. 
If it is possible for you to run your Kalman filter in batch mode it is always recommended to use one of these filters a it will provide much better results than the recursive forms of the filter from the previous chapters. # # # * Fixed-Lag Smoothing # # Fixed-lag smoothers introduce latency into the output. Suppose we choose a lag of 4 steps. The filter will ingest the first 3 measurements but not output a filtered result. Then, when the 4th measurement comes in the filter will produce the output for measurement 1, taking measurements 1 through 4 into account. When the 5th measurement comes in, the filter will produce the result for measurement 2, taking measurements 2 through 5 into account. This is useful when you need recent data but can afford a bit of lag. For example, perhaps you are using machine vision to monitor a manufacturing process. If you can afford a few seconds delay in the estimate a fixed-lag smoother will allow you to produce very accurate and smooth results. # # # * Fixed-Point Smoothing # # A fixed-point filter operates as a normal Kalman filter, but also produces an estimate for the state at some fixed time $j$. Before the time $k$ reaches $j$ the filter operates as a normal filter. Once $k>j$ the filter estimates $x_k$ and then also updates its estimate for $x_j$ using all of the measurements between $j\dots k$. This can be useful to estimate initial paramters for a system, or for producing the best estimate for an event that happened at a specific time. For example, you may have a robot that took a photograph at time $j$. You can use a fixed-point smoother to get the best possible pose information for the camera at time $j$ as the robot continues moving. # # ## Choice of Filters # # The choice of these filters depends on your needs and how much memory and processing time you can spare. 
Fixed-point smoothing requires storage of all measurements, and is very costly to compute because the output is for every time step is recomputed for every measurement. On the other hand, the filter does produce a decent output for the current measurement, so this filter can be used for real time applications. # # Fixed-lag smoothing only requires you to store a window of data, and processing requirements are modest because only that window is processed for each new measurement. The drawback is that the filter's output always lags the input, and the smoothing is not as pronounced as is possible with fixed-interval smoothing. # # Fixed-interval smoothing produces the most smoothed output at the cost of having to be batch processed. Most algorithms use some sort of forwards/backwards algorithm that is only twice as slow as a recursive Kalman filter. # ## Fixed-Interval Smoothing # There are many fixed-lag smoothers available in the literature. I have chosen to implement the smoother invented by Rauch, Tung, and Striebel because of its ease of implementation and efficiency of computation. It is also the smoother I have seen used most often in real applications. This smoother is commonly known as an RTS smoother. # # Derivation of the RTS smoother runs to several pages of densely packed math. I'm not going to inflict it on you. Instead I will briefly present the algorithm, equations, and then move directly to implementation and demonstration of the smoother. # # The RTS smoother works by first running the Kalman filter in a batch mode, computing the filter output for each step. Given the filter output for each measurement along with the covariance matrix corresponding to each output the RTS runs over the data backwards, incorporating its knowledge of the future into the past measurements. When it reaches the first measurement it is done, and the filtered output incorporates all of the information in a maximally optimal form. 
# # The equations for the RTS smoother are very straightforward and easy to implement. This derivation is for the linear Kalman filter. Similar derivations exist for the EKF and UKF. These steps are performed on the output of the batch processing, going backwards from the most recent in time back to the first estimate. Each iteration incorporates the knowledge of the future into the state estimate. Since the state estimate already incorporates all of the past measurements the result will be that each estimate will contain knowledge of all measurements in the past and future. Here is it very important to distinguish between past, present, and future so I have used subscripts to denote whether the data is from the future or not. # # Predict Step # # $$\begin{aligned} # \mathbf{P} &= \mathbf{FP}_k\mathbf{F}^\mathsf{T} + \mathbf{Q } # \end{aligned}$$ # # Update Step # # $$\begin{aligned} # \mathbf{K}_k &= \mathbf{P}_k\mathbf{F}^\mathsf{T}\mathbf{P}^{-1} \\ # \mathbf{x}_k &= \mathbf{x}_k + \mathbf{K}_k(\mathbf{x}_{k+1} - \mathbf{Fx}_k) \\ # \mathbf{P}_k &= \mathbf{P}_k + \mathbf{K}_k(\mathbf{P}_{k+1} - \mathbf{P})\mathbf{K}_k^\mathsf{T} # \end{aligned}$$ # # As always, the hardest part of the implementation is correctly accounting for the subscripts. A basic implementation without comments or error checking would be: # # ```python # def rts_smoother(Xs, Ps, F, Q): # n, dim_x, _ = Xs.shape # # # smoother gain # K = zeros((n,dim_x, dim_x)) # x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy # # for k in range(n-2,-1,-1): # Pp[k] = F @ P[k] @ F.T + Q # predicted covariance # # K[k] = P[k] @ F.T @inv(Pp[k]) # x[k] += K[k] @ (x[k+1] - (F @ x[k])) # P[k] += K[k] @ (P[k+1] - Pp[k]) @ K[k].T # return (x, P, K, Pp) # ``` # # This implementation mirrors the implementation provided in FilterPy. It assumes that the Kalman filter is being run externally in batch mode, and the results of the state and covariances are passed in via the `Xs` and `Ps` variable. # # Here is an example. 
# + import numpy as np from numpy import random from numpy.random import randn import matplotlib.pyplot as plt from filterpy.kalman import KalmanFilter from filterpy.common import Q_discrete_white_noise import kf_book.book_plots as bp def plot_rts(noise, Q=0.001, show_velocity=False): random.seed(123) fk = KalmanFilter(dim_x=2, dim_z=1) fk.x = np.array([0., 1.]) # state (x and dx) fk.F = np.array([[1., 1.], [0., 1.]]) # state transition matrix fk.H = np.array([[1., 0.]]) # Measurement function fk.P*= 10. # covariance matrix fk.R = noise # state uncertainty fk.Q = Q_discrete_white_noise(dim=2, dt=1., var=Q) # process uncertainty # create noisy data zs = np.asarray([t + randn()*noise for t in range (40)]) # filter data with Kalman filter, than run smoother on it mu, cov, _, _ = fk.batch_filter(zs) M, P, C, _ = fk.rts_smoother(mu, cov) # plot data if show_velocity: index = 1 print('gu') else: index = 0 if not show_velocity: bp.plot_measurements(zs, lw=1) plt.plot(M[:, index], c='b', label='RTS') plt.plot(mu[:, index], c='g', ls='--', label='KF output') if not show_velocity: N = len(zs) plt.plot([0, N], [0, N], 'k', lw=2, label='track') plt.legend(loc=4) plt.show() plot_rts(7.) # - # I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output. In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders from from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output. # # With a perhaps more reasonable amount of noise we can see that the RTS output nearly lies on the ideal output. The Kalman filter output, while much better, still varies by a far greater amount. plot_rts(noise=1.) # However, we must understand that this smoothing is predicated on the system model. 
We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter *looks ahead* it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph. plot_rts(noise=7., Q=.1) # This underscores the fact that these filters are not *smoothing* the data in colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements. # # Let's wrap this up by looking at the velocity estimates of Kalman filter vs the RTS smoother. plot_rts(7.,show_velocity=True) # The improvement in the velocity, which is an hidden variable, is even more dramatic. # ## Fixed-Lag Smoothing # # The RTS smoother presented above should always be your choice of algorithm if you can run in batch mode because it incorporates all available data into each estimate. Not all problems allow you to do that, but you may still be interested in receiving smoothed values for previous estimates. The number line below illustrates this concept. # + from kf_book.book_plots import figsize from kf_book.smoothing_internal import * with figsize(y=2): show_fixed_lag_numberline() # - # At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements recevied for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps. 
# # Derivation for this math is beyond the scope of this book; <NAME>'s *Optimal State Estimation* [2] has a very good exposition if you are interested. The essense of the idea is that instead of having a state vector $\mathbf{x}$ we make an augmented state containing # # $$\mathbf{x} = \begin{bmatrix}\mathbf{x}_k \\ \mathbf{x}_{k-1} \\ \vdots\\ \mathbf{x}_{k-N+1}\end{bmatrix}$$ # # This yields a very large covariance matrix that contains the covariance between states at different steps. FilterPy's class `FixedLagSmoother` takes care of all of this computation for you, including creation of the augmented matrices. All you need to do is compose it as if you are using the `KalmanFilter` class and then call `smooth()`, which implements the predict and update steps of the algorithm. # # Each call of `smooth` computes the estimate for the current measurement, but it also goes back and adjusts the previous `N-1` points as well. The smoothed values are contained in the list `FixedLagSmoother.xSmooth`. If you use `FixedLagSmoother.x` you will get the most recent estimate, but it is not smoothed and is no different from a standard Kalman filter output. # + from filterpy.kalman import FixedLagSmoother, KalmanFilter import numpy.random as random fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8) fls.x = np.array([0., .5]) fls.F = np.array([[1.,1.], [0.,1.]]) fls.H = np.array([[1.,0.]]) fls.P *= 200 fls.R *= 5. fls.Q *= 0.001 kf = KalmanFilter(dim_x=2, dim_z=1) kf.x = np.array([0., .5]) kf.F = np.array([[1.,1.], [0.,1.]]) kf.H = np.array([[1.,0.]]) kf.P *= 200 kf.R *= 5. kf.Q = Q_discrete_white_noise(dim=2, dt=1., var=0.001) N = 4 # size of lag nom = np.array([t/2. 
for t in range (0, 40)]) zs = np.array([t + random.randn()*5.1 for t in nom]) for z in zs: fls.smooth(z) kf_x, _, _, _ = kf.batch_filter(zs) x_smooth = np.array(fls.xSmooth)[:, 0] fls_res = abs(x_smooth - nom) kf_res = abs(kf_x[:, 0] - nom) plt.plot(zs,'o', alpha=0.5, marker='o', label='zs') plt.plot(x_smooth, label='FLS') plt.plot(kf_x[:, 0], label='KF', ls='--') plt.legend(loc=4) print(f'standard deviation fixed-lag: {np.mean(fls_res):.3f}') print(f'standard deviation kalman: {np.mean(kf_res):.3f}') # - # Here I have set `N=8` which means that we will incorporate 8 future measurements into our estimates. This provides us with a very smooth estimate once the filter converges, at the cost of roughly 8x the amount of computation of the standard Kalman filter. Feel free to experiment with larger and smaller values of `N`. I chose 8 somewhat at random, not due to any theoretical concerns. # ## References # [1] <NAME>, <NAME>, and <NAME>. "Maximum likelihood estimates of linear dynamic systems," *AIAA Journal*, **3**(8), pp. 1445-1450 (August 1965). # # [2] <NAME>. "Optimal State Estimation," <NAME> & Sons, 2006. # # http://arc.aiaa.org/doi/abs/10.2514/3.3166
13-Smoothing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fremont Bridge bicycle-counter analysis: load the hourly East/West
# sidewalk counts and explore weekly, yearly and time-of-day patterns.

# !head Fremont.csv

import pandas as pd
data = pd.read_csv('Fremont.csv',index_col='Date',parse_dates=True)
data.head()

# %matplotlib inline
data.plot()

# Weekly totals smooth out the hour-to-hour noise.
data.resample('W').sum().plot()

import matplotlib.pyplot as plt
plt.style.use('seaborn')

data.columns = ['East', 'West']
data.resample('W').sum().plot()

# 365-day rolling sum of the daily totals shows the yearly trend.
data.resample('D').sum().rolling(365).sum().plot()

ax = data.resample('D').sum().rolling(365).sum().plot()
ax.set_ylim(0,None)

data['Total'] = data['West'] + data['East']

ax = data.resample('D').sum().rolling(365).sum().plot()
ax.set_ylim(0,None)

# Average counts by time of day.
# FIX: the original cell read `data.groupby(data.index.time)mean().plot()`,
# a SyntaxError (missing `.` before `mean`); the corrected call below is
# what was intended.
data.groupby(data.index.time).mean().plot()

# Hourly profile of every individual day: one column per calendar date.
pivoted = data.pivot_table('Total',index = data.index.time, columns = data.index.date)
pivoted.iloc[:5,:5]

pivoted.plot(legend=False)

# Heavy alpha blending reveals the two dominant daily shapes
# (bimodal commute days vs. unimodal weekend days).
pivoted.plot(legend=False, alpha=0.01)

pivoted = data.pivot_table('East',index = data.index.time, columns = data.index.date)
pivoted.iloc[:5,:5]

pivoted.plot(legend=False, alpha=0.01)

pivoted = data.pivot_table('West',index = data.index.time, columns = data.index.date)
pivoted.iloc[:5,:5]

pivoted.plot(legend=False, alpha=0.01)
test_notebook_bike_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable # # Orthogonalisation and dual bases # # In this notebook you implement functions to find orthogonal and bi-orthogonal bases and apply them to different spaces. You will see how orthogonal bases let you easily write function in terms of basis coefficients. # # First, fill the functions below. If it is too much, you can fill in just `dual_basis` and `_orthogonalisation` of your choice. # # You might want to use `np.linalg.cholesky`, `np.linalg.eig` and `np.linalg.qr`. What is the `step_size` for? # + def dual_basis(Phi, step_size): """Returns a basis dual to given basis Phi, calcualted using the inverse of the Gram matrix""" return def cholesky_orthogonalisation(Phi, step_size): """Return an orthogonal basis of the space spanned by Phi, calculated using Cholesky decomposion. """ return def gram_schmit_orthogonalisation(Phi, step_size): """Return an orthogonal basis of the space spanned by Phi, calculated using QR decomposion. """ return def symmetric_orthogonalization(Phi, step_size): """Return an orthogonal basis of the space spanned by Phi, calculated using eigenvalue decomposion. """ return # - # We have implemented for you most of the plotting function below, that should help you check if what you have implemented is correct. 
def plot_bases(t, primal_basis, dual_basis, x):
    """Plot a primal basis, its dual, their Gram matrix, and the signal x
    together with its reconstruction from the basis coefficients.

    Parameters
    ----------
    t : 1-D array of (uniformly spaced) sample times.
    primal_basis : (len(t), K) array whose columns are the basis vectors.
    dual_basis : (len(t), K) array whose columns are the candidate dual basis.
    x : signal sampled on t, used to check the reconstruction.
    """
    step_size = (np.max(t) - np.min(t)) / t.size
    # Basis coefficients are inner products with the dual basis; the step
    # size approximates the continuous inner product by a Riemann sum.
    primal_coeffs = step_size * (dual_basis.transpose() @ x)
    x_estimate = primal_basis @ primal_coeffs
    # For a true dual pair the Gram matrix is (approximately) the identity.
    Gram = step_size * dual_basis.transpose() @ primal_basis
    # Fix: derive the number of basis functions from the input instead of
    # relying on the module-level global `dimension`.
    n_basis = primal_basis.shape[1]

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(15, 3))
    for k in range(n_basis):
        ax1.plot(t, primal_basis[:, k], label=f"$\\psi_{k}$")
    ax1.legend()
    ax1.set_title("Primal basis")
    for k in range(n_basis):
        ax2.plot(t, dual_basis[:, k], label=f"$\\psi_{k}$")
    ax2.legend()
    ax2.set_title("Dual basis")
    im = ax3.imshow(Gram)
    ax_divider = make_axes_locatable(ax3)
    # add an axes below the main axes for the colorbar.
    cax = ax_divider.append_axes("bottom", size="7%", pad="2%")
    fig.colorbar(im, cax=cax, orientation="horizontal")
    ax3.set_title("Gramian")
    ax4.plot(t, x, label="$x$")
    ax4.plot(t, x_estimate, label="$\\tilde x$")
    ax4.legend()
    ax4.set_title("Signal reconstruction")
    plt.show()

# Below we generate for you the discretised times `t`, an example polynomial
# basis `Phi` and a random signal `x`.

# +
t = np.linspace(-1, 1, 900)
step_size = (np.max(t) - np.min(t)) / t.size
dimension = 4
# Columns are the monomials 1, t, t^2, ..., t^(dimension-1).
Phi = np.stack([t**k for k in range(dimension)], axis=-1)
x = Phi @ (2*np.random.random(dimension) - 1)
# -

# We begin with checking that the basis `Phi` is not orthogonal, it is not
# dual to itself.

# Fix: the original call was missing the required signal argument `x`,
# which raised a TypeError.
plot_bases(t, Phi, Phi, x)

# Now, plot `Phi` and its dual (and check if it indeed is dual!)

# And plot some orthogonal bases of the sub-space defined by `Phi`

# You can play with different spaces. You can try for example shifted
# Gaussians or trigonometric polynomials.
BasisExpansion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Command Line Tools # # As mentioned before, running Pegasus is in a Jupyter notebook is very convenient for tutorials and for smaller workflows, but production workflows are most commonly submitted on dedicated HTCondor submit nodes using command line tools. This section of the tutorial uses the same workflow as we have seen in the previous sections, generated inside the notebook. Planning, submitting and checking status will be done using the command line tools. # # First, execute the following cell to generate the workflow. Note that we are just writing it out at the end. # + import logging from pathlib import Path from Pegasus.api import * logging.basicConfig(level=logging.DEBUG) # --- Properties --------------------------------------------------------------- props = Properties() props["pegasus.monitord.encoding"] = "json" props["pegasus.catalog.workflow.amqp.url"] = "amqp://friend:<EMAIL>:5672/prod/workflows" props["pegasus.mode"] = "tutorial" # speeds up tutorial workflows - remove for production ones props.write() # written to ./pegasus.properties # --- Replicas ----------------------------------------------------------------- with open("f.a", "w") as f: f.write("This is sample input to KEG") fa = File("f.a").add_metadata(creator="ryan") rc = ReplicaCatalog().add_replica("local", fa, Path(".").resolve() / "f.a") # --- Transformations ---------------------------------------------------------- preprocess = Transformation( "preprocess", site="condorpool", pfn="/usr/bin/pegasus-keg", is_stageable=False, arch=Arch.X86_64, os_type=OS.LINUX ) findrange = Transformation( "findrange", site="condorpool", pfn="/usr/bin/pegasus-keg", is_stageable=False, arch=Arch.X86_64, os_type=OS.LINUX ) analyze = Transformation( "analyze", site="condorpool", 
pfn="/usr/bin/pegasus-keg", is_stageable=False, arch=Arch.X86_64, os_type=OS.LINUX ) tc = TransformationCatalog().add_transformations(preprocess, findrange, analyze) # --- Workflow ----------------------------------------------------------------- ''' [f.b1] - (findrange) - [f.c1] / \ [f.a] - (preprocess) (analyze) - [f.d] \ / [f.b2] - (findrange) - [f.c2] ''' wf = Workflow("blackdiamond") fb1 = File("f.b1") fb2 = File("f.b2") job_preprocess = Job(preprocess)\ .add_args("-a", "preprocess", "-T", "3", "-i", fa, "-o", fb1, fb2)\ .add_inputs(fa)\ .add_outputs(fb1, fb2) fc1 = File("f.c1") job_findrange_1 = Job(findrange)\ .add_args("-a", "findrange", "-T", "3", "-i", fb1, "-o", fc1)\ .add_inputs(fb1)\ .add_outputs(fc1) fc2 = File("f.c2") job_findrange_2 = Job(findrange)\ .add_args("-a", "findrange", "-T", "3", "-i", fb2, "-o", fc2)\ .add_inputs(fb2)\ .add_outputs(fc2) fd = File("f.d") job_analyze = Job(analyze)\ .add_args("-a", "analyze", "-T", "3", "-i", fc1, fc2, "-o", fd)\ .add_inputs(fc1, fc2)\ .add_outputs(fd) wf.add_jobs(job_preprocess, job_findrange_1, job_findrange_2, job_analyze) wf.add_replica_catalog(rc) wf.add_transformation_catalog(tc) wf.write() # - # ## 1. Opening the Jupyter terminal # # To open a new terminal window, navigate back to the listings tab of Jupyter notebook. This is where you have been opening all the sections from. In the top right corner of the listing, click `New` and then `Terminal`. It looks something like: # # ![Terminal Start](../images/terminal-start.png) # # Once started, arrange your browser tabs/windows side by side so that you can see these instructions and the terminal window at the same time. In the following sections, when you are presented with a `$`, that means it is a command you can type in or copy and paste into the terminal window. Sometimes you have to substitute your own values and that is highlighted with square brackets `[]`. 
# # First, cd to the correct directory: # # $ cd ~/notebooks/03-Command-Line-Tools/ # # If you run `ls`, you should see these files: # # $ ls # 03-Command-Line-Tools.ipynb # f.a # pegasus.properties # workflow.yml # # The 3 latter ones were just generated by the cell above. # # ## 2. Planning and submitting # # We can now plan and submit the workflow by running: # # $ pegasus-plan --submit workflow.yml # # In the output of the plan command, you will see a reference to several other Pegasus commands such as pegasus-status. More importantly, a workflow directory was generated for the new workflow instance. This directory is the handle to the workflow instance and used by Pegasus command line tools. Some useful tools to know about: # # * **pegasus-status -v [wfdir]** Provides status on a workflow instance # * **pegasus-analyzer [wfdir]** Provides debugging clues why a workflow failed. Run this after a workflow has failed # * **pegasus-statistics [wfdir]** Provides statistics, such as walltimes, on a workflow after it has completed # * **pegasus-remove [wfdir]** Removes a workflow from the system # # # ## 3. Workflow status # # Use the workflow directory given in the output of the `pegasus-plan` command to determine the status of your workflow: # # $ pegasus-status -v [wfdir] # # The flags `-l` and `-v` are just two different version of more verbose output. Please see `pegasus-status --help` to see all the options available. # # You can keep running `pegasus-status` until the workflow has completed, or you can use the `-w` flag to mimic the `wait()` function we used in the API. This flag will make `pegasus-status` run periodically until the workflow is complete: # # $ pegasus-status -v -w [wfdir] # # # ## 4. Workflow statistics # # Once the workflow is complete, you can extract statistics from the provenance database: # # $ pegasus-statistics -s all [wfdir] # # # ## What's Next? # # The next notebook is `04-Summary/`
tutorial/docker/notebooks/03-Command-Line-Tools/03-Command-Line-Tools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Supply Network Design 1 # # ## Objective and Prerequisites # # This model is an example of a supply network design problem. Given a set of factories, depots, and customers, the goal is to determine how to satisfy customer demand while minimizing shipping costs. This problem can be regarded as one of finding the minimum cost flow through a network. # # This model is example 19 from the fifth edition of Model Building in Mathematical Programming, by <NAME> on pages 273-275 and 330-332. # # This example is of beginning difficulty; we assume that you know Python and have some knowledge of the Gurobi Python API and building mathematical optimization models. # # **Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*. # # --- # ## Problem Description # # In this problem, we have six end customers, each with a known demand for a product. Customer demand can be satisfied from a set of four depots, or directly from a set of two factories. Each depot can support a maximum volume of product moving through it, and each factory can produce a maximum amount of product. 
There are known costs associated with transporting the product, from a factory to a depot, from a depot to a customer, or from a factory directly to a customer. # # Our supply network has two factories, in Liverpool and Brighton, that produce a product. Each has a maximum production capacity: # # | Factory | Supply (tons) | # | --- | --- | # | Liverpool | 150,000 | # | Brighton | 200,000 | # # The product can be shipped from a factory to a set of four depots. Each depot has a maximum throughput. Depots don't produce or consume the product; they simply pass the product on to customers. # # | Depot | Throughput (tons) | # | --- | --- | # | Newcastle | 70,000 | # | Birmingham | 50,000 | # | London | 100,000 | # | Exeter | 40,000 | # # Our network has six customers, each with a given demand. # # | Customer | Demand (tons) | # | --- | --- | # | C1 | 50,000 | # | C2 | 10,000 | # | C3 | 40,000 | # | C4 | 35,000 | # | C5 | 60,000 | # | C6 | 20,000 | # # Shipping costs are given in the following table (in dollars per ton). Columns are source cities and rows are destination cities. Thus, for example, it costs $1 per ton to ship the product from Liverpool to London. A '-' in the table indicates that that combination is not possible, so for example it is not possible to ship from the factory in Brighton to the depot in Newcastle. # # | To | Liverpool | Brighton | Newcastle | Birmingham | London | Exeter | # | --- | --- | --- | --- | --- | --- | --- | # | Depots | # | Newcastle | 0.5 | - | # | Birmingham | 0.5 | 0.3 | # | London | 1.0 | 0.5 | # | Exeter | 0.2 | 0.2 | # | Customers | # | C1 | 1.0 | 2.0 | - | 1.0 | - | - | # | C2 | - | - | 1.5 | 0.5 | 1.5 | - | # | C3 | 1.5 | - | 0.5 | 0.5 | 2.0 | 0.2 | # | C4 | 2.0 | - | 1.5 | 1.0 | - | 1.5 | # | C5 | - | - | - | 0.5 | 0.5 | 0.5 | # | C6 | 1.0 | - | 1.0 | - | 1.5 | 1.5 | # # The question to be answered is how to satisfy the demands of the end customers while minimizing shipping costs. 
# # --- # ## Model Formulation # # ### Sets and Indices # # $f \in \text{Factories}=\{\text{Liverpool}, \text{Brighton}\}$ # # $d \in \text{Depots}=\{\text{Newcastle}, \text{Birmingham}, \text{London}, \text{Exeter}\}$ # # $c \in \text{Customers}=\{\text{C1}, \text{C2}, \text{C3}, \text{C4}, \text{C5}, \text{C6}\}$ # # $\text{Cities} = \text{Factories} \cup \text{Depots} \cup \text{Customers}$ # # ### Parameters # # $\text{cost}_{s,t} \in \mathbb{R}^+$: Cost of shipping one ton from source $s$ to destination $t$. # # $\text{supply}_f \in \mathbb{R}^+$: Maximum possible supply from factory $f$ (in tons). # # $\text{through}_d \in \mathbb{R}^+$: Maximum possible flow through depot $d$ (in tons). # # $\text{demand}_c \in \mathbb{R}^+$: Demand for goods at customer $c$ (in tons). # # ### Decision Variables # # $\text{flow}_{s,t} \in \mathbb{N}^+$: Quantity of goods (in tons) that is shipped from source $s$ to destionation $t$. # # # ### Objective Function # # - **Cost**: Minimize total shipping costs. # # \begin{equation} # \text{Minimize} \quad Z = \sum_{(s,t) \in \text{Cities} \times \text{Cities}}{\text{cost}_{s,t}*\text{flow}_{s,t}} # \end{equation} # # ### Constraints # # - **Factory output**: Flow of goods from a factory must respect maximum capacity. # # \begin{equation} # \sum_{t \in \text{Cities}}{\text{flow}_{f,t}} \leq \text{supply}_{f} \quad \forall f \in \text{Factories} # \end{equation} # # - **Customer demand**: Flow of goods must meet customer demand. # # \begin{equation} # \sum_{s \in \text{Cities}}{\text{flow}_{s,c}} = \text{demand}_{c} \quad \forall c \in \text{Customers} # \end{equation} # # - **Depot flow**: Flow into a depot equals flow out of the depot. # # \begin{equation} # \sum_{s \in \text{Cities}}{\text{flow}_{s,d}} = # \sum_{t \in \text{Cities}}{\text{flow}_{d,t}} # \quad \forall d \in \text{Depots} # \end{equation} # # - **Depot capacity**: Flow into a depot must respect depot capacity. 
# # \begin{equation} # \sum_{s \in \text{Cities}}{\text{flow}_{s,d}} \leq \text{through}_{d} # \quad \forall d \in \text{Depots} # \end{equation} # # --- # ## Python Implementation # # We import the Gurobi Python Module and other Python libraries. # + import numpy as np import pandas as pd import gurobipy as gp from gurobipy import GRB # tested with Python 3.7.0 & Gurobi 9.0 # - # ## Input Data # We define all the input data for the model. # + # Create dictionaries to capture factory supply limits, depot throughput limits, and customer demand. supply = dict({'Liverpool': 150000, 'Brighton': 200000}) through = dict({'Newcastle': 70000, 'Birmingham': 50000, 'London': 100000, 'Exeter': 40000}) demand = dict({'C1': 50000, 'C2': 10000, 'C3': 40000, 'C4': 35000, 'C5': 60000, 'C6': 20000}) # Create a dictionary to capture shipping costs. arcs, cost = gp.multidict({ ('Liverpool', 'Newcastle'): 0.5, ('Liverpool', 'Birmingham'): 0.5, ('Liverpool', 'London'): 1.0, ('Liverpool', 'Exeter'): 0.2, ('Liverpool', 'C1'): 1.0, ('Liverpool', 'C3'): 1.5, ('Liverpool', 'C4'): 2.0, ('Liverpool', 'C6'): 1.0, ('Brighton', 'Birmingham'): 0.3, ('Brighton', 'London'): 0.5, ('Brighton', 'Exeter'): 0.2, ('Brighton', 'C1'): 2.0, ('Newcastle', 'C2'): 1.5, ('Newcastle', 'C3'): 0.5, ('Newcastle', 'C5'): 1.5, ('Newcastle', 'C6'): 1.0, ('Birmingham', 'C1'): 1.0, ('Birmingham', 'C2'): 0.5, ('Birmingham', 'C3'): 0.5, ('Birmingham', 'C4'): 1.0, ('Birmingham', 'C5'): 0.5, ('London', 'C2'): 1.5, ('London', 'C3'): 2.0, ('London', 'C5'): 0.5, ('London', 'C6'): 1.5, ('Exeter', 'C3'): 0.2, ('Exeter', 'C4'): 1.5, ('Exeter', 'C5'): 0.5, ('Exeter', 'C6'): 1.5 }) # - # ## Model Deployment # # We create a model and the variables. The variables simply capture the amount of product that flows along each allowed path between a source and destination. Objective coefficients are provided here (in $\text{cost}$) , so we don't need to provide an optimization objective later. 
# Build and solve the minimum-cost flow model, then report the optimal plan.
model = gp.Model('SupplyNetworkDesign')

# Decision variables: tons shipped along each allowed arc.  Passing the cost
# dict as `obj` sets the (minimisation) objective coefficients directly.
flow = model.addVars(arcs, obj=cost, name="flow")

# Our first constraints require the total flow along arcs leaving a factory
# to be at most as large as the supply capacity of that factory.

# +
# Production capacity limits
factories = supply.keys()
factory_flow = model.addConstrs(
    (gp.quicksum(flow.select(factory, '*')) <= supply[factory]
     for factory in factories),
    name="factory")
# -

# Our next constraints require the total flow along arcs entering a customer
# to be equal to the demand from that customer.

# +
# Customer demand
customers = demand.keys()
customer_flow = model.addConstrs(
    (gp.quicksum(flow.select('*', customer)) == demand[customer]
     for customer in customers),
    name="customer")
# -

# Our final constraints relate to depots. The first constraints require that
# the total amount of product entering the depot must equal the total amount
# leaving.

# +
# Depot flow conservation
depots = through.keys()
depot_flow = model.addConstrs(
    (gp.quicksum(flow.select(depot, '*')) == gp.quicksum(flow.select('*', depot))
     for depot in depots),
    name="depot")
# -

# The second set limits the product passing through the depot to be at most
# equal the throughput of that depot.

# +
# Depot throughput
depot_capacity = model.addConstrs(
    (gp.quicksum(flow.select('*', depot)) <= through[depot]
     for depot in depots),
    name="depot_capacity")
# -

# We now optimize the model
model.optimize()

# ---
# ## Analysis
#
# Product demand from all of our customers can be satisfied for a total cost
# of $\$198,500$. The optimal plan is as follows.

# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# collect the rows first and construct the frame once instead of appending
# row by row.
rows = [{"From": arc[0], "To": arc[1], "Flow": flow[arc].x}
        for arc in arcs
        if flow[arc].x > 1e-6]  # tolerance filters numerically-zero flows
product_flow = pd.DataFrame(rows, columns=["From", "To", "Flow"])
product_flow.index = [''] * len(product_flow)
product_flow

# ---
# ## References
#
# <NAME>, Model Building in Mathematical Programming, fifth edition.
#
# Copyright © 2020 Gurobi Optimization, LLC
documents/Beginner/SupplyNetworkDesign1&2/supply_network_design_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Images # # La grotte de Lascaux est temoin d'une des premières tentatives de l'humanité de figer des images sur un support matériel. # En termes de science d'informatique il s'agit de **l'encodage d'une image** sur un substrat rocheux à l'aide de peinture. # # ![](https://upload.wikimedia.org/wikipedia/commons/1/1e/Lascaux_painting.jpg) import matplotlib.pyplot as plt import numpy as np # **Exercice** # Si le code ci-dessus produit l'erreur `ModuleNotFoundError` vous devez installer les modules `matplotlib` et `numpy`. Dans Thonny choisissez le menu **Outils > Gérer les paquets…** et installez les deux paquets nécessaires. # ## Matrice 2D # # Une image est un tableau en 2 dimension composé de petits rectangles. On appelle ces points élémentaires des pixels, de l'anglais **picture element**. # Une image noir et blanc est une image ou chaque pixel peut avoir deux valeurs: # # - 0 = noir # - 1 = blanc # # La fonction `np.zeros(m, n)` crée une matrice 2D de zeros de dimension m x n. img = np.zeros((5, 8), dtype='uint8') print(img) # **Exercice** # Créez une nouvelle variable `img2` qui contient une matrice de 0s de taille 4 x 12. # ## Afficher une image # Nous pouvons afficher une matrice 2D sous forme d'image avec la fonction `np.imshow()` plt.grid() plt.imshow(img, cmap='gray'); # **Attention** # Ici les lignes ne démarquent pas des pixels. Les lignes passent au milieu des pixels. # ## Les attributs d'image # L'attribut `shape` retourne les dimensions de l'image. Ici nous avons 5 lignes et 8 colonnes. img.shape # L'attribut `size` retourne le nombre de pixels. Ici nous avons 40 img.size # **Exercice** # Affichez les attributs **shape** et **size** de l'image `img2` que vous avez définit ci-dessus. 
# ## Le pixel # Les éléments d'une image sont appelé des pixels. Le nom **pixel** vient de l'anglais *picture element*. # # Nous pouvons adresser un pixel individuel avec ses indices `img[y, x]`. Voici comment changer deux pixels en blanc. img[1, 2] = 1 img[2, 6] = 1 print(img) # Nous pouvons afficher cette image avec plt.imshow(img, cmap='gray'); # **Exercice** # Ajoutez un pixel blanc en haut à droite, et un autre pixel blanc en bas à gauche. # ## Image blanche # # La matrice `np.ones()` permet de créer une matrice avec des `1`. img = np.ones((5, 5), dtype='uint8') print(img) # Nous l'utilisons pour créer un **smiley** en noir sur fond blanc. img = np.ones((5, 5), dtype='uint8') img[1, 1] = 0 img[3, 1:4] = 0 plt.imshow(img, cmap='gray'); # **Exercice** # Ajoutez le deuxième oeuil qui manque. # ## Affichage matricielle # # Les chiffres sur un affichage d'une calculatrice sont composé de pixels. # # ![](img/lcd_display.jpg) # # Nous pouvons créer nos propres images d'une lettre avec une matrice 6 x 5 pour créer la lettre **E**. E = np.array([[1, 0, 0, 0, 1], [1, 0, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 1, 1, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]) plt.imshow(E, cmap='gray') plt.grid() # La dimension de la lettre est bien 6 x 5. E.shape # **Exercice** # A partir d'une copie du code ci-dessus, définissez une image `F` qui représente la lettre F. # ## Définir une lettre # Voici encore la lettre L L = np.array([[1, 0, 1, 1, 1], [1, 0, 1, 1, 1], [1, 0, 1, 1, 1], [1, 0, 1, 1, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]) plt.imshow(L, cmap='gray') plt.grid() # **Exercice** # A partir d'une copie du code ci-dessus, définissez une image `O` qui représente la lettre O. # ## Juxtaposer des images # # Un ordinateur affiche un texte en juxtaposant les pixels d'une lettre. # # La fonction `np.hstack()` (=horizontal stack) permet d'empiler des images horizontalement. 
ELLE = np.hstack((E, L, L, E)) plt.imshow(ELLE, cmap='gray'); # **Exercice** # Avec les lettres que vous avez définits, écrivez le mot `FOLLE`. # ## Empiler des images # La fonction `np.vstack()` permet d'empiler des images verticalement. img = np.vstack((ELLE, ELLE, ELLE)) plt.imshow(img, cmap='gray'); # **Exercice** # Définissez les lettres manquantes pour écrire # # BELLE # FILLE # FOLLE # ## Lignes # # Avec l'opérateur de tranche `:` nous pouvons choisir des lignes entières ou partielles. # - `m:n` de m à n # - `:n` du début à n # - `m:` de m jusqu'à la fin img = np.ones((5, 8), dtype='uint8') img[0, :] = 0 img[2, :2] = 0 img[3, 2:5] = 0 img[4, 5:] = 0 plt.imshow(img, cmap='gray'); # **Exercice** # Créez une image avec 3 lignes horizontales noires. # ## Colonnes # # Avec l'opérateur de tranche `:` nous pouvons choisir des lignes ou colonnes entières. img = np.ones((5, 8), dtype='uint8') img[1, :] = 0 img[:, 2] = 0 plt.imshow(img, cmap='gray'); # **Exercice** # Créez une image de 5x7 pixels avec une croix passant par le centre. # ## Pixels aléatoires # # La fonction `np.random.randint(2)` crée des valeurs aléatoires entre 0 et 1. A = np.random.randint(2, size=(5, 3)) plt.imshow(A, cmap='gray'); # **Exercice** # Créez une image de 5x10 pixels avec 3 niveaux de gris aléatoires. A = np.random.randint(3, size=(5, 10)) plt.imshow(A, cmap='gray'); # ## Niveaux de gris # De plus nous pourrions choisir des valeurs aléatoires sur un spectre de gris avec 256 valeurs. A = np.random.randint(256, size=(5, 6)) plt.imshow(A, cmap='gray') plt.colorbar(); # ## Image monochrome # # Une image monochrome est composé de pixels en niveaux de gris variants. # # ![](img/house.jpg) # # **Exercice** # Tirez l'image de la maison ci-dessus vers le navigateur de Jupyter Lab. # ## Lire une image # # La fonction `imread` permet de lire une image à partir d'une image stockée dans un fichier. 
# ls img img = plt.imread('img/house.jpg') plt.imshow(img); # **Exercice** # Utilisez la fonction `imread` pour lire une image dans le dossier actuel. # Quel est le format de cette image, et quel est le nombre total des pixels? # ## La valeur d'un pixel # Voici les 9 premiers pixels de la première ligne toute en haut. img[0, :8] # Voici ces 9 pixels affichés comme image. plt.imshow(img[0:1, :8]); # Voici la valeur numérique du 3e pixel. print(img[0, 2]) # **Exercice** # Quel est la valeur numérique du pixel à la position (100, 100). # Ce pixel est-il plutôt noir ou blanc ? # ## Afficher une sous-région # # Nous pouvons extraire un sous-ensemble de pixels de taille 20 x 30 en utilisant l'opérateur de tranche `[m:n]` pour l'indice. plt.imshow(img[20:40, 20:50]); # **Exercice** # Trouez la sous-région qui contient la fenêtre à gauche et affichez-la.
doc/rep/image.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notebook to extract numpy arrays from a netCDF file
# Just pick the variables of interest and run the last cell, as in the
# example left below.

from netCDF4 import Dataset
import numpy as np
from pathlib import Path
import os
import pickle
# from wrf import getvar, latlon_coords, to_np

# +
dirpath = r"C:\Users\giova\Desktop\hills"

# Variables that might be interesting; decide what is worth keeping and
# what should be added.
variables = [
    'LFN', 'FLINEINT', 'F_ROS0', 'F_ROSX', 'F_ROSY', 'F_ROS', 'F_INT',
    'F_LINEINT', 'F_LINEINT2', 'HGT', 'FWH', 'FZ0',
    'FUEL_FRAC_BURNT',  # = 1/w?
    'FGIP',  # total fuel load?
    'FGRNHFX', 'T2', 'U', 'V', 'UF', 'VF'
]
# variables = ["ZSF"]

savefilepath = r"C:\Users\giova\Desktop\sim results\hill.pkl"

# +
# root = Dataset(filepath, "r", format="NETCDF4")
# root.variables  # shows all the variables

# +
def get_array_from_fire_subgrid(var_name, root):
    """Return `var_name` from the fire subgrid with the padding rows and
    columns stripped, plus the x/y subgrid refinement ratios."""
    sr_x = int(len(root.dimensions['west_east_subgrid']) / (len(root.dimensions['west_east']) + 1))
    sr_y = int(len(root.dimensions['south_north_subgrid']) / (len(root.dimensions['south_north']) + 1))
    data = root.variables[var_name]
    return np.array(data[:, :-sr_y, :-sr_x].data), sr_x, sr_y


def process_data(root, var, expr=None):
    """Return (array, longitudes, latitudes) for variable `var` of an open
    netCDF `root`, handling variables that live on the fire subgrid."""
    arr = np.array(root.variables[var][:].data)
    lats = root.variables['XLAT'][0, :, 0]
    longs = root.variables['XLONG'][0, 0, :]
    if 'south_north_subgrid' in root.variables[var].dimensions:
        arr, sr_x, sr_y = get_array_from_fire_subgrid(var, root)
        # Fix: the original line ended with a stray trailing comma, which
        # made `longs` a 1-element tuple instead of an array.
        longs = np.linspace(longs.min(), longs.max(),
                            len(root.dimensions['west_east_subgrid']) - sr_x)
        lats = np.linspace(lats.min(), lats.max(),
                           len(root.dimensions['south_north_subgrid']) - sr_y)
    return arr, longs, lats
# -

with open(savefilepath, 'wb') as file:
    data = {}
    for var in variables:
        chunks = []  # per-file arrays, concatenated along the time axis
        save = True
        for filename in os.listdir(dirpath):
            if filename.startswith("wrfout"):
                print(f"Processing (unknown): {var}")
                root = Dataset(os.path.join(dirpath, filename), "r", format="NETCDF4")
                try:
                    arr_temp, lon, lat = process_data(root, var)
                    chunks.append(arr_temp)
                except Exception as e:
                    save = False
                    print(f"Could not save variable {var}")
        # Fix: the original called np.append(arr_tot, arr_temp) and discarded
        # the result, so only the first file's data was ever kept (and
        # np.append without `axis` would have flattened the array anyway).
        # The `chunks` guard also avoids a NameError when no file matched.
        if save and chunks:
            arr_tot = np.concatenate(chunks, axis=0)
            data[var] = {
                "data": arr_tot,
                "description": root.variables[var].description,
                "coordinates": root.variables[var].coordinates,
                "shape": arr_tot.shape,
            }
    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)

# +
data = []
with open(savefilepath, 'rb') as fp:
    data = pickle.load(fp)
data
# -
WRF/WRF_Simulations/netcf_get_arrays.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Confidence interval approximations for the AUROC # # The area under the receiver operating curve (AUROC) is one of the most commonly used performance metrics for binary classification. Visually, the AUROC is the integral between the sensitivity and false positive rate curves across all thresholds for a binary classifier. The AUROC can also be shown to be equivalent to an instance of the [Mann-Whitney-U test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) (MNU), a non-parametric rank-based statistic. This post addresses two challenges when doing statistical testing for the AUROC: i) how to speed up the calculation of the AUROC, and ii) which inference procedure to use to obtain the best possible coverage. The AUROC's relationship to the MNU will be shown to be important for both speed ups in calculation and resampling approaches for the bootstrap. # ## (1) Methods for calculating the AUROC # # In the binary classification paradigm a model produces a score associated with the probability that an observation belongs to class 1 (as opposed to class 0). The AUROC of any model is a probabilistic term: $P(s^1 > s^0)$, where $s^k$ is the distribution of scores from the model for class $k$. In practice the AUROC is never known because the distribution of data is unknown! However, an unbiased estimate of the AUROC (a.k.a the empirical AUROC) can be calculated through one of several approaches. # # The first method is to draw the [ROC curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) by measuring the sensitivity/specificity across all thresholds, and then using the [trapezoidal rule](https://en.wikipedia.org/wiki/Trapezoidal_rule) for calculating the integral. 
This approach is computationally inefficient and should only be done for visualization purposes. A second method to obtain the empirical AUROC is to simply calculate the percentage of times the positive class score exceeds the negative class score: # # $$ # \begin{align} # AUC &= \frac{1}{n_1n_0} \sum_{i: y_i=1} \sum_{j: y_j=0} I(s_i > s_j) + 0.5\cdot I(s_i = s_j) \label{eq:auc_pair} # \end{align} # $$ # # Where $y_i$ is the binary label for the $i^{th}$ observation and $n_k$ is the number of instances for class $k$. If we assume that the positive class is some fraction of the observation in the population: $P(y=1) = c$, then on average, calculating the AUROC via \eqref{eq:auc_pair} requires $c(1-c)n^2$ operations which means $O(AUC)=n^2$. For larger sample sizes this quadratic complexity will lead to long run times. One method to bound the computational complexity of \eqref{eq:auc_pair} is to randomly sample, with replacement, $m$ samples from each class the data to get a stochastic approximation of the AUC. # # $$ # \begin{align} # \tilde{AUC} &= \frac{1}{m} \sum_{i} P(\tilde{s_i}^1 > \tilde{s_i}^0) \label{eq:auc_rand} # \end{align} # $$ # # Where $\tilde{s_i}^k$ is a random instance from the scores of class $k$. The stochastic AUROC approach has the nice computational advantage that it is $O(m)$. As with other stochastic methods, \eqref{eq:auc_rand} requires knowledge of the sampling variation of the statistic and seeding, which tends to discourage its use in practice. This post will encourage the use of the rank order of the data to calculate the empirical AUROC. # # $$ # \begin{align} # rAUC &= \frac{1}{n_1n_0} \sum_{i: y_i=1} r_i - \frac{n_1(n_1 +1)}{2} \label{eq:auc_rank} # \end{align} # $$ # # Where $r_i$ is the sample rank of the data. Since ranking a vector is $O(n\log n)$, the computational complexity of \eqref{eq:auc_rank} is linearithmic, which will mean significant speed ups over \eqref{eq:auc_pair}. 
# ## (2) Run-time comparisons # # The code block below shows the run-times for the different approaches to calculate the AUROC from section (1) across different sample sizes ($n$) with different positive class proportions ($n_1/n$). The stochastic approach using $m = 5 n$. It is easy to generate data from two distributions so that the population AUROC can be known in advance. For example, if $s^1$ and $s^0$ come from the normal distribution: # # $$ # \begin{align*} # s_i^0 \sim N(0,1)&, \hspace{2mm} s_i^1 \sim N(\mu,1), \hspace{2mm} \mu \geq 0, \\ # P(s_i^1 > s_i^0) &= \Phi\big(\mu / \sqrt{2}\big). # \end{align*} # $$ # # Alternatively one could use two exponential distributions: # # $$ # \begin{align*} # s_i^0 \sim Exp(1)&, \hspace{2mm} s_i^1 \sim Exp(\lambda^{-1}), \hspace{2mm} \lambda \geq 1, \\ # P(s_i^1 > s_i^0) &= \frac{\lambda}{1+\lambda}. # \end{align*} # $$ # # It is easy to see that scale parameter of the normal or exponential distribution can determined *a priori* to match some pre-specific AUROC target. # # $$ # \begin{align*} # \mu^* &= \sqrt{2} \cdot \Phi^{-1}(AUC) \\ # \lambda^* &= \frac{AUC}{1-AUC} # \end{align*} # $$ # # The simulations in this post will use the normal distribution for simplicity, although using the exponential distribution will change the results of the analysis. The reason is that the variance of the AUROC will be identical regardless of the distribution that generated it, as long as those two distributions have the same AUROC, of course. 
# +
""" DEFINE HELPER FUNCTIONS NEEDED THROUGHOUT POST """
import os
import numpy as np
import pandas as pd
import plotnine
from plotnine import *
from scipy import stats
from scipy.interpolate import UnivariateSpline
from timeit import timeit
from sklearn.metrics import roc_curve, auc

def rvec(x):
    """Coerce x to a (1, k) row vector."""
    return np.atleast_2d(x)

def cvec(x):
    """Coerce x to a (k, 1) column vector."""
    return rvec(x).T

def auc_pair(y, s):
    """Empirical AUROC by comparing every positive/negative score pair.

    Implements eq. (auc_pair): ties get half credit. O(n1*n0) work.
    y: binary label array; s: score array aligned with y.
    """
    s1, s0 = s[y == 1], s[y == 0]
    n1, n0 = len(s1), len(s0)
    count = 0
    for i in range(n1):
        count += np.sum(s1[i] > s0)
        count += 0.5*np.sum(s1[i] == s0)
    return count/(n1*n0)

def auc_rand(y, s, m):
    """Stochastic O(m) approximation of the AUROC from m resampled pairs.

    FIX: ties now receive half credit (matching auc_pair and eq. auc_pair);
    the original np.mean(s1 > s0) silently scored ties as losses.
    """
    s1 = np.random.choice(s[y == 1], m, replace=True)
    s0 = np.random.choice(s[y == 0], m, replace=True)
    return np.mean((s1 > s0) + 0.5*(s1 == s0))

def auc_rank(y, s):
    """Empirical AUROC via the rank (Mann-Whitney) formula, eq. (auc_rank).

    O(n log n) due to the single ranking call; exactly equals auc_pair.
    """
    n1 = sum(y)
    n0 = len(y) - n1
    den = n0 * n1
    num = sum(stats.rankdata(s)[y == 1]) - n1*(n1+1)/2
    return num / den

def dgp_auc(n, p, param, dist='normal'):
    """Draw n labels/scores whose population AUROC is known in advance.

    p: positive-class probability; param: mean shift (normal) or scale
    lambda (exp) of the positive class. Returns (y, s).
    """
    n1 = np.random.binomial(n,p)
    n0 = n - n1
    if dist == 'normal':
        s0 = np.random.randn(n0)
        s1 = np.random.randn(n1) + param
    if dist == 'exp':
        s0 = np.random.exponential(1,n0)
        s1 = np.random.exponential(param,n1)
    s = np.concatenate((s0, s1))
    y = np.concatenate((np.repeat(0, n0), np.repeat(1, n1)))
    return y, s

# +
# Draw one exponential and one normal dataset with the same population AUROC
target_auc = 0.75
mu_75 = np.sqrt(2) * stats.norm.ppf(target_auc)
lam_75 = target_auc / (1 - target_auc)
n, p = 500, 0.5
np.random.seed(2)
y_exp, s_exp = dgp_auc(n, p, lam_75, 'exp')
y_norm, s_norm = dgp_auc(n, p, mu_75, 'normal')
fpr_exp, tpr_exp, _ = roc_curve(y_exp, s_exp)
fpr_norm, tpr_norm, _ = roc_curve(y_norm, s_norm)
df = pd.concat([pd.DataFrame({'fpr':fpr_exp,'tpr':tpr_exp,'tt':'Exponential'}),
                pd.DataFrame({'fpr':fpr_norm,'tpr':tpr_norm, 'tt':'Normal'})])
# Annotation coordinates/labels for the in-panel AUC text
tmp_txt = df.groupby('tt')[['fpr','tpr']].mean().reset_index().assign(fpr=[0.15,0.15],tpr=[0.85,0.95])
tmp_txt = tmp_txt.assign(lbl=['AUC: %0.3f' % auc_rank(y_exp, s_exp),
                              'AUC: %0.3f' % auc_rank(y_norm, s_norm)])
plotnine.options.figure_size = (4, 3)
# FIX: legend title was misspelled 'Distrubition'
gg_roc = (ggplot(df,aes(x='fpr',y='tpr',color='tt')) + theme_bw() +
          geom_step() + labs(x='FPR',y='TPR') +
          scale_color_discrete(name='Distribution') +
          geom_abline(slope=1,intercept=0,linetype='--') +
          geom_text(aes(label='lbl'),size=10,data=tmp_txt))
gg_roc # ggtitle('ROC curve by distribution')

# +
# Get run-times for different sizes of n
p_seq = [0.1, 0.3, 0.5]
n_seq = np.arange(25, 500, 25)
nrun = 1000
c = 5
if 'df_rt.csv' in os.listdir():
    df_rt = pd.read_csv('df_rt.csv')
else:
    np.random.seed(nrun)
    holder = []
    for p in p_seq:
        print(p)
        for n in n_seq:
            cont = True
            m = c * n
            # Redraw until at least one positive label is present
            while cont:
                y, s = dgp_auc(n, p, 0, dist='normal')
                cont = sum(y) == 0
            ti_rand = timeit('auc_rand(y, s, m)',number=nrun,globals=globals())
            ti_rank = timeit('auc_rank(y, s)',number=nrun,globals=globals())
            ti_pair = timeit('auc_pair(y, s)',number=nrun,globals=globals())
            tmp = pd.DataFrame({'rand':ti_rand, 'rank':ti_rank, 'pair':ti_pair,
                                'p':p, 'n':n},index=[0])
            holder.append(tmp)
    df_rt = pd.concat(holder).melt(['p','n'],None,'method')
    df_rt.to_csv('df_rt.csv',index=False)

plotnine.options.figure_size = (7, 3.0)
gg_ti = (ggplot(df_rt,aes(x='n',y='value',color='method')) + theme_bw() +
         facet_wrap('~p',labeller=label_both) + geom_line() +
         scale_color_discrete(name='Method',labels=['Pairwise','Stochastic','Rank']) +
         labs(y='Seconds (1000 runs)', x='n'))
gg_ti # ggtitle('AUROC run-time') +
# -

# Figure 1 provides an example of two ROC curves coming from a Normal and Exponential distribution. Though the empirical AUROCs between the two curves are virtually identical, their respective sensitivity/specificity trade-offs are different. The Exponential distribution tends to have a more favourable sensitivity for high thresholds because of the right skew of the data. This figure is a reminder of some of the inherent limitations with using the AUROC as an evaluation measure. Although to repeat, the distribution of the AUROC statistic between these, or other, distributions would be the same.
#
# The significant runtime performance gains from using the ranking approach in \eqref{eq:auc_rank} are shown in Figure 2.
# The pairwise method from \eqref{eq:auc_pair} is many orders of magnitude slower once the sample size is more than a few dozen observations. The stochastic method's run time is shown to be slightly better than the ranking method. This is to be expected given that \eqref{eq:auc_rand} is linear in $n$. However, using the stochastic approach requires picking a permutation size that leads to sufficiently tight bounds around the point estimate. The simulations below show the variation around the estimate by the number of draws.

# +
# Measure how tight the stochastic AUROC approximation is per draw budget
nsim = 100
n_seq = [100, 500, 1000]
c_seq = np.arange(1,11,1).astype(int)
if 'df_se.csv' in os.listdir():
    df_se = pd.read_csv('df_se.csv')
else:
    np.random.seed(nsim)
    frames = []
    for n in n_seq:
        err_rows = []
        for sim in range(nsim):
            y, s = dgp_auc(n, p, 0, dist='normal')
            auc_exact = auc_pair(y, s)
            # nsim stochastic estimates for every multiplier c in c_seq
            draw_mat = np.array([[auc_rand(y, s, n*c) for c in c_seq]
                                 for _ in range(nsim)])
            err_rows.append(np.std(auc_exact - draw_mat,axis=0))
        frame_n = pd.DataFrame(np.array(err_rows)).melt(None,None,'c','se').assign(n=n)
        frames.append(frame_n)
    df_se = pd.concat(frames).reset_index(None, True)
    # Map the melted column index (0..9) back onto the draw multiplier c
    df_se.c = df_se.c.map(dict(zip(list(range(len(c_seq))),c_seq)))
    df_se.to_csv('df_se.csv',index=False)
df_se = df_se.assign(sn=lambda x: pd.Categorical(x.n.astype(str),[str(z) for z in n_seq]))

plotnine.options.figure_size = (4, 3)
gg_se = (ggplot(df_se, aes(x='c',y='se',color='sn')) + theme_bw() +
         labs(y='Standard error',x='Number of draws * n') +
         geom_jitter(height=0,width=0.1,size=0.5,alpha=0.5) +
         scale_color_discrete(name='n') +
         scale_x_continuous(breaks=list(c_seq)))
gg_se # ggtitle('Variation around point estimate from randomization method')
# -

# Figure 3 shows that the number of samples needed to get a small standard error to the ±1% is 4000 draws. In other words, if the actual empirical AUROC was 71%, we would expect 95% of the realizations to be around the 69-73% range. To get to the ±0.5% requires 10K draws. This shows that unless the user is happy to tolerate an error range of more than a percentage point, hundreds of thousands of draws will likely be needed.

# ## (3) Inference approaches
#
# After reviewing the different approaches for calculating the point estimate of the empirical AUROC, attention can now be turned to doing inference on this term. Knowing that a classifier has an AUROC on 78% on a test set provides little information if there is no quantification of the uncertainty around this range. In this section, we'll discuss three different approaches for generating confidence intervals ([CIs](https://en.wikipedia.org/wiki/Confidence_interval)) which are the most common method of uncertainty quantification in frequentist statistics. A two-sided CI at the $1-\alpha$% level is a random variable that has the following property: $P([l, u] \in AUC ) \geq 1-\alpha$. In other words, the probability that the true AUROC is contained within this upper and lower bound, $l$ and $u$ (which are random variables), is at least $1-\alpha$%, meaning the true statistic of interest (the AUROC) fails to be *covered* by this interval at most $\alpha$% of the time. An exact CI will cover the true statistic of interest exactly $1-\alpha$% of the time, giving the test maximum power.
#
# The approaches below are by no means exhaustive. Readers are encouraged to review other [methods](https://arxiv.org/pdf/1804.05882.pdf) for other ideas.

# ### Approach #1: Asymptotic U
#
# As was previously mentioned, the AUROC is equivalent to an MWU test. The asymptotic properties of this statistic have been known for [more than 70 years](https://projecteuclid.org/euclid.aoms/1177730491). Under the null hypothesis assumption that $P(s_i^1 > s_i^0) = 0.5$, the asymptotic properties of the U statistic for ranks can be shown to be:
#
# $$
# \begin{align*}
# z &= \frac{U - \mu_U}{\sigma_U} \sim N(0,1) \\
# \mu_U &= \frac{n_0n_1}{2} \\
# \sigma^2_U &= \frac{n_0n_1(n_0+n_1+1)}{12} \\
# U &= n_1n_0 \cdot \max \{ AUC, (1-AUC) \} \\
# \end{align*}
# $$
#
# Note that additional corrections need to be applied in the case of data which has ties, but I will not cover this issue here. There are two clear weaknesses to this approach. First, it appeals to the asymptotic normality of the $U$ statistic, which may be a poor approximation when $n$ is small. Second, this formula only makes sense for testing a null hypothesis of $AUC_0=0.5$. Notice that the constant in the denominator of the variance, 12, is the same as the constant in the variance of a [uniform distribution](https://en.wikipedia.org/wiki/Continuous_uniform_distribution). This is not a coincidence as the distribution of rank order statistics is uniform when the data come from the same distribution. To estimate this constant for $AUC\neq 0.5$, Monte Carlo simulations will be needed. Specifically we want to find the right constant $c(AUC)$ for the variance of the AUROC:
#
# $$
# \begin{align*}
# \sigma^2_U(AUC) &= \frac{n_0n_1(n_0+n_1+1)}{c(AUC)}
# \end{align*}
# $$
#
# Even though it is somewhat computationally intensive to calculate these normalizing constants, their estimates hold true regardless of the sample sizes, as in $c(AUC;n_0,n_1)=c(AUC;n_0',n_1')$ for all $n_k, n_k' \in \mathbb{R}^+$. The code below estimates $c()$ and uses a spline to interpolate for values of the AUROC between the realized draws.

# PRECOMPUTE THE VARIANCE CONSTANT...
# Estimate c(AUC) by Monte Carlo (cached to dat_var.csv) and fit a spline.
if 'dat_var.csv' in os.listdir():
    dat_var = pd.read_csv('dat_var.csv')
else:
    np.random.seed(1)
    nsim = 10000
    n1, n0 = 500, 500
    den = n1 * n0
    auc_seq = np.arange(0.5, 1, 0.01)
    holder = np.zeros(len(auc_seq))
    # FIX: loop variable renamed from `auc` (it shadowed sklearn.metrics.auc)
    # and the unused label matrix `Y` allocated each iteration was removed.
    for i, auc_val in enumerate(auc_seq):
        print(i)
        mu = np.sqrt(2) * stats.norm.ppf(auc_val)
        Eta = np.r_[np.random.randn(n1, nsim)+mu, np.random.randn(n0,nsim)]
        # Ranks of the positive-class scores within each simulated column
        R1 = stats.rankdata(Eta,axis=0)[:n1]
        Amat = (R1.sum(0) - n1*(n1+1)/2) / den
        # Invert the variance formula to back out the constant c(AUC)
        holder[i] = (n0+n1+1) / Amat.var() / den
    dat_var = pd.DataFrame({'auc':auc_seq, 'c':holder})
    # c(AUC) is symmetric around 0.5, so mirror the grid onto (0, 0.5)
    dat_var = pd.concat([dat_var.iloc[1:].assign(auc=lambda x: 1-x.auc),
                         dat_var]).sort_values('auc').reset_index(None, True)
    dat_var.to_csv('dat_var.csv', index=False)

# Calculate the spline
spl = UnivariateSpline(x=dat_var.auc, y=dat_var.c)
dat_spline = pd.DataFrame({'auc':dat_var.auc, 'spline':spl(dat_var.auc)})

plotnine.options.figure_size=(4,3)
gg_c = (ggplot(dat_var,aes(x='auc',y='np.log(c)')) + theme_bw() +
        geom_point()+labs(y='log c(AUC)',x='AUROC') +
        geom_line(aes(x='auc',y='np.log(spline)'), data=dat_spline,color='red') +
        ggtitle('Red line is spline (k=3)'))
gg_c

# Figure 4 shows that the constant term is growing quite rapidly. The stochastic estimate of the constant at AUROC=0.5 of 11.9 is close to the true population value of 12.
#
# ### Approach #2: Newcombe's Wald Method
#
# A second approach is to use a (relatively) new approach from [Newcombe (2006)](https://onlinelibrary.wiley.com/doi/10.1002/sim.2324). Unlike the asymptotic approach above, Newcombe's method automatically calculates the different levels of the variance for different values of the AUROC.
#
# $$
# \begin{align*}
# \sigma^2_{AUC} &= \frac{AUC(1-AUC)}{(n_1-1)(n_0-1)} \cdot \Bigg[ 2n - 1 - \frac{3n-3}{(2-AUC)(1+AUC)} \Bigg]
# \end{align*}
# $$
#
# Assuming $n_1 = c\cdot n$ then $O(\sigma^2_{AUC})=\frac{AUC(1-AUC)}{n}$, which is very similar to the variance of the binomial proportion (see [here](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval)).

# ### Approach #3: Bootstrapping ranks
#
# The final inference approach is that of the [bootstrap](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)), which generates new copies of the statistic by resampling the data. Though the ability to get additional randomness by resampling rows of the data seems a little mysterious, if not dubious, it has a solid mathematical foundation. The bootstrap is equivalent to drawing from the empirical CDF (eCDF) of a random variable. Since the eCDF is known to be a [consistent](https://en.wikipedia.org/wiki/Glivenko%E2%80%93Cantelli_theorem) estimate of the true CDF, the error of the bootstrap will naturally decrease as $n$ grows. The bootstrap has the attractive property that it is fully non-parametric and works for a broad class of statistics. Note that there is no one way to do the "bootstrap" for inference, and I compare three common approaches: i) quantile, ii) classic, iii) studentized. For a review of other approaches, see [here](http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf).
#
# $$
# \begin{align*}
# \tilde{AUC}^{(k)} &= \frac{1}{n_1n_0} \sum_{i: y_i=1} \tilde{r}_i^{(k)} - \frac{n_1(n_1 +1)}{2} \\
# \sigma^2_{BS} &= \frac{1}{K-1}\sum_{k=1}^K \big(\tilde{AUC}^{(k)} - \bar{\tilde{AUC}}\big)^2
# \end{align*}
# $$
#
# The $k^{th}$ bootstrap (out of $K$ total bootstraps), is generated by sampling, with replacement, the ranks of the positive score classes, and the bootstrap AUROC is calculated using the same formula from \eqref{eq:auc_rank}.
# Bootstrapping the ranks has the incredibly attractive property that the relative runtime is going to scale with the total number of bootstraps ($K$). If we had to recalculate the ranks for every bootstrap sample, then this would require an additional sorting call. The formulas for the three bootstrapping approaches are shown below for a $1-\alpha$% symmetric CI.
#
# $$
# \begin{align*}
# \text{Quantile}& \\
# [l, u] &= \big[\tilde{AUC}^{(k)}_{\lfloor\alpha/2\cdot K\rfloor}, \tilde{AUC}^{(k)}_{\lceil(1-\alpha/2)\cdot K\rceil} \big] \\
# \\
# \text{SE}& \\
# [l, u] &= \big[AUC + \sigma_{BS}\cdot z_{\alpha/2}, AUC - \sigma_{BS}\cdot z_{\alpha/2}\big] \\
# \\
# \text{Studentized}& \\
# [l, u] &= \big[AUC + \sigma_{BS}\cdot z_{\alpha/2}^*, AUC - \sigma_{BS}\cdot z_{1-\alpha/2}^*\big] \\
# z_\alpha^* &= \Bigg[ \frac{\tilde{AUC}^{(k)} - AUC}{\sigma^{(k)}_{BS}} \Bigg]_{\lfloor\alpha\cdot K\rfloor}
# \end{align*}
# $$
#
# The quantile approach simply takes the empirical $\alpha/2$ and $1-\alpha/2$ quantiles of the AUROC from its bootstrapped distribution. Though the quantile approach is easily suited to skewed bootstrapped distributions (i.e. the CIs are not symmetric), it is known to be biased for small sample sizes. The classic bootstrap simply uses the bootstrapped AUROCs to estimate its empirical variance, and then uses the standard normal approximation to generate CIs. The Studentized approach combines the estimate of the variance from the SE/classic approach but also takes into account the possibility of a skewed distribution. For each bootstrap sample, an additional $K$ (or some large number) samples are drawn, so that each bootstrapped sample has an estimate of its variance. These studentized, or normalized, scores are then used in place of the quantile from the normal distribution.

# ## (4) Simulations
#
# Now we are ready to test the bootstrapping methods against their analytic counterparts.
# The simulations below will use a 10% positive class balance, along with a range of different sample sizes. Symmetric CIs will be calculated for the 80%, 90%, and 95% level. A total of 1500 simulations are run. An 80% symmetric CI that is exact should have a coverage of 80%, meaning that the true AUROC is contained within the CI 80% of the time. A CI that has a coverage below its nominal level will have a type-1 error rate that is greater than expected, whilst a CI that has coverage above its nominal level will have less power (i.e. a higher type-II error). In other words, the closer a CI is to its nominal level, the better.

# +
""" HELPER FUNCTION TO RETURN +- INTERVALS
A: array of AUCs
se: array of SEs
cv: critical values (can be array: will be treated as 1xk)
"""
def ret_lbub(A, se, cv, method):
    # Outer "product" via broadcasting: one column per critical value
    ub = cvec(A)+cvec(se)*rvec(cv)
    lb = cvec(A)-cvec(se)*rvec(cv)
    df_ub = pd.DataFrame(ub,columns=cn_cv).assign(bound='upper')
    df_lb = pd.DataFrame(lb,columns=cn_cv).assign(bound='lower')
    df = pd.concat([df_ub, df_lb]).assign(tt=method)
    return df

# Simulation parameters
nsim = 1500          # Monte Carlo repetitions per (n, auc) condition
prop = 0.1           # positive class balance
n_bs = 1000          # bootstrap replicates
n_student = 250      # inner bootstraps for the studentized SE
n_seq = [50, 100, 250, 1000]#[]
auc_seq = [0.5, 0.7, 0.9 ] #"true" AUROC between the distributions
pvals = (1-np.array([0.8, 0.9, 0.95]))/2
crit_vals = np.abs(stats.norm.ppf(pvals))
cn_cv = ['p'+str(i+1) for i in range(len(pvals))]  # one column per CI level
np.random.seed(1)
if 'res.csv' in os.listdir():
    res = pd.read_csv('res.csv')
else:
    holder = []
    for n in n_seq:
        for auc in auc_seq:
            print('n: %i, AUROC: %0.2f' % (n, auc))
            n1 = int(np.round(n * prop))
            n0 = n - n1
            den = n1*n0
            # Mean shift that yields the target population AUROC
            mu = np.sqrt(2) * stats.norm.ppf(auc)
            # All nsim simulations drawn at once: columns are simulations
            Eta = np.r_[np.random.randn(n1, nsim)+mu, np.random.randn(n0,nsim)]
            Y = np.r_[np.zeros([n1,nsim],dtype=int)+1, np.zeros([n0,nsim],dtype=int)]
            # Calculate the AUCs across the columns
            R1 = stats.rankdata(Eta,axis=0)[:n1]
            Amat = (R1.sum(0) - n1*(n1+1)/2) / den
            # --- Approach 1: Asymptotic U --- #
            # Variance uses the spline-interpolated constant c(AUC)
            sd_u = np.sqrt((n0+n1+1)/spl(Amat)/den)
            df_asym = ret_lbub(Amat, sd_u, crit_vals, 'asymptotic')
            # --- Approach 2: Newcombe's wald
            sd_newcombe = np.sqrt(Amat*(1-Amat)/((n1-1)*(n0-1))*(2*n-1-((3*n-3)/((2-Amat)*(1+Amat)))))
            df_newcombe = ret_lbub(Amat, sd_newcombe, crit_vals, 'newcombe')
            # --- Approach 3: Bootstrap the ranks --- #
            # Resample rows of the rank matrix; no re-sorting required
            R1_bs = pd.DataFrame(R1).sample(frac=n_bs,replace=True).values.reshape([n_bs]+list(R1.shape))
            auc_bs = (R1_bs.sum(1) - n1*(n1+1)/2) / den
            sd_bs = auc_bs.std(0,ddof=1)
            # - (i) Standard error method - #
            df_bs_se = ret_lbub(Amat, sd_bs, crit_vals, 'bootstrap_se')
            # - (ii) Quantile method - #
            df_lb_bs = pd.DataFrame(np.quantile(auc_bs,pvals,axis=0).T,columns=cn_cv).assign(bound='lower')
            df_ub_bs = pd.DataFrame(np.quantile(auc_bs,1-pvals,axis=0).T,columns=cn_cv).assign(bound='upper')
            df_bs_q = pd.concat([df_ub_bs, df_lb_bs]).assign(tt='bootstrap_q')
            # - (iii) Studentized - #
            # Each outer bootstrap gets its own inner-bootstrap SE estimate
            se_bs_s = np.zeros(auc_bs.shape)
            for j in range(n_bs):
                R1_bs_s = pd.DataFrame(R1_bs[j]).sample(frac=n_student,replace=True).values.reshape([n_student]+list(R1.shape))
                auc_bs_s = (R1_bs_s.sum(1) - n1*(n1+1)/2) / den
                se_bs_s[j] = auc_bs_s.std(0,ddof=1)
            # Get the t-score dist
            t_bs = (auc_bs - rvec(Amat))/se_bs_s
            df_lb_t = pd.DataFrame(cvec(Amat) - cvec(sd_bs)*np.quantile(t_bs,1-pvals,axis=0).T,columns=cn_cv).assign(bound='lower')
            df_ub_t = pd.DataFrame(cvec(Amat) - cvec(sd_bs)*np.quantile(t_bs,pvals,axis=0).T,columns=cn_cv).assign(bound='upper')
            df_t = pd.concat([df_ub_t, df_lb_t]).assign(tt='bootstrap_s')
            # Combine
            tmp_sim = pd.concat([df_asym, df_newcombe, df_bs_se, df_bs_q, df_t]).assign(auc=auc, n=n)
            holder.append(tmp_sim)
    # Merge and save
    res = pd.concat(holder)
    res = res.rename_axis('idx').reset_index().melt(['idx','bound','tt','auc','n'],cn_cv,'tpr')
    res = res.pivot_table('value',['idx','tt','auc','n','tpr'],'bound').reset_index()
    # Translate the pJ column labels back to the nominal coverage levels
    res.tpr = res.tpr.map(dict(zip(cn_cv, 1-2*pvals)))
    res = res.assign(is_covered=lambda x: (x.lower <= x.auc) & (x.upper >= x.auc))
    res.to_csv('res.csv',index=False)

# Empirical coverage per method/AUROC/sample size/nominal level
res_cov = res.groupby(['tt','auc','n','tpr']).is_covered.mean().reset_index()
res_cov = res_cov.assign(sn = lambda x: pd.Categorical(x.n, x.n.unique()))
lvls_approach = ['asymptotic','newcombe','bootstrap_q','bootstrap_se','bootstrap_s']
lbls_approach = ['Asymptotic', 'Newcombe', 'BS (Quantile)', 'BS (Classic)', 'BS (Studentized)']
res_cov = res_cov.assign(tt = lambda x: pd.Categorical(x.tt, lvls_approach).map(dict(zip(lvls_approach, lbls_approach))))
res_cov.rename(columns={'tpr':'CoverageTarget', 'auc':'AUROC'}, inplace=True)
tmp = pd.DataFrame({'CoverageTarget':1-2*pvals, 'ybar':1-2*pvals})

plotnine.options.figure_size = (6.5, 5)
gg_cov = (ggplot(res_cov, aes(x='tt', y='is_covered',color='sn')) + theme_bw() +
          geom_point() + facet_grid('AUROC~CoverageTarget',labeller=label_both) +
          theme(axis_text_x=element_text(angle=90), axis_title_x=element_blank()) +
          labs(y='Coverage') + geom_hline(aes(yintercept='ybar'),data=tmp) +
          scale_color_discrete(name='Sample size'))
gg_cov
# -

# Figure 5 shows the coverage results for the different approaches across different conditions. Newcombe's method is consistently the worst performer, with the CIs being much too conservative. The estimated standard errors (SEs) are at least 40% larger than the asymptotic ones (code not shown), leading to a CI with significantly reduced power. The asymptotic approach and quantile/classic bootstrap have SEs which are too small when the sample size is limited, leading to under-coverage and an inflated type-I error rate. For sample sizes of at least 1000, the asymptotic intervals are quite accurate. The studentized bootstrap is by far the most accurate approach, especially for small sample sizes, and tends to be conservative (over-coverage). Overall the studentized bootstrap is the clear winner. However, it is also the most computationally costly, which means for large samples the asymptotic estimates may be better.

# ## (5) Ranking bootstraps?
#
# Readers may be curious whether ranking the bootstraps, rather than bootstrapping the ranks, may lead to better inference.
# Section (3) has already noted the obvious computational gains from bootstrapping the ranks. Despite my initial impression that ranking the bootstraps would lead to more variation because of the additional variation in the negative class, this turned out not to be the case due to the creation of ties in the scores which reduces the variation in the final AUROC estimate. The simulation block shows that the SE of the bootstrapped ranks is higher than the ranked bootstraps in terms of the AUROC statistic. Since the bootstrap approach did not have a problem of over-coverage, the smaller SEs will lead to higher type-I error rates, especially for small sample sizes. In this case, the statistical advantages of bootstrapping the ranks also coincide with a computational benefit.

# Compare the bootstrap SE of the AUROC when (a) resampling the positive-class
# ranks directly vs (b) resampling the raw scores and re-ranking.
if 'df_bs.csv' in os.listdir():
    df_bs = pd.read_csv('df_bs.csv')
else:
    seed = 1
    np.random.seed(seed)
    n_bs, nsim = 1000, 1500
    n1, n0, mu = 25, 75, 1
    # Columns are independent simulations; first n1 rows are the positives
    s = np.concatenate((np.random.randn(n1, nsim)+mu, np.random.randn(n0,nsim)))
    y = np.concatenate((np.repeat(1,n1),np.repeat(0,n0)))
    r = stats.rankdata(s,axis=0)[:n1]
    s1, s0 = s[:n1], s[n1:]
    # (a) bootstrap the positive-class ranks
    r_bs = pd.DataFrame(r).sample(frac=n_bs,replace=True,random_state=seed).values.reshape([n_bs]+list(r.shape))
    # (b) bootstrap the raw scores of both classes, then re-rank
    s_bs1 = pd.DataFrame(s1).sample(frac=n_bs,replace=True,random_state=seed).values.reshape([n_bs]+list(s1.shape))
    s_bs0 = pd.DataFrame(s0).sample(frac=n_bs,replace=True,random_state=seed).values.reshape([n_bs]+list(s0.shape))
    s_bs = np.concatenate((s_bs1, s_bs0),axis=1)
    r_s_bs = stats.rankdata(s_bs,axis=1)[:,:n1,:]
    auc_bs = (r_bs.sum(1) - n1*(n1+1)/2)/(n1*n0)
    auc_s_bs = (r_s_bs.sum(1) - n1*(n1+1)/2)/(n1*n0)
    # Bootstrap standard errors of the AUROC for each simulation
    se_bs = auc_bs.std(0)
    se_s_bs = auc_s_bs.std(0)
    df_bs = pd.DataFrame({'bs_r':se_bs, 'r_bs':se_s_bs})
    df_bs.to_csv('df_bs.csv', index=False)

# FIX: the printed values are mean bootstrap standard errors, not AUROCs;
# the original label incorrectly said "Mean AUROC".
print('Mean bootstrap SE for bootstrapping ranks: %0.3f, and ranking bootstraps: %0.3f' %
      (np.mean(df_bs.bs_r),np.mean(df_bs.r_bs)))
_rmd/extra_AUC_CI/auc_sim.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # K-means clustering demo

# ## 1. Different distance metrics

# +
from math import sqrt

def manhattan(v1,v2):
    """L1 (city-block) distance over the shared dimensions of v1 and v2."""
    res=0
    dimensions=min(len(v1),len(v2))
    for i in range(dimensions):
        res+=abs(v1[i]-v2[i])
    return res

def euclidean(v1,v2):
    """L2 (Euclidean) distance over the shared dimensions of v1 and v2."""
    res=0
    dimensions=min(len(v1),len(v2))
    for i in range(dimensions):
        res+=pow(abs(v1[i]-v2[i]),2)
    return sqrt(float(res))

def cosine(v1,v2):
    """Cosine distance: 1 - cosine similarity over the shared dimensions."""
    dotproduct=0
    dimensions=min(len(v1),len(v2))
    for i in range(dimensions):
        dotproduct+=v1[i]*v2[i]
    v1len=0
    v2len=0
    for i in range (dimensions):
        v1len+=v1[i]*v1[i]
        v2len+=v2[i]*v2[i]
    v1len=sqrt(v1len)
    v2len=sqrt(v2len)
    # we need distance here -
    # we convert cosine similarity into distance
    return 1.0-(float(dotproduct)/(v1len*v2len))

def pearson(v1,v2):
    """Pearson distance: 1 - Pearson correlation of v1 and v2.

    NOTE(review): the sums over v2 use all of v2 while the normalizing
    length is len(v1) — correct only when len(v1) == len(v2), which holds
    for the equal-length vectors used in this demo. Confirm before reuse.
    """
    # Simple sums
    sum1=sum(v1)
    sum2=sum(v2)
    # Sums of the squares
    sum1Sq=sum([pow(v,2) for v in v1])
    sum2Sq=sum([pow(v,2) for v in v2])
    # Sum of the products
    pSum=sum([v1[i]*v2[i] for i in range(min(len(v1),len(v2)))])
    # Calculate r (Pearson score)
    numerator=pSum-(sum1*sum2/len(v1))
    denominator=sqrt((sum1Sq-pow(sum1,2)/len(v1))*(sum2Sq-pow(sum2,2)/len(v1)))
    if denominator==0: return 1.0
    # we need distance here -
    # we convert pearson correlation into distance
    return 1.0-numerator/denominator

def tanimoto(v1,v2):
    """Tanimoto (Jaccard) distance on the non-zero supports of v1 and v2."""
    c1,c2,shared=0,0,0
    for i in range(len(v1)):
        if v1[i]!=0 or v2[i]!= 0:
            if v1[i]!=0: c1+=1 # in v1
            if v2[i]!=0: c2+=1 # in v2
            if v1[i]!=0 and v2[i]!=0: shared+=1 # in both
    # we need distance here -
    # we convert tanimoto similarity into distance
    return 1.0-(float(shared)/(c1+c2-shared))
# -

# ## 2. K-means clustering algorithm

# +
import random

# k-means clustering
def kcluster(rows,distance=euclidean,k=4):
    """Cluster rows into k groups with Lloyd's (k-means) algorithm.

    rows: list of equal-length numeric vectors.
    distance: pairwise distance function (defaults to euclidean).
    k: number of clusters.
    Returns a list of k lists holding the row indices assigned to each
    centroid on the final iteration (runs at most 100 iterations).
    """
    # Determine the minimum and maximum values for each dimension
    ranges=[(min([row[i] for row in rows]),max([row[i] for row in rows]))
            for i in range(len(rows[0]))]

    # Create k randomly placed centroids
    clusters=[[random.random()*(ranges[i][1]-ranges[i][0])+ranges[i][0]
               for i in range(len(rows[0]))] for j in range(k)]

    lastmatches=None
    bestmatches=None
    for t in range(100):
        print ('Iteration %d' % t)
        bestmatches=[[] for i in range(k)]

        # Find which centroid is the closest for each row
        for j in range(len(rows)):
            row=rows[j]
            bestmatch=0
            # FIX: cache the best distance instead of recomputing
            # distance(clusters[bestmatch],row) on every comparison,
            # which doubled the number of distance evaluations.
            bestdist=distance(clusters[0],row)
            for i in range(1,k):
                d=distance(clusters[i],row)
                if d<bestdist:
                    bestmatch=i
                    bestdist=d
            bestmatches[bestmatch].append(j)

        # If the results are the same as last time, this is complete
        if bestmatches==lastmatches: break
        lastmatches=bestmatches

        # Move the centroids to the average of the cluster members
        for i in range(k):
            avgs=[0.0]*len(rows[0])
            if len(bestmatches[i])>0:
                for rowid in bestmatches[i]:
                    for m in range(len(rows[rowid])):
                        avgs[m]+=rows[rowid][m]
                for j in range(len(avgs)):
                    avgs[j]/=len(bestmatches[i])
                clusters[i]=avgs

    return bestmatches
# -

# ## 3. Toy demo: clustering papers by title

# ### 3.1. Data preparation

# The input is a list of Computer Science paper titles from file [titles.txt](titles.txt).

# Print each title with its document index.
# FIX: use a with-block (the original leaked the file handle) and
# enumerate instead of a manual counter; output is unchanged.
file_name = "titles.txt"
with open(file_name, "r", encoding="utf-8") as f:
    for i, line in enumerate(f):
        print("document", i, ": ", line.strip())

# To compare documents written in Natural Language, we need to decide how to decide which attributes of a document are important. The simplest possible model is called a **bag of words**: that is we consider each word in a document as a separate and independent dimension. 
#
# First, we collect all different words occurring across all the document collection (called a corpus in NLP). These will become our dimensions.
# We create a vector as big as the entire vocabulary in a given corpus.
# Next we represent each document as a numeric vector: the number of occurrences of a given word becomes value in the corresponding vector dimension.
#
# Here are the functions for converting documents into bag of words:

# +
import re

# Returns dictionary of word counts for a text
def get_word_counts(text, all_words):
    """Count occurrences in `text` of words that are in `all_words`.

    Words in the module-level `stopwords` list are skipped.
    """
    wc={}
    words = get_words(text)
    # Loop over all the entries
    for word in words:
        if (word not in stopwords) and (word in all_words):
            wc[word] = wc.get(word,0)+1
    return wc

# splits text into words
def get_words(txt):
    # Split words by all non-alpha characters.
    # FIX: the original pattern [^A-Z^a-z]+ contained a stray literal '^'
    # inside the negated class, so '^' was wrongly treated as part of a
    # word instead of a separator.
    words=re.compile(r'[^A-Za-z]+').split(txt)
    # Convert to lowercase
    return [word.lower() for word in words if word!='']

# converts counts into a vector
def get_word_vector(word_list, wc):
    """Project the counts dict `wc` onto the dimensions in `word_list`."""
    v = [0]*len(word_list)
    for i in range(len(word_list)):
        if word_list[i] in wc:
            v[i] = wc[word_list[i]]
    return v

# prints matrix
def print_word_matrix(docs):
    for d in docs:
        print (d[0], d[1])
# -

# Some words of the document should be ignored. These are words that are very commonly used in all documents no matter the topic of the document: ''the'', ''it'', ''and'' etc. These words are called **stop words**. Which words to consider as stop words is application-dependent. One of possible stop words collection is given in file ''stop_words.txt''.

# +
# FIX: with-block instead of explicit open/close (no handle leak on error).
stop_words_file = "stop_words.txt"
with open(stop_words_file, "r", encoding="utf-8") as f:
    stopwords = [line.strip() for line in f]
print(stopwords[:20])
# -

# We collect all unique words and for each document we will count how many times each word is present.
# +
# Build the vocabulary and per-document word-count dictionaries.
file_name = "titles.txt"
f = open(file_name, "r", encoding="utf-8")
documents = []
all_words = {}

# transfer content of a file into a list of lines
lines = [line for line in f]

# tally every non-stop word across the whole collection
for line in lines:
    for w in get_words(line):
        if w not in stopwords:
            all_words[w] = all_words.get(w,0)+1

# keep only the words that occur more than once overall
unique_words = {w for w, count in all_words.items() if count > 1}

# create a matrix of word presence in each document
for doc_num, line in enumerate(lines, start=1):
    documents.append(["d"+str(doc_num), get_word_counts(line,unique_words)])

unique_words=list(unique_words)
print("All unique words:",unique_words)
print(documents)
# -

# Now we want to convert each document into a numeric vector:

# +
# Write a TSV file: header row of words, then one count vector per document.
with open(file_name.split('.')[0] + "_vectors.txt", "w") as out:
    # write a header which contains the words themselves
    for w in unique_words:
        out.write('\t' + w)
    out.write('\n')
    # one row per document: name followed by its word counts
    for doc_name, doc_wc in documents:
        vector = get_word_vector(unique_words, doc_wc)
        out.write(doc_name)
        for x in vector:
            out.write('\t' + str(x))
        out.write('\n')
# -

# Our data now looks like this matrix:

doc_vectors_file = "titles_vectors.txt"
f = open(doc_vectors_file, "r", encoding="utf-8")
s = f.read()
print(s)

# +
# This function will read document vectors file and produce 2D data matrix,
# plus the names of the rows and the names of the columns.
def read_vector_file(file_name):
    """Read a tab-separated document-vector file.

    Returns (rownames, colnames, data): colnames are the words from the
    header line, rownames the document ids, and data a list of rows of
    floats aligned with rownames.
    """
    # 'with' ensures the file handle is closed (the original leaked it)
    with open(file_name) as f:
        lines = [line for line in f]

    # First line is the column headers
    colnames = lines[0].strip().split('\t')[:]
    rownames = []
    data = []
    for line in lines[1:]:
        p = line.strip().split('\t')
        # First column in each row is the rowname; skip rows with no data
        if len(p) > 1:
            rownames.append(p[0])
            # The data for this row is the remainder of the row
            data.append([float(x) for x in p[1:]])
    return rownames, colnames, data


# This function will transpose the data matrix
def rotatematrix(data):
    """Transpose a rectangular 2D list (rows become columns)."""
    # zip(*data) pairs up the i-th element of every row, i.e. the columns
    return [list(col) for col in zip(*data)]
# -

# As the result of all this, we have the matrix where the rows are document vectors.
# Each vector dimension represents a unique word in the collection.
# The value in each dimension represents the count of this word in a particular document.

# ### 3.2. Clustering documents
#
# Performing k-means clustering.

# +
doc_vectors_file = "titles_vectors.txt"
docs, words, data = read_vector_file(doc_vectors_file)

num_clusters = 2
print('Searching for {} clusters:'.format(num_clusters))
# +
# kcluster and pearson are defined earlier in this notebook
clust = kcluster(data, distance=pearson, k=num_clusters)

print()
print('Document clusters')
print('=================')
for i in range(num_clusters):
    print('cluster {}:'.format(i + 1))
    print([docs[r] for r in clust[i]])
    print()
# -

# Does this grouping make sense?
for d in documents:
    print(d)

# ### 3.3. Clustering words by their occurrence in documents
# We may consider that the words are similar if they occur in the same document. We say that the words are connected - they belong to the same topic, they occur in a similar context.

# If we want to cluster words by their occurrences in the documents, all we need to do is to transpose the document matrix.
# Transpose the document-word matrix: rows now correspond to words and
# columns to documents, so clustering rows groups words by co-occurrence.
rdata=rotatematrix(data)

num_clusters = 3
print ('Grouping words into {} clusters:'.format(num_clusters))
# kcluster and cosine are defined earlier in this notebook
clust=kcluster(rdata,distance=cosine,k=num_clusters)
print()
print ('word clusters:')
print("=============")
# report each cluster's members, mapping row indices back to words
for i in range(num_clusters):
    print("cluster {}".format(i+1))
    print ([words[r] for r in clust[i]])
    print()

# Copyright &copy; 2022 <NAME>. All rights reserved.
kmeans_clustering_demo.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # + [markdown] id="cac<PASSWORD>" tags=[] # # 응용통계학 과제 # > 다중공선성 # # - toc:true # - branch: master # - badges: true # - comments: false # - author: 최서연 # - categories: [Applied Statistics, 다중공선성] # - # # 응용통계학 과제 20220512 # - 202150754 최서연 # > 다중공선성 # # 다중공선성이 존재하는 상황을 가정하고 # # # # 다중공선성을 어느 정도 제거한 모형 (M1)과 다중공선성이 내재되어 있는 모형 (M2) 을 고려하여 # # # # 두 모형의 예측력을 모의실험을 통해 비교하여라, # # # # 단, 실험은 여러 번 반복하여 평균적인 결과를 report하되 설명변수의 개수는 3개 이상으로 설정하여라. # # # # 이미 존재하는 문서들을 참고하거나 재현해도 무방함. # # # # (첨부된 문서 참고) # + Nrep = 200 n = 1000 hatb1 <- hatb2 <- c() te1 <- te2 <- c() for (k in 1:Nrep) { x1 = runif(n,-3,3) x2 = x1 + rnorm(n,0,0.01) #X = model.matrix(~ x1 + x2) #solve(t(X)%*%X) y = 2 + x1 + 2*x2 + rnorm(n) ind = sample(1:n,500) tx1 = x1[ind] tx2 = x2[ind] test_x1 = x1[-ind] test_x2 = x2[-ind] ty = y[ind] test_y = y[-ind] fit1 = lm(ty~tx1+tx2) fit2 = lm(ty~tx1) hatb1[k] = fit1$coefficients[2] hatb2[k] = fit2$coefficients[2] te1[k] = mean((test_y - predict(fit1,newdata=data.frame(test_x1,test_x2)))^2) te2[k] = mean((test_y - predict(fit2,newdata=data.frame(test_x1)))^2) #summary(lm(y~x1+x2)) #print(k) } c(mean(te1),mean(te2)) c(mean((hatb1-1)^2),mean((hatb2-2)^2)) # - # --- library(regclass) library(car) # --- # ## 시도 1 # + Nrep = 200 n = 1000 hatb1 <- hatb2 <- c() te1 <- te2 <- c() for (k in 1:Nrep) { x1 = runif(n,-3,3) x2 = x1 + rnorm(n,0,0.01) x3 = x1^2 + rnorm(n,0,0.01) y = 2 + x1 + 2*x2 + 3*x3 + rnorm(n) ind = sample(1:n,500) tx1 = x1[ind] tx2 = x2[ind] tx3 = x3[ind] test_x1 = x1[-ind] test_x2 = x2[-ind] test_x3 = x3[-ind] ty = y[ind] test_y = y[-ind] M2 = lm(ty~tx1+tx2+tx3) M1 = lm(ty~tx1) hatb1[k] = M2$coefficients[2] hatb2[k] = M1$coefficients[2] te1[k] = mean((test_y - predict(M1,newdata=data.frame(test_x1,test_x2,test_x3)))^2) te2[k] = 
mean((test_y - predict(M1,newdata=data.frame(test_x1)))^2) } c(mean(te1),mean(te2)) c(mean((hatb1-2)^2),mean((hatb2-1)^2)) # - # --- # ## 시도 2 # - $y_1=3+x_1+1.5x_2+3.5x_3+\epsilon$ # - $x_2=x_1+\epsilon$ # - $x_3=x_1+\epsilon$ # - 1000번 반복 # - 500개 데이터 x1 = runif(500) x2 = x1 + rnorm(500,0,0.01) x3 = x1 + rnorm(500,0,0.1) y1= 3 + x1 + 1.5*x2 + 3.5*x3 + rnorm(500) cor(x1,x2) cor(x1,x3) # 높은 상관계수 확인 M1 = lm(y1~x1) M2 = lm(y1~x1+x2+x3) VIF(M2) # 다중공선성 존재를 가정한 모형의 VIF 10 넘는 모습이었다. print(M1$coefficients) print(M2$coefficients) mean((y1-predict(M1,data.frame(x1)))^2) mean((y1-predict(M2,data.frame(x1,x2,x3)))^2) # 다중공선성을 제거한 모형이 다중공선성이 있는 모형보다 제곱평균오차가 컸다. # 반복 result1 = c() result2 = c() for (i in 1:1000){ x1 = runif(500) x2 = x1 + rnorm(500,0,0.01) x3 = x1 + rnorm(500,0,0.01) y1= 3 + x1 + 1.5*x2 + 3.5*x3 + rnorm(500) train_x1 = x1[1:250] train_x2 = x2[1:250] train_x3 = x3[1:250] train_y1 = y1[1:250] test_x1 = x1[251:500] test_x2 = x2[251:500] test_x3 = x3[251:500] test_y1 = y1[251:500] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2+train_x3) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3)))^2) } print(mean(result1));print(mean(result2)) # 다중공선성이 있는 모형과 다중공선성이 없는 모형의 MSE가 비슷한 값이 나왔다. 
M2$coefficients VIF(M2) # --- # ## 시도 3 # - $y_1=3+x_1+2x_2+3x_3+\epsilon$ # - $x_2=x_1^2+\epsilon$ # - $x_3=x_1^3+\epsilon$ # - 1000번 반복 # - 1000개 데이터 result1 = c() result2 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1^2 + rnorm(1000,0,0.01) x3 = x1^3 + rnorm(1000,0,0.01) y1= 3 + x1 + 2*x2 + 3*x3 + rnorm(1000) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_y1 = y1[1:500] test_x1 = x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2+train_x3) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3)))^2) } print(mean(result1));print(mean(result2)) M2$coefficients VIF(M2) # --- # + [markdown] tags=[] # ## 시도4 # - # - $y_1=3+x_1+2x_2+3x_3+\epsilon$ # - 시도 3과 다른 점: $\epsilon$의 $mean=0$, $sd=0.1$ 가정 # - $x_2=x_1^2+\epsilon$ # - $x_3=x_2^3+\epsilon$ # - 1000번 반복 # - 1000개 데이터 result1 = c() result2 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1^2 + rnorm(1000,0,0.01) x3 = x2^3 + rnorm(1000,0,0.01) y1= 3 + x1 + 2*x2 + 3*x3 + rnorm(1000,0,0.1) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_y1 = y1[1:500] test_x1 = x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2+train_x3) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3)))^2) } print(mean(result1));print(mean(result2)) M2$coefficients VIF(M2) # --- # + [markdown] tags=[] # ## 시도 5 # - # - $y_1=3+x_1+5x_2+10x_3+\epsilon$ # - $x_2=2x_1+\epsilon$ # - $x_3=3x_1+\epsilon$ # - 1000번 반복 # - 1000개 데이터 result1 = c() result2 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1*2 + rnorm(1000,0,0.01) x3 = x1*3 + rnorm(1000,0,0.01) y1= 3 + x1 + 5*x2 + 10*x3 + rnorm(1000) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_y1 = 
y1[1:500] test_x1 = x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2+train_x3) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3)))^2) } print(mean(result1));print(mean(result2)) M2$coefficients VIF(M2) # - $x_1$이랑 제곱관계였던 시도 4까지의 결과와 조금 다르게 거의 비슷한 모습 # --- # + [markdown] tags=[] # ## 시도 6 # - # - $y_1=3+x_1+5x_2+10x_3+\epsilon$ # - $x_2=2x_1+\epsilon$ # - $x_3=3x_1+\epsilon$ # - $x_4=4x_2+\epsilon$ # - 1000번 반복 # - 1000개 데이터 result1 = c() result2 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1*2 + rnorm(1000,0,0.01) x3 = x1*3 + rnorm(1000,0,0.01) x4 = x2*4 + rnorm(1000,0,0.01) y1= 3 + x1 + 5*x2 + 10*x3 + 15*x4 + rnorm(1000) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_x4 = x4[1:500] train_y1 = y1[1:500] test_x1 = x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_x4 = x4[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2+train_x3++train_x4) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3,test_x4)))^2) } print(mean(result1));print(mean(result2)) M2$coefficients VIF(M2) # --- # + [markdown] tags=[] # ## 시도 7 # - # - $y_1=3+x_1+5x_2+10x_3+15x_4+20x_5+\epsilon$ # - $x_2=2x_1+\epsilon$ # - $x_3=3x_1+\epsilon$ # - $x_4=4x_1+\epsilon$ # - $x_5=5x_1+\epsilon$ # - 1000번 반복 # - 1000개 데이터 # - 설명변수를 하나씩 제거해나가며 값 확인 result1 = c() result2 = c() result2_2 = c() result2_3 = c() result2_4 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1*2 + rnorm(1000,0,0.01) x3 = x1*3 + rnorm(1000,0,0.01) x4 = x1*4 + rnorm(1000,0,0.01) x5 = x1*5 + rnorm(1000,0,0.01) y1= 3 + x1 + 5*x2 + 10*x3 + 15*x4 + 20*x5 + rnorm(1000) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_x4 = x4[1:500] train_x5 = x5[1:500] train_y1 = y1[1:500] test_x1 = 
x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_x4 = x4[501:1000] test_x5 = x5[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2) M2_2 = lm(train_y1~train_x1+train_x2+train_x3) M2_3 = lm(train_y1~train_x1+train_x2+train_x3+train_x4) M2_4 = lm(train_y1~train_x1+train_x2+train_x3+train_x4+train_x5) result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2) result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2,test_x3)))^2) result2_2[i]=mean((test_y1-predict(M2_2,data.frame(test_x1,test_x2,test_x3)))^2) result2_3[i]=mean((test_y1-predict(M2_3,data.frame(test_x1,test_x2,test_x3,test_x4)))^2) result2_4[i]=mean((test_y1-predict(M2_4,data.frame(test_x1,test_x2,test_x3,test_x4,test_x5)))^2) } print(mean(result1));print(mean(result2));print(mean(result2_2));print(mean(result2_3));print(mean(result2_4)) M2$coefficients;M2_2$coefficients;M2_3$coefficients;M2_4$coefficients VIF(M2);VIF(M2_2);VIF(M2_3);VIF(M2_4) # --- # + [markdown] tags=[] # ## 시도 8 # - # - $y_1=3+x_1+1.5x_2+2x_3+2.5x_4+3x_5+\epsilon$ # - $x_2=x^2_1+\epsilon$ # - $x_3=x^3_1+\epsilon$ # - $x_4=x^4_1+\epsilon$ # - $x_5=x^5_1+\epsilon$ # - 1000번 반복 # - 1000개 데이터 # - 설명변수를 하나씩 제거해나가며 값 확인 result1 = c() result2 = c() result2_2 = c() result2_3 = c() result2_4 = c() for (i in 1:1000){ x1 = runif(1000) x2 = x1^2 + rnorm(1000,0,0.01) x3 = x1^3 + rnorm(1000,0,0.01) x4 = x1^4 + rnorm(1000,0,0.01) x5 = x1^5 + rnorm(1000,0,0.01) y1= 3 + x1 + 1.5*x2 + 2*x3 + 2.5*x4 + 3*x5 + rnorm(1000) train_x1 = x1[1:500] train_x2 = x2[1:500] train_x3 = x3[1:500] train_x4 = x4[1:500] train_x5 = x5[1:500] train_y1 = y1[1:500] test_x1 = x1[501:1000] test_x2 = x2[501:1000] test_x3 = x3[501:1000] test_x4 = x4[501:1000] test_x5 = x5[501:1000] test_y1 = y1[501:1000] M1 = lm(train_y1~train_x1) M2 = lm(train_y1~train_x1+train_x2) M2_2 = lm(train_y1~train_x1+train_x2+train_x3) M2_3 = lm(train_y1~train_x1+train_x2+train_x3+train_x4) M2_4 = 
lm(train_y1~train_x1+train_x2+train_x3+train_x4+train_x5)

    result1[i]=mean((test_y1-predict(M1,data.frame(test_x1)))^2)
    result2[i]=mean((test_y1-predict(M2,data.frame(test_x1,test_x2)))^2)
    result2_2[i]=mean((test_y1-predict(M2_2,data.frame(test_x1,test_x2,test_x3)))^2)
    result2_3[i]=mean((test_y1-predict(M2_3,data.frame(test_x1,test_x2,test_x3,test_x4)))^2)
    result2_4[i]=mean((test_y1-predict(M2_4,data.frame(test_x1,test_x2,test_x3,test_x4,test_x5)))^2)
}

print(mean(result1));print(mean(result2));print(mean(result2_2));print(mean(result2_3));print(mean(result2_4))

M2$coefficients;M2_2$coefficients;M2_3$coefficients;M2_4$coefficients

VIF(M2);VIF(M2_2);VIF(M2_3);VIF(M2_4)

# ---

# 다중공선성을 어느정도 제거한 모델(`M1`)과 제거하지 않은 모델들(`M2`,`M2_2`,`M2_3`,`M2_4`)을 비교해보았다.
# - 설명변수($x_1,x_2,x_3,x_4,x_5$)끼리 배수 관계에 있던 모델들과 다르게 거듭제곱 관계에 있던 모델들은 결과(MSE)가 비슷하긴 하지만 설명변수가 배수관계에 있던 모델만큼 비슷하진 않았다.
#     - 배수 관계 가정: `시도2`, `시도5`, `시도6`, `시도7`
#     - 거듭제곱 관계 가정: `시도3`, `시도4`, `시도8`
# - 다중공선성을 가정하여 모델을 만들었기 때문에 당연하게 `VIF;분산팽창요인`이 모든 시도에서 크게 나온 결과를 확인할 수 있었다.
# - `시도2`, `시도3`을 비교해보니 데이터 500개를 사용할 때보다 1000개를 사용할때 다중공선성을 가정한 MSE가 조금 더 컸다. 하지만 분산팽창계수의 차이는 1000개의 데이터를 사용했을때 더 작았다.
#     - 시도2보다 시도3에서 M1과 M2의 차이가 크다고 했지만 그 크기는 0.3정도의 차이이긴 하다.
#     - 분산팽창요인은 그래도 시도2와 시도3이 10을 충분히 넘긴 값이긴 했다.
# - `시도3`과 `시도4`의 차이는 반응변수 $y$의 $\epsilon$ 의 $rnorm$ 지정을 해줄때 시도4에 $mean=0, sd=0.1$을 지정해준 것이다.
#     - 결과를 보니 분산팽창계수가 낮아졌다. 하지만 여전히 모두 10을 넘었다.
# - `시도7`,`시도8` 을 미루어 보아 설명변수끼리 관계가 있고, 또 여러개 존재했을때 설명변수가 추가될수록 모형의 평균제곱오차가 조금씩 커지는 경향을 보였다.
#     - 즉, 예측력이 조금씩 줄어드는 경향을 보였다.
_notebooks/2022-05-13-as-hw.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from html_to_nena import html_tostring # + # Characters to be replaced replace = { # standardizing substutions '\u2011': '\u002d', # non-breaking hyphen to normal hyphen-minus '\u01dd': '\u0259', # 'ǝ' "turned e" to 'ə' schwa '\uf1ea': '\u003d', # SIL deprecated double hyphen to '=' equals sign '\u2026': '...', # '…' horizontal ellipsis to three dots 'J\u0335': '\u0248', # 'J' + short combining stroke to 'Ɉ' J with stroke 'J\u0336': '\u0248', # J' + long combining stroke to 'Ɉ' J with stroke '<y>': '\u02b8', # superscript small letter y # corrections of errors '\u002d\u032d': '\u032d\u002d', # Switch positions of Hyphen and Circumflex accent below 'ʾ>': '>ʾ', # misplaced alaph in superscript <sup>Pʾ</sup>afšɑ̄rī̀<sup>P</sup> (Urmi_C, somewhere?) # There may be some other stray alaph's and other anomalies out there. # Will have to think of some tests to find them. } # Barwar barwar_patterns = { 'gp-sectionheading-western': ( (('text_id', 'title'), '^\s*([A-Z]\s*[0-9]+)\s+(.*?)\s*$'), ), 'gp-subsectionheading-western': ( (('informant', 'place'), '^\s*Informant:\s+(.*)\s+\((.*)\)\s*$'), ), } # Urmi urmi_patterns = { 'gp-sectionheading-western': ( (('text_id',), '^\s*([A-Z]\s*[0-9]+)\s*'), ), 'gp-subsectionheading-western': ( (('title', 'informant', 'place'), '^\s*(.*?)\s*\(([^,]*),\s+(.*)\)\s*$'), (('title',), '^\s*(.*?)\s*$'), ), 'gp-subsubsectionheading-western': ( (('version', 'informant', 'place'), '^\s*(Version\s+[0-9]+):\s+(.*?)\s+\((.*)\)\s?$') ), } file = 'texts/bar text a8.html' def is_heading(e): return e.tag == 'h2' for line in html_tostring( 'texts/bar text a8.html', heading_patterns=barwar_patterns, is_heading=is_heading, text_start=is_heading, replace=replace, ): print(line) # + # Alternative (easier?) 
# format for patterns,
# without the need for class attribute strings,
# but with danger of ambiguity, so need to place
# more precise patterns first.
#
# Each entry is (regex, field_names): the regex's capture groups are bound,
# in order, to the listed field names.  The patterns are written as raw
# strings so that escapes such as \s reach the regex engine verbatim
# (non-raw strings containing \s trigger a SyntaxWarning on modern Python);
# the resulting string values are byte-identical to the originals.

# Barwar: "A 12 Title" heading, then "Informant: name (place)" subheading.
barwar_patterns2 = (
    (r'^\s*([A-Z]\s*[0-9]+)\s+(.*?)\s*$', ('text_id', 'title')),
    (r'^\s*Informant:\s+(.*)\s+\((.*)\)\s*$', ('informant', 'place')),
)

# Urmi: most specific patterns first so the catch-all title rule at the
# end does not shadow the more structured headings.
urmi_patterns2 = (
    (r'^\s*([A-Z]\s*[0-9]+)\s*', ('text_id',)),
    (r'^\s*(Version\s+[0-9]+):\s+(.*?)\s+\((.*)\)\s?$',
     ('version', 'informant', 'place')),
    (r'^\s*(.*?)\s*\(([^,]*),\s+(.*)\)\s*$', ('title', 'informant', 'place')),
    (r'^\s*(.*?)\s*$', ('title',)),
)
parse_nena/archive/ToStandardText.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Titanic exercise # # Titanic dataset. Mоже да се изтегли от тук: # https://www.kaggle.com/c/titanic # # #### Задача # # Да подобрим класирането на Стефан в kaggle. # # #### Изводи от лекцията # # След направените анализи в/у feature-те по време на лекцията на курса можем да стигнем до следните наблюдения и изводи: # - ... # - ... # # Също така можем да опитаме някакъв feature engineering, "почиставне" и обработка на данните като: # - ... # - ... # ### Стратегия # - ... # - ... # #### Започваме # # Нека първо да си инсталираме нужните библиотеки: # !pip install numpy scipy matplotlib ipython scikit-learn pandas pillow mglearn # Нека сега да import-нем всички нужни неща за нашата работа: # + import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import mglearn import warnings from IPython.display import display # %matplotlib inline warnings.filterwarnings('ignore') pd.options.display.max_rows = 20 # - from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score # Сега да заредим данните: original = pd.read_csv('data/titanic/train.csv', index_col='PassengerId') original original.isnull().sum().sort_values() # Да си направим копие на данните, с което ще работим: data = original.copy() data # ### Feature engineering & Data cleaning # # Сега ще си поиграем малко с данните, като ще следваме идеите описани по-горе. 
# # Но преди да започнем "масажирането" на данните ни, ще трябва да попълним празните места: data.Embarked.fillna('S', inplace=True) # + def create_title(data): data['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False) data.loc[data.Title == 'Mlle', 'Title'] = 'Miss' data.loc[data.Title == 'Mme', 'Title'] = 'Mrs' data.loc[data.Title == 'Ms', 'Title'] = 'Miss' other_titles = ['Dona', 'Dr', 'Rev', 'Col', 'Major', 'Countess', 'Don', 'Jonkheer', 'Capt', 'Lady', 'Sir'] data.Title = data.Title.replace(other_titles, 'Other') return data data = create_title(data) data.head(5) # + def add_age_by_title(data): age_by_title = data.groupby('Title').Age.mean() data.loc[(data.Age.isnull()) & (data.Title == 'Mr'), 'Age'] = age_by_title['Mr'] data.loc[(data.Age.isnull()) & (data.Title == 'Mrs'), 'Age'] = age_by_title['Mrs'] data.loc[(data.Age.isnull()) & (data.Title == 'Master'), 'Age'] = age_by_title['Master'] data.loc[(data.Age.isnull()) & (data.Title == 'Miss'), 'Age'] = age_by_title['Miss'] data.loc[(data.Age.isnull()) & (data.Title == 'Other'), 'Age'] = age_by_title['Other'] return data data = add_age_by_title(data) data.isnull().sum().sort_values() # + def add_age_group(data): data['AgeGroup'] = 0 data.loc[data['Age'] <= 16, 'AgeGroup'] = 0 data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'AgeGroup'] = 1 data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'AgeGroup'] = 2 data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'AgeGroup'] = 3 data.loc[data['Age'] > 64, 'AgeGroup'] = 4 return data data = add_age_group(data) data.head(2) # + def add_family_size_group_encoding(data): data['FamilySize'] = data['Parch'] + data['SibSp'] + 1 data['IsAlone'] = (data.FamilySize == 1).astype(float) data['IsSmallFamily'] = ((2 <= data.FamilySize) & (data.FamilySize < 5)).astype(float) data['IsLargeFamily'] = (5 <= data.FamilySize).astype(float) data['IsChild'] = (data.Age < 16).astype(float) data['IsAdult'] = (16 <= data.Age).astype(float) data = data.drop('FamilySize', axis=1) 
return data # add_family_size_group_encoding(data) # data.head(2) # + def add_fare_group(data): # data['FareRange'] = pd.qcut(data['Fare'], 4) data['FareGroup'] = 0 data.loc[data['Fare'] <= 7.91, 'FareGroup'] = 0 data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 14.454), 'FareGroup'] = 1 data.loc[(data['Fare'] > 14.454) & (data['Fare'] <= 31), 'FareGroup'] = 2 data.loc[(data['Fare'] > 31) & (data['Fare'] <= 513), 'FareGroup'] = 3 return data data = add_fare_group(data) data.head(2) # - def hot_encoding(data, col): dummies = pd.get_dummies(data[col], prefix=col) data = pd.concat([data, dummies],axis=1) data.drop(col, axis=1, inplace=True) return data # + def prepare_data(data): data = data.copy() data = hot_encoding(data, 'Title') data = hot_encoding(data, 'AgeGroup') data = hot_encoding(data, 'FareGroup') data = hot_encoding(data, 'Embarked') data = hot_encoding(data, 'Pclass') data.Sex = data.Sex.map({'male': 1,'female': 0}) add_family_size_group_encoding(data) # data['Cabin'] = data['Cabin'].fillna('N') # data['Cabin'] = data['Cabin'].astype(str) # data['Cabin'] = data['Cabin'].astype(str).str[0] # data.loc[data['Cabin'] == 'T', 'Cabin'] = 'N' # data['Cabin'] = data.Cabin.apply(lambda x: ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'N'].index(x)) # data = hot_encoding(data, 'Cabin') data = data.drop([ 'Name', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'FamilySize', 'Cabin' ], axis=1) return data data = prepare_data(data) data.columns # - data.info() # + X = data.drop('Survived', axis=1) y = data['Survived'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, stratify=y) model = RandomForestClassifier(random_state=0).fit(X_train, y_train) print("train score: ", model.score(X_train, y_train)) print("test score: ", model.score(X_test, y_test)) search = GridSearchCV(model, {'n_estimators': [ 10, 20, 25, 30, 50, 60, 80, 100], 'max_depth': [2, 4, 5, 6, 8, 10, 12, 15], 'criterion': ['gini','entropy'] }) search.fit(X, y) print(search.best_score_) 
print(search.best_estimator_) pd.DataFrame(search.cv_results_)[['rank_test_score', 'mean_test_score', 'params']].sort_values(by='rank_test_score').head(5) # - model = RandomForestClassifier(max_depth=4, random_state=0, n_estimators=70, criterion='gini') model.fit(X_train, y_train) print("train score: ", model.score(X_train, y_train)) print("test score: ", model.score(X_test, y_test)) # + test = pd.read_csv('data/titanic/test.csv', index_col=['PassengerId']) test = create_title(test) test = add_age_by_title(test) test = add_age_group(test) test = add_fare_group(test) test = prepare_data(test) test.columns test.info() # + predictions = model.predict(test) frame = pd.DataFrame({ 'PassengerId': pd.read_csv('data/titanic/test.csv').PassengerId, 'Survived': predictions }) frame = frame.set_index('PassengerId') frame.to_csv('data/titanic/predictions.csv') frame.head() # -
02-hw-titanic-exercise.ipynb
# --- # jupyter: # jupytext: # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp data.load # + #export from fastai2.torch_basics import * from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind _loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter) # - from nbdev.showdoc import * bs = 4 letters = list(string.ascii_lowercase) # ## DataLoader # + #export def _wif(worker_id): set_num_threads(1) info = get_worker_info() ds = info.dataset.d ds.nw,ds.offs = info.num_workers,info.id set_seed(info.seed) ds.wif() class _FakeLoader: _IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = ( None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count) def __init__(self, d, pin_memory, num_workers, timeout): self.dataset,self.default,self.worker_init_fn = self,d,_wif store_attr(self, 'd,pin_memory,num_workers,timeout') def __iter__(self): return iter(self.d.create_batches(self.d.sample())) @property def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0] @contextmanager def no_multiproc(self): old_nw = self.num_workers try: self.num_workers = 0 yield self.d finally: self.num_workers = old_nw _collate_types = (ndarray, Tensor, typing.Mapping, str) # - #export def fa_collate(t): b = t[0] return (default_collate(t) if isinstance(b, _collate_types) else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence) else default_collate(t)) # + #e.g. 
x is int, y is tuple t = [(1,(2,3)),(1,(2,3))] test_eq(fa_collate(t), default_collate(t)) test_eq(L(fa_collate(t)).map(type), [Tensor,tuple]) t = [(1,(2,(3,4))),(1,(2,(3,4)))] test_eq(fa_collate(t), default_collate(t)) test_eq(L(fa_collate(t)).map(type), [Tensor,tuple]) test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple]) # - #export def fa_convert(t): return (default_convert(t) if isinstance(t, _collate_types) else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence) else default_convert(t)) # + t0 = array([1,2]) t = [t0,(t0,t0)] test_eq(fa_convert(t), default_convert(t)) test_eq(L(fa_convert(t)).map(type), [Tensor,tuple]) # - #export class SkipItemException(Exception): pass #export @log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch') @funcs_kwargs class DataLoader(GetAttr): _noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split() for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x") _methods = _noop_methods + 'create_batches create_item create_batch retain \ get_idxs sample shuffle_fn do_batch create_batch'.split() _default = 'dataset' def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None, shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs): if batch_size is not None: bs = batch_size # PyTorch compatibility assert not (bs is None and drop_last) if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__') if n is None: try: n = len(dataset) except TypeError: pass store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device') self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0 self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout) def __len__(self): if self.n is None: raise TypeError if self.bs is None: return self.n return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 
1) def get_idxs(self): idxs = Inf.count if self.indexed else Inf.nones if self.n is not None: idxs = list(itertools.islice(idxs, self.n)) if self.shuffle: idxs = self.shuffle_fn(idxs) return idxs def sample(self): idxs = self.get_idxs() return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs) def __iter__(self): self.randomize() self.before_iter() for b in _loaders[self.fake_l.num_workers==0](self.fake_l): if self.device is not None: b = to_device(b, self.device) yield self.after_batch(b) self.after_iter() if hasattr(self, 'it'): delattr(self, 'it') def create_batches(self, samps): self.it = iter(self.dataset) if self.dataset is not None else None res = filter(lambda o:o is not None, map(self.do_item, samps)) yield from map(self.do_batch, self.chunkify(res)) def new(self, dataset=None, cls=None, **kwargs): if dataset is None: dataset = self.dataset if cls is None: cls = type(self) cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout, bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device) for n in self._methods: cur_kwargs[n] = getattr(self, n) return cls(**merge(cur_kwargs, kwargs)) @property def prebatched(self): return self.bs is None def do_item(self, s): try: return self.after_item(self.create_item(s)) except SkipItemException: return None def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last) def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs)) def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1)) def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b) def create_item(self, s): return next(self.it) if s is None else self.dataset[s] def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b) def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b) def to(self, device): self.device = 
device def one_batch(self): if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches') with self.fake_l.no_multiproc(): res = first(self) if hasattr(self, 'it'): delattr(self, 'it') return res # Override `item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream). # + class RandDL(DataLoader): def create_item(self, s): r = random.random() return r if r<0.95 else stop() L(RandDL()) # - L(RandDL(bs=4, drop_last=True)).map(len) dl = RandDL(bs=4, num_workers=4, drop_last=True) L(dl).map(len) test_eq(dl.fake_l.num_workers, 4) with dl.fake_l.no_multiproc(): test_eq(dl.fake_l.num_workers, 0) L(dl).map(len) test_eq(dl.fake_l.num_workers, 4) # + def _rand_item(s): r = random.random() return r if r<0.95 else stop() L(DataLoader(create_item=_rand_item)) # - # If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch. # + ds1 = DataLoader(letters) test_eq(L(ds1), letters) test_eq(len(ds1), 26) test_shuffled(L(DataLoader(letters, shuffle=True)), letters) ds1 = DataLoader(letters, indexed=False) test_eq(L(ds1), letters) test_eq(len(ds1), 26) t2 = L(tensor([0,1,2]),tensor([3,4,5])) ds2 = DataLoader(t2) test_eq_type(L(ds2), t2) t3 = L(array([0,1,2]),array([3,4,5])) ds3 = DataLoader(t3) test_eq_type(L(ds3), t3.map(tensor)) ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1)) test_eq_type(L(ds4), t3) test_eq(t3.f, 1) # - # If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch. 
def twoepochs(d):
    # Render two full passes over dataloader `d` as one space-separated string
    # of batches (each batch's items concatenated), to check batching & order.
    return ' '.join(''.join(list(o)) for _ in range(2) for o in d)

# +
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')

ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')

ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))

# NOTE(review): `t3` is defined in an earlier cell — cross-cell state.
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)

it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])

# +
class SleepyDL(list):
    # A list whose item access sleeps a random few milliseconds, to show the
    # benefit of overlapping workers.
    def __getitem__(self,i):
        time.sleep(random.random()/50)
        return super().__getitem__(i)

t = SleepyDL(letters)

# %time test_eq(DataLoader(t, num_workers=0), letters)
# %time test_eq(DataLoader(t, num_workers=2), letters)
# %time test_eq(DataLoader(t, num_workers=4), letters)

dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))

# +
class SleepyQueue():
    "Simulate a queue with varying latency"
    def __init__(self, q): self.q=q
    def __iter__(self):
        # Drain the queue with random delays; stop when it is empty.
        while True:
            time.sleep(random.random()/100)
            try: yield self.q.get_nowait()
            except queues.Empty: return

q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)

# %time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))

# +
# Batching must preserve tensor subclasses, with and without workers.
class A(TensorBase): pass

for nw in (0,2):
    t = A(tensor([1,2]))
    dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
    b = first(dl)
    test_eq(type(b), A)

    t = (A(tensor([1,2])),)
    dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
    b = first(dl)
    test_eq(type(b[0]), A)

# +
class A(TensorBase): pass
t = A(tensor(1,2))

tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)

# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
# -

# ## Export -

#hide
from nbdev.export import notebook2script
notebook2script()
nbs/02_data.load.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# NOTE(review): this notebook is Python 2 (print statements, `xrange`,
# `dict.viewitems`). Helpers such as `get_history_ordered_visits_corrected_for_user`,
# `exclude_bad_visits`, `url_to_domain`, `domain_to_id`, `id_to_domain`,
# `top_n_domains_by_visits`, `get_domain_id_to_productivity`, `sdir_exists`,
# `sdir_path`, `get_training_users`, `get_test_users` and `Counter`/`log`
# come from `tmilib`'s star import below.

vernum = 11
n_enabled = True

# +
# noexport
import os
os.system('export_notebook identify_domain_training_data_v' + str(vernum) + '_all_insession.ipynb')
# -

from tmilib import *
import csv

# +
import sys
num_prev_enabled = 2 # for version 78
#num_prev_enabled = int(sys.argv[1])
# Number of candidate labels kept: current domain (+ next domain when
# n_enabled) + the enabled "previous domain" slots.
if n_enabled:
    num_labels_enabled = 2 + num_prev_enabled # since we disabled the n label
else:
    num_labels_enabled = 1 + num_prev_enabled # since we disabled the n label
data_version = 4+8*(vernum-2) + num_prev_enabled
print 'num_prev_enabled', num_prev_enabled
print 'data_version', data_version
# -

# The 20 most-visited domains are encoded as single letters a..t; anything
# else becomes 'u' later on.
twenty_letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t"]
#domain_to_letter = {x:twenty_letters[i] for i,x in enumerate(top_domains)}
domain_id_to_letter = {domain_to_id(x):twenty_letters[i] for i,x in enumerate(top_n_domains_by_visits(20))}
#print domain_id_to_letter
#print domain_to_letter

# Productivity scores -2..2 are encoded as letters v..z, indexed by domain id.
productivity_letters = {-2: 'v', -1: 'w', 0: 'x', 1: 'y', 2: 'z'}
domain_id_to_productivity_letter = [productivity_letters[x] for x in get_domain_id_to_productivity()]
#print domain_id_to_productivity[:10]
#print domain_id_to_productivity_letter[:10]


# +
def get_row_names(include_domain=False, printing=False):
    """Return the CSV column names, in output order.

    include_domain -- prepend the time_sec/user/ref_domain debug columns.
    printing       -- emit the full 7-previous-domain layout (used only to
                      generate the `cached_locals` template below), instead of
                      the `num_prev_enabled` slots actually written.
    """
    if printing:
        loc_prev_enabled = 7
    else:
        loc_prev_enabled = num_prev_enabled
    output_row_names = [
      'label',
      'spanlen',
      'since_cur',
      'cur_domain_letter',
      #'cur_domain_productivity',
      'to_next',
      'next_domain_letter',
      #'next_domain_productivity',
      'n_eq_c',
      'nref_eq_c',
      'nref_eq_zero',
      'cref_eq_c',
      'cref_eq_n',
      'cref_eq_zero',
    ]
    for idx_p_zeroidx in range(loc_prev_enabled):
        sp = str(idx_p_zeroidx + 1)
        new_feature_names_for_p = [
          'since_prev' + sp,
          'prev' + sp +'_domain_letter',
          #'prev' + sp + '_domain_productivity',
          'n_eq_p' + sp,
          'nref_eq_p' + sp,
          'cref_eq_p' + sp,
          'visits_since_p' + sp,
        ]
        output_row_names.extend(new_feature_names_for_p)
    # only v8 and onwards
    output_row_names.extend([
      'switchto_in_session_cur',
      #'switchfrom_in_session_cur',
      'switchto_in_session_next',
      #'switchfrom_in_session_next',
    ])
    for idx_p_zeroidx in range(loc_prev_enabled):
        sp = str(idx_p_zeroidx + 1)
        new_feature_names_for_p = [
          'switchto_in_session_prev' + sp,
          #'switchfrom_in_session_prev' + sp,
        ]
        output_row_names.extend(new_feature_names_for_p)
    if include_domain:
        output_row_names.insert(0, 'time_sec')
        output_row_names.insert(1, 'user')
        output_row_names.insert(2, 'ref_domain')
    return tuple(output_row_names)

# Global column order; set for real by create_domainclass_data_for_users().
row_names = []
#row_names = get_row_names()
#print row_names
# -

# +
#print 'output.append([' + ', '.join([get_row_names(True, True)]) + '])'
# -

# Prints the `cached_locals = {...}` template pasted into get_rows_for_user().
print 'cached_locals = {' + ', '.join(['"' + x + '": ' + x for x in get_row_names(True, True)]) + '}'


def get_rows_for_user(user, include_domain=False):
    """Build one feature row per active in-session second for `user`.

    Each row describes the second relative to the surrounding history visits
    (current/next visit and up to 7 previously seen domains) and is labelled
    with which of those domains was actually active ('c', 'n', 'p1'..'p7').
    Seconds whose active domain matches none of the enabled candidates are
    skipped (or labelled 'u' when include_domain is set).

    Returns {'rows': [...], 'skipped_items': int, 'total_items': int}.
    """
    output = []
    #ordered_visits = get_history_ordered_visits_corrected_for_user(user)
    ordered_visits = get_history_ordered_visits_corrected_for_user(user)
    ordered_visits = exclude_bad_visits(ordered_visits)
    #active_domain_at_time = get_active_domain_at_time_for_user(user)
    active_seconds_set = set(get_active_insession_seconds_for_user(user))
    active_second_to_domain_id = {int(k):v for k,v in get_active_second_to_domain_id_for_user(user).viewitems()}
    # MRU list of the last 8 distinct domains (index 0 == current visit).
    prev_domain_ids = [-1]*8
    domain_id_to_most_recent_visit = {}
    domain_id_to_num_switchto = Counter()
    #domain_id_to_num_switchfrom = Counter()
    total_items = 0
    skipped_items = 0
    prev_visit_time = 0
    prev_visit_domain_id = -1
    visit_id_to_domain_id = {}
    domain_id_to_history_idx = Counter()
    for idx,visit in enumerate(ordered_visits):
        if idx+1 >= len(ordered_visits):
            break
        next_visit = ordered_visits[idx+1]
        referring_visit_id = int(next_visit['referringVisitId'])
        cur_referring_visit_id = int(visit['referringVisitId'])
        nref_eq_zero = 'T' if referring_visit_id == 0 else 'F'
        cref_eq_zero = 'T' if cur_referring_visit_id == 0 else 'F'
        # A gap of more than 20 minutes starts a new browsing session.
        new_session = False
        if visit['visitTime'] > prev_visit_time + 1000*60*20:
            new_session = True
        prev_visit_time = visit['visitTime']
        if new_session:
            #prev_visit_domain_id = -1
            #prev_domain_ids = [-1]*8
            #domain_id_to_most_recent_visit = {}
            domain_id_to_num_switchto = Counter()
            #domain_id_to_num_switchfrom = Counter()
        cur_domain = url_to_domain(visit['url'])
        cur_domain_id = domain_to_id(cur_domain)
        next_domain = url_to_domain(next_visit['url'])
        next_domain_id = domain_to_id(next_domain)
        domain_id_to_history_idx[cur_domain_id] = idx
        visit_id_to_domain_id[int(visit['visitId'])] = cur_domain_id
        # Domains of the visits that referred the next/current visit (-1 if
        # the referrer was not seen in this history).
        nref_domain_id = visit_id_to_domain_id.get(referring_visit_id, -1)
        cref_domain_id = visit_id_to_domain_id.get(cur_referring_visit_id, -1)
        if cur_domain_id != prev_visit_domain_id:
            domain_id_to_num_switchto[cur_domain_id] += 1
        prev_visit_domain_id = cur_domain_id
        cur_time_sec = int(round(visit['visitTime'] / 1000.0))
        next_time_sec = int(round(next_visit['visitTime'] / 1000.0))
        domain_id_to_most_recent_visit[cur_domain_id] = cur_time_sec
        # Move the current domain to the front of the MRU list.
        if prev_domain_ids[0] != cur_domain_id:
            #prev_domain_ids = ([cur_domain_id] + [x for x in prev_domain_ids if x != cur_domain_id])[:4]
            if cur_domain_id in prev_domain_ids:
                prev_domain_ids.remove(cur_domain_id)
            prev_domain_ids.insert(0, cur_domain_id)
            while len(prev_domain_ids) > 8:
                prev_domain_ids.pop()
        # prev_domain_ids includes the current one
        if cur_time_sec > next_time_sec:
            continue
        prev1_domain_id = prev_domain_ids[1]
        prev2_domain_id = prev_domain_ids[2]
        prev3_domain_id = prev_domain_ids[3]
        prev4_domain_id = prev_domain_ids[4]
        prev5_domain_id = prev_domain_ids[5]
        prev6_domain_id = prev_domain_ids[6]
        prev7_domain_id = prev_domain_ids[7]
        # Equality indicator features ('T'/'F') between next/current/referrer
        # domains and the MRU slots.
        n_eq_c = 'T' if (next_domain_id == cur_domain_id) else 'F'
        n_eq_p1 = 'T' if (next_domain_id == prev1_domain_id) else 'F'
        n_eq_p2 = 'T' if (next_domain_id == prev2_domain_id) else 'F'
        n_eq_p3 = 'T' if (next_domain_id == prev3_domain_id) else 'F'
        n_eq_p4 = 'T' if (next_domain_id == prev4_domain_id) else 'F'
        n_eq_p5 = 'T' if (next_domain_id == prev5_domain_id) else 'F'
        n_eq_p6 = 'T' if (next_domain_id == prev6_domain_id) else 'F'
        n_eq_p7 = 'T' if (next_domain_id == prev7_domain_id) else 'F'
        nref_eq_c = 'T' if nref_domain_id == cur_domain_id else 'F'
        nref_eq_p1 = 'T' if nref_domain_id == prev1_domain_id else 'F'
        nref_eq_p2 = 'T' if nref_domain_id == prev2_domain_id else 'F'
        nref_eq_p3 = 'T' if nref_domain_id == prev3_domain_id else 'F'
        nref_eq_p4 = 'T' if nref_domain_id == prev4_domain_id else 'F'
        nref_eq_p5 = 'T' if nref_domain_id == prev5_domain_id else 'F'
        nref_eq_p6 = 'T' if nref_domain_id == prev6_domain_id else 'F'
        nref_eq_p7 = 'T' if nref_domain_id == prev7_domain_id else 'F'
        cref_eq_n = 'T' if cref_domain_id == next_domain_id else 'F'
        cref_eq_c = 'T' if cref_domain_id == cur_domain_id else 'F'
        cref_eq_p1 = 'T' if cref_domain_id == prev1_domain_id else 'F'
        cref_eq_p2 = 'T' if cref_domain_id == prev2_domain_id else 'F'
        cref_eq_p3 = 'T' if cref_domain_id == prev3_domain_id else 'F'
        cref_eq_p4 = 'T' if cref_domain_id == prev4_domain_id else 'F'
        cref_eq_p5 = 'T' if cref_domain_id == prev5_domain_id else 'F'
        cref_eq_p6 = 'T' if cref_domain_id == prev6_domain_id else 'F'
        cref_eq_p7 = 'T' if cref_domain_id == prev7_domain_id else 'F'
        # How many history entries ago each MRU domain was last visited.
        visits_since_p1 = idx - domain_id_to_history_idx.get(prev1_domain_id, 0)
        visits_since_p2 = idx - domain_id_to_history_idx.get(prev2_domain_id, 0)
        visits_since_p3 = idx - domain_id_to_history_idx.get(prev3_domain_id, 0)
        visits_since_p4 = idx - domain_id_to_history_idx.get(prev4_domain_id, 0)
        visits_since_p5 = idx - domain_id_to_history_idx.get(prev5_domain_id, 0)
        visits_since_p6 = idx - domain_id_to_history_idx.get(prev6_domain_id, 0)
        visits_since_p7 = idx - domain_id_to_history_idx.get(prev7_domain_id, 0)
        # One candidate row per active second between this visit and the next.
        for time_sec in xrange(cur_time_sec, next_time_sec):
            if time_sec not in active_seconds_set:
                continue
            ref_domain_id = active_second_to_domain_id.get(time_sec, -1)
            if ref_domain_id == -1:
                ref_domain = 'none'
            else:
                ref_domain = id_to_domain(ref_domain_id)
            total_items += 1
            label = None
            # Candidate (domain_id, label) pairs, truncated to the enabled
            # count; first match against the truly active domain wins.
            if n_enabled:
                available_labels = (
                  (cur_domain_id, 'c'),
                  (next_domain_id, 'n'),
                  (prev1_domain_id, 'p1'),
                  (prev2_domain_id, 'p2'),
                  (prev3_domain_id, 'p3'),
                  (prev4_domain_id, 'p4'),
                  (prev5_domain_id, 'p5'),
                  (prev6_domain_id, 'p6'),
                  (prev7_domain_id, 'p7'),
                )[:num_labels_enabled]
            else:
                available_labels = (
                  (cur_domain_id, 'c'),
                  # (next_domain_id, 'n'),
                  (prev1_domain_id, 'p1'),
                  (prev2_domain_id, 'p2'),
                  (prev3_domain_id, 'p3'),
                  (prev4_domain_id, 'p4'),
                  (prev5_domain_id, 'p5'),
                  (prev6_domain_id, 'p6'),
                  (prev7_domain_id, 'p7'),
                )[:num_labels_enabled]
            # c p n p q r s t
            for label_value,label_name in available_labels:
                if ref_domain_id == label_value:
                    label = label_name
                    break
            if label == None:
                if include_domain:
                    label = 'u'
                else:
                    skipped_items += 1
                    continue
            next_domain_letter = domain_id_to_letter.get(next_domain_id, 'u')
            cur_domain_letter = domain_id_to_letter.get(cur_domain_id, 'u')
            prev1_domain_letter = domain_id_to_letter.get(prev1_domain_id, 'u')
            prev2_domain_letter = domain_id_to_letter.get(prev2_domain_id, 'u')
            prev3_domain_letter = domain_id_to_letter.get(prev3_domain_id, 'u')
            prev4_domain_letter = domain_id_to_letter.get(prev4_domain_id, 'u')
            prev5_domain_letter = domain_id_to_letter.get(prev5_domain_id, 'u')
            prev6_domain_letter = domain_id_to_letter.get(prev6_domain_id, 'u')
            prev7_domain_letter = domain_id_to_letter.get(prev7_domain_id, 'u')
            # NOTE(review): the productivity letters below are computed but
            # never written out (the columns are commented out in
            # get_row_names) — dead work kept for fidelity.
            next_domain_productivity = domain_id_to_productivity_letter[next_domain_id]
            cur_domain_productivity = domain_id_to_productivity_letter[cur_domain_id]
            prev1_domain_productivity = domain_id_to_productivity_letter[prev1_domain_id]
            prev2_domain_productivity = domain_id_to_productivity_letter[prev2_domain_id]
            prev3_domain_productivity = domain_id_to_productivity_letter[prev3_domain_id]
            prev4_domain_productivity = domain_id_to_productivity_letter[prev4_domain_id]
            prev5_domain_productivity = domain_id_to_productivity_letter[prev5_domain_id]
            prev6_domain_productivity = domain_id_to_productivity_letter[prev6_domain_id]
            prev7_domain_productivity = domain_id_to_productivity_letter[prev7_domain_id]
            since_cur = time_sec - cur_time_sec
            to_next = next_time_sec - time_sec
            spanlen = since_cur + to_next
            prev1_domain_last_visit = domain_id_to_most_recent_visit.get(prev1_domain_id, 0)
            prev2_domain_last_visit = domain_id_to_most_recent_visit.get(prev2_domain_id, 0)
            prev3_domain_last_visit = domain_id_to_most_recent_visit.get(prev3_domain_id, 0)
            # NOTE(review): duplicate assignment (copy-paste) — harmless.
            prev3_domain_last_visit = domain_id_to_most_recent_visit.get(prev3_domain_id, 0)
            prev4_domain_last_visit = domain_id_to_most_recent_visit.get(prev4_domain_id, 0)
            prev5_domain_last_visit = domain_id_to_most_recent_visit.get(prev5_domain_id, 0)
            prev6_domain_last_visit = domain_id_to_most_recent_visit.get(prev6_domain_id, 0)
            prev7_domain_last_visit = domain_id_to_most_recent_visit.get(prev7_domain_id, 0)
            since_prev1 = time_sec - prev1_domain_last_visit
            since_prev2 = time_sec - prev2_domain_last_visit
            since_prev3 = time_sec - prev3_domain_last_visit
            since_prev4 = time_sec - prev4_domain_last_visit
            since_prev5 = time_sec - prev5_domain_last_visit
            since_prev6 = time_sec - prev6_domain_last_visit
            since_prev7 = time_sec - prev7_domain_last_visit
            # Clamp zeros so the log-transform below stays finite.
            if since_cur == 0:
                since_cur = 0.0001
            if to_next == 0:
                to_next = 0.0001
            if spanlen == 0:
                spanlen = 0.0001
            if since_prev1 == 0:
                since_prev1 = 0.0001
            if since_prev2 == 0:
                since_prev2 = 0.0001
            if since_prev3 == 0:
                since_prev3 = 0.0001
            if since_prev4 == 0:
                since_prev4 = 0.0001
            if since_prev5 == 0:
                since_prev5 = 0.0001
            if since_prev6 == 0:
                since_prev6 = 0.0001
            if since_prev7 == 0:
                since_prev7 = 0.0001
            since_cur = log(since_cur)
            to_next = log(to_next)
            spanlen = log(spanlen)
            since_prev1 = log(since_prev1)
            since_prev2 = log(since_prev2)
            since_prev3 = log(since_prev3)
            since_prev4 = log(since_prev4)
            since_prev5 = log(since_prev5)
            since_prev6 = log(since_prev6)
            since_prev7 = log(since_prev7)
            switchto_in_session_cur = domain_id_to_num_switchto[cur_domain_id]
            switchto_in_session_next = domain_id_to_num_switchto[next_domain_id]
            switchto_in_session_prev1 = domain_id_to_num_switchto[prev1_domain_id]
            switchto_in_session_prev2 = domain_id_to_num_switchto[prev2_domain_id]
            switchto_in_session_prev3 = domain_id_to_num_switchto[prev3_domain_id]
            switchto_in_session_prev4 = domain_id_to_num_switchto[prev4_domain_id]
            switchto_in_session_prev5 = domain_id_to_num_switchto[prev5_domain_id]
            switchto_in_session_prev6 = domain_id_to_num_switchto[prev6_domain_id]
            switchto_in_session_prev7 = domain_id_to_num_switchto[prev7_domain_id]
            # Template generated by the `print 'cached_locals = ...'` cell;
            # the global `row_names` selects and orders the emitted columns.
            cached_locals = {"time_sec": time_sec, "user": user, "ref_domain": ref_domain, "label": label,
              "spanlen": spanlen, "since_cur": since_cur, "cur_domain_letter": cur_domain_letter,
              "to_next": to_next, "next_domain_letter": next_domain_letter, "n_eq_c": n_eq_c,
              "nref_eq_c": nref_eq_c, "nref_eq_zero": nref_eq_zero, "cref_eq_c": cref_eq_c,
              "cref_eq_n": cref_eq_n, "cref_eq_zero": cref_eq_zero,
              "since_prev1": since_prev1, "prev1_domain_letter": prev1_domain_letter, "n_eq_p1": n_eq_p1,
              "nref_eq_p1": nref_eq_p1, "cref_eq_p1": cref_eq_p1, "visits_since_p1": visits_since_p1,
              "since_prev2": since_prev2, "prev2_domain_letter": prev2_domain_letter, "n_eq_p2": n_eq_p2,
              "nref_eq_p2": nref_eq_p2, "cref_eq_p2": cref_eq_p2, "visits_since_p2": visits_since_p2,
              "since_prev3": since_prev3, "prev3_domain_letter": prev3_domain_letter, "n_eq_p3": n_eq_p3,
              "nref_eq_p3": nref_eq_p3, "cref_eq_p3": cref_eq_p3, "visits_since_p3": visits_since_p3,
              "since_prev4": since_prev4, "prev4_domain_letter": prev4_domain_letter, "n_eq_p4": n_eq_p4,
              "nref_eq_p4": nref_eq_p4, "cref_eq_p4": cref_eq_p4, "visits_since_p4": visits_since_p4,
              "since_prev5": since_prev5, "prev5_domain_letter": prev5_domain_letter, "n_eq_p5": n_eq_p5,
              "nref_eq_p5": nref_eq_p5, "cref_eq_p5": cref_eq_p5, "visits_since_p5": visits_since_p5,
              "since_prev6": since_prev6, "prev6_domain_letter": prev6_domain_letter, "n_eq_p6": n_eq_p6,
              "nref_eq_p6": nref_eq_p6, "cref_eq_p6": cref_eq_p6, "visits_since_p6": visits_since_p6,
              "since_prev7": since_prev7, "prev7_domain_letter": prev7_domain_letter, "n_eq_p7": n_eq_p7,
              "nref_eq_p7": nref_eq_p7, "cref_eq_p7": cref_eq_p7, "visits_since_p7": visits_since_p7,
              "switchto_in_session_cur": switchto_in_session_cur,
              "switchto_in_session_next": switchto_in_session_next,
              "switchto_in_session_prev1": switchto_in_session_prev1,
              "switchto_in_session_prev2": switchto_in_session_prev2,
              "switchto_in_session_prev3": switchto_in_session_prev3,
              "switchto_in_session_prev4": switchto_in_session_prev4,
              "switchto_in_session_prev5": switchto_in_session_prev5,
              "switchto_in_session_prev6": switchto_in_session_prev6,
              "switchto_in_session_prev7": switchto_in_session_prev7}
            output.append([cached_locals[row_name] for row_name in row_names])
    #print 'user', user, 'guaranteed error', float(skipped_items)/total_items, 'skipped', skipped_items, 'total', total_items
    return {
      'rows': output,
      'skipped_items': skipped_items,
      'total_items': total_items,
    }


# +
def create_domainclass_data_for_users(users, filename, include_domain=False):
    """Write one CSV of feature rows for all `users` (skips existing files).

    Sets the module-global `row_names` so get_rows_for_user() emits columns in
    the right order.
    """
    if sdir_exists(filename):
        print 'already exists', filename
        return
    outfile = csv.writer(open(sdir_path(filename), 'w'))
    global row_names
    row_names = get_row_names(include_domain)
    outfile.writerow(row_names)
    total_items = 0
    skipped_items = 0
    for user in users:
        data = get_rows_for_user(user, include_domain)
        total_items += data['total_items']
        # NOTE(review): this guard tests the *cumulative* total, so a later
        # user with zero items still reaches the division below and would
        # raise ZeroDivisionError — presumably intended to test
        # data['total_items']; confirm before relying on it.
        if total_items == 0:
            print user, 'no items'
            continue
        skipped_items += data['skipped_items']
        print user, 'skipped', float(data['skipped_items'])/data['total_items'], 'skipped', data['skipped_items'], 'total', data['total_items']
        outfile.writerows(data['rows'])
    print 'guaranteed error', float(skipped_items) / total_items, 'skipped', skipped_items, 'total', total_items
# -

create_domainclass_data_for_users(get_training_users(), 'domainclass_cpn_train_v' + str(data_version) +'_all_insession.csv')

create_domainclass_data_for_users(get_test_users(), 'domainclass_cpn_test_v' + str(data_version) + '_all_insession.csv')

create_domainclass_data_for_users(get_test_users(), 'domainclass_cpn_test_all_withdomain_v' + str(data_version) + '_all_insession.csv', True)
identify_domain_training_data_v11_all_insession.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pytorch # language: python # name: pytorch # --- # # Linear Regression # import packages # %matplotlib widget import torch import numpy as np import matplotlib.pyplot as plt # Hyper-parameters input_size = 1 output_size = 1 num_epochs = 60 learning_rate = 0.001 # Dataset and Pretreat X_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168], [9.779], [6.182], [7.59], [2.167], [7.042], [10.791], [5.313], [7.997], [3.1]], dtype=np.float32) y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573], [3.366], [2.596], [2.53], [1.221], [2.827], [3.465], [1.65], [2.904], [1.3]], dtype=np.float32) # Linear regression model model = torch.nn.Linear(input_size, output_size) # Loss and optimizer criterion = torch.nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) # Train the model for epoch in range(num_epochs): # Convert numpy arrays to torch tensors. inputs = torch.from_numpy(X_train) targets = torch.from_numpy(y_train) # Forward pass outputs = model(inputs) loss = criterion(outputs, targets) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() if (epoch+1) % 5 == 0: print ('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item())) # Plot the graph predicted = model(torch.from_numpy(X_train)).detach().numpy() plt.plot(X_train, y_train, 'ro', label='Original data') plt.plot(X_train, predicted, label='Fitted line') plt.legend() plt.show() # Save the mdoel checkpoint. torch.save(model.state_dict(), 'linear_regression.ckpt')
image_classification/linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Selection (conditionals)

# ## Booleans, numbers and expressions
# ![](../Photo/33.png)
# - Note: the comparison for equality is TWO equals signs; a single one is assignment
# - In Python the integer 0 can stand for False, any other number for True
# - The use of `is` inside conditions is covered later

1 > 2

1 < 2

1<=2<=3

bool(-1)

a = id(1)
b = id(1)
# because a and b are not the same object
print(a,b)
a is b

# ## String comparison uses ASCII values
a = "ac"
b = "ad"
a < b

# ## Markdown
# - https://github.com/younghz/Markdown
#
# ## EP:
# - <img src="../Photo/34.png"></img>
# - Read a number and decide whether it is odd or even

b1 = bool(4)
print(b1)
j = int(False)
print(j)

# ## Generating random numbers
# - random.randint(a,b) produces a random integer between a and b, inclusive on both ends

# Draw a random number and let the user guess; report "too big"/"too small"
# until the guess matches.
# NOTE(review): `random` must already be imported by an earlier cell for the
# first line to work — the `import random` inside the loop comes too late.
num = random.randint(0,5)
for i in range(3):
    import random
    a = eval(input("zs"))
    if num > a:
        print("太小了")
    if num < a:
        print("太大了")
    if num == a:
        print("ok")
        break

# ## Other random methods
# - random.random returns a random float in the half-open interval [0.0, 1.0)
# - random.randrange(a,b): half-open interval [a, b)

random.random()

# ## EP:
# - Generate two random integers number1 and number2, show them, read the user's
#   sum and check whether it is correct
# - Extra: write a random roll-call program

# NOTE(review): "flase" is a typo for "false", but it is a runtime string and
# is therefore kept as-is. The `while 1` loop never terminates.
import random
num_1 = random.randrange(1,5)
num_2 = random.randrange(1,5)
print(num_1,num_2)
while 1:
    sum_ = eval(input(">>"))
    if sum_ == (num_1 + num_2):
        print("true")
    else:
        print("flase")

# ## The if statement
# - A one-way if runs its body only when the condition is true
# - Python has several selection statements:
# > - one-way if
# - two-way if-else
# - nested if
# - multi-way if-elif-else
#
# - Note: a statement with sub-statements must be indented at least one level
# - Never mix tabs and spaces — use only tabs or only spaces
# - Output that must appear whether or not the if is true belongs at the same
#   indentation level as the if

# ## EP:
# - Read a number and decide whether it is odd or even
# - Extra: see case study 4.5, guessing birthdays

# ## Two-way if-else
# - When the condition is true run the if body, otherwise the else body

age = input("年轻嘛[y/n]")
if age == "y":
    handsome = input("帅否[y/n]")
    if handsome == "y":
        wife = input("有没有老婆[y/n]")
        if wife =="y":
            print("回家的诱惑")
        else:
            print("结婚")
    else:
        print("考虑一下")
else:
    print("拜拜")

# ## EP:
# - Generate two random integers, show them, read the user's guess and print
#   "you're correct" when right, otherwise report the error

# ## Nested if and multi-way if-elif-else
# ![](../Photo/35.png)

# ## EP:
# - Ask the user for a year and print the zodiac animal of that year
# ![](../Photo/36.png)
# - A body-mass-index program
# - BMI = weight in kilograms divided by height in metres squared
# ![](../Photo/37.png)

# NOTE(review): "hou" is mapped for both remainder 0 and 5 — presumably one of
# them should be a different animal; confirm against Photo/36.
years = int(input("请输入年份:"))
if years%12 == 0:
    years = "hou"
elif years % 12 ==1:
    years = "ji"
elif years % 12 ==2:
    years = "gou"
elif years % 12 ==3:
    years = "zhu"
elif years % 12 ==4:
    years = "shu"
elif years % 12 ==5:
    years = "hou"
elif years % 12 ==6:
    years = "niu"
elif years % 12 ==7:
    years = "hu"
elif years % 12 ==8:
    years = "tu"
elif years % 12 ==9:
    years = "long"
elif years % 12 ==10:
    years = "se"
else:
    years = "ma"
print(years)

high = float(input("身高m:"))
weight = float(input("体重kg:"))
BMI = weight / high **2
if BMI<18.5:
    print("超轻")
elif 18.5<=BMI<25.0:
    print("标准")
elif 25.0<=BMI<30.0:
    print("超重")
else:
    print("肥胖")

# ## Logical operators
# ![](../Photo/38.png)
# ![](../Photo/39.png)
# ![](../Photo/40.png)

# ## EP:
# - Leap year: a year divisible by 4 but not by 100, or divisible by 400
# - Ask the user for a year and report whether it is a leap year
# - Ask the user for a number and decide whether it is a narcissistic number

year = eval(input("year"))
if (year%4 == 0 and year%100 != 0) or (year%400 == 0):
    print("闰年")
else:
    print("不是")

num = eval(input(">>"))
bai = num // 100
shi = num // 10 % 10
ge = num % 10
if bai**3 + shi **3 + ge **3 == num:
    print("水仙花")
else:
    print("no")

# ## Case study: the lottery
# ![](../Photo/41.png)

# NOTE(review): `num == n` compares an int with the input string, so it is
# always False and the 10000 prize can never be won; presumably `int(n)` was
# intended.
import random
num = random.randint(10,99)
print(num)
n = input(">>")
num_shi = num //10
num_ge = num % 10
if n[0] == "0":
    n_shi = 0
else:
    n_shi = int(n) // 10
n_ge = int(n) % 10
if num == n:
    print("10000")
elif num_shi + num_ge == n_shi + n_ge:
    print("3000")
elif (num_ge == n_ge or num_ge == n_shi) or (num_shi == n_ge or num_shi == n_shi):
    print("1000")

# # Homework
# - 1
# ![](../Photo/42.png)

# NOTE(review): when l1 == 0 this prints r1, which is only defined in the
# l1 > 0 branch (NameError); the message string "on real roots" (sic) is a
# runtime string and kept unchanged.
import math
a = eval(input("a"))
b = eval(input("b"))
c = eval(input("c"))
l1 = b**2 - 4*a*c
if l1 >0:
    l2 = math.sqrt(l1)
    r1 = (-b + l2 )/(2*a)
    r2 = (-b - l2 )/(2*a)
    print(r1,r2)
if l1 == 0:
    print(r1)
else:
    print("the equation has on real roots")

# - 2
# ![](../Photo/43.png)

import random
num_1 = int(random.randrange(0,100))
num_2 = int(random.randrange(0,100))
print(num_1,num_2)
while 1:
    sum_ = eval(input(">>"))
    if sum_ == (num_1 + num_2):
        print("true")
    else:
        print("flase")

# - 3
# ![](../Photo/44.png)

# NOTE(review): all seven branches compute the same expression; a single
# `x = today%7+day` would do.
day = eval(input('0-6的数字:'))
today = eval(input('几天后:'))
if day==0:
    x = today%7+day
    print(today,'天后是星期',x)
if day==1:
    x = today%7+day
    print(today,'天后是星期',x)
if day==2:
    x = today%7+day
    print(today,'天后是星期',x)
if day==3:
    x = today%7+day
    print(today,'天后是星期',x)
if day==4:
    x = today%7+day
    print(today,'天后是星期',x)
if day==5:
    x = today%7+day
    print(today,'天后是星期',x)
if day==6:
    x = today%7+day
    print(today,'天后是星期',x)

# - 4
# ![](../Photo/45.png)

# NOTE(review): `list` shadows the builtin of the same name.
a = int(input("a:"))
b = int(input("b:"))
c = int(input("c:"))
list = [a,b,c]
list.sort()
print(list)

x = int(input("x"))
y = int(input("y"))
z = int(input("z"))
if x>y:
    x,y = y,x
if x>z:
    x,z = z,x
if y>z:
    y,z = z,y
print(x,y,z)

# - 5
# ![](../Photo/46.png)

# Compare unit prices of two bags of rice.
rice_1_price,rice_1_package = eval(input("price,package"))
rice_2_price,rice_2_package = eval(input("price,package"))
if (rice_1_price/rice_1_package) < (rice_2_price / rice_2_package):
    print("第二个好")
elif (rice_1_price / rice_1_package) == (rice_2_price / rice_2_package):
    print("一样个好")
elif (rice_1_price / rice_1_package) > (rice_2_price / rice_2_package):
    print("第一个好")

# - 6
# ![](../Photo/47.png)

# Days in a month, accounting for leap years.
year = eval(input('年份:'))
month = eval(input('月份:'))
if (year % 4 == 0 and year % 100 != 0) or (year % 400 ==0) :
    if (month == 2):
        print("29天")
    if (month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):
        print("31天")
    if (month == 4 or month == 6 or month == 9 or month == 11):
        print("30天")
else:
    if (month == 2):
        print("28天")
    if (month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):
        print("31天")
    if (month == 4 or month == 6 or month == 9 or month == 11):
        print("30天")

# - 7
# ![](../Photo/48.png)

import random
x = random.randint(0,1)
y = eval(input('正面为1,反面为0 输入猜测数字'))
if y == x:
    print('猜测正确')
else :
    print('猜测错误')

# - 8
# ![](../Photo/49.png)

# NOTE(review): plain magnitude comparison does not implement
# rock-paper-scissors correctly (e.g. scissors 0 loses to paper 2 here).
import random
computer = random.randint(0,2)
mine = eval(input('剪刀为0,石头为1,布为2,剪刀-石头-布:'))
if computer>mine:
    print('computer won')
if computer<mine:
    print('mine won')
if computer==mine:
    print('it is a draw')

# - 9
# ![](../Photo/50.png)

# Zeller's congruence.
# NOTE(review): the `/` divisions are float division in Python 3, so `h` is a
# float and none of the `h == 0..6` comparisons can be relied on; Zeller's
# formula needs integer division (`//`).
year = eval(input('输入年份:'))
m = eval(input('输入月份:'))
q = eval(input('输入天数:'))
if m>=3:
    j = year/100
    k = year%100
    h = (q+((26*(m+1))/10)+k+(k/4)+(j/4)+(5*j))%7
    if h == 0:
        print('六')
    if h == 1:
        print('日')
    if h == 2:
        print('一')
    if h == 3:
        print('二')
    if h == 4:
        print('三')
    if h == 5:
        print('四')
    if h == 6:
        print('五')
    print('day of the ',h)
if m<3:
    m1=13
    j = (year-1)/100
    k = (year-1)%100
    h1 = q+(26*(m1+1))/10
    h2 = k+k/4+j/4+5*j
    h = (h1+h2)%7
    if h == 0:
        print('六')
    if h == 1:
        print('日')
    if h == 2:
        print('一')
    if h == 3:
        print('二')
    if h == 4:
        print('三')
    if h == 5:
        print('四')
    if h == 6:
        print('五')
    print('day of the ',h)

# - 10
# ![](../Photo/51.png)

# +
# Draw a card 1..52: suit from y//13, rank from y%13.
y = random.randint(1,52)
if y%13==0:
    if y//13==0:
        print('梅花')
    if y//13==1:
        print('红桃')
    if y//13==2:
        print('方块')
    if y//13==3:
        print('黑桃')
    print('Ace')
if y%13==10:
    if y//13==0:
        print('梅花')
    if y//13==1:
        print('红桃')
    if y//13==2:
        print('方块')
    if y//13==3:
        print('黑桃')
    print('Jack')
if y%13==11:
    if y//13==0:
        print('梅花')
    if y//13==1:
        print('红桃')
    if y//13==2:
        print('方块')
    if y//13==3:
        print('黑桃')
    print('Queen')
if y%13==12:
    if y//13==0:
        print('梅花')
    if y//13==1:
        print('红桃')
    if y//13==2:
        print('方块')
    if y//13==3:
        print('黑桃')
    print('King')
elif 1<=y%13<=10:
    if y//13==0:
        print('梅花')
    if y//13==1:
        print('红桃')
    if y//13==2:
        print('方块')
    if y//13==3:
        print('黑桃')
    print(y%13)
# -

# - 11
# ![](../Photo/52.png)

# Palindrome test for a three-digit number (first digit == last digit).
x = eval(input('输入一个三位整数'))
if x%10==x//100:
    print('是回文数')
else:
    print('不是回文数')

x = eval(input('输入一个三位整数'))
if x%10==x//100:
    print('是回文数')
else:
    print('不是回文数')

# - 12
# ![](../Photo/53.png)

# Triangle inequality check; print the perimeter when the sides are valid.
a,b,c = eval(input('输入边长'))
if (a+b>c) and (a+c>b) and (b+c>a):
    print(a+b+c)
else:
    print('输入的边长不符合')

a,b,c = eval(input('输入边长'))
if (a+b>c) and (a+c>b) and (b+c>a):
    print(a+b+c)
else:
    print('输入的边长不符合')
7.18.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import Oger import numpy as np from sklearn.datasets import make_moons import matplotlib.pyplot as plt import mdp resnode = Oger.nodes.ReservoirNode(output_dim = 100) readoutnode = Oger.nodes.RidgeRegressionNode() flow = resnode + readoutnode x,y = Oger.datasets.narma30() data = [None, zip(x[0:-1],y[0:-1])] flow.train(data) # + plt.plot(flow(x[-1])) plt.plot(y[-1]) plt.show() # - gridsearch_parameters = {resnode:{'input_scaling': mdp.numx.arange(0.1, 0.5, 0.1)}} opt = Oger.evaluation.Optimizer(gridsearch_parameters, Oger.utils.nrmse) opt.grid_search(data, flow, cross_validate_function=Oger.evaluation.n_fold_random, n_folds=5)
code/Jupyter/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mella30/Deep-Learning-with-Tensorflow-2/blob/main/Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week2_Maximum_likelihood_estimation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9mz7IKRcLo0u" # # Maximum likelihood estimation: how neural networks learn # + [markdown] id="gk7pJf4eLo0u" # This reading is a review of maximum likelihood estimation (MLE), an important learning principle used in neural network training. # + id="ISo00HIVMgST" from IPython.display import Image # + [markdown] id="CSLjo71MLo0v" # ## Introduction # # Why are neural networks trained the way they are? For example, why do you use a mean squared error loss function for a regression task, but a sparse categorical crossentropy loss for classification? The answer lies in the *likelihood* function, with a long history in statistics. In this reading, we'll look at what this function is and how it leads to the loss functions used to train deep learning models. # # Since you're taking a course in Tensorflow Probability, I'll assume you already have some understanding of probability distributions, both discrete and continous. If you don't, there are countless resources to help you understand them. I find the [Wikipedia page](https://en.wikipedia.org/wiki/Probability_distribution) works well for an intuitive introduction. For a more solid mathematical description, see an introductory statistics course. 
# + [markdown] id="Ht452JMnLo0w" # ## Probability mass and probability density functions # # Every probability distribution has either a probability mass function (if the distribution is discrete) or a probability density function (if the distribution is continuous). This function roughly indicates the probability of a sample taking a particular value. We will denote this function $P(y | \theta)$ where $y$ is the value of the sample and $\theta$ is the parameter describing the probability distribution. Written out mathematically, we have: # # $$ # P(y | \theta) = \text{Prob} (\text{sampling value $y$ from a distribution with parameter $\theta$}). # $$ # # When more than one sample is drawn *independently* from the same distribution (which we usually assume), the probability mass/density function of the sample values $y_1, \ldots, y_n$ is the product of the probability mass/density functions for each individual $y_i$. Written formally: # # $$ # P(y_1, \ldots, y_n | \theta) = \prod_{i=1}^n P(y_i | \theta). # $$ # # This all sounds more complicated than it is: see the examples below for a more concrete illustration. # + [markdown] id="XW7id2VOLo0w" # ## The likelihood function # # Probability mass/density functions are usually considered functions of $y_1, \ldots, y_n$, with the parameter $\theta$ considered fixed. They are used when you know the parameter $\theta$ and want to know the probability of a sample taking some values $y_1, \ldots, y_n$. You use this function in *probability*, where you know the distribution and want to make deductions about possible values sampled from it. # # The *likelihood* function is the same, but with the $y_1, \ldots, y_n$ considered fixed and with $\theta$ considered the independent variable. You usually use this function when you know the sample values $y_1, \ldots, y_n$ (because you've observed them by collecting data), but don't know the parameter $\theta$. 
You use this function in *statistics*, where you know the data and want to make inferences about the distribution they came from.
#
# This is an important point, so I'll repeat it: $P(y_1, \ldots, y_n | \theta)$ is called the *probability mass/density function* when considered as a function of $y_1, \ldots, y_n$ with $\theta$ fixed. It's called the *likelihood* when considered as a function of $\theta$ with $y_1, \ldots, y_n$ fixed. For the likelihood, the convention is to use the letter $L$, so that
#
# $$
# \underbrace{L(y_1, \ldots, y_n | \theta)}_{\text{ likelihood,} \\ \text{function of $\theta$}} = \underbrace{P(y_1, \ldots, y_n | \theta)}_{\text{probability mass/density,} \\ \text{ function of $y_1, \ldots, y_n$}}
# $$
#
# Let's see some examples of this below.

# + [markdown] id="jqOUi5t_Lo0x"
# #### Bernoulli distribution
#
# We'll start by looking at the [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution) with parameter $\theta$. It's the distribution of a random variable that takes value 1 with probability $\theta$ and 0 with probability $1-\theta$. Let $P(y | \theta)$ be the probability that the event returns value $y$ given parameter $\theta$. Then
#
# $$
# \begin{align}
# L(y | \theta) = P(y | \theta) &= \begin{cases}
# 1 - \theta \quad \text{if} \, y = 0 \\
# \theta \quad \quad \, \, \, \text{if} \, y = 1 \\
# \end{cases} \\
# &= (1 - \theta)^{1 - y} \theta^y \quad y \in \{0, 1\}
# \end{align}
# $$
#
# If we assume samples are independent, we also have
# $$
# L(y_1, \ldots, y_n | \theta) = \prod_{i=1}^n (1 - \theta)^{1 - y_i} \theta^{y_i}.
# $$
#
# For example, the probability of observing $0, 0, 0, 1, 0$ is
#
# $$
# L(0, 0, 0, 1, 0 | \theta) = (1 - \theta)(1 - \theta)(1 - \theta)\theta(1 - \theta) = \theta(1 - \theta)^4.
# $$
#
# Note that, in this case, we have fixed the data, and are left with a function just of $\theta$. This is called the *likelihood* function. Let's plot the likelihood as a function of $\theta$ below.
# + id="OamzFKpUMm9w" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="b1b6229a-3f3d-4e7a-c0b8-bdfef723da38"
# Run this cell to download and view a figure to plot the Bernoulli likelihood function
# !wget -q -O bernoulli_likelihood.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1vX9ARfK3QU6ZqxUyMM63s2lKfdwx2Bj9"
Image("bernoulli_likelihood.png", width=500)

# + [markdown] id="h3RZdEbOLo0x"
# #### Normal (Gaussian) distribution
#
# This idea also generalises naturally to the [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) (also called the *Gaussian* distribution). This distribution has two parameters: a mean $\mu$ and a standard deviation $\sigma$. We hence let $\theta = (\mu, \sigma)$. The probability density function (the analogue of the probability mass function for continuous distributions) is:
#
# $$
# L(y | \theta) = P(y | \theta) = P(y | \mu, \sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp \Big( - \frac{1}{2 \sigma^2} (y - \mu)^2 \Big).
# $$
#
# For a sequence of independent observations $y_1, \ldots, y_n$, the likelihood is
#
# $$
# L(y_1, \ldots, y_n | \mu, \sigma) = \prod_{i=1}^n \frac{1}{\sqrt{2 \pi \sigma^2}} \exp \Big( - \frac{1}{2 \sigma^2} (y_i - \mu)^2 \Big).
# $$
#
# The *likelihood* is hence the same, but viewed as a function of $\mu$ and $\sigma$, with $y_1, \ldots, y_n$ viewed as constants. For example, if the observed data is -1, 0, 1, the likelihood becomes
#
# $$
# L(-1, 0, 1 | \mu, \sigma) = (2 \pi \sigma^2)^{-3/2} \exp \Big( - \frac{1}{2 \sigma^2} \big( (\mu-1)^2 + (\mu)^2 + (\mu+1)^2 \big) \Big).
# $$
#
# which we can plot as a function of $\mu$ and $\sigma$ below.
# + id="7y6Pf8MaNDp2" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="16ccc901-d7ba-4662-a710-ea5cef636d72"
# Run this cell to download and view a figure to plot the Gaussian likelihood function
# !wget -q -O gaussian_likelihood.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1vKOhDpuujwANx1dpAw5-CMLPeIiyDgEi"
Image("gaussian_likelihood.png", width=500)

# + [markdown] id="anDPCeNFLo0y"
# ## Maximum likelihood estimation
#
# The likelihood function is commonly used in statistical inference when we are trying to fit a distribution to some data. This is usually done as follows. Suppose we have observed data $y_1, \ldots, y_n$, assumed to be from some distribution with unknown parameter $\theta$, which we want to estimate. The likelihood is
#
# $$
# L(y_1, \ldots, y_n | \theta).
# $$
#
# The *maximum likelihood estimate* $\theta_{\text{MLE}}$ of the parameter $\theta$ is then the value that maximises the likelihood $L(y_1, \ldots, y_n | \theta)$. For the example of the Bernoulli distribution with observed data 0, 0, 0, 1, 0 (as in the plot above), this gives us $\theta=\frac{1}{5}$, which is where the plot takes its maximum. For the normal distribution with data -1, 0, 1, this is the region where the plot is brightest (indicating the highest value), and this occurs at $\mu=0, \sigma=\sqrt{\frac{2}{3}}$. In this way, we *pick the values of the parameter that make the data we have observed the most likely*. Written in mathematical notation, this is
#
# $$
# \theta_{\text{MLE}} = \arg \max_{\theta} L(y_1, \ldots, y_n | \theta).
# $$

# + [markdown] id="Wj-rJhP8Lo0z"
# ## The negative log-likelihood
#
# Recall that, for independent observations, the likelihood becomes a product:
#
# $$
# L(y_1, \ldots, y_n | \theta) = \prod_{i=1}^n P(y_i | \theta).
# $$ # # Furthermore, since the $\log$ function increases with its argument, maximising the likelihood is equivalent to maximising the log-likelihood $\log L(y_1, \ldots, y_n | \theta)$. This changes the product into a sum: # # $$ # \begin{align} # \theta_{\text{MLE}} &= \arg \max_{\theta} L(y_1, \ldots, y_n | \theta) \\ # &= \arg \max_{\theta} \log L(y_1, \ldots, y_n | \theta) \\ # &= \arg \max_{\theta} \log \prod_{i=1}^n L(y_i | \theta) \\ # &= \arg \max_{\theta} \sum_{i=1}^n \log L(y_i | \theta). # \end{align} # $$ # # Furthermore, convention in optimisation is that we always *minimise* a function instead of maximising it. Hence, maximising the likelihood is equivalent to *minimising* the *negative log-likelihood*: # # $$ # \theta_{\text{MLE}} = \arg \min_{\theta} \text{NLL}(y_1, \ldots, y_n | \theta) # $$ # # where the *negative log-likelihood* NLL is defined as # # $$ # \text{NLL}(y_1, \ldots, y_n | \theta) = - \sum_{i=1}^n \log L(y_i | \theta). # $$ # + [markdown] id="YArMAiCcLo0z" # ## Training neural networks # # How is all this used to train neural networks? We do this, given some training data, by picking the weights of the neural network that maximise the likelihood (or, equivalently, minimise the negative loglikelihood) of having observed that data. More specifically, the neural network is a function that maps a data point $x_i$ to the parameter $\theta$ of some distribution. This parameter indicates the probability of seeing each possible label. We then use our true labels and the likelihood to find the best weights of the neural network. # # Let's be a bit more precise about this. Suppose we have a neural network $\text{NN}$ with weights $\mathbf{w}$. Furthemore, suppose $x_i$ is some data point, e.g. an image to be classified, or an $x$ value for which we want to predict the $y$ value. The neural network prediction (the feedforward value) $\hat{y}_i$ is # # $$ # \hat{y}_i = \text{NN}(x_i | \mathbf{w}). 
# $$ # # We can use this to train the neural network (determine its weights $\mathbf{w}$) as follows. We assume that the neural network prediction $\hat{y}_i$ forms part of a distribution that the true label is drawn from. Suppose we have some training data consisting of inputs and the associated labels. Let the data be $x_i$ and the labels $y_i$ for $i=1, \ldots, n$, where $n$ is the number of training samples. The training data is hence # # $$ # \text{training data} = \{(x_1, y_1), \ldots, (x_n, y_n)\} # $$ # # For each point $x_i$, we have the neural network prediction $\hat{y}_i = \text{NN}(x_i | \mathbf{w})$, which we assume specifies a distribution. We also have the true label $y_i$. The weights of the trained neural network are then those that minimise the negative log-likelihood: # # $$ # \begin{align} # \mathbf{w}^* &= \arg \min_{\mathbf{w}} \big( - \sum_{i=1}^n \log L(y_i | \hat{y}_i) \big) \\ # &= \arg \min_{\mathbf{w}} \big( - \sum_{i=1}^n \log L(y_i | \text{NN}(x_i | \mathbf{w})) \big) # \end{align} # $$ # # In practice, determining the true optimum $\mathbf{w}^*$ is not always possible. Instead, an approximate value is sought using stochastic gradient descent, usually via a *backpropagation* of derivatives and some optimization algorithm such as `RMSprop` or `Adam`. # # Let's see some examples to make this idea more concrete. # + [markdown] id="XLoEsqHwLo00" # #### Bernoulli distribution: binary classifiers # # Suppose we want a neural network NN that classifies images into either cats or dogs. Here, $x_i$ is an image of either a cat or a dog, and $\hat{y}_i$ is the probability that this image is either a cat (value 0) or a dog (value 1): # # $$ # \hat{y}_i = \text{NN}(x_i | \mathbf{w}) = \text{Prob}(\text{image is dog}). # $$ # # Note that this is just a Bernoulli distribution with values 0 and 1 corresponding to cat and dog respectively, of which we discussed the likelihood function above. 
Given training data $\{(x_1, y_1), \ldots, (x_n, y_n)\}$, with $y_i \in \{0, 1\}$, we have the negative log-likelihood # # $$ # \begin{align} # \text{NLL}((x_1, y_1), \ldots, (x_n, y_n) | \mathbf{w}) &= - \sum_{i=1}^n \log L(y_i | \hat{y}_i) \\ # &= - \sum_{i=1}^n \log \big( (1 - \hat{y}_i)^{1 - y_i} \hat{y}_i^{y_i} \big) \\ # &= - \sum_{i=1}^n \big( (1 - y_i) \log(1 - \hat{y}_i) + y_i \log \hat{y}_i \big) \\ # &= - \sum_{i=1}^n \big( (1 - y_i) \log(1 - \text{NN}(x_i | \mathbf{w})) + y_i \log \text{NN}(x_i | \mathbf{w}) \big). \\ # \end{align} # $$ # # This is exactly the sparse categorical cross-entropy loss function used when training a classification neural network. Hence, the reason why we typically use categorical cross-entropy loss functions when training classification data is exactly because this is the negative log-likelihood under a Bernoulli (or, when there are more than 2 classes, a categorical) distribution. # + [markdown] id="-QdSwciGLo00" # #### Normal distribution: least squares regression # # The idea works the same way in a regression task. Here, we have an $x$-value $x_i$ and want to predict the associated $y$-value $y_i$. We can use a neural network to do this, giving a prediction $\hat{y}_i$: # # $$ # \hat{y}_i = \text{NN}(x_i | \mathbf{w}). # $$ # # For example, suppose we were doing linear regression with the following data. # + id="resZrThDNayw" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="2a6578e0-b73d-4582-f9c9-cc2cca0f7257" # Run this cell to download and view a figure to plot the example data # !wget -q -O linear_regression.png --no-check-certificate "https://docs.google.com/uc?export=download&id=13p6E1qKf92b7UIYOxkU_jPpu9R5rUWfz" Image("linear_regression.png", width=500) # + [markdown] id="7ulp5oRALo00" # It's not possible to put a straight line through every data point. Furthermore, even points with the same $x$ value might not have the same $y$ value. 
We can interpret this as $y$ being linearly related to $x$ with some noise. More precisely, we may assume that # # $$ # y_i = f(x_i) + \epsilon_i \quad \quad \epsilon_i \sim N(0, \sigma^2) # $$ # # where $f$ is some function we want to determine (the regression) and $\epsilon_i$ is some Gaussian noise with mean 0 and constant variance $\sigma^2$. In deep learning, we might approximate $f(x_i)$ by a neural network $\text{NN}(x_i | \mathbf{w})$ with weights $\mathbf{w}$ and output $\hat{y}_i$. # # $$ # \hat{y}_i = \text{NN}(x_i | \mathbf{w}) = f(x_i) # $$ # # Under this assumption, we have # # $$ # \epsilon_i = y_i - \hat{y}_i \sim N(0, \sigma^2) # $$ # # and hence, given training data $\{(x_1, y_1), \ldots, (x_n, y_n)\}$, we have the negative log-likelihood (assuming the noise terms are independent): # # $$ # \begin{align} # \text{NLL}((x_1, y_1), \ldots, (x_n, y_n) | \mathbf{w}) &= - \sum_{i=1}^n \log L(y_i | \hat{y}_i) \\ # &= - \sum_{i=1}^n \log \Big( \frac{1}{\sqrt{2\pi\sigma^2}} \exp \Big( - \frac{1}{2\sigma^2} (\hat{y}_i - y_i)^2 \Big) \Big) \\ # &= \frac{n}{2} \log (2\pi\sigma^2) + \frac{1}{2\sigma^2} \sum_{i=1}^n (\hat{y}_i - y_i)^2 \\ # &= \frac{n}{2} \log (2\pi\sigma^2) + \frac{1}{2\sigma^2} \sum_{i=1}^n (\text{NN}(x_i | \mathbf{w}) - y_i)^2. # \end{align} # $$ # # Note that only the last term includes the weights. Hence, minimising the negative log-likelihood is equivalent to minimising # # $$ # \sum_{i=1}^n (\text{NN}(x_i | \mathbf{w}) - y_i)^2 # $$ # # which is exactly the sum of squared errors. Hence, least squares regression (or training a neural network using the mean squared error) is equivalent to training a neural network to match the expected value of an output by minimising the negative log-likelihood assuming a Gaussian error term with constant variance. 
# + [markdown] id="gJCmX3mJLo01" # ## Conclusion # # This was a very short introduction to maximum likelihood estimation, which is essential for deep learning, especially of the probabilistic variety that we'll be doing in this course. The method of maximum likelihood estimation is key to training neural networks, and typically informs the choice of loss function. In fact, you have probably trained neural networks using maximum likelihood estimation without even knowing it! # + [markdown] id="R9JEldurLo02" # ## Further reading and resources # I find that the Wikipedia pages for many statistical concepts offer excellent intuition. If you'd like to read up on these ideas in more detail, I'd recommend these: # * The Wikipedia page for Probability Distribution: https://en.wikipedia.org/wiki/Probability_distribution # * The Wikipedia page for Maximum Likelihood Estimation: https://en.wikipedia.org/wiki/Maximum_likelihood_estimation
Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week2_Maximum_likelihood_estimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Natural Language Processing # The essence of Natural Language Processing lies in making computers understand the natural language. That’s not an easy task though. Computers can understand the structured form of data like spreadsheets and the tables in the database, but human languages, texts, and voices form an unstructured category of data, and it gets difficult for the computer to understand it, and there arises the need for Natural Language Processing. # # Importing Libraries import os import nltk import nltk.corpus print(os.listdir(nltk.data.find("corpora"))) from nltk.corpus import brown brown.words() nltk.corpus.gutenberg.fileids() hamlet = nltk.corpus.gutenberg.words('shakespeare-hamlet.txt') hamlet for word in hamlet[:500]: print(word, sep = ' ', end = ' ') # # Tokenization # Tokenization is one of the most common tasks when it comes to working with text data. But what does the term ‘tokenization’ actually mean? # # Tokenization is essentially splitting a phrase, sentence, paragraph, or an entire text document into smaller units, such as individual words or terms. Each of these smaller units are called tokens. # # Importing the library for word tokenization from nltk.tokenize import word_tokenize AI ="""<NAME>, a brilliant mathematician, who broke the Nazi encryption machine Enigma, came up with a history-changing question, “Can machines think?” in 1950. The actual research began in 1956, at a conference held at Dartmouth College (a lot of the inventions have come into the picture, thanks to the Ivy League). A couple of attendees at the conference were the ones who came up with the idea and also the name “Artificial Intelligence”. 
But since the whole idea was new, people didn’t buy the idea and funding for further research was pulled off. This period, the 1950s – 1980s was called “AI Winter”. In the early 1980s however, the Japanese government saw a future in AI and started funding the field again. As this was interconnected to the electronics and computer science fields, there was a sudden spike in those as well. The first AI machine was introduced to the world in 1997; IBM’s Deep Blue became the first computer to beat a chess champion when it defeated Russian grandmaster <NAME>. And that, my dear readers, was the advent of a massive field called “AI”.""" type(AI) # # Performing word tokenization AI_tokens = word_tokenize(AI) AI_tokens len(AI_tokens) # For finding the occurance of lowercase words., from nltk.probability import FreqDist fdist = FreqDist() for word in AI_tokens: fdist[word.lower()]+=1 fdist fdist['the'] len(fdist) fdist_top5 = fdist.most_common(5) fdist_top5 from nltk.tokenize import blankline_tokenize AI_blank = blankline_tokenize(AI) AI_blank len(AI_blank) # # Ngrams, Bigrams and Trigrams # 1) Tokens of any number of consecutive written words are called Ngrams. # # 2) Tokens of two consecutive written words are called Bigrams. # # 3) Tokens of three consecutive written words are calles Trigrams. from nltk.util import bigrams, trigrams, ngrams string = "The basic function of the algorithms of AI is data analysis. Let me put it this way. How do you think human beings learn new things? They observe. They observe and that’s how they learn. Machines learn the same way. " quotes_tokens = nltk.word_tokenize(string) quotes_tokens quotes_bigrams = list(nltk.bigrams(quotes_tokens)) quotes_bigrams quotes_trigrams = list(nltk.trigrams(quotes_tokens)) quotes_trigrams # # Stemming # Stemming involves normalizing a word into its base or root form. # # Ex: Consider the words: Affect, Affection, Affected, Affecting. # The base or root form of the above words is "Äffect." 
# # Note: The NLTK tool provides mainly three types of stemmers, namely: # 1) PorterStemmer # # 2) LancasterStemmer # # 3) SnowballStemmer from nltk.stem import PorterStemmer pst = PorterStemmer() pst.stem("Loving") # + words_to_stem = ['Killing', 'Saving', 'Protecting', 'Served', 'loved'] for words in words_to_stem: print(words + ":" + pst.stem(words)) # + from nltk.stem import LancasterStemmer lst = LancasterStemmer() for words in words_to_stem: print(words + ":" + lst.stem(words)) # + from nltk.stem import SnowballStemmer sbst = SnowballStemmer('english') # - for words in words_to_stem: print(words + ":" + sbst.stem(words)) # # Lemmatization # 1) Groups together different inflicted forms of a word, called a lemma. # # 2) Somehow similar to stemming as it maps words into one common root. # # 3) The outcome of lemmatization is a proper word. # # for example, the words "going" and "gone" when lemmatized, should return "go" as the result. # # Lemmatization does acquire Wordnet database often for its functioning. # + from nltk.stem import wordnet from nltk.stem import WordNetLemmatizer word_lem = WordNetLemmatizer() # - word_lem.lemmatize('corpora') for words in words_to_stem: print(words + ":" + word_lem.lemmatize(words)) # + from nltk.corpus import stopwords stopwords.words('english') # - len(stopwords.words('english')) fdist_top5 import re punctuation = re.compile(r'[-.?!:;()|0-9]') # # Parts of Speech Tagging sentence = "Mama is a natural when it comes to kicking ass." sent_tokens = word_tokenize(sentence) len(sent_tokens) sent_tokens len(sent_tokens) for token in sent_tokens: print(nltk.pos_tag([token])) # + sent2 = "Elise kicked Arno in the balls." sen2_tokens = word_tokenize(sent2) sen2_tokens # - for token in sen2_tokens: print(nltk.pos_tag([token])) # # Named Entity Recognition # The detection of a named entity which could be either a movie, a monetary value, a location or even a person is called Named Entity Recognition. 
# # We import ne_chunk to perform Named Entity Recognition. from nltk import ne_chunk # + line = "The US President resides in the White House." line_token = word_tokenize(line) line_tag = nltk.pos_tag(line_token) # - line_ner = ne_chunk(line_tag) line_ner # # Syntax Tree # It is a tree repersentation of the syntactic structure of sentences or strings. # Notebook author - Sathvik.
Natural Language Processing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 2 : Reacher (Continuous Control) # ## Prepare the environment # If necessary, uncomment and execute the following commands to install the Python requirements for this project # #!pip -q install ../python # !pip install --no-deps unityagents # Load the Unity environment. Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # + from unityagents import UnityEnvironment import numpy as np # Load a single agent of the environment env = UnityEnvironment(file_name='Reacher_Linux_NoVis/Reacher.x86_64') # Get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # + # Imports import random import torch import numpy as np from collections import deque import time import matplotlib.pyplot as plt # Set plotting options # %matplotlib inline plt.style.use('ggplot') np.set_printoptions(precision=3, linewidth=120) # Hide Matplotlib deprecate warnings import warnings warnings.filterwarnings("ignore") # High resolution plot outputs for retina display # %config InlineBackend.figure_format = 'retina' # - # ## Check the Unity environment # + # Reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) # size of each action ENV_ACTION_SIZE = brain.vector_action_space_size # size of the state space states = env_info.vector_observations ENV_STATE_SIZE = states.shape[1] print('There are {} agents. 
Each observes a state with length: {} and act within an action space of length: {}'.format(states.shape[0], ENV_STATE_SIZE, ENV_ACTION_SIZE)) # - # ### Train the Agent with DDPG # # Define a training function # + from ddpg_agent import Agent # Agent default hyperparameters BUFFER_SIZE = int(1e5) # replay buffer size BATCH_SIZE = 128 # minibatch size GAMMA = 0.99 # discount factor TAU = 1e-3 # for soft update of target parameters LR_ACTOR = 1e-4 # learning rate of the actor LR_CRITIC = 1e-3 # learning rate of the critic WEIGHT_DECAY = 0 # L2 weight decay ACTOR_FC1_UNITS = 400 # Number of units for the layer 1 in the actor model ACTOR_FC2_UNITS = 300 # Number of units for the layer 2 in the actor model CRITIC_FCS1_UNITS = 400 # Number of units for the layer 1 in the critic model CRITIC_FC2_UNITS = 300 # Number of units for the layer 2 in the critic model BN_MODE = 0 # Use Batch Norm. - 0=disabled, 1=BN before Activation, 2=BN after Activation (3, 4 are alt. versions of 1, 2) ADD_OU_NOISE = True # Add Ornstein-Uhlenbeck noise MU = 0. 
# Ornstein-Uhlenbeck noise parameter THETA = 0.15 # Ornstein-Uhlenbeck noise parameter SIGMA = 0.2 # Ornstein-Uhlenbeck noise parameter def ddpg(n_episodes=5000, max_t=500, state_size=ENV_STATE_SIZE, action_size=ENV_ACTION_SIZE, random_seed=10, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, buffer_size=BUFFER_SIZE, batch_size=BATCH_SIZE, bn_mode=BN_MODE, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=SIGMA): # Instantiate the Agent agent = Agent(state_size=state_size,action_size=action_size, random_seed=random_seed, actor_fc1_units=actor_fc1_units, actor_fc2_units=actor_fc2_units, critic_fcs1_units=critic_fcs1_units, critic_fc2_units=critic_fc2_units, buffer_size=buffer_size, batch_size=batch_size, bn_mode=bn_mode, gamma=gamma, tau=tau, lr_actor=lr_actor, lr_critic=lr_critic, weight_decay=weight_decay, add_ounoise=add_ounoise, mu=mu, theta=theta, sigma=sigma) scores_deque = deque(maxlen=100) scores = [] print("\nStart training:") for i_episode in range(1, n_episodes+1): # Reset the env and get the state (Single Agent) env_info = env.reset(train_mode=True)[brain_name] state = env_info.vector_observations[0] # Reset the DDPG Agent (Reset the internal state (= noise) to mean mu) agent.reset() # Reset the score score = 0 for t in range(max_t): action = agent.act(state) # select an action env_info=env.step(action)[brain_name] # send action to the environment next_state = env_info.vector_observations[0] # get next state (Single Agent) reward = env_info.rewards[0] # get reward (Single Agent) done = env_info.local_done[0] # see if episode finished (Single Agent) #if i_episode<2: # print("Debug: steps={} reward={} done={}".format(t,reward,done)) # Save experience in replay memory, and use random sample from buffer to learn agent.step(state, action, reward, next_state, done) state = next_state 
score += reward if done: #print("Episode {} has terminated at step {}".format(i_episode, t)) break # Save scores and compute average score over last 100 episodes scores_deque.append(score) scores.append(score) avg_score = np.mean(scores_deque) print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(i_episode, avg_score, score), end="") if i_episode % 100 == 0: torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth') # Early stop if avg_score > 30: print('\rEnvironment solved in {} episodes with an Average Score of {:.2f}'.format(i_episode, avg_score)) return scores print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, avg_score)) return scores # - # Define a helper function to plot the training scores def plot_training(scores): # Plot the Score evolution during the training fig = plt.figure() ax = fig.add_subplot(111) ax.tick_params(axis='x', colors='deepskyblue') ax.tick_params(axis='y', colors='deepskyblue') plt.plot(np.arange(1, len(scores)+1), scores, color='deepskyblue') plt.ylabel('Score', color='deepskyblue') plt.xlabel('Episode #', color='deepskyblue') plt.show() # #### Evaluate Training Hyperparameters # + print("Test try #1 : Default Values") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=BN_MODE, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=SIGMA) plot_training(scores) print("\n\nTest try #2 : Smaller sigma for OU Noise process") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=BN_MODE, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, 
add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #3 : Use Batch Normization (before activation) in Actor/Critic models + #2") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=1, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #4 : Use Batch Normization (after activation) in Actor/Critic models + #2") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #3bis : Use Batch Normization (after activation) in Actor/Critic models + #2") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=3, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #4bis : Use Batch Normization (after activation) in Actor/Critic models + #2") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=ACTOR_FC1_UNITS, actor_fc2_units=ACTOR_FC2_UNITS, critic_fcs1_units=CRITIC_FCS1_UNITS, critic_fc2_units=CRITIC_FC2_UNITS, bn_mode=4, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #5 : Small neural networks + #3") scores = ddpg(n_episodes=100, 
max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=1, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #6 : Small neural networks + #4") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #7 : Faster learning rate for critic + #6") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=1e-4, lr_critic=1e-4, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #8 : Faster learning rates + #6") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=2e-4, lr_critic=2e-4, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #9 : Even Faster learning rates + #6") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=5e-4, lr_critic=5e-4, weight_decay=WEIGHT_DECAY, add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try #10 : Even More Faster learning rates + #6") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=GAMMA, tau=TAU, lr_actor=1e-3, lr_critic=1e-3, weight_decay=WEIGHT_DECAY, 
add_ounoise=ADD_OU_NOISE, mu=MU, theta=THETA, sigma=0.1) plot_training(scores) print("\n\nTest try : Agent candidate") scores = ddpg(n_episodes=100, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=0.99, tau=1e-3, lr_actor=2e-4, lr_critic=2e-4, weight_decay=0., add_ounoise=True, mu=0., theta=0.15, sigma=0.1 ) plot_training(scores) # - # #### Full Training (Initial version) # Based on the previous results, I choosed to try a full training on the candidate version, but with a slighty increased learning rate. # + scores = ddpg(n_episodes=1500, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=0.99, tau=1e-3, lr_actor=5e-4, lr_critic=5e-4, weight_decay=0., add_ounoise=True, mu=0., theta=0.15, sigma=0.1 ) plot_training(scores) # - # #### Full Training (Final version) # While the environment can be solved with the previous configuration, the learning can be slightly enhanced by dimming a bit the learning rates used. # + scores = ddpg(n_episodes=1500, max_t=1000, actor_fc1_units=128, actor_fc2_units=128, critic_fcs1_units=128, critic_fc2_units=128, bn_mode=2, gamma=0.99, tau=1e-3, lr_actor=2e-4, lr_critic=2e-4, weight_decay=0., add_ounoise=True, mu=0., theta=0.15, sigma=0.1 ) plot_training(scores) # - env.close()
p2_continuous-control/Reacher_Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np


def mean_squared_error(y, t):
    """Return the mean-squared-error loss 0.5 * sum((y - t)^2)."""
    return 0.5 * np.sum((y-t)**2)


def cross_entropy_error(y, t):
    """Cross-entropy loss for a single sample; `delta` keeps log() away from log(0)."""
    delta = 1e-7
    return -np.sum(t*np.log(y+delta))


def cross_entropy_error_batch(y, t):
    """Cross-entropy loss averaged over a mini-batch.

    A 1-D input is promoted to a (1, size) batch of one sample.
    """
    if y.ndim == 1:
        # Fix: reshape the arrays themselves. The original called
        # np.reshape(1, t.size), which tries to reshape the scalar 1 and
        # raises for any t with more than one element.
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    delta = 1e-7
    return -np.sum(t*np.log(y+delta)) / batch_size


# +
# Partial derivatives and gradient
def numerical_gradient(f, x):
    """Numerically approximate df/dx with central differences.

    `x` is perturbed in place while probing but each element is restored
    before moving on, so x is unchanged on return.
    """
    h = 1e-4
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]

        x[idx] = tmp_val + h
        fxh1 = f(x)

        x[idx] = tmp_val - h
        fxh2 = f(x)

        # Fix: the whole central difference must be divided by 2h; the
        # original `fxh1 - fxh2 / (2*h)` divided only the second term.
        grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the probed element
        it.iternext()

    return grad


# +
# Gradient descent
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Minimize f starting from init_x with `step_num` fixed-size steps."""
    x = init_x

    for _ in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad

    return x


# +
class TwoLayerNet:
    """Two-layer fully connected net: sigmoid hidden layer, softmax output."""

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # params holds all learnable tensors, keyed 'W1', 'b1', 'W2', 'b2'.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.rand(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.rand(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def sigmoid(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1+np.exp(-x))

    def softmax(self, x):
        """Softmax with the max subtracted for numerical stability."""
        c = np.max(x)
        # Fix: actually subtract the max. The original computed c but then
        # exponentiated x directly, which can overflow for large inputs.
        exp_x = np.exp(x - c)
        exp_all = np.sum(exp_x)
        return exp_x / exp_all

    def predict(self, x):
        """Forward pass: return class probabilities for input x."""
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        # Fix: the hidden layer uses the sigmoid activation; the original
        # applied softmax here, leaving the defined sigmoid unused.
        z1 = self.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = self.softmax(a2)

        return y

    def cross_entropy_error(self, y, t):
        """Batch cross-entropy; accepts one-hot or label-encoded targets."""
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)

        # One-hot targets are converted to integer class labels.
        if y.ndim == t.ndim:
            t = t.argmax(axis=1)

        batch_size = y.shape[0]
        return -np.sum(np.log(y[np.arange(batch_size),t])) / batch_size

    def loss(self, x, t):
        """Cross-entropy loss of the prediction for (x, t)."""
        y = self.predict(x)
        return self.cross_entropy_error(y,t)

    def accuracy(self, x, t):
        """Fraction of samples whose argmax prediction matches the target."""
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def numerical_gradient(self, f, x):
        """Central-difference gradient of f with respect to x (in-place probing)."""
        h = 1e-4
        grad = np.zeros_like(x)

        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            tmp_val = x[idx]

            x[idx] = tmp_val + h
            fxh1 = f(x)

            x[idx] = tmp_val - h
            fxh2 = f(x)

            grad[idx] = (fxh1 - fxh2) / (2*h)
            x[idx] = tmp_val
            it.iternext()

        return grad

    def numerical_gradient_all(self, x, t):
        """Numerical gradients of the loss for every parameter tensor."""
        loss_W = lambda W: self.loss(x,t)

        grads = {}
        grads['W1'] = self.numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = self.numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = self.numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = self.numerical_gradient(loss_W, self.params['b2'])

        return grads
# -

from dataset03.dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)

# +
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

network = TwoLayerNet(input_size = 784, hidden_size=50, output_size = 10)

train_loss_list = []
train_acc_list = []
test_acc_list = []

# iterations per epoch
iter_per_epoch = max(train_size/batch_size, 1)

# +
from tqdm.auto import tqdm # progress bar

for i in tqdm(range(iters_num)):
    # sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # compute gradients
    grad = network.numerical_gradient_all(x_batch, t_batch)

    # update parameters
    for key in ['W1','b1','W2', 'b2']:
        # Fix: apply the update to the network parameters; the original
        # subtracted from grad[key] itself, so the weights never changed.
        network.params[key] -= learning_rate * grad[key]

    # record training progress
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # compute accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | "+ str(train_acc) + ", " + str(test_acc))
# -
ch4/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic Programming
# - Type all code with an English input method

# ## Writing a simple program
# - Area of a circle: area = radius \* radius \* 3.1415

# ### In Python there is no need to declare the type of data

# ## Reading input from the console
# - input() always returns a string
# - eval
# - In Jupyter, press Shift + Tab to pop up the documentation

# ## Variable naming rules
# - Made of letters, digits and underscores
# - May not start with a digit \*
# - An identifier may not be a keyword (technically it can be forced, but that is very bad style)
# - May be of any length
# - camelCase naming

# ## Variables, assignment statements and assignment expressions
# - Variable: informally, a quantity that can change
# - x = 2 \* x + 1 is an equation in mathematics, but in a program it is an expression
# - test = test + 1 \* a variable must already have a value before it is used in an assignment

# ## Simultaneous assignment
# var1, var2, var3... = exp1, exp2, exp3...

# ## Defining constants
# - Constant: an identifier for a fixed value, useful when the same value is used many times, e.g. PI
# - Note: in lower-level languages a defined constant cannot be changed, but in Python everything is an object, so "constants" can still be reassigned

# ## Numeric data types and operators
# - Python has two numeric types (int and float) supporting addition, subtraction, multiplication, division, modulo and powers
# <img src = "../Photo/01.jpg"></img>

# ## Operators /, //, **

# ## Operator %

# ## EP:
# - What does 25/4 evaluate to? How would you rewrite it so the result is an integer?
# - Read a number and decide whether it is odd or even
# - Extra: read a number of seconds and convert it to minutes and seconds, e.g. 500 seconds equals 8 minutes 20 seconds
# - Extra: if today is Saturday, what day of the week is it 10 days from now?
#   Hint: day 0 of each week is Sunday

# ## Scientific notation
# - 1.234e+2
# - 1.234e-2

# ## Evaluating expressions and operator precedence
# <img src = "../Photo/02.png"></img>
# <img src = "../Photo/03.png"></img>

# ## Augmented assignment operators
# <img src = "../Photo/04.png"></img>

# ## Type conversion
# - float -> int
# - rounding: round

# ## EP:
# - With an annual tax rate of 0.06%, how much tax is due on an annual income of 197.55e+2? (round the result to 2 decimal places)
# - Scientific notation must be used

# # Project
# - Write a loan calculator in Python: the input is the monthly payment (monthlyPayment), the output is the total payment (totalpayment)
# ![](../Photo/05.png)

# # Homework
# - 1: convert Celsius to Fahrenheit
# <img src="../Photo/06.png"></img>

C=int(input('Enter a degree in Celsius:'))
F=(9/5)*C+32
print('{} Celsius is {} Fahrenheit'.format(C,F))

# - 2: area and volume of a cylinder
# <img src="../Photo/07.png"></img>

# +
radius=float(input())
length=int(input())
import math
area=radius*radius*math.pi
volume=area*length
print('Enter the radius and length of a cylinder:{},{}'.format(radius,length))
print('The area is %.4f'%area)
print('The volume is %.1f'%volume)
# -

# - 3: convert feet to meters
# <img src="../Photo/08.png"></img>

feet=float(input('Enter a value for feet:'))
meters=feet*0.305
print('{} feet is {} meters'.format(feet,meters))

# - 4: energy needed to heat water, Q = M * (final - initial) * 4184
# <img src="../Photo/10.png"></img>

# NOTE(review): the "inital" typo below is part of a runtime prompt string and
# is kept byte-for-byte.
M=float(input('Enter the amount of water in kilograms:'))
i=float(input('Enter the inital temperature:'))
f=float(input('Enter the final temperature:'))
Q=M*(f-i)*4184
print('The energy needed is %.1f'%Q)

# - 5: monthly interest on a balance (annual rate divided by 1200)
# <img src="../Photo/11.png"></img>

balance=int(input())
rate=float(input())
print('Enter balance and interest rate :{},{}'.format(balance,rate))
interest=balance*(rate/1200)
print('The interest is %.5f'%interest)

# - 6: average acceleration from v0, v1 and t
# <img src="../Photo/12.png"></img>

v0=float(input())
v1=float(input())
t=float(input())
a=(v1-v0)/t
print('Enter v0,v1 and t:{},{},{}'.format(v0,v1,t))
print('Enter average acceleration is %.4f'%a)

# - 7 (extra): compound monthly savings over six months (0.00417 = 5%/12 monthly rate)
# <img src="../Photo/13.png"></img>

m=int(input('Enter the monthly saving amount:'))
sum=0
for i in range(6):
    sum=(m+sum)*(1+0.00417)
print('After the sixth month,the account value is %.2f'%sum)

# - 8 (extra): sum of the digits of a number
# <img src="../Photo/14.png"></img>

# NOTE(review): for n == 1000 the hundreds digit computes as 10 (1000//100);
# presumably the prompt intends 0..999 — confirm against the exercise.
n=int(input('Enter a number between 0 and 1000:'))
b=int(n//100)
s=int(n//10%10)
g=int(n%10)
sum=b+s+g
print('The sum of the digits is {}'.format(sum))
7.16.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Wednesday, August 4th, 2021
# ### BaekJoon - NN (Python)
# ### Problem : https://www.acmicpc.net/problem/11944
# ### Blog : https://somjang.tistory.com/entry/BaekJoon-11944%EB%B2%88-NN-Python

# ### Solution

# +
def NN(N, M):
    """Write the digit string N back-to-back N times and keep only the first M characters."""
    repeat_count = int(N)
    keep = int(M)
    repeated = N * repeat_count
    return repeated[:keep]


if __name__ == "__main__":
    number, limit = input().split()
    print(NN(number, limit))
DAY 401 ~ 500/DAY443_[BaekJoon] NN (Python).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Structure of a graph
## the set V of nodes (= node, vertex)
## the set E of edges (= edge, link)
## nodes carry data; edges carry information between pairs of nodes

# G = (V, E)
## V = {v1, v2, v3}
## E = {e1, e2, e3}
## e1 = (v1, v2), e2 = (v1, v3), e3 = (v2, v3)

# Weighted graph: the edges carry weights
# Directed graph: the edges have a direction

# Ways to implement a graph
## adjacency matrix
## adjacency list

# +
# Graph implementation (adjacency-list based)
# reference: https://www.python-course.eu/graphs_python.php

class Graph(object):
    """Simple graph backed by a dict mapping each vertex to its neighbour list."""

    def __init__(self, graph_dict = None):
        # `is None` instead of `== None`; also avoids a shared mutable default.
        if graph_dict is None:
            graph_dict = {}
        self.__graph_dict = graph_dict

    def vertices(self):
        """Return the list of all vertices."""
        return list(self.__graph_dict.keys())

    def edges(self):
        """Return the list of edges, each represented as a set of its endpoints."""
        return self.__generate_edges()

    def add_vertex(self, vertex):
        """Add an isolated vertex unless it is already present."""
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []

    def add_edge(self, edge):
        """Add an edge given as an iterable of its endpoints.

        Fix: a self-loop such as {"c", "c"} collapses to a one-element set,
        which made the original two-name tuple unpacking raise ValueError.
        """
        edge = set(edge)
        vertex1 = edge.pop()
        # A self-loop leaves the set empty after the first pop.
        vertex2 = edge.pop() if edge else vertex1
        if vertex1 in self.__graph_dict:
            self.__graph_dict[vertex1].append(vertex2)
        else:
            self.__graph_dict[vertex1] = [vertex2]

    def __generate_edges(self):
        """Collect every edge exactly once as a set of endpoints."""
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                # Set equality makes {a, b} and {b, a} the same edge.
                if {neighbour, vertex} not in edges:
                    edges.append({vertex, neighbour})
        return edges

    def __str__(self):
        res = "vertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        res += "\nedges: "
        for edge in self.__generate_edges():
            res += str(edge) + " "
        return res


# +
g = { "a" : ["d"],
      "b" : ["c"],
      "c" : ["b", "c", "d", "e"],
      "d" : ["a", "c"],
      "e" : ["c"],
      "f" : []
    }

graph = Graph(g)
# -

graph.vertices()

graph.edges()

graph.add_vertex("z")

graph.vertices()

graph.add_edge({"a","z"})

print(graph.edges())

graph.add_edge({"x","y"})

print(graph.vertices())
print(graph.edges())
data-structure/graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="5po4DW_CLM9Y" colab_type="text"
# ## `defaultdict`
#
# Creates a dictionary with a default answer for lookups of missing keys.
#
# Instead of raising an error, it creates a new entry.

# + id="VaZeE-pUkUmA" colab_type="code" colab={}
from collections import defaultdict

# + id="noXZ8TPiJz6J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="8fe8266a-f321-4c86-ec08-1312744424e3"
# Renamed the variable from `dict` to `d`: the original shadowed the builtin `dict`.
d = {1: 'A'}
print(type(d))
d

# + id="fT5OEb65I5rp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="22c68724-7e3f-4a4b-997a-b45546db78fb"
# The factory is called (with no arguments) whenever a missing key is looked up.
d = defaultdict(lambda: 'padrão')
print(type(d))
d

# + id="GmQ_Rur5Hp5a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4b162bbf-8bfc-4a30-b389-811697040f8b"
d[1] = 'A'
d

# + id="H_7toKcfIGZ7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="6ae1c754-ef05-4928-f2b1-f9cf393e955c"
print(d[1])  # existing key
print(d[2])  # missing key (it gets added to the dictionary with the preset default value)
d

# + [markdown] id="JEVY9BNfNvSj" colab_type="text"
# https://realpython.com/python-defaultdict/
Notebooks/Dictionaries_DefaultDict.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The dynamical equations are given as
#
# \begin{align}
# \dot{S}_{ni} &=-\lambda_{ni}(t)S_{ni}+\sigma_{ni},\\
# \dot{E}_{ni} &=\lambda_{ni}(t)S_{ni}-\gamma_{E}E_{ni},\\
# \dot{I}_{ni}^{a} &=\alpha\gamma_{E}E_{ni}-\gamma_{I^{a}}I_{ni}^{a},\\
# \dot{I}_{ni}^{s} &=\bar{\alpha}\gamma_{E}E_{ni}-\gamma_{I^{s}}I_{ni}^{s},\\
# \dot{I}_{ni}^{h} &=h_{i}\gamma_{I^{s}}I_{ni}^{s}-\gamma_{I^{h}}I_{ni}^{h},\\
# \dot{I}_{ni}^{c} &=c_{i}\gamma_{I^{h}}I_{ni}^{h}-\gamma_{I^{c}}I_{ni}^{c},\\
# \dot{I}_{ni}^{m} &=m_{i}\gamma_{I^{c}}I_{ni}^{c},\\
# \dot{N}_{ni} &=\sigma_{ni}-m_{i}\gamma_{I^{c}}I_{ni}^{c}
# \end{align}
#
#
#
# The key idea is to note that the rate of infection of a susceptible individual in age group `i` at node `n` is given as
#
# \begin{align}
# \lambda_{ni}(t)=\lambda^0_{ni}(t)=\beta\sum_{j=1}^{M}\left(C_{ij}\frac{I_{nj}^{a}}{N_{j}}+f_{sa}C_{ij}\frac{I_{nj}^{s}}{N_{j}}\right)
# \end{align}
#
#
# Commuting can then be modeled by changing this to a sum over the other nodes m
#
#
# \begin{align}
# \lambda_{ni}^{T}(t)=\beta\sum_{j=1}^{M}\sum_{m=1,m\neq n}^{Nd}r_{T}T_{nm}\left(C_{ij}\frac{I_{mj}^{a}}{N_{j}}+f_{sa}C_{ij}\frac{I_{mj}^{s}}{N_{j}}\right),
# \end{align}
#
#
# while workplace interaction can be modeled as
#
#
# \begin{align}
# \lambda_{ni}^{W}(t)=\beta\sum_{j=1}^{M}\sum_{m=1,m\neq n}^{Nd}r_{W}W_{nm}\left(C_{ij}\frac{I_{mj}^{a}}{N_{j}}+f_{sa}C_{ij}\frac{I_{mj}^{s}}{N_{j}}\right).
# \end{align}
#
#
#
# The simulation then runs by choosing the appropriate $\lambda$ as a function of time. The constants $r_T$ and $r_W$ control the strength of the interactions, and the matrices $W_{nm}$ and $T_{nm}$ specify which `nm` pairs of nodes interact.
# # # # %%capture ## compile for this notebook import os owd = os.getcwd() os.chdir('../') # %run setup.py install os.chdir(owd) # + # %matplotlib inline import numpy as np import pandas as pd import nodgeo import matplotlib.pyplot as plt #from matplotlib import rc; rc('text', usetex=True) # + M=16 # number of age groups # load age structure data my_data = np.genfromtxt('data/age_structures/UK.csv', delimiter=',', skip_header=1) aM, aF = my_data[:, 1], my_data[:, 2] # set age groups Ni0=aM+aF; Ni0=Ni0[0:M]; # + my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_home_2.xlsx', sheet_name='United Kingdom of Great Britain') CH0 = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_work_2.xlsx', sheet_name='United Kingdom of Great Britain',index_col=None) CW0 = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_school_2.xlsx', sheet_name='United Kingdom of Great Britain',index_col=None) CS0 = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_other_locations_2.xlsx', sheet_name='United Kingdom of Great Britain',index_col=None) CO0 = np.array(my_data) CH = np.zeros((16, 16)) CH[0,:]= np.array((0.478812799633172, 0.55185413960287,0.334323605154544,0.132361228266194,0.138531587861408,0.281604887066586,0.406440258772792,0.493947983343078,0.113301080935514,0.0746826413664804,0.0419640342896305,0.0179831987029717,0.00553694264516568,0.00142187285266089,0,0.000505582193632659)) for i in range(15): CH[i+1, :] = CH0[i, :] CW = np.zeros((16, 16)) CW[0,:]= np.array((0,0,0,0,0,0,0,0,0,0,0,0,0,0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000820604524144799,0.0000120585150153575,0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000316436833811157)) for i in range(15): CW[i+1, :] = CW0[i, :] CS = np.zeros((16, 16)) CS[0,:]= 
np.array((0.974577996106766,0.151369805263473,0.00874880925953218,0.0262790907947637,0.0111281607429249,0.0891043051294382,0.125477587043249,0.0883182775274553,0.0371824197201174,0.0294092695284747,0.0000000000000000000000000000000000000510911446027435,0.0000000000000000000000000000000113982464440009,0.00758428705895781,0.00151636767747242,0.0000000000000000000000000000000000000000000000000123262013953524,0.000000000000000000000000000000000000000000000000000000000000000597486362181075)) for i in range(15): CS[i+1, :] = CS0[i, :] CO = np.zeros((16, 16)) CO[0,:]= np.array((0.257847576361162,0.100135168376607,0.0458036773638843,0.127084549151753,0.187303683093508,0.257979214509792,0.193228849121415,0.336594916946786,0.309223290169635,0.070538522966953,0.152218422246435,0.113554851510519,0.0615771477785246,0.040429874099682,0.0373564987094767,0.00669781557624776)) for i in range(15): CO[i+1, :] = CO0[i, :] ## matrix of total contacts C=CH+CW+CS+CO fig,aCF = plt.subplots(2,2); aCF[0][0].pcolor(CH, cmap=plt.cm.get_cmap('GnBu', 10)); aCF[0][1].pcolor(CW, cmap=plt.cm.get_cmap('GnBu', 10)); aCF[1][0].pcolor(CS, cmap=plt.cm.get_cmap('GnBu', 10)); aCF[1][1].pcolor(CO, cmap=plt.cm.get_cmap('GnBu', 10)); # + beta = 0.4/24 # infection rate gE = (1.0/5)/24 gIa = (1.0/7)/24 # recovery rate of asymptomatic infectives gIs = (1.0/7)/24 # recovery rate of symptomatic infectives alpha = 0.3 # fraction of asymptomatic infectives fsa = 1 # the self-isolation parameter gIh = (1.0/14)/24 gIc = (1.0/1)/24 sa = 100*np.ones(M) # rate of additional/removal of population by birth etc sa[0] = 1500 # birth sa[12:16] = -300 # mortality hh = 0.1*np.ones(M) # fraction which goes from Is to hospital cc = 0.05*np.ones(M) # fraction which goes from hospital to ICU mm = np.zeros(M) # mortality from IC mm = np.array((0,0,.0,1,1,1,1,1,1,3.5,3.5,3.5,3.5,6,6,14.2))/100 # + beta = 1/24 # infection rate gE = (1.0/5)/24 gIa = (1.0/7)/24 # recovery rate of asymptomatic infectives gIs = (1.0/7)/24 # recovery 
rate of symptomatic infectives alpha = 0.3 # fraction of asymptomatic infectives fsa = 1 # the self-isolation parameter gIh = (1.0/7)/24 gIc = (1.0/7)/24 sa = 0*np.ones(M) # rate of additional/removal of population by birth etc hh = 0.5*np.ones(M) # fraction which goes from Is to hospital cc = np.array((0,0,.0,1,1,1,1,1,1,3.5,3.5,3.5,3.5,6,6,14.2))/100 mm = 0.5*cc # fraction which goes from hospital to ICU Nd=21 N0= np.zeros((M*Nd)) for i in range(Nd): N0[i*M:(i+1)*M]=Ni0/Nd Ni = N0 E0 = np.zeros((M*Nd)); Ia0 = 0*np.ones((M*Nd)); # Ia0[6:13]=4; Ia0[2:6]=2; Ia0[13:16]=2 Is0 = 0*np.ones((M*Nd)); #Is_0[6:13]=8; Is_0[2:6]=4; Is_0[13:16]=4 for i in range(int(Nd/3)): Ia0[3*M*i:3*M*i+10] = 10 Is0 = Ia0 Ih0 = np.zeros((M*Nd)) Ic0 = np.zeros((M*Nd)) Im0 = np.zeros((M*Nd)) R0 = np.zeros((M*Nd)) S0 = Ni - (E0 + Ia0 + Is0 + Ih0 + Ic0 + Im0 + R0) tL, wL = 6, 6# int(Nd/5), int(Nd/5) Tr = np.zeros((Nd, tL)) Wo = np.zeros((Nd, wL)) for i in range(Nd): Tr[i,:] = np.linspace(i+1, i+tL, tL)%(Nd-1)#np.random.randint(Nd, size=tL) Wo[i,:] = np.linspace(i+1, i+wL, wL)%(Nd-1)#np.random.randint(Nd, size=wL) rW, rT = .1, .1 def contactMatrix(t): return C # duration of simulation and data file Tf = 24*60; Nt=120; # instantiate model parameters = {'alpha':alpha,'beta':beta, 'gIa':gIa,'gIs':gIs, 'gIh':gIh,'gIc':gIc, 'gE':gE, 'fsa':fsa, 'rW':rW, 'rT':rT, 'hh':hh, 'cc':cc, 'mm':mm} model = nodgeo.deterministic.SEI5R(parameters, Nd, M, Ni, Wo, Tr) # simulate model #data = model.simulate(S0, E0, Ia0, Is0, Ih0, Ic0, Im0, contactMatrix, Tf, Nt, nodeInteraction='False') # - # ## Non-interacting nodes # + data = model.simulate(S0, E0, Ia0, Is0, Ih0, Ic0, Im0, contactMatrix, Tf, Nt, nodeInteraction='False') ## first M*Nd points are S, then M*Nd points are E, then M*Nd points are Ia, Is, Ih, Ic, Im, Ni ## thus a total of 8*M*Nd points radius = np.sqrt(np.arange(Nd)/float(Nd)) golden_angle = np.pi * (3 - np.sqrt(5)); theta = golden_angle * np.arange(Nd) points = np.zeros((Nd, 2)); points[:,0] = 
np.cos(theta); points[:,1] = np.sin(theta) points *= radius.reshape((Nd, 1)); points = points*4*np.sqrt(Nd) plt.rcParams.update({'font.size': 26}) M1=5*M*Nd f = plt.figure(figsize=(16, 15)); for ti in range(1): ti=Nt-1 IM1=np.zeros((Nd)); IM2=np.zeros((Nd)); IM3=np.zeros((Nd)); IM4=np.zeros((Nd)) IS=np.zeros((Nd)); for n in range(Nd): for i in range(3): IM1[n] += data['X'][ti, M1+n*M+i] IS[n] += data['X'][ti, 3*M*Nd+n*M+i] for i in range(6): IM2[n] += data['X'][ti, M1+n*M+3+i] for i in range(4): IM3[n] += data['X'][ti, M1+n*M+9+i] for i in range(3): IM4[n] += data['X'][ti, M1+n*M+12+i] print(IM4[0:4]) #print(IS[0:4]) Imm = (np.max((IM1, IM2, IM3, IM4))) + 0.000001 IM1 = IM1/Imm IM2 = IM2/Imm IM3 = IM3/Imm IM4 = IM4/Imm sz=200 sp = f.add_subplot(2,2,1); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM1, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('0-15 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,2); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM2, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('15-45 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,3); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM3, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('45-65 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,4); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM4, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('65-80 Years'); plt.axis('square'); plt.axis('off'); plt.colorbar(); #plt.savefig('this%05d_.png'%(ti)) #plt.clf() # - # ## Interacting nodes # + data = model.simulate(S0, E0, Ia0, Is0, Ih0, Ic0, Im0, contactMatrix, Tf, Nt, nodeInteraction='True') ## first M*Nd points are S, then M*Nd points are E, then M*Nd points are Ia, Is, Ih, Ic, Im, Ni ## thus a total of 8*M*Nd points radius = np.sqrt(np.arange(Nd)/float(Nd)) golden_angle = np.pi * (3 - np.sqrt(5)); theta = golden_angle * np.arange(Nd) points = np.zeros((Nd, 2)); points[:,0] = np.cos(theta); points[:,1] = 
np.sin(theta) points *= radius.reshape((Nd, 1)); points = points*4*np.sqrt(Nd) M1=5*M*Nd f = plt.figure(figsize=(16, 15)); for ti in range(1): ti=Nt-1 IM1=np.zeros((Nd)); IM2=np.zeros((Nd)); IM3=np.zeros((Nd)); IM4=np.zeros((Nd)) IS=np.zeros((Nd)); for n in range(Nd): for i in range(3): IM1[n] += data['X'][ti, M1+n*M+i] IS[n] += data['X'][ti, 3*M*Nd+n*M+i] for i in range(6): IM2[n] += data['X'][ti, M1+n*M+3+i] for i in range(4): IM3[n] += data['X'][ti, M1+n*M+9+i] for i in range(3): IM4[n] += data['X'][ti, M1+n*M+12+i] print(IM4[0:4]) #print(IS[0:4]) Imm = (np.max((IM1, IM2, IM3, IM4))) + 0.000001 IM1 = IM1/Imm IM2 = IM2/Imm IM3 = IM3/Imm IM4 = IM4/Imm sz=200 sp = f.add_subplot(2,2,1); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM1, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('0-15 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,2); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM2, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('15-45 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,3); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM3, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('45-65 Years'); plt.axis('square'); plt.axis('off'); sp = f.add_subplot(2,2,4); plt.scatter(points[:,0], points[:,1], s=sz*np.ones(Nd), c=IM4, cmap=plt.cm.Reds) plt.clim(0, 1); plt.title('65-80 Years'); plt.axis('square'); plt.axis('off'); #plt.colorbar(); #plt.savefig('this%05d_.png'%(ti)) #plt.clf()
examples_mft/examples_1storder/ex3-SEI5R-network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import de Bibliotecas import pandas as pd import matplotlib as mpl from matplotlib import pyplot as plt import seaborn as sns # + # NB Configuration # %config InlineBackend.figure_format = 'retina' sns.set_context('talk') plt.style.use(r'../../src/visualization/my_style.mplstyle') # - path = r'../../data/raw/boletim.csv.gz' df = pd.read_csv(path) df['notes'].dropna().reset_index() # # Conclusão # Esse arquivo se refere aos boletins lidos para a criação do DataFrame.
notebooks/exploratory/eda_boletim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Named Entities Recognition with NLTK # - Named entities sind Orte, Firmen, Organisationen, Länder, etc.. alles Eigennamen. # - Oft sind diese Eigennamen relativ wichtig im Text. import nltk nltk.download('averaged_perceptron_tagger') # nltk.download('maxent_ne_chunker') # Biblithek, die Namens-Entitäten raus holt nltk.download('words') print(nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize("<NAME> is from the United States of America and works at Microsoft Research Labs")))) # der print-Funktion wird von innen nach aussen abgearbeitet: mit den ((())) kann man verschiedene Arbeitsschritte # schachteln print(nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize("<NAME> kommt aus Deutschland und arbeitet bei der Forschungsabteilung von Microsoft")))) # # Named Entities Recognition as a Service # - Extrahieren von wichtigen sog. Named entities (cities, persons, etc..) # - http://www.opencalais.com/opencalais-api/ # - http://www.opencalais.com/wp-content/uploads/folder/ThomsonReutersOpenCalaisAPIUserGuideR12_1.pdf # - Super praktisch wenn man sehr viele Dokumente hat und aggregiert sehen möchte worüber gesprochen wird.... # ### Aufgabe # Holt euch den text von http://www.spiegel.de/politik/ausland/brexit-das-endspiel-hat-begonnen-a-1238299.html mit dem readability tool. ==> Funktioniert nur auf Chrome text = '''Brexit: Das Endspiel hat begonnen By <NAME> und <NAME>, Brüssel und London|Nov. 14th, 2018 Send to Kindle Mittwoch, 14.11.2018 10:36 Uhr Bei den Brexit-Verhandlungen ist offenbar ein entscheidendes Zwischenziel erreicht worden. 
Nachdem die britische Regierung am Dienstag verkündete, es gebe eine Einigung mit Brüssel, mehrten sich auch in der EU-Hauptstadt die Anzeichen, dass die Unterhändler von EU und Großbritannien auf technischer Ebene ein belastbares Papier erarbeitet haben. Am Mittwoch will die Premierministerin Theresa May nun ihr Kabinett auf die Brüsseler Übereinkunft einschwören. Der Fraktionschef der Europäischen Volkspartei, <NAME>, sagte in den ARD-"Tagesthemen", es gebe "positive Signale", dass es zu einer Einigung komme: "Ja, der weiße Rauch steigt auf." Eine offizielle Bestätigung für einen Deal gab es von den Unterhändlern aus Brüssel zunächst nicht. Allerdings wollen die Botschafter der 27 restlichen EU-Länder am Mittwoch über einen Zeitplan bis zu einem möglichen Sondergipfel am Ende des Monats beraten, ein Vorhaben, das sinnlos wäre, wenn es keinen Grund für vorsichtigen Optimismus gäbe. Mittwoch Gespräche im Kabinett, bei Erfolg: Sondergipfel am 25. November Zuletzt waren die Gespräche bis tief in die Nacht im sogenannten Tunnel geführt worden, unter maximaler Geheimhaltung und mit der gegenseitigen Zusicherung, dass nichts davon vorzeitig nach außen dringt. Dann, am frühen Dienstagabend, bestätigte ein Sprecher Mays, dass tatsächlich ein Vertragsentwurf vorliege. Die Premierministerin habe ihr Kabinett für Mittwoch, 14 Uhr Ortszeit, zu einer außerordentlichen Sitzung einbestellt. Ihr Ziel ist, möglichst noch am Mittwoch verkünden zu können, dass ihre Regierung weitgehend geschlossen hinter der mit Brüssel getroffenen Vereinbarung steht. Danach würde umgehend ein EU-Sondergipfel, vermutlich für den 25. November, einberufen. Wird May das Ergebnis der Unterhändler diesmal zu Hause durchsetzen? In Brüssel ist man hingegen vorsichtig mit dem Wort Einigung. Es gebe einen Text, auf den sich die Unterhändler geeinigt hätten, politisch sei der aber noch nicht abgesegnet, sagen mit der Angelegenheit vertraute EU-Diplomaten. Nun liege der Ball, mal wieder, in London. 
Eine ähnliche Situation hatte es bereits vor rund vier Wochen gegeben, als die Unterhändler in Brüssel ebenfalls weit gekommen waren, 10 Downing Street aber in letzter Sekunde den Stecker zog, weil May das Ergebnis in London nicht durchsetzen konnte. Trotzdem bereitet auch die EU nun die Grundlagen für einen Sondergipfel vor, auf dem May für das Publikum auf der Insel ausreichend Verhandlungsdramatik inszenieren könnte. Die Sitzung der Botschafter der 27 verbleibenden EU-Länder am Mittwochnachmittag wurde um den Tagesordnungspunkt "State of Play" erweitert, es geht also um den Stand der Brexit-Verhandlungen. Ursprünglich wollte man lediglich über die Vorbereitungen für den Fall beraten, dass es zu keiner Einigung kommt. Geht die Besprechung positiv aus, könnte bereits Anfang kommender Woche ein sogenannter Allgemeiner Rat der Europaminister einberufen werden, der den Sondergipfel der Staats- und Regierungschefs vorbereitet. Noch sind diese Planungen im Konjunktiv. Umweltminister <NAME> REUTERS Umweltminister <NAME> 400-Seiten-Lektüre der Minister in 10 Downing Street Alles hängt nun von den Signalen aus London ab. Ob es May gelingt, ihr Kabinett hinter dem Brüsseler Papier zu versammeln, ist offen. Sämtliche Minister wurden am Dienstag nacheinander in 10 Downing Street einbestellt, um den etwa 400 Seiten umfassenden Vertragsentwurf studieren zu können. Darunter auch jene politischen Schwergewichte, von deren Urteil das weitere Schicksal des Brexits - und Mays - abhängt, allen voran Innenminister <NAME>, Außenminister <NAME> und Umweltminister <NAME>. Allen drei werden Ambitionen nachgesagt, May eher früher als später auf dem Londoner Chefsessel abzulösen. Die Kernfrage, um die sich alles dreht, ist, ob es May gelungen ist, der EU ein weiteres entscheidendes Zugeständnis im Problemfall Irland abzuringen. 
Um nach dem Brexit sichtbare Waren- und Zollkontrollen an der irisch-nordirischen Grenze zu verhindern, hatten sich London und Brüssel schon vor Wochen darauf geeinigt, dass das Vereinigte Königreich bis zum Abschluss eines Freihandelsvertrags in der Zollunion mit der EU verbleiben kann. Für May diente dies auch dazu, eine Sonderregel für Nordirland zu verschleiern, den sogenannten Backstop. Sprachlich ist die EU May dabei offenbar weit entgegengekommen, in der Sache aber weniger. Die EU besteht weiter auf diese Notfallregel, nach der Nordirland in Zollunion im Binnenmarkt bleiben soll, und die dann in Kraft treten soll, wenn bei den Verhandlungen über die künftigen Beziehungen keine bessere Lösung gefunden werden kann. Entscheidende Feuerprobe: Die Abstimmung im britischen Parlament London drängte jedoch stets darauf, dass es diese Auffangregeln einseitig beenden kann. Brüssel bestand umgekehrt darauf, dass dies nur mit seiner Zustimmung geschehen könne und dass das letzte Wort der bei Brexiteers so verhasste Europäische Gerichtshof haben müsse. Am Ende zäher Verhandlungen waren dies die letzten verbliebenen Streitpunkte. Mehrere EU-Länder zeigten sich bei den vergangenen Botschaftertreffen auch besorgt darüber, dass sich das Vereinigte Königreich in einer Zollunion Sonderrechte herausnehmen und EU-Standards etwa beim Umweltschutz oder der Sozialpolitik unterbieten könnte. Dazu kommt: Selbst wenn es May mit der nun getroffenen Vereinbarung gelingen sollte, ihr Kabinett ohne weitere nennenswerte Rücktritte hinter sich zu vereinen und selbst wenn ein EU-Sondergipfel Ende November den Deal absegnen sollte, steht die entscheidende Feuerprobe noch aus. "May hat keine Chance, irgendetwas durchs Parlament zu bekommen" Voraussichtlich Mitte Dezember würde May die Vereinbarung dem britischen Parlament zur Abstimmung vorlegen müssen. 320 Stimmen wird sie dort brauchen. 
Ihre eigene Partei kommt jedoch nur auf 318 Abgeordnete, die Premierministerin ist daher auf zehn Stimmen der ultranationalistischen nordirischen DUP angewiesen. Diese hat bereits erkennen lassen, dass sie keinem Deal zustimmen wird, der Nordirland nach dem Brexit einen Sonderstatus innerhalb des Königreichs zuweise. Zudem wird May in ihrer eigenen Partei von beiden Seiten massiv unter Druck gesetzt: Sture EU-Feinde lehnen im Prinzip jeden Kompromiss mit der EU ab, passionierte EU-Freunde hoffen, den Brexit noch komplett verhindern zu können. Dass ausreichend viele Abgeordnete der Opposition May notfalls zur Seite springen könnten, gilt als wenig wahrscheinlich. Am Dienstag gab sich einer der erbittertsten Brexit-Ultras, der konservative Ex-Staatsminister <NAME>, denn auch siegesgewiss. Dem SPIEGEL sagte er: "Die Premierministerin hat keine Chance, irgendetwas durchs Parlament zu bekommen.''' import httplib2 import json LOCAL_API_KEY = '<KEY>' # Aquire this by registering at the Calais site CALAIS_TAG_API = 'https://api.thomsonreuters.com/permid/calais' headers = { 'X-AG-Access-Token': LOCAL_API_KEY, 'content-type': 'text/raw', 'outputformat': 'application/json' } http = httplib2.Http() response, content = http.request(CALAIS_TAG_API, 'POST', headers=headers, body=text) jcontent = json.loads(content.decode()) # Parse the json return into a python dict jcontent["doc"]["meta"] jcontent["http://d.opencalais.com/comphash-1/559b47bc-c90a-3a9e-9ef1-6f71b9e4447a"] # Funktioniert nicht, da Pfad verändert for (k,v) in jcontent.iteritems(): try: print("Key: %s Type: %s Name: %s" % (k,v["name"],v["_type"])) except: print("No name present for entity %s" % k) # ## Aufgabe # - Von welchen Ländern und Unternehmen wird vor allem gesprochen?
14 Text und Machine Learning/2.3 Named Entities Recognition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 7. Conclusions and Future Considerations # # [index](../Index.ipynb) | [prev](./06.AnomalyDetection.ipynb) | [next](./08.08.Acknowledgements.ipynb) # Below are the key conclusions to the three research questions in this study: # # **1. What is the level of complexity, required to build a fast, and reliable object detection pipeline, using *IOT devices* and *Computer Vision*?** # # A reliable data collection stage manifested itself with a high complexity. $6$ months of image capture posed various challenges, and led to the following insights: # # - It is crucial to place the camera in the right location. It may require wiring the house with the ethernet cables, and an investment in Power Over Ethernet adapters. Camera units (and *IOT* devices) placed outside of the house, need to be monitored against environmental effects: direct exposure to sunlight, humidity, dust, dirt, insects and even birds. All of them can have a negative impact on the picture quality # - Multiple tasks performed on each frame from the camera, may introduce processing latency. Motion sensing (*Background Subtraction*) with suitable parameters, and fast object detector (*Yolo v2*), can eliminate this problem # - Smooth transmission of *High Definition* images to a web browser, can be achieved by using *web sockets* in a separate Python thread # - Software services need to start automatically when devices are rebooted, or when network connections are broken. Utilizing *Supervisor* Linux utility, and a proper network setup can minimize the loss in data # # **2. 
Given the dataset with collected images, can the future object counts be accurately predicted using *Machine Learning*?** # # Object counts for a given category (*Person* or *Vehicle*) can be predicted with relatively low error rates using Machine Learning models. # # This process requires a significant amount of image data extraction, cleaning and pre-processing. Numerous models of different types and complexity have been tested (ranging from *Linear Regression* through *Bi-Directional LSTM Neural Networks*). # # Given the evidence gathered in Chapter 5, there are two types of models, which can be successfully applied to make predictions: a probabilistic model (*Gaussian Process*), and a point estimate model (*Histogram-Based Gradient Boosting Regressor*). # # While Gaussian Processes have an advantage of providing uncertainty about the predictions, Gradient Boosting models are faster to train, and more robust to the object category selection. # # **3. Can *Anomaly Detection* algorithms assist in recognizing anomalous patterns in the object detection data?** # # Applying anomaly detection algorithms to the collected image data can generate useful results. # # #### Hourly threshold estimation # # Estimating a maximum number of objects per hour makes it possible to flag anomalies above that threshold. Each object category, like Person or Vehicle, is analyzed individually. # # The *probabilistic approach*, which utilizes a *gamma* distribution and a *Poisson* likelihood function, produces an optimal result and classifies $61$ out of $4140$ observations as anomalous. # # #### Raw image classification # # The second methodology applies an Auto Encoder Neural Network directly to raw image data. This technique is categorized as *Unsupervised Machine Learning*, as the historical images are not labeled. In contrast with *hourly threshold estimation*, multiple object classes are considered inside a single model. 
# # The inner workings of this method are to search for images which differ the most from the others, using raw pixel data. This technique presents two opportunities: # # - An alert can be triggered, if an incoming image deviates outside of a threshold (calculated using *mean squared error*). In an experiment, a gathering of people outside of the house was successfully flagged as an anomalous event # - Time spent on manual image analysis can be significantly reduced, by sorting an image collection using the anomaly threshold, in a descending order. Additionally, this approach should lower the risk of missing an important event # # In a model evaluation stage, a hand-labeled dataset with $30$ images was used. The best model was able to classify $9$ out of $15$ anomalies correctly. It obtained a *Recall score* of $0.6$, while not sacrificing the *F1 score* of $0.72$. # # **Recommendations for future work** # # By developing a Minimum Viable Product, incorrect assumptions and potential weaknesses can be quickly identified in the core features. The *MVP* should also include a basic user interface, with a good representation of forecast and anomaly data. # # Further future recommendations are summarized below: # # - Modern AI systems should emphasize ethics and protect privacy. Privacy mode should at least blur people's faces, or even full silhouettes, if required # - To prove that the system is truly generalizable, it should ideally be deployed in another household # - Anomaly detection based on hourly threshold estimation, can be significantly enhanced, by incorporating forecast data. 
Threshold estimated via forecast predictions, would carry additional information, like day of the week, and weather conditions # - Portability might potentially be strengthened, by allowing to consume an *RTSP* stream, instead of only *Message Queues* # - Security can be enhanced by an addition of waterproof casing, a camera with night vision mode, or even another camera looking at the same scene, but from a different angle # - Current strategy for counting objects is rather basic, and uses *Euclidean Distance*. To allow for more advanced object tracking, *Kalman Filter* could be utilized # - In the raw image classification, *Variational auto encoder* could replace the vanilla version. It would prevent overfitting, and ensure that the properties of latent space, optimize generative process # - New versions of Python libraries could improve performance, and reduce resource consumption # - Overall cost of the hardware, could potentially be significantly lowered, assuming that the *on-device learning* alone can achieve accurate results, and high performance # - Higher volume of collected data, would open up the possibility, to test other forecasting models, which can use periodicity and seasonality components # - After AI is deployed in production, it should be able to adopt itself, to the changes in the environment. This can be achieved by utilizing the most recent subset of detections for training data # # **Final remark** # # Use of AI in the Home Monitoring setting, is still quite underutilized. However, there is a potential for further adoption, due to relatively low hardware costs, and exponential progress in the fields of Computer Vision and Machine Learning. # # While building modern AI systems, it is Engineers' responsibility to prioritize ethics, transparency and explainability. These factors will future-proof the design, against potential changes in law. 
# # The proposed system can play an important role in enhancing the security of monitored objects, by utilizing valuable insights drawn from the collected data. # [index](../Index.ipynb) | [prev](./06.AnomalyDetection.ipynb) | [next](./08.08.Acknowledgements.ipynb)
Chapters/07.Conclusions.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0 # language: julia # name: julia-0.5 # --- # ## PART A # # In the facility location problem, the goal is to optimally place facilities so as to minimize transportation costs from the facilities to the customers. In practice, one rarely knows the demand of customers with a high degree of accuracy. In addition, the transportation costs themselves may vary with time and are subject to uncertainty. In this version of the facility location problem, we assume that all data is known. # # In the first part of this problem, the goal is to locate one or more facilities out of five possible sites, which we designated as $F_1,F_2,F_3,F_4$, and $F_5$. The cost of selecting a facility is \$40. Coincidentally, there are also five customers that need to be serviced. We refer to them as $C_1,C_2,C_3,C_4$ and $C_5$. The delivery cost from each possible facility site to each of the five customers is known. The cost of satisfying (all of) the demand of Customer $C_j$ from site $F_i$ is $d_{ij}$. (Note that the first index is for the site and the second is for the customer.) The delivery cost from all possible sites to all customers are given in Table 1 below. # # | | Customer $C_1$ | Customer $C_2$ | Customer $C_3$ | Customer $C_4$ | Customer $C_5$ | # |------------|---------------:|---------------:|---------------:|---------------:|---------------:| # | Site $F_1$ | 30 | 15 | 59 | 78 | 27 | # | Site $F_2$ | 50 | 42 | 25 | 30 | 53 | # | Site $F_3$ | 64 | 14 | 30 | 20 | 62 | # | Site $F_4$ | 46 | 19 | 66 | 48 | 11 | # | Site $F_5$ | 19 | 40 | 60 | 31 | 27 | # # The decision variables for this problem are as follows: # # - $y_j=1$ if a facility is located at site $F_j$. Otherwise, $y_j=0$. # - $0≤x_{ij}≤1$ represent the fraction of demand from $C_j$ satisfied by facility $F_i$. 
# # The cost associated with customer $C_1$ would be # # $$d_{11}x_{11}+d_{21}x_{21}+d_{31}x_{31}+d_{41}x_{41}+d_{51}x_{51}$$ # # One would also need to add constraints that ensure that $x_{ij}=0,\forall j=1,\ldots,5$ whenever $y_i=0$, which means we cannot use site $F_i$ to serve any customer $C_j$ if we don’t locate a facility at site $i$; (2) all the customers’ demands need to be satisfied. # # Formulate this problem as a deterministic optimization problem in which the objective is to minimize the expected total cost which is the sum of the facility opening cost and the expected delivery cost. # # Choose the correct objective function from below. # ## PART B # # Assume that a facility i is either used to serve a customer j or it is not, rather than a fractional amount. Write all the constraints of the given problem. # ## PART C # # Solve the deterministic facility location problem you formulated above using Julia/JuMP. Which facilities are open? Select both of them. # ## PART D # # What will be the total minimum cost? # ## PART E # # In the second part of the problem, we will consider the stochastic version. The delivery cost of satisfying customers from the facilities is uncertain. It depends on which future scenario occurs. There are five possible scenarios. Below is the probability of each scenario: $p_1,p_2,\ldots,p_5$. # # | | Scenario 1 | Scenario 2 | Scenario 3 | Scenario 4 | Scenario 5 | # |-------------|-----------:|-----------:|-----------:|-----------:|-----------:| # | Probability | 0.15 | 0.15 | 0.25 | 0.2 | 0.25 | # # We will model this as a 2-stage stochastic optimization problem. In the first stage, the decision is which of the five facilities to select. Similarly, we let $y_j=1$ if a facility is located at site $F_i$. Otherwise, $y_j=0$. The cost of opening up a facility is \$40. # # After the facilities are selected, the decision maker learns which of the five scenarios occurs. We let the scenarios be designated as $S_1,S_2,S_3,S_4$ and $S_5$. 
We let $d_{ij}^k$ denote the cost of satisfying demand of customer $C_j$ using facility $F_i$ under scenario $S_k$. Note that the first subscript corresponds to the facility and the second subscript corresponds to the customer. The data of $d_{ij}^k$ is stored in pset6_p1_data.xlsx # # We let $x_{ij}^k=1$ if customer $C_j$ is served by facility $F_i$ under scenario $S_k$. Otherwise, it is $0$. # # Model the problem of minimizing the following sum: the cost of opening facilities plus the expected cost of satisfying demand for customers from the open facilities # # Choose the correct objective function from below. # ## PART F # # Choose all necessary constraint(s) from below. # ## PART G # # Solve the stochastic facility location problem you formulated above using Julia/JuMP. Complete PS6_p1_partfg.jl to solve the problem. We already provide data in it. # # Which facilities will be opened under the optimal solution? # ## PART H # # What will be the total minimum cost?
week06/Problem 3 - Facility Location Problem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Extract from JSON and XML
#
# You'll now get practice extracting data from JSON and XML. You'll extract the same population data from the previous exercise, except the data will be in a different format.
#
# Both JSON and XML are common formats for storing data. XML was established before JSON, and JSON has become more popular over time. They both tend to be used for sending data via web APIs, which you'll learn about later in the lesson.
#
# Sometimes, you can obtain the same data in either JSON or XML format. That is the case for this exercise. You'll use the same data except one file is formatted as JSON and the other as XML.
#
# There is a solution file for these exercises. Go to File->Open and click on 2_extract_exercise_solution.ipynb.

# # Extract JSON and JSON Exercise
#
# First, you'll practice extracting data from a JSON file. Run the cell below to print out the first line of the JSON file.

# +
###
# Run the following cell.
# This cell loads a function that prints the first n lines of
# a file.
#
# Then this function is called on the JSON file to print out
# the first line of the population_data.json file
###

def print_lines(n, file_name):
    """Print the first n lines of file_name.

    Each printed line keeps its trailing newline, so print() adds an extra
    blank line after every file line (same output as the original version).
    """
    # BUG FIX: the file handle was opened and closed manually; a context
    # manager guarantees the file is closed even if printing raises.
    with open(file_name) as f:
        for _ in range(n):
            print(f.readline())


# Guard the demo call so running this cell outside the exercise folder
# reports a clear message instead of crashing with FileNotFoundError.
try:
    print_lines(1, 'population_data.json')
except FileNotFoundError:
    print('population_data.json not found - run this cell from the exercise folder')
# -

# The first "line" in the file is actually the entire file. JSON is a compact way of representing data in a dictionary-like format. Luckily, pandas has a method to [read in a json file](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_json.html).
# # If you open the link with the documentation, you'll see there is an *orient* option that can handle JSON formatted in different ways: # ``` # 'split' : dict like {index -> [index], columns -> [columns], data -> [values]} # 'records' : list like [{column -> value}, ... , {column -> value}] # 'index' : dict like {index -> {column -> value}} # 'columns' : dict like {column -> {index -> value}} # 'values' : just the values array # ``` # # In this case, the JSON is formatted with a 'records' orientation, so you'll need to use that value in the read_json() method. You can tell that the format is 'records' by comparing the pattern in the documentation with the pattern in the JSON file. # # Next, read in the population_data.json file using pandas. # + # TODO: Read in the population_data.json file using pandas's # read_json method. Don't forget to specific the orient option # store the rsults in df_json import pandas as pd df_json = pd.read_json('population_data.json', orient='records') # - # TODO: Use the head method to see the first few rows of the resulting # dataframe df_json.head() # Notice that this population data is the same as the data from the previous exercise. The column order might have changed, but the data is otherwise the same. # # Other Ways to Read in JSON # # Besides using pandas to read JSON files, you can use the json library. Run the code cell below to see an example of reading in JSON with the json library. Python treats JSON data like a dictionary. # + import json # read in the JSON file with open('population_data.json') as f: json_data = json.load(f) # print the first record in the JSON file print(json_data[0]) print('\n') # show that JSON data is essentially a dictionary print(json_data[0]['Country Name']) print(json_data[0]['Country Code']) # - # # Extract XML # # Next, you'll work with the same data except now the data is in xml format. Run the next code cell to see what the first fifteen lines of the data file look like. 
print_lines(15, 'population_data.xml') # XML looks very similar to HTML. XML is formatted with tags with values inside the tags. XML is not as easy to navigate as JSON. Pandas cannot read in XML directly. One reason is that tag names are user defined. Every XML file might have different formatting. You can imagine why XML has fallen out of favor relative to JSON. # ### How to read and navigate XML # # There is a Python library called BeautifulSoup, which makes reading in and parsing XML data easier. Here is the link to the documentation: [Beautiful Soup Documentation](https://www.crummy.com/software/BeautifulSoup/) # # The find() method will find the first place where an xml element occurs. For example using find('record') will return the first record in the xml file: # # ```xml # <record> # <field name="Country or Area" key="ABW">Aruba</field> # <field name="Item" key="SP.POP.TOTL">Population, total</field> # <field name="Year">1960</field> # <field name="Value">54211</field> # </record> # ``` # # The find_all() method returns all of the matching tags. So find_all('record') would return all of the elements with the `<record>` tag. # # Run the code cells below to get a basic idea of how to navigate XML with BeautifulSoup. To navigate through the xml file, you search for a specific tag using the find() method or find_all() method. # # Below these code cells, there is an exercise for wrangling the XML data. 
# + # import the BeautifulSoup library from bs4 import BeautifulSoup # open the population_data.xml file and load into Beautiful Soup with open("population_data.xml") as fp: soup = BeautifulSoup(fp, "lxml") # lxml is the Parser type # + # output the first 5 records in the xml file # this is an example of how to navigate with BeautifulSoup i = 0 # use the find_all method to get all record tags in the document for record in soup.find_all('record'): # use the find_all method to get all fields in each record i += 1 for record in record.find_all('field'): print(record['name'], ': ' , record.text) print() if i == 5: break # - # # XML Exercise (Challenge) # # Create a data frame from the xml file. This exercise is somewhat tricky. One solution would be to convert the xml data into dictionaries and then use the dictionaries to create a data frame. # # The dataframe should have the following layout: # # | Country or Area | Year | Item | Value | # |----|----|----|----| # | Aruba | 1960 | Population, total | 54211 | # | Aruba | 1961 | Population, total | 55348 | # etc... # # Technically, extracting XML, transforming the results, and putting it into a data frame is a full ETL pipeline. This exercise is jumping ahead in terms of what's to come later in the lesson. But it's a good chance to familiarize yourself with XML. # + # %%time # TODO: Create a pandas data frame from the XML data. # HINT: You can use dictionaries to create pandas data frames. 
# HINT: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.from_dict.html#pandas.DataFrame.from_dict # HINT: You can make a dictionary for each column or for each row # HINT: Modify the code from the previous code cell # output the first 5 records in the xml file # this is an example of how to navigate with BeautifulSoup with open("population_data.xml") as fp: soup = BeautifulSoup(fp, "lxml") # lxml is the Parser type # use the find_all method to get all record tags in the document data_dictionary = {'Country or Area':[], 'Year':[], 'Item':[], 'Value':[]} for record in soup.find_all('record'): for record in record.find_all('field'): data_dictionary[record['name']].append(record.text) df = pd.DataFrame.from_dict(data_dictionary) df = df.pivot(index='Country or Area', columns='Year', values='Value') df.reset_index(level=0, inplace=True) # - df # # Conclusion # # Like CSV, JSON and XML are ways to format data. If everything is formatted correctly, JSON is especially easy to work with. XML is an older standard and a bit trickier to handle. # # As a reminder, there is a solution file for these exercises. You can go to File->Open and then click on 2_extract_exercise.
lessons/ETLPipelines/2_extract_exercise/2_extract_exercise-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.10 64-bit (''aml_template'': conda)' # language: python # name: python361064bitamltemplatecondaa8d5916ec83f44f8921a274321300a27 # --- # + [markdown] Collapsed="false" colab_type="text" id="jYysdyb-CaWM" # # MNIST Fashion Image Classification # + [markdown] Collapsed="false" # ## Import required packages # + Collapsed="false" import os import gzip import numpy as np import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt # + [markdown] Collapsed="false" # ## Load data # + Collapsed="false" def load_fashion_mnist_data(path): # train labels with gzip.open(os.path.join(path, "train-labels-idx1-ubyte.gz"), 'rb') as label_path: train_labels = np.frombuffer(label_path.read(), dtype=np.uint8, offset=8) # train images with gzip.open(os.path.join(path, "train-images-idx3-ubyte.gz"), 'rb') as image_path: train_images = np.frombuffer(image_path.read(), dtype=np.uint8, offset=16).reshape(len(train_labels), 28, 28) # test labels with gzip.open(os.path.join(path, "t10k-labels-idx1-ubyte.gz"), 'rb') as label_path: test_labels = np.frombuffer(label_path.read(), dtype=np.uint8, offset=8) # test images with gzip.open(os.path.join(path, "t10k-images-idx3-ubyte.gz"), 'rb') as image_path: test_images = np.frombuffer(image_path.read(), dtype=np.uint8, offset=16).reshape(len(test_labels), 28, 28) # labels labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] return train_images, train_labels, test_images, test_labels, labels mnist_fashion_data_folder = "mnist_fashion/01_data" train_images, train_labels, test_images, test_labels, labels = load_fashion_mnist_data(mnist_fashion_data_folder) # + [markdown] Collapsed="false" colab_type="text" id="Brm0b_KACaWX" # ## Explore data # # + Collapsed="false" print("Training Set") 
print("============")
print(f"Shape  : {train_images.shape}")
print(f"Labels : {sorted(set(train_labels))}")
print("")
print("Test Set")
print("============")
print(f"Shape  : {test_images.shape}")
print(f"Labels : {sorted(set(test_labels))}")
# -

# Show one raw sample; the colorbar makes the 0-255 pixel range visible.
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()

# + [markdown] Collapsed="false" colab_type="text" id="ES6uQoLKCaWr"
# ## Preprocess data

# + Collapsed="false"
# Scale pixel values from [0, 255] into [0, 1] before training.
train_images = train_images / 255.0
test_images = test_images / 255.0

# + Collapsed="false"
# Preview the first 25 training images with their class names.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(labels[train_labels[i]])
plt.show()

# + [markdown] Collapsed="false" colab_type="text" id="59veuiEZCaW4"
# ## Build model

# + Collapsed="false"
# Simple dense classifier: flatten 28x28 -> 128 relu units -> 10-way softmax.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])

# sparse_categorical_crossentropy because labels are integer class indices.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# + [markdown] Collapsed="false" colab_type="text" id="qKF6uW-BCaW-"
# ## Train model

# + Collapsed="false"
model.fit(train_images, train_labels, epochs=10)

# + [markdown] Collapsed="false"
# ## Evaluate model

# + Collapsed="false"
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)

# + [markdown] Collapsed="false"
# ## Use model for predictions

# + Collapsed="false"
def plot_image(i, predictions_array, true_label, img):
    """Plot test image *i* with its predicted class, confidence and truth.

    *predictions_array* is the probability vector for this single image;
    *true_label* and *img* are the full arrays and are indexed with *i* here.
    """
    predictions_array, true_label, img = predictions_array, true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    # blue label = correct prediction, red label = wrong prediction
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'

    plt.xlabel("{} {:2.0f}% ({})".format(labels[predicted_label], 100*np.max(predictions_array), labels[true_label]), color=color)


def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class probabilities for test image *i*.

    The predicted class bar is colored red first and the true class bar blue
    afterwards, so a correct prediction shows a single blue bar.
    """
    predictions_array, true_label = predictions_array, true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)

    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')


predictions = model.predict(test_images)

# Plot the first 15 test images: image on the left, probability bars on the right.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
mnist_fashion/02_exploration/mnist_fashion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/HuiWangCJLU/text_mining/blob/main/investor_sentiment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="R7mXAtM0speM"
# NOTE(review): bare 'pip install' relies on the notebook shell handling it;
# '%pip install' is the portable form - confirm this runs in your environment.
pip install pyyaml ua-parser user-agents fake-useragent

# + [markdown] id="lhvAbuRpnuKP"
# Crawler for stock comments, working from the page source code (for
# convenience the output file name was not changed).
# NOTE(review): the markdown said Eastmoney (东方财富), but the URL below
# targets sina.com.cn research reports - confirm the intended source.

# + colab={"base_uri": "https://localhost:8080/"} id="rg1L8hyrowhj" outputId="13324cbf-556f-4886-9069-e35c9c379164"
# Scrape stock comments / research-report listings page by page.
from lxml import etree
# lxml is an XML/HTML parser; etree.HTML() turns an html string into an
# element object (auto-repairing the markup), and XPath syntax can then be
# used to locate specific elements and node information.
import requests
from fake_useragent import UserAgent  # random request headers (anti-scraping countermeasure)
import time  # for request delays (imported but not used below)
import csv
import pandas as pd

f = open('白酒吧股评.csv', 'w', encoding='utf-8-sig', newline="")  # create the output file object
# 'w' overwrites, 'a' appends, 'r' opens read-only
# newline="" avoids the extra blank line between written rows
csv_write = csv.DictWriter(f, fieldnames=['author','title','belong', 'date'])
csv_write.writeheader()

all_read = []
all_title = []
all_author = []
all_date = []
for i in range(1,100):
    print('##################正在抓取第{}页的数据#######################'.format(i))
    url = 'https://stock.finance.sina.com.cn/stock/go.php/vReport_List/kind/industry/index.phtml?p={}'.format(str(i))
    headers = {
        'User-Agent': UserAgent().random
    }
    response = requests.get(url=url, headers=headers)
    html = etree.HTML(response.text)
    # response.text is the page source; etree.HTML builds an XPath-queryable
    # parse object and auto-repairs the HTML text.
    # The XPath expressions below locate specific elements by tag/attribute.
    read = html.xpath('//tr//td[6]//text()')
    title1 = html.xpath('//td[contains(@class,"tal f14")]//a[contains(@target,"_blank")]')
    title = []
    # NOTE(review): this inner loop reuses the outer page counter name 'i';
    # harmless here (the outer loop reassigns it), but rename for clarity.
    for i in title1:
        title.append(i.attrib.get('title'))
        # .attrib is the lxml element's attribute mapping; the 'title'
        # attribute holds the full report title. (The original comment here
        # described the unrelated Windows 'attrib' command.)
    author = html.xpath('//a[contains(@target,"_blank")]//div[contains(@class,"fname05")]//text()')
    date = html.xpath('//tr//td[4]//text()')
    all_read += read
    all_title += title
    all_author += author
    all_date += date

# zip() pairs corresponding elements of the four collected lists into tuples
# (returned lazily, which saves memory), so each CSV row lines up.
for read,title,author,date in zip(all_read,all_title, all_author,all_date):
    # NOTE(review): keys and values look misaligned - 'author' receives the
    # column-6 text (read) and 'belong' receives the author list; verify the
    # intended field mapping against the fieldnames above.
    data_dict = {'author':read,'title': title,'belong':author, 'date': date}
    csv_write.writerow(data_dict)  # save as a csv file

# + [markdown] id="F-G7PgnHoD3d"
# Two throwaway lines of test code

# + id="ql5Iz8cWumW9"
author = html.xpath('//a[contains(@target,"_blank")]//div[contains(@class,"fname05")]//text()')

# + id="pATG6CilJ17-"
title1 = html.xpath('//td[contains(@class,"tal f14")]//a[contains(@target,"_blank")]/@title')

# + id="ZRg_c59xsp2Y"
# Data preprocessing
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# (translated) Matlab is a common tool for image work such as restoration;
# with the rise of Python and deep learning, mixing Python and Matlab became
# a need.
import matplotlib as mpl  # low-level Python plotting library, mainly for data-visualization charts
# Make matplotlib render Chinese characters correctly
plt.rcParams['font.sans-serif'] = ['Simhei']
f = open('白酒吧股评.csv',encoding='utf-8')
df=pd.read_csv(f)

# + colab={"base_uri": "https://localhost:8080/"} id="G7B1vno6SYQA" outputId="e74aaeca-0120-4b0c-d72a-24d979a118ba"
df.shape

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="IRuobfmmSn7e" outputId="03b580bf-a5b4-412c-8537-e0cc71c48769"
df

# + colab={"base_uri": "https://localhost:8080/", "height": 596} id="w5fPWrg8TJi4" outputId="436ecb9a-962f-4bdf-d6b5-85f445bec265"
df.tail(150)

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="pJhYP87CTfkc" outputId="bc41c01a-067a-4032-d70d-a0f01a900445"
# Keep only the date part (drop the time-of-day component).
df_new=df
df_new["date"] = df_new["date"].apply(lambda x: str(x).split(" ")[0])
df_new
#df_new.to_csv('预处理后的股评')  # write to a csv file

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="Ric5RxfmX0Rk" outputId="476ff4f2-cd4e-4440-fadd-14d05ad32b01"
# Keep rows from 2021-11-01 onwards.
df1 = df_new[df_new["date"] >= "2021-11-01"]
df1

# + id="fKGsv04STSKT"
# To save a new csv file: df1.to_csv("白酒_new.csv",index=False,encoding="utf-8")
# index=False omits the index column from the output
# df.to_csv writes to a csv file

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="kSiILqENTmbJ" outputId="55d253c4-56fc-4675-f1c1-b05468e5f3f5"
df1

# + id="GVvWoxjobDxb"
dd = df1['title']
dd.to_csv('title.txt', sep='\t', index=False)

# + [markdown] id="iVXfqaCPoSL9"
# Load the data and run keyword analysis

# + id="bcWMLHWFbAme"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import jieba.analyse

zhetian = pd.read_table(r"title.txt")

# + colab={"base_uri": "https://localhost:8080/", "height": 272} id="p44fuqtgg2zx" outputId="fcd46b11-6a88-4657-b0d6-d3cd1ff5dd34"
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba.analyse as anls
import re
from collections import Counter

# 1. read the text
text = open("title.txt", 'r', encoding='utf-8').read()
# load the stop-word list
stopwords = [line.strip() for line in open('stopword.txt', encoding='UTF-8').readlines()]  # list type
# tokenize (stop words not yet removed)
text_split = jieba.cut(text)  # tokenization result before stop-word removal
# tokenization result with stop words removed, list type
text_split_no = []
for word in text_split:
    if word not in stopwords:
        text_split_no.append(word)
#print(text_split_no)

text_split_no_str =' '.join(text_split_no)  # join the list into a str

# extract keywords based on TF-IDF
print("基于TF-IDF提取关键词结果:")
keywords = []
for x, w in anls.extract_tags(text_split_no_str, topK=20, withWeight=True):
    keywords.append(x)  # list made of the top-20 keywords
keywords = ' '.join(keywords)  # convert to str
print(keywords)

# draw the word cloud
wordcloud = WordCloud(font_path="simsun.ttf",background_color="white",width=1000,height=880).generate(keywords)  # keywords is a str
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")

# + [markdown] id="gXr1EZASoc0c"
# For testing... which one looks better

# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="1qAcq49zkZK5" outputId="42da3450-d736-4fda-93d2-e5d0a433735f"
wordcloud = WordCloud(font_path="simsun.ttf",background_color="white",width=1000,height=880).generate(keywords)  # keywords is a str
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
investor_sentiment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn import preprocessing
import tensorflow as tf
# %matplotlib inline
import sklearn
import seaborn as sns
from sklearn.preprocessing import StandardScaler

# Load the EEG delta-power dataset (45 channel columns + 1 label column).
df=pd.read_csv('45 channel 1sec deltapower with labels.csv')
print('Number of rows and columns:', df.shape)
df.head(5)
# -

# # # **Labelling COLUMNS**

print(df.columns)

# Rename the original (numeric-looking) column headers to short letter names,
# with the final column renamed to 'labels'.
# NOTE(review): the key '6.9551' appears twice below ('oo' and then 'pp');
# Python keeps only the last duplicate in a dict literal, so the 'oo'
# mapping is silently lost and one source column stays unrenamed - verify
# the intended source column names.
new_names = {'1.7684':'a', '4.5086':'b', '6.5071':'c', '3.7271':'cd', '3.0514':'d','1.4297':'de','3.9083':'e','8.2294':'f', '2.8497':'g', '1.8704':'l', '1.5071':'m', '6.7917':'n', '2.6181':'o', '1.9082':'p', '2.0764':'q', '0.33154':'r', '0.058108':'s', '1.3143':'t', '2.0596':'u', '0.46565':'v', '0.18181':'w', '0.0021533':'x', '0.91821':'y', '1.063':'z', '2.6008':'aa', '2.8307':'bb', '5.3624':'cc', '31.953':'dd', '3.8214':'ee', '1.869':'ff', '1.7548':'gg', '6.5689':'hh', '2.8358':'ii', '1.9051':'jj', '6.5849':'kk', '7.843':'ll', '0.011987':'mm', '1.4325':'nn', '6.9551':'oo', '6.9551':'pp', '0.047714':'qq', '1.6717':'rr','0.23703':'j','0.03289':'k','0.78239':'h','1.8171':'i','1':'labels'}
df= df.rename(index=str, columns=new_names)

# ***Checking any null Values***

df.info()

# +
pd.set_option('display.max_columns', None)
print(df.describe())
# -

# ***spliting the file in the data and target class***

data = df.iloc[:,:-1].values.tolist()   # all feature columns
target = df.iloc[:,-1].tolist()         # last column = class labels

# +
print(data)
print(target)
# -

# **heatmap**

col_list = df.columns
fig, ax = plt.subplots(figsize=(20,20))
sns.heatmap(df[col_list].corr(),square=True,linewidths=1)
plt.title('Correlation of Variables')

# **Normalizing**

from sklearn import preprocessing
from sklearn.model_selection import train_test_split
x =df[df.columns[:46]]
y =df.labels
x_train, x_test, y_train, y_test = train_test_split(x, y , train_size = 0.7, random_state = 90)

# **Select numerical columns which needs to be normalized**

train_norm = x_train[x_train.columns[0:20]]
test_norm = x_test[x_test.columns[0:20]]

# **Normalize Training Data**

std_scale = preprocessing.StandardScaler().fit(train_norm)
x_train_norm = std_scale.transform(train_norm)

# **Converting numpy array to dataframe**

training_norm_col = pd.DataFrame(x_train_norm, index=train_norm.index, columns=train_norm.columns)
x_train.update(training_norm_col)
print (x_train.head())

# **Normalize Testing Data by using mean and SD of training set**

x_test_norm = std_scale.transform(test_norm)
testing_norm_col = pd.DataFrame(x_test_norm, index=test_norm.index, columns=test_norm.columns)
x_test.update(testing_norm_col)
# NOTE(review): this prints x_train again; x_test.head() was probably intended.
print (x_train.head())

# **Support vector machine**

import numpy
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score

# +
def svm_classifier():
    # Train and evaluate an RBF-kernel SVM on the module-level data/target.
    # NOTE(review): this uses the global, un-normalized `data`/`target` and
    # performs its own 40/60 split with internal scaling, ignoring the
    # normalized x_train/x_test split built above - confirm which pipeline
    # is intended.
    X = data
    y = target
    # Split the data into training/testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6, random_state=42)
    # Feature Scaling (fit on train, apply to test)
    from sklearn.preprocessing import StandardScaler
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)
    # SVM Classifier
    clf = SVC(kernel = 'rbf', random_state = 50)
    clf.fit(X_train, y_train)
    y_predict = clf.predict(X_test)
    cm = confusion_matrix(y_test, y_predict)
    print(cm)
    print("Accuracy score : ")
    print(accuracy_score(y_test, y_predict)*100)

if __name__ == '__main__':
    svm_classifier()
# -

# NOTE(review): bare 'pip install' only works when the notebook shell handles
# it; '%pip install' is the portable form.
pip install --force-reinstall tensorflow

# **sequential model**

# +
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Small dense network: 46 input features -> 64 -> 64 -> 1 sigmoid output.
model = Sequential()
model.add(Dense(64, input_dim=46, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.fit(x_train, y_train, epochs=25, batch_size=128) score = model.evaluate(x_test, y_test, batch_size=128) # - # **LSTM MODELLING** # + from keras.models import Sequential from keras.layers import Dense, Dropout from keras.layers import Embedding from keras.layers import LSTM max_features = 1024 model = Sequential() model.add(Embedding(max_features, output_dim=256)) model.add(LSTM(128)) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.fit(x_train, y_train, batch_size=2, epochs=10) score = model.evaluate(x_test, y_test, batch_size=16) # -
45 channel_1sec_deltapower.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Dask benchmark harness: spin up a local four-worker cluster, run one
# configurable dataframe operation, and write the result as a single CSV.
# The `%%time` magic reports the wall time of the whole cell.

# +
# %%time
from dask.distributed import Client
client = Client(n_workers=4)

import dask.dataframe as dd
import pandas as pd
import os
import dask

# Pick the input dataset — swap the path to benchmark a different size.
filename = os.path.join('../clojure-datasets/data-Compustat-lohi.csv') # 1.8 M dataset
#filename = os.path.join('../clojure-datasets/data-Compustat-x2.csv') # 3.6 M dataset
#filename = os.path.join('../clojure-datasets/data-CRSP.csv') # 80 M dataset
#crsp_filename = os.path.join('../clojure-datasets/CRSP-extract.csv') # 80 M dataset

df = dd.read_csv(filename, dtype={'exchg': 'float64', 'sic': 'float64'})
#other = dd.read_csv(crsp_filename)
#ddf = dd.from_pandas(df, npartitions=10)

# =================== Change this part to test speed ====================== #
# Uncomment exactly one variant below to measure it.

# element-wise operations
df['new_col'] = df['datacqtr'] + 20 # Compustat
#df['new_col'] = df['PRC'] + 20 # CRSP

# row-wise
#df = df[df.datacqtr > 1000.0]

# aggregation
#df = df.datacqtr.max()

# groupby aggregate
#df = df.groupby(df.conm).datafqtr.max()

# joins
#df = df.join(other, how='left') # left join
#df = df.join(other, how='right') # left join
#df = df.join(other, how='inner') # inner join
# ========================================================================= #

#df.to_csv('./output/data-Compustat-output-*.csv') # output as separate csv files
df.to_csv('dask_output.csv', single_file=True) # output as a single file
# -
benchmark/.ipynb_checkpoints/dask-benchmark-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: soccerdata
#     language: python
#     name: soccerdata
# ---

# + nbsphinx="hidden"
import pandas as pd

# Render every dataframe column in the docs output.
pd.set_option('display.max_columns', None)

# + nbsphinx="hidden"
# Quiet, cache-free runs so the docs build is reproducible.
# %env SOCCERDATA_LOGLEVEL=ERROR
# %env SOCCERDATA_NOCACHE=True
# %env SOCCERDATA_NOSTORE=True
# -

import soccerdata as sd

# # Match History

# Create a scraper for the 2020/21 English Premier League and show its docs.
mh = sd.MatchHistory(leagues="ENG-Premier League", seasons=2021)
print(mh.__doc__)

# ## Historic match results and betting odds

hist = mh.read_games()
hist.head()
docs/datasources/MatchHistory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="5iEWwOxUwKkL"
# # **Water Cloud Model calibration for soybean**
# Input: Sentinel-1 VV-VH backscatter intensities (extracted by points using: GEE)
#
# Input: Load modified excel sheet with backscatter intensities and in-situ data pairs
#
# Output: LUT for a specific channel VV or VH

# + id="hGfXpuOjSIFJ"
# Install the PyDrive wrapper & import libraries.
# This only needs to be done once per notebook.
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate against Google Drive so the calibration workbook can be fetched.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Uploaded soybean_calibration.xlsx in google drive and copy the id
# Example: https://drive.google.com/file/d/1ZOa0DYqRiXpKKKXwGIIizioI6xr7qi43/view?usp=sharing
file_id = '1ZOa0DYqRiXpKKKXwGIIizioI6xr7qi43'
downloaded = drive.CreateFile({'id': file_id})

# + id="R-wnJft3gtil"
# Download the file to a local disk as 'soybean_calibration.xlsx'.
downloaded.GetContentFile('soybean_calibration.xlsx')

# + colab={"base_uri": "https://localhost:8080/"} id="DyO0dvOrUAh4"
# Here it is --
# !ls -lha soybean_calibration.xlsx

# + id="K4Mf12h-g22q"
# Now, we can use pandas read_excel after installing the excel importer.
# !pip install -q xlrd
import pandas as pd
df = pd.read_excel('soybean_calibration.xlsx')
df

# + id="D67FaEhUh-91"
# import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
from scipy.optimize import differential_evolution

# + id="JFyEUobIiQoi"
# Pandas dataframe to matrix conversion
Y=df.values;

# Incidence angle, degrees -> radians.
# Fix: the original used the truncated constant 3.1415 (thr = th*3.1415/180),
# which put a small systematic error into every cos(thr) term below; use an
# exact conversion instead.
th=30;
thr=np.deg2rad(th);

y=Y[:,5];  ## Backscatter VV (observed sigma^0)
x1=Y[:,0]; ## PAI (plant area index)
x2=Y[:,3]; ## Soil moisture

#------------------------------------------------------------------------------
## Linear scale function-WCM
def fitFunc(X,a,b,c,d,e):
    """Water Cloud Model (linear scale).

    X is the pair (x1, x2) = (PAI, soil moisture); a-e are the model
    coefficients being calibrated. Returns the modeled backscatter: a
    vegetation term plus a soil term attenuated by the canopy.
    """
    x1,x2=X
    return (a*(np.power(x1,e))*np.cos(thr)*(1-np.exp((-2)*b*np.power((x1),1)/np.cos(thr))))+((d*np.exp(c*x2))*np.cos(thr)*np.exp((-2)*b*np.power((x1),1)/np.cos(thr)))
##-----------------------------------------------------------------------------
def Error(parameterTuple):
    """RMSE between observed backscatter and the WCM prediction.

    Objective minimized by the genetic algorithm below.
    Fix: the original expression np.sqrt(np.sum(...).mean()) applied .mean()
    to a scalar (a no-op), so it actually returned sqrt(SSE), not the RMSE
    its comment claimed. sqrt(SSE) and RMSE share the same minimizer, so
    calibration results are unaffected; the value now matches the name.
    """
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    return np.sqrt(np.mean((y - fitFunc((x1, x2), *parameterTuple)) ** 2))

def generate_Initial_Parameters():
    """Estimate starting values for curve_fit via differential evolution."""
    ## min and max used for bounds
    parameterBounds = []
    parameterBounds.append([0,1.1])    # parameter bounds for a
    parameterBounds.append([0,0.5])    # parameter bounds for b
    parameterBounds.append([-0.5,1])   # parameter bounds for c
    parameterBounds.append([-0.5,1])   # parameter bounds for d
    parameterBounds.append([-1.5,1])   # parameter bounds for e

    ## "seed" the numpy random number generator for repeatable results
    ## https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.differential_evolution.html
    result = differential_evolution(Error, parameterBounds, strategy='best1bin',polish=True,seed=3,init='latinhypercube')
    return result.x

## generate initial parameter values
initialParameters = generate_Initial_Parameters()
##-----------------------------------------------------------------------------
## OR directly define initial parameters
#initialParameters=[0.2,1.357,2,4,-1.965]
##-----------------------------------------------------------------------------

# Refine the GA starting point with Levenberg-Marquardt least squares.
fitParams,fitCovariances = curve_fit(fitFunc,(x1,x2),y, initialParameters,method='lm',maxfev=6000,ftol=1e-8)

##-----------------------------------------------------------------------------
# Predict with the fitted function.
A = (x1,x2)
ypred=fitFunc(A,fitParams[0],fitParams[1],fitParams[2],fitParams[3],fitParams[4])

def rmse(predictions, targets):
    """Root-mean-square error between two equal-length arrays."""
    return np.sqrt(((predictions - targets) ** 2).mean())

rmse_val = rmse(ypred, y)

# Pearson correlation coefficient between predicted and observed backscatter.
corrr=np.corrcoef(ypred,y)
rr= corrr[0,1]

##---------------------------------------------------------------
## Plotting calibration points (observed vs estimated sigma^0)
plt.scatter(y,ypred)
plt.xlim([0, 0.3])
plt.ylim([0, 0.3])
plt.xlabel("Observed $\sigma^0$")
plt.ylabel("Estimated $\sigma^0$")
plt.title("VV-Soybean")
plt.plot([0, 0.3], [0, 0.3], 'k:')  # 1:1 reference line
plt.annotate('r = %.2f'%rr, xy=(0.015,0.27))  # round off upto 2 decimals
plt.annotate('RMSE = %.3f'%rmse_val, xy=(0.015,0.24))
matplotlib.rcParams.update({'font.size': 15})
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
#plt.savefig('VV_Soybean.png')
print('Fitted WCM coefficients for VV =\n',fitParams)

# Build the lookup table: (PAI, soil moisture, predicted VV backscatter).
LUT = np.vstack((x1, x2, ypred))
LUT_soybean = LUT.T
df2 = pd.DataFrame(LUT_soybean, columns = ['PAI','SM','VV'])
print(df2)

# ***Exporting the LUT to Google Drive***
# First create a directory named 'EarthEngine' in your google drive manually,
# then run the following snippet.
from google.colab import drive
drive.mount('/drive')
df2.to_csv('/drive/My Drive/EarthEngine/SoybeanLUT.csv')
#df2.to_excel('/drive/My Drive/EarthEngine/SoybeanLUT.xlsx')
drive.flush_and_unmount()
Chapter05/Sec552/WCM_calibration_Springer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# MercadoLibre title-classification data reduction: normalize product titles,
# score each training row by how much of the *test* vocabulary it covers,
# keep the best rows per category, and export a reduced, label-encoded set.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# %time
data = pd.read_csv('../train.csv')

test = pd.read_csv('../test.csv')

samplesub = pd.read_csv('../sample_submission.csv')

import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords

import unicodedata

# Strip accents from stopwords so they match the accent-stripped titles below.
norm = lambda w : unicodedata.normalize('NFKD', w).encode('ASCII', 'ignore').decode('ASCII')

all_stopw = set()
for corpus in ['english', 'portuguese', 'spanish']:
    all_stopw.update(set(map(norm, stopwords.words(corpus))))

# <hr>

data = data.sample(frac = 1)  # shuffle rows; keep index!

def normalize(curr):
    """Normalize a Series of titles: strip accents, lowercase, unify
    punctuation, mask digits, and undo simple plurals / letter repeats."""
    # remove accent
    curr = curr.str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
    # to lower case
    curr = curr.str.lower()
    # remove not alphanumerics or . ,
    curr = curr.str.replace('[^a-zA-Z0-9.,]', ' ')
    # let , and . be the same char
    curr = curr.str.replace('[.]', ',')
    # keep separators only between digits (decimal marks); drop the rest
    curr = curr.str.replace('(?<=[0-9])[,]+(?=[0-9])', '.')
    curr = curr.str.replace('[,]', ' ')
    # set all digits to 0
    curr = curr.str.replace('[0-9]', '0')
    # separate ' <digits><letters ' like in 22g or 12ms
    # curr = curr.str.replace('(^| )([0-9]+)([a-zA-Z]+)($| )', r'\1\2 \3\4')
    # remove some Pt plurals
    curr = curr.str.replace('\\b([a-zA-Z]+[aeiouwy])(s)\\b', r'\1')
    # collapse 4+ consecutive repeats of the same letter to a single one
    curr = curr.str.replace(r'([a-zA-Z])\1{3,}', r'\1')
    # surround runs of 4 or more letters with spaces
    curr = curr.str.replace(r'([a-zA-Z]{4,})', r' \1 ')
    return curr

X_data = data.title
test_data = test.title
X_full = pd.concat([X_data, test_data])

# %%time
X_full = normalize(X_full)

sp2 = len(data)  # boundary between train rows and test rows inside X_full

# %%time
# Drop stopwords from every title.
xremo = lambda s : ' '.join([w for w in s if w not in all_stopw])
X_full = X_full.str.split().apply(xremo)

train_norm = X_full[:sp2]
test_norm = X_full[sp2:]

# %%time
# Word frequencies over the normalized training titles.
train_words = dict()
for t in train_norm:
    for w in t.split():
        train_words[w] = train_words.get(w, 0) + 1

# %%time
# Word frequencies over the normalized test titles.
test_words = dict()
for t in test_norm:
    for w in t.split():
        test_words[w] = test_words.get(w, 0) + 1

len(test_words), len(train_words)

testwseries = pd.Series(test_words).index
temp_test = testwseries.isin(pd.Series(train_words).index)
temp_test.mean()  # fraction of test vocabulary covered by the training set

testwseries[~temp_test].values[-200:]

# +
# %%time
## how many instances are necessary (the series is already shuffled above)
# Fix: the original iterated over `X_norm`, a name never defined in this
# revision (leftover from an earlier one), which raised a NameError at
# runtime; the intended series is `train_norm`, the normalized train titles.
test_counter = test_words.copy()
priorities = np.full(len(train_norm), 999_999)
for i, t in enumerate(train_norm):
    for w in t.split():
        if w in test_counter:
            priorities[i] = min(priorities[i], test_words[w] - test_counter[w])
            test_counter[w] -= 1
# 1 for equal ammount, .5 for twice in train, ...
# actually any positive val works
# -

data['priorities'] = priorities

data.sort_values(['category', 'label_quality', 'priorities'], inplace=True)

# %time
reduced = data.groupby(['category'], as_index=False).head(800)

# %%time
red_norm = normalize(reduced.title)
# red_norm = red_norm.str.replace('[a-z0-9_]*[0-9][a-z0-9_]*', ' ')
red_norm = red_norm.str.split().apply(xremo)

# %%time
red_words = red_norm.str.split(expand=True).stack().value_counts().to_dict()

temp2_test = testwseries.isin(pd.Series(red_words).index)
temp2_test.mean(), temp2_test.sum()  # 800 ->937, 1000 -> 94

# %%time
# Rows outside the per-category head that still cover rare test vocabulary.
left = data.loc[~data.index.isin(reduced.index)].query('priorities <= 5')
# 0 -> 1650, 1,0 -> 32k, 1-5 -> 97k, 1-10 -> 188k

len(left)

reduced = pd.concat([reduced, left])

# shuffle again after appending the extra rows
reduced = reduced.sample(frac = 1)

# %%time
red_norm = normalize(reduced.title)
red_norm = red_norm.str.split().apply(xremo)
red_words = red_norm.str.split(expand=True).stack().value_counts().to_dict()
temp2_test = testwseries.isin(pd.Series(red_words).index)
temp2_test.mean()

(~temp2_test).sum()

len(red_norm)

# <hr>
# ## Data Simplification

from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(data.category)

# must save this for later
le.classes_

# change target to numeric
reduced['category'] = le.transform(reduced.category)

# change label_quality to binary integer
reduced['label_quality'] = reduced.label_quality.map({'reliable' : 0, 'unreliable' : 1})

# change language to binary integer
reduced['language'] = reduced.language.map({'spanish' : 0, 'portuguese' : 1})

reduced.head()

reduced.shape

reduced.to_csv('../data-reduced-800-v3-shuffled.csv', index = True)
MeLi_BaseGen/MeLi_BaseGen_V3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Five intersting functions on Pytorch
#
# Pytorch is an open source library used for deep learning applications and
# heavily optimized for vector operations, with out-of-the-box GPU support.
# Demonstrated here:
# - Bitwise operations
# - CPU vs GPU
# - Autograd
# - Histograms
# - Distributions

# Import torch and other required modules
import torch
import time
import matplotlib.pyplot as plt
import numpy as np

# ## Bitwise operations avalible in Pytorch
#
# A bitwise operation works on the individual bits of its operands; it is
# fast and directly supported by the processor. Two binary-valued tensors
# are combined with AND, OR and XOR below.

a = torch.tensor([1, 0, 1])
b = torch.tensor([0,0,0])

and_op = torch.bitwise_and(a,b)
print("Output of 111 AND 000 is ", and_op)

or_op = torch.bitwise_or(a,b)
print("Output of 111 OR 000 is ", or_op)

xor_op = torch.bitwise_xor(a, b)
print("Exclusive OR of 111 and 000 is ", xor_op)

# # CPU vs GPU Speedup
# Multiply two matrices on CPU and on GPU and report the speed gain.

'''CPU vs GPU speedup

!!This example requires a CUDA enabled GPU
'''
size = 1024  # choose large matrix else the speedup won't be significant
a = torch.randn(size,size)
b = torch.randn(size,size)

# For CPU
start = time.time()
ans_cpu = torch.matmul(a, b)
stop = time.time()
CPU_time = stop - start
print("CPU time", CPU_time)

# For GPU
device = torch.cuda.current_device()  # get the name of your current CUDA device
a = a.to(device)  # Send those tensors to VRAM
b = b.to(device)
start = time.time()
ans_gpu = torch.matmul(a, b)
stop = time.time()
GPU_time = stop - start
print("GPU time",GPU_time)

# Speed gain
speed_up = CPU_time / GPU_time
print("Speed ",speed_up)

# # Autograd
# Derivative of f(x) = 5x^4 + 3x^3 + 7x^2 + 9x - 5 at x = 0..10.

for i in range(11):
    # Fix: torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4; build a float leaf tensor with requires_grad directly.
    x = torch.tensor([float(i)], requires_grad=True)  # independent var x
    y = 5*x**4 + 3*x**3 + 7*x**2 + 9*x - 5  # f(x)
    print("f(x) = ", float(y))
    y.backward()  # populates x.grad with df/dx
    print("f'(x) = ", float(x.grad))

# # Histogram
# Pytorch can also be used to compute the histogram of a tensor; matplotlib's
# hist function draws it.

def plot_histogram(data):
    """Draw a histogram of `data` via matplotlib.axes.Axes.hist()."""
    n, bins, patches = plt.hist(x=data, bins='auto', color='#0504aa',
                                alpha=0.8, rwidth=0.85)

data = torch.randn(1000)
# Fix: the original called histc with min=0, max=100 on standard-normal
# samples, so every negative value was ignored and the rest collapsed into
# the first bin; bin over the actual data range instead.
hist_counts = torch.histc(data, bins=8, min=float(data.min()), max=float(data.max()))
plot_histogram(data=data)

# # Distributions
# Normal distributions with different means and standard deviations.

normal_1 = torch.empty(10000).normal_(mean=0,std=1)
plot_histogram(normal_1)

normal_35 = torch.empty(10000).normal_(mean=100,std=35)
plot_histogram(normal_35)

# ## Conclusion
#
# A very basic overview of some basic features of pytorch: boolean logic,
# CPU/GPU matrix multiplication timing, histograms, and generating normal
# distributions of various characteristics.

# ## Reference Links
# * Official documentation for `torch.Tensor`: https://pytorch.org/docs/stable/tensors.html
# * Normal Distribution: https://stackoverflow.com/questions/51136581/how-to-create-a-normal-distribution-in-pytorch

# !pip install jovian --upgrade --quiet

import jovian

jovian.commit('01_tensor_operations')
Pytorch-Zero-to-GANs/01_tensor_operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Implementation of Softmax Regression from Scratch
#
# :label:`chapter_softmax_scratch`
#
# Just as with linear regression, multiclass logistic (softmax) regression
# is fundamental enough to implement by hand before breezing through the
# Gluon version. Start by importing the packages.

# +
import sys
sys.path.insert(0, '..')

# %matplotlib inline
import d2l
import torch
from torch.distributions import normal
# -

# Fashion-MNIST, streamed in mini-batches of 256 examples.

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# ## Initialize Model Parameters
#
# Each $28 \times 28$ image is flattened into a $784$-dimensional feature
# vector (spatial structure is ignored for now — every pixel is just another
# feature). With $10$ output categories, the weights form a
# $784 \times 10$ matrix and the biases a $1 \times 10$ vector. Weights are
# initialized with small Gaussian noise, biases with zeros.

# +
num_inputs = 784
num_outputs = 10

W = normal.Normal(loc = 0, scale = 0.01).sample((num_inputs, num_outputs))
b = torch.zeros(num_outputs)
# -

# Attach gradients: allocate storage for future gradients and tell PyTorch
# to differentiate with respect to these parameters.

# +
W.requires_grad_(True)
b.requires_grad_(True)
# -

# ## The Softmax
#
# Quick refresher on `torch.sum` along specific dimensions: `dim=0` sums
# down columns, `dim=1` sums across rows, and `keepdim=True` preserves the
# number of axes (shape `(1, 3)` instead of `(3,)` for a `(2, 3)` input).

# +
X = torch.tensor([[1, 2, 3], [4, 5, 6]])
torch.sum(X, dim=0, keepdim=True), torch.sum(X, dim=1, keepdim=True)
# -
# The softmax itself: exponentiate every entry, sum each row to obtain its
# normalization constant (the "partition function" of statistical physics),
# then divide so each row becomes a probability distribution summing to $1$.
#
# $$
# \mathrm{softmax}(\mathbf{X})_{ij} = \frac{\exp(X_{ij})}{\sum_k \exp(X_{ik})}
# $$
#
# NOTE(review): as in the book, no precaution is taken here against
# numerical overflow/underflow for large-magnitude inputs.

# +
def softmax(X):
    """Map each row of X to a probability distribution."""
    exponentials = X.exp()
    row_totals = exponentials.sum(dim=1, keepdim=True)  # one constant per row
    return exponentials / row_totals  # broadcast division across each row
# -

# Sanity check on random input: entries become non-negative and each row
# sums to one, as a probability must.

# +
# X = nd.random.normal(shape=(2, 5))
X = normal.Normal(loc = 0, scale = 1).sample((2, 5))
X_prob = softmax(X)
X_prob, torch.sum(X_prob, dim=1)
# -

# ## The Model
#
# The forward pass: flatten each image in the batch to a `num_inputs`-long
# vector, apply the affine map, then softmax.

# +
def net(X):
    """Softmax-regression forward pass over a batch of images."""
    logits = torch.matmul(X.reshape((-1, num_inputs)), W) + b
    return softmax(logits)
# -

# ## The Loss Function
#
# Next up: the cross entropy loss introduced in :numref:`chapter_softmax`.
# Cross entropy — likely the most common deep-learning loss, since
# classification problems currently far outnumber regression problems — is
# the negative log likelihood of the probability assigned to the true label,
# $-\log p(y|x)$. Instead of a slow Python loop over predictions, `gather`
# selects exactly those entries from the softmax matrix. Toy example below:
# 3 categories, 2 examples.

y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = torch.tensor([0, 2])
torch.gather(y_hat, 1, y.unsqueeze(dim=1))  # y unsqueezed so shapes line up

# The loss in one step: gather the true-class probabilities, take -log.

# +
def cross_entropy(y_hat, y):
    """Per-example cross-entropy loss; returns shape (n, 1)."""
    true_class_probs = torch.gather(y_hat, 1, y.unsqueeze(dim=1))
    return -true_class_probs.log()
# -

# ## Classification Accuracy
#
# For a hard prediction we pick the class with the highest predicted
# probability (applications like email categorization must commit to one
# class in the end). Accuracy — the fraction of predictions matching the
# labels — is not differentiable, so it cannot be optimized directly, but
# it is the metric we usually care about and report.

# +
def accuracy(y_hat, y):
    """Fraction of rows whose argmax equals the label, as a Python float."""
    matches = y_hat.argmax(dim=1) == y  # 0/1 per example
    return matches.float().mean().item()
# -

# On the toy data above: example 1 predicts class 2 (prob 0.6) but the label
# is 0; example 2 predicts class 2 (prob 0.5) and the label is 2 — so the
# accuracy is 0.5.

accuracy(y_hat, y)

# Evaluate accuracy over a whole dataset iterator.

# +
# The function will be gradually improved: the complete implementation will be
# discussed in the "Image Augmentation" section
def evaluate_accuracy(data_iter, net):
    """Average accuracy of `net` across every batch yielded by `data_iter`."""
    correct, seen = 0.0, 0
    for X, y in data_iter:
        correct += (net(X).argmax(dim=1) == y).sum().item()
        seen += y.size()[0]  # y.size()[0] = batch_size
    return correct / seen
# -

# With randomly initialized weights, the model's accuracy should be close
# to random guessing: about 0.1 for 10 classes.
# + attributes={"classes": [], "id": "", "n": "20"} evaluate_accuracy(test_iter, net) # - # ## Model Training # # The training loop for softmax regression should look strikingly familiar # if you read through our implementation # of linear regression earlier in this chapter. # Again, we use the mini-batch stochastic gradient descent # to optimize the loss function of the model. # Note that the number of epochs (`num_epochs`), # and learning rate (`lr`) are both adjustable hyper-parameters. # By changing their values, we may be able to increase the classification accuracy of the model. In practice we'll want to split our data three ways # into training, validation, and test data, using the validation data to choose the best values of our hyperparameters. # + attributes={"classes": [], "id": "", "n": "21"} num_epochs, lr = 5, 0.1 # This function has been saved in the d2l package for future use def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, trainer=None): for epoch in range(num_epochs): train_l_sum, train_acc_sum, n = 0.0, 0.0, 0 for X, y in train_iter: y_hat = net(X) l = loss(y_hat, y).sum() l.backward() if trainer is None: d2l.sgd(params, lr, batch_size) else: # This will be illustrated in the next section trainer.step(batch_size) train_l_sum += l.item() train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item() n += y.size()[0] test_acc = evaluate_accuracy(test_iter, net) print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f' % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc)) train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr) # - # ## Prediction # # Now that training is complete, our model is ready to classify some images. # Given a series of images, we will compare their actual labels # (first line of text output) and the model predictions # (second line of text output). 
# + for X, y in test_iter: break true_labels = d2l.get_fashion_mnist_labels(y.numpy()) pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy()) titles = [truelabel + '\n' + predlabel for truelabel, predlabel in zip(true_labels, pred_labels)] d2l.show_fashion_mnist(X[10:20], titles[10:20]) # - # ## Summary # # With softmax regression, we can train models for multi-category classification. The training loop is very similar to that in linear regression: retrieve and read data, define models and loss functions, # then train models using optimization algorithms. As you'll soon find out, most common deep learning models have similar training procedures. # # ## Exercises # # 1. In this section, we directly implemented the softmax function based on the mathematical definition of the softmax operation. What problems might this cause (hint - try to calculate the size of $\exp(50)$)? # 1. The function `cross_entropy` in this section is implemented according to the definition of the cross-entropy loss function. What could be the problem with this implementation (hint - consider the domain of the logarithm)? # 1. What solutions you can think of to fix the two problems above? # 1. Is it always a good idea to return the most likely label. E.g. would you do this for medical diagnosis? # 1. Assume that we want to use softmax regression to predict the next word based on some features. What are some problems that might arise from a large vocabulary?
Ch05_Linear_Neural_Networks/Implementation_of_Softmax_Regression_from_Scratch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import flopy import flopy.utils.binaryfile as bf import matplotlib.pyplot as plt from matplotlib.ticker import (MultipleLocator, FormatStrFormatter) import pyvista as pv from scipy import stats from scipy.interpolate import interp1d from ipython2cwl.iotypes import * from typing import List import os #Parameters, inputs and outputs exposed case: CWLIntInput = 2 # case number # 1-3 # 1:homogeneous aquifer, 2:midly heterogeneous aquifer, 3:highly heterogeneous aquifer hydr: CWLStringInput = 2 # hydraulic gradient [-] # range 0-2 (float) rech: CWLStringInput = 1 # recharge rate [L3/T] # range 0-1 (float) inic: CWLStringInput = 1 # initial concentration of contaminant [M/L3] # range 0-1 (float) degr: CWLStringInput = 0.5 # degradation rate [M/T] # range 0-1 (float) conc: CWLIntInput = 30 # concerned time period [T] # range 0-30 (integer) #Parameters, inputs and outputs not exposed print(os.getcwd()) aquifer_file_case1: CWLFilePathInput = "aquifer1" aquifer_file_case2: CWLFilePathInput = "aquifer2" aquifer_file: CWLFilePathInput = 'aquifer.ftl' ground_water_flow_field_viz: CWLFilePathOutput = 'groundwaterflowfield.png' break_through_curve_viz: CWLFilePathOutput = "break_through_curve.png" arrival_time_viz: CWLFilePathOutput= "arrival_time.png" contaminant_transport_list: List[CWLFilePathOutput] = ["contaminant_transport0.png", "contaminant_transport1.png", "contaminant_transport2.png"] mt3d001: CWLFilePathOutput = "MT3D001.UCN" mf2005_exe = 'mf2005' mt3dms_exe = 'mt3dms' # + # aquifer size delx = 1 dely = 1 delz = 1 lambdax = 25 lambday = 25 lambdaz = 5 nlay = lambdaz*3 nrow = lambday*3 ncol = lambdax*6 ztop = 0. 
# Bottom elevation of every layer (top of model is ztop, each layer is delz thick).
zbot = np.zeros((nlay, nrow, ncol))
for i in range(nlay):
    zbot[i, :, :] = ztop - delz*(i+1)

# flow boundary condition: constant head on the west (inflow) and east (outflow)
# faces, active cells everywhere else.
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
headend = hydr*ncol  # head at the inflow face implied by the gradient
strt = np.ones((nlay, nrow, ncol), dtype=np.float32)
strt[:, :, 0] = headend
strt[:, :, -1] = 0

# Recharge wells: one per cell of the lambdax x lambday source patch in the
# bottom layer (stress period 0).
lrcq = {}
lrcq[0] = []
for i in range(lambdax, lambdax*2):
    for j in range(lambday, lambday*2):
        lrcq[0].append([nlay-1, j, i, rech])

# contamination source: initial concentration inic over the same patch.
initial = np.zeros((nlay, nrow, ncol))
for i in range(lambdax, lambdax*2):
    for j in range(lambday, lambday*2):
        initial[nlay-1, j, i] = inic

# time domain: tlim time units, output every 0.1 units.
tlim = int(conc)
times = np.linspace(0, tlim, num=tlim*10 + 1)

# aquifer case: hydraulic conductivity field (log-normal for cases 2 and 3).
if case == 1:
    hk = np.ones((nlay, nrow, ncol))
elif case == 2:
    sgems = np.loadtxt(aquifer_file_case1, skiprows=1, delimiter=',')
    aquifer = np.zeros((nlay, nrow, ncol))
    aquifer = np.reshape(sgems.T, (nlay, nrow, ncol))
    hk = np.exp(aquifer)
elif case == 3:
    sgems = np.loadtxt(aquifer_file_case2, skiprows=1, delimiter=',')
    aquifer = np.zeros((nlay, nrow, ncol))
    aquifer = np.reshape(sgems.T, (nlay, nrow, ncol))
    hk = np.exp(aquifer)
else:
    # FIX: previously only printed and fell through, which produced a
    # confusing NameError on `hk` further down. Fail fast instead.
    raise ValueError('wrong case number: {} (expected 1, 2 or 3)'.format(case))

# MODFLOW2005: steady groundwater flow model.
modelname = 'aquifer'
mf = flopy.modflow.Modflow(modelname, exe_name=mf2005_exe)
dis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, delr=delx,
                               delc=dely, top=ztop, botm=zbot, nper=1, perlen=tlim)
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
lpf = flopy.modflow.ModflowLpf(mf, hk=hk)
wel = flopy.modflow.ModflowWel(mf, stress_period_data=lrcq)
oc = flopy.modflow.ModflowOc(mf)
pcg = flopy.modflow.ModflowPcg(mf)
lmt = flopy.modflow.ModflowLmt(mf, output_file_header='extended',
                               output_file_format='formatted',
                               output_file_name=str(aquifer_file))
mf.write_input()
success, buff = mf.run_model()

if True:
    # MODFLOW2005 to MT3DMS: scan the flow-transport link file for the
    # X/Y/Z velocity sections.
    # NOTE(review): this loop writes only rows 0-2 of velocity_vector
    # (velocity_vector[j, 0] with j in range(3)) — it looks like it was meant
    # to fill one row per cell; confirm against the .ftl layout before use.
    f = open(aquifer_file, 'r')
    cell_number = nlay * nrow * ncol
    velocity_vector = np.zeros((cell_number, 3))
    for line in f:
        if line[3] == 'X':
            print(line)
            subline = f.readline().split()
            for j in range(3):
                velocity_vector[j, 0] = float(subline[j])
        if line[3] == 'Y':
            print(line)
            subline = f.readline().split()
            for j in range(3):
                velocity_vector[j, 1] = float(subline[j])
        if line[3] == 'Z':
            print(line)
            subline = f.readline().split()
            for j in range(3):
                velocity_vector[j, 2] = float(subline[j])
    f.close()

# MT3DMS: contaminant transport on top of the MODFLOW flow solution.
mt = flopy.mt3d.Mt3dms(modflowmodel=mf, modelname=modelname, namefile_ext='mtnam',
                       exe_name=mt3dms_exe, ftlfree=True, ftlfilename=str(aquifer_file))
btn = flopy.mt3d.Mt3dBtn(mt, prsity=0.4, sconc=initial, ncomp=1, nprs=1,
                         timprs=times, savucn=True)
adv = flopy.mt3d.Mt3dAdv(mt, mixelm=3, percel=0.75, mxpart=8000000)
dsp = flopy.mt3d.Mt3dDsp(mt, al=0.01, trpt=0.1, trpv=0.01, dmcoef=1e-9)
rct = flopy.mt3d.Mt3dRct(mt, isothm=0, igetsc=0)
ssm = flopy.mt3d.Mt3dSsm(mt)
gcg = flopy.mt3d.Mt3dGcg(mt)
mt.write_input()
success, buff = mt.run_model()

# +
# Visualization switches.
groundwaterflowfield = True
contaminantplumefield = True
breakthroughcurve = True
arrivaltimedistribution = True

# Headless rendering for pyvista screenshots.
from pyvirtualdisplay import Display
display = Display(visible=0, size=(600, 400))
display.start()

# Re-parse the link file (same caveat as above about the fill pattern).
f = open(aquifer_file, 'r')
delx = 1
dely = 1
delz = 1
lambdax = 25
lambday = 25
lambdaz = 5
nlay = lambdaz*3
nrow = lambday*3
ncol = lambdax*6
cell_number = nlay * nrow * ncol
velocity_vector = np.zeros((cell_number, 3))
for line in f:
    if line[3] == 'X':
        print(line)
        subline = f.readline().split()
        for j in range(3):
            velocity_vector[j, 0] = float(subline[j])
    if line[3] == 'Y':
        print(line)
        subline = f.readline().split()
        for j in range(3):
            velocity_vector[j, 1] = float(subline[j])
    if line[3] == 'Z':
        print(line)
        subline = f.readline().split()
        for j in range(3):
            velocity_vector[j, 2] = float(subline[j])
f.close()

# Glyph scaling so the 99th-percentile velocity draws as a 27-unit arrow.
factor = 27/np.quantile(velocity_vector, 0.99)

# Point grids: full cell-center grid, recharge-zone footprint, control plane.
vgrid = np.zeros((cell_number, 3))
wellgrid = np.zeros((lambdax*lambday, 3))
cpgrid = np.zeros((nlay*nrow, 3))
iteration = 0
deltal = 1
for l in range(nlay):
    for k in range(nrow):
        for j in range(ncol):
            vgrid[iteration] = np.array(([j*deltal, k*deltal, l*deltal]))
            iteration += 1
iteration = 0
for l in range(lambdax, lambdax*2):
    for k in range(lambday, lambday*2):
        wellgrid[iteration] = np.array(([l*deltal, k*deltal, (nlay-1)*deltal]))
        iteration += 1
iteration = 0
for l in range(lambday*3):
    for k in range(lambdaz*3):
        cpgrid[iteration] = np.array(([5*lambdax*deltal, l*deltal, k*deltal]))
        iteration += 1

if groundwaterflowfield == True:
    # Block-average the velocity field down to a coarse 10x5x3 grid of arrows.
    rawgrid = vgrid.reshape((15, 75, 150, 3))
    rawvelo = velocity_vector.reshape((15, 75, 150, 3))
    avgrid = np.zeros((3, 5, 10, 3))
    avvelo = np.zeros((3, 5, 10, 3))
    for i in range(10):
        for j in range(5):
            for k in range(3):
                avblock = rawgrid[5*k:(5*k+5), 15*j:(15*j+15), 15*i:(15*i+15)].reshape((15*15*5, 3))
                avgrid[k, j, i] = [np.mean([avblock[i][0] for i in range(15*15*5)]),
                                   np.mean([avblock[i][1] for i in range(15*15*5)]),
                                   np.mean([avblock[i][2] for i in range(15*15*5)])]
                avblock = rawvelo[5*k:(5*k+5), 15*j:(15*j+15), 15*i:(15*i+15)].reshape((15*15*5, 3))
                avvelo[k, j, i] = [np.mean([avblock[i][0] for i in range(15*15*5)]),
                                   np.mean([avblock[i][1] for i in range(15*15*5)]),
                                   np.mean([avblock[i][2] for i in range(15*15*5)])]
    avgrid = avgrid.reshape((10*5*3, 3))
    avvelo = avvelo.reshape((10*5*3, 3))
    point_cloud = pv.PolyData(avgrid)
    point_cloud['vectors'] = avvelo
    arrows = point_cloud.glyph(orient='vectors', scale=True, factor=factor)
    plotter = pv.Plotter(notebook=True, window_size=(600, 400))
    plotter.add_text('Groundwater flow vectors', position='upper_edge',
                     font='arial', font_size=10, color='k')
    sargs = dict(title_font_size=1, label_font_size=1, n_labels=5, fmt="%.1f",
                 color='k', font_family="arial")
    plotter.add_mesh(arrows, scalars='GlyphScale', lighting=True,
                     show_scalar_bar=True, stitle='Groundwater flow [L3/T]',
                     scalar_bar_args=sargs, opacity=0.8)
    plotter.enable_eye_dome_lighting()
    plotter.set_background('w')
    welldata = pv.PolyData(wellgrid)
    outline = welldata.outline()
    plotter.add_mesh(outline, color="blue", line_width=6, label='recharge zone')
    cpdata = pv.PolyData(cpgrid)
    outline2 = cpdata.outline()
    plotter.add_mesh(outline2, color="green", line_width=6, label='control plane')
    plotter.show_bounds(bounds=[0, 150, 0, 75, 0, 15], grid='back',
                        location='outer', xlabel='x [L]', ylabel='y [L]',
                        zlabel='z [L]', italic=True, font_family="arial", font_size=12)
    plotter.add_legend(bcolor=[255, 255, 255], border=True, size=[0.15, 0.06])
    plotter.show()
    plotter.screenshot(filename=ground_water_flow_field_viz, transparent_background=True)

if contaminantplumefield == True:
    # Render the plume at the first, middle and last saved transport times.
    c = bf.UcnFile(mt3d001)
    cpoint_cloud = pv.PolyData(vgrid)
    ctimes = c.get_times()
    screenshots = [ctimes[0], ctimes[int(len(ctimes)/2)], ctimes[-1]]
    for i in range(3):
        cfield = c.get_data(totim=screenshots[i]).flatten()
        cpoint_cloud = pv.PolyData(vgrid)
        plotter = pv.Plotter(notebook=True, window_size=(600, 400))
        plotter.add_text('Contaminant plume at t = '+str(screenshots[i])+' [T]',
                         position='upper_edge', font='arial', font_size=10, color='k')
        sargs = dict(title_font_size=1, label_font_size=1, n_labels=5, fmt="%.3f",
                     color='k', font_family="arial")
        welldata = pv.PolyData(wellgrid)
        outline = welldata.outline()
        plotter.add_mesh(outline, color="blue", line_width=6, label='recharge zone')
        cpdata = pv.PolyData(cpgrid)
        outline2 = cpdata.outline()
        plotter.add_mesh(outline2, color="green", line_width=6, label='control plane')
        plotter.add_mesh(cpoint_cloud, scalars=cfield, show_scalar_bar=True,
                         lighting=True, point_size=7, render_points_as_spheres=True,
                         opacity='linear', stitle='Contaminant concentration [M/L3]',
                         scalar_bar_args=sargs, cmap='bone_r')
        plotter.show_bounds(bounds=[0, 150, 0, 75, 0, 15], grid='back',
                            location='outer', xlabel='x [L]', ylabel='y [L]',
                            zlabel='z [L]', italic=True, font_family="arial", font_size=12)
        plotter.enable_eye_dome_lighting()
        plotter.set_background('w')
        plotter.add_legend(bcolor=[255, 255, 255], border=True, size=[0.15, 0.06])
        plotter.show()
        plotter.screenshot(filename=contaminant_transport_list[i], transparent_background=True)

if breakthroughcurve == True:
    # Mass fraction that has passed the control plane (column lambdax*5) vs time.
    c = bf.UcnFile(mt3d001)
    ctimes = c.get_times()
    for k in range(len(ctimes)):
        concentration = c.get_data(totim=ctimes[k])
        if np.sum(concentration[:, :, lambdax*5]) >= 0.001:
            initime = ctimes[k]
            initk = k
            break
    concen = np.zeros(len(ctimes))
    init = np.sum(c.get_data(totim=initime)[:, :, :lambdax*5])
    cpc = 0
    for k in range(initk, len(ctimes)):
        concentration = c.get_data(totim=ctimes[k])
        cpc = (init - np.sum(concentration[:, :, :lambdax*5]))/init
        concen[k] = cpc
    fig, ax = plt.subplots(figsize=(5, 4))
    ax.title.set_text('Breakthrough curve at control plane')
    ax.plot(ctimes, concen, color='k', linewidth=5)
    plt.xlabel(r'$t$ [T]', size=16)
    plt.ylabel(r'$M/M_0$', size=16)
    plt.xticks(size=14)
    plt.yticks(size=14)
    plt.xticks(fontsize=14, fontname='Arial')
    plt.yticks(fontsize=14, fontname='Arial')
    ax.xaxis.set_major_locator(MultipleLocator(int(conc/5)))
    ax.xaxis.set_minor_locator(MultipleLocator(int(conc/25)))
    ax.yaxis.set_major_locator(MultipleLocator(0.2))
    ax.yaxis.set_minor_locator(MultipleLocator(0.04))
    plt.yticks(fontsize=14, fontname='Arial')
    ax.tick_params(which="major", direction="in", right=True, top=True, length=5)
    ax.tick_params(which="minor", direction="in", right=True, top=True, length=3)
    plt.ylim(0, 1)
    plt.xlim(0, conc)
    plt.tight_layout()
    plt.savefig(break_through_curve_viz, dpi=400)
    plt.show()

if arrivaltimedistribution == True:
    # Same cumulative-breakthrough computation as above (kept duplicated to
    # preserve the original cell structure), then invert it to an arrival-time
    # distribution via quadratic interpolation.
    c = bf.UcnFile(mt3d001)
    ctimes = c.get_times()
    for k in range(len(ctimes)):
        concentration = c.get_data(totim=ctimes[k])
        if np.sum(concentration[:, :, lambdax*5]) >= 0.001:
            initime = ctimes[k]
            initk = k
            break
    concen = np.zeros(len(ctimes))
    init = np.sum(c.get_data(totim=initime)[:, :, :lambdax*5])
    cpc = 0
    for k in range(initk, len(ctimes)):
        concentration = c.get_data(totim=ctimes[k])
        cpc = (init - np.sum(concentration[:, :, :lambdax*5]))/init
        concen[k] = cpc
    intpx = []
    intpy = []
    for i in range(len(ctimes)):
        if concen[i] >= 0.01:
            if concen[i] >= 0.99:
                break
            intpx.append(concen[i])
            intpy.append(ctimes[i])
    intpx = np.asarray(intpx)
    intpy = np.asarray(intpy)
    f1 = interp1d(intpx, intpy, kind='quadratic')
    newconc = np.linspace(intpx.min(), intpx.max(), num=51)
    newtime = f1(newconc)
    fig, ax = plt.subplots(figsize=(5, 4))
    ax.title.set_text('Arrival time distribution at control plane')
    histdata = ax.hist(newtime, 20, color='k', density=True)
    # FIX: was ax.vlines(f"{inic}/{degr}", ...) which passed the literal string
    # "1/0.5" as the x-position instead of the numeric ratio. float() also
    # covers the CWL case where inic/degr arrive as strings.
    # (The redundant second ax.hist(...) call that overdrew the same
    # histogram has been removed as well.)
    ax.vlines(float(inic)/float(degr), 0, np.max(histdata[0]),
              color='red', label='time for 100% decay')
    plt.xlabel(r'$t_{arrival}$ [T]', size=16)
    plt.ylabel(r'Probability density', size=16)
    plt.xticks(size=14)
    plt.yticks(size=14)
    plt.legend(fontsize=12)
    plt.tight_layout()
    plt.savefig(arrival_time_viz, dpi=400)
    plt.show()
# -
main_ERC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tulasiram58827/ocr_tflite/blob/main/colabs/KERAS_OCR_TFLITE.ipynb)
# (badge link fixed: was .../github.com/... which 404s)

# ## SetUp

# +
# !pip install validators

# +
import typing
import string
import tensorflow as tf
from tensorflow import keras
import numpy as np
import cv2
import os
import hashlib
import urllib.request
import urllib.parse

tf.__version__

# ### Hyper-Parameters

# +
# Architecture hyper-parameters of the keras-ocr CRNN recognizer.
DEFAULT_BUILD_PARAMS = {
    'height': 31,
    'width': 200,
    'color': False,
    'filters': (64, 128, 256, 256, 512, 512, 512),
    'rnn_units': (128, 128),
    'dropout': 0.25,
    'rnn_steps_to_discard': 2,
    'pool_size': 2,
    'stn': True,
}

DEFAULT_ALPHABET = string.digits + string.ascii_lowercase

# Pretrained weight files published by keras-ocr, with sha256 checksums.
PRETRAINED_WEIGHTS = {
    'kurapan': {
        'alphabet': DEFAULT_ALPHABET,
        'build_params': DEFAULT_BUILD_PARAMS,
        'weights': {
            'notop': {
                'url': 'https://github.com/faustomorales/keras-ocr/releases/download/v0.8.4/crnn_kurapan_notop.h5',
                'filename': 'crnn_kurapan_notop.h5',
                'sha256': '027fd2cced3cbea0c4f5894bb8e9e85bac04f11daf96b8fdcf1e4ee95dcf51b9'
            },
            'top': {
                'url': 'https://github.com/faustomorales/keras-ocr/releases/download/v0.8.4/crnn_kurapan.h5',
                'filename': 'crnn_kurapan.h5',
                'sha256': 'a7d8086ac8f5c3d6a0a828f7d6fbabcaf815415dd125c32533013f85603be46d'
            }
        }
    }
}

# ## Utilities

# +
def swish(x, beta=1):
    """Swish activation: x * sigmoid(beta * x)."""
    return x * keras.backend.sigmoid(beta * x)

keras.utils.get_custom_objects().update({'swish': keras.layers.Activation(swish)})

def _repeat(x, num_repeats):
    """Repeat each element of 1-D int tensor `x` `num_repeats` times, flattened."""
    ones = tf.ones((1, num_repeats), dtype='int32')
    x = tf.reshape(x, shape=(-1, 1))
    x = tf.matmul(x, ones)
    return tf.reshape(x, [-1])

def _meshgrid(height, width):
    """Return a (3, height*width) grid of normalized [-1, 1] homogeneous coords."""
    x_linspace = tf.linspace(-1., 1., width)
    y_linspace = tf.linspace(-1., 1., height)
    x_coordinates, y_coordinates = tf.meshgrid(x_linspace, y_linspace)
    x_coordinates = tf.reshape(x_coordinates, shape=(1, -1))
    y_coordinates = tf.reshape(y_coordinates, shape=(1, -1))
    ones = tf.ones_like(x_coordinates)
    indices_grid = tf.concat([x_coordinates, y_coordinates, ones], 0)
    return indices_grid

# pylint: disable=too-many-statements
def _transform(inputs):
    """Apply the affine transform `locnet_y` to feature map `locnet_x`.

    Spatial-transformer sampling step: builds a sampling grid from the
    predicted 2x3 affine parameters and bilinearly interpolates the input.
    """
    locnet_x, locnet_y = inputs
    output_size = locnet_x.shape[1:]
    batch_size = tf.shape(locnet_x)[0]
    height = tf.shape(locnet_x)[1]
    width = tf.shape(locnet_x)[2]
    num_channels = tf.shape(locnet_x)[3]
    locnet_y = tf.reshape(locnet_y, shape=(batch_size, 2, 3))
    locnet_y = tf.reshape(locnet_y, (-1, 2, 3))
    locnet_y = tf.cast(locnet_y, 'float32')
    output_height = output_size[0]
    output_width = output_size[1]
    indices_grid = _meshgrid(output_height, output_width)
    indices_grid = tf.expand_dims(indices_grid, 0)
    indices_grid = tf.reshape(indices_grid, [-1])  # flatten
    indices_grid = tf.tile(indices_grid, tf.stack([batch_size]))
    indices_grid = tf.reshape(indices_grid, tf.stack([batch_size, 3, -1]))
    transformed_grid = tf.matmul(locnet_y, indices_grid)
    x_s = tf.slice(transformed_grid, [0, 0, 0], [-1, 1, -1])
    y_s = tf.slice(transformed_grid, [0, 1, 0], [-1, 1, -1])
    x = tf.reshape(x_s, [-1])
    y = tf.reshape(y_s, [-1])
    # Bilinear interpolation of the source pixels.
    height_float = tf.cast(height, dtype='float32')
    width_float = tf.cast(width, dtype='float32')
    output_height = output_size[0]
    output_width = output_size[1]
    x = tf.cast(x, dtype='float32')
    y = tf.cast(y, dtype='float32')
    x = .5 * (x + 1.0) * width_float
    y = .5 * (y + 1.0) * height_float
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1
    max_y = tf.cast(height - 1, dtype='int32')
    max_x = tf.cast(width - 1, dtype='int32')
    zero = tf.zeros([], dtype='int32')
    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)
    flat_image_dimensions = width * height
    pixels_batch = tf.range(batch_size) * flat_image_dimensions
    flat_output_dimensions = output_height * output_width
    base = _repeat(pixels_batch, flat_output_dimensions)
    base_y0 = base + y0 * width
    base_y1 = base + y1 * width
    indices_a = base_y0 + x0
    indices_b = base_y1 + x0
    indices_c = base_y0 + x1
    indices_d = base_y1 + x1
    flat_image = tf.reshape(locnet_x, shape=(-1, num_channels))
    flat_image = tf.cast(flat_image, dtype='float32')
    pixel_values_a = tf.gather(flat_image, indices_a)
    pixel_values_b = tf.gather(flat_image, indices_b)
    pixel_values_c = tf.gather(flat_image, indices_c)
    pixel_values_d = tf.gather(flat_image, indices_d)
    x0 = tf.cast(x0, 'float32')
    x1 = tf.cast(x1, 'float32')
    y0 = tf.cast(y0, 'float32')
    y1 = tf.cast(y1, 'float32')
    area_a = tf.expand_dims(((x1 - x) * (y1 - y)), 1)
    area_b = tf.expand_dims(((x1 - x) * (y - y0)), 1)
    area_c = tf.expand_dims(((x - x0) * (y1 - y)), 1)
    area_d = tf.expand_dims(((x - x0) * (y - y0)), 1)
    transformed_image = tf.add_n([
        area_a * pixel_values_a,
        area_b * pixel_values_b,
        area_c * pixel_values_c,
        area_d * pixel_values_d
    ])
    transformed_image = tf.reshape(transformed_image,
                                   shape=(batch_size, output_height, output_width, num_channels))
    return transformed_image

# ## Create Model

def CTCDecoder():
    """Return a Lambda layer that greedily CTC-decodes softmax output,
    right-padding the decoded sequence with -1 to the input length."""
    def decoder(y_pred):
        input_shape = tf.keras.backend.shape(y_pred)
        input_length = tf.ones(shape=input_shape[0]) * tf.keras.backend.cast(
            input_shape[1], 'float32')
        unpadded = tf.keras.backend.ctc_decode(y_pred, input_length)[0][0]
        unpadded_shape = tf.keras.backend.shape(unpadded)
        padded = tf.pad(unpadded,
                        paddings=[[0, 0], [0, input_shape[1] - unpadded_shape[1]]],
                        constant_values=-1)
        return padded
    return tf.keras.layers.Lambda(decoder, name='decode')

def build_model(alphabet, height, width, color, filters, rnn_units, dropout,
                rnn_steps_to_discard, pool_size, stn=True):
    """Build a Keras CRNN model for character recognition.

    Args:
        alphabet: The recognizable character set (softmax adds one blank class).
        height: The height of cropped images
        width: The width of cropped images
        color: Whether the inputs should be in color (RGB)
        filters: The number of filters to use for each of the 7 convolutional layers
        rnn_units: The number of units for each of the RNN layers
        dropout: The dropout to use for the final layer
        rnn_steps_to_discard: The number of initial RNN steps to discard
        pool_size: The size of the pooling steps
        stn: Whether to add a Spatial Transformer layer (see Jaderberg et al.,
             "Spatial Transformer Networks", 2015)

    Returns:
        (model, prediction_model): the raw softmax model and the same model
        with a CTC-decoding head attached.
    """
    assert len(filters) == 7, '7 CNN filters must be provided.'
    assert len(rnn_units) == 2, '2 RNN filters must be provided.'
    inputs = keras.layers.Input((height, width, 3 if color else 1), name='input', batch_size=1)
    x = keras.layers.Permute((2, 1, 3))(inputs)
    x = keras.layers.Lambda(lambda x: x[:, :, ::-1])(x)
    x = keras.layers.Conv2D(filters[0], (3, 3), activation='relu', padding='same', name='conv_1')(x)
    x = keras.layers.Conv2D(filters[1], (3, 3), activation='relu', padding='same', name='conv_2')(x)
    x = keras.layers.Conv2D(filters[2], (3, 3), activation='relu', padding='same', name='conv_3')(x)
    x = keras.layers.BatchNormalization(name='bn_3')(x)
    x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_3')(x)
    x = keras.layers.Conv2D(filters[3], (3, 3), activation='relu', padding='same', name='conv_4')(x)
    x = keras.layers.Conv2D(filters[4], (3, 3), activation='relu', padding='same', name='conv_5')(x)
    x = keras.layers.BatchNormalization(name='bn_5')(x)
    x = keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size), name='maxpool_5')(x)
    x = keras.layers.Conv2D(filters[5], (3, 3), activation='relu', padding='same', name='conv_6')(x)
    x = keras.layers.Conv2D(filters[6], (3, 3), activation='relu', padding='same', name='conv_7')(x)
    x = keras.layers.BatchNormalization(name='bn_7')(x)
    if stn:
        # Localization network: predicts the 6 affine parameters, initialized
        # to the identity transform.
        stn_input_output_shape = (width // pool_size**2, height // pool_size**2, filters[6])
        stn_input_layer = keras.layers.Input(shape=stn_input_output_shape)
        locnet_y = keras.layers.Conv2D(16, (5, 5), padding='same', activation='relu')(stn_input_layer)
        locnet_y = keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu')(locnet_y)
        locnet_y = keras.layers.Flatten()(locnet_y)
        locnet_y = keras.layers.Dense(64, activation='relu')(locnet_y)
        locnet_y = keras.layers.Dense(6, weights=[
            np.zeros((64, 6), dtype='float32'),
            np.float32([[1, 0, 0], [0, 1, 0]]).flatten()
        ])(locnet_y)
        localization_net = keras.models.Model(inputs=stn_input_layer, outputs=locnet_y)
        x = keras.layers.Lambda(_transform,
                                output_shape=stn_input_output_shape)([x, localization_net(x)])
    x = keras.layers.Reshape(target_shape=(width // pool_size**2,
                                           (height // pool_size**2) * filters[-1]),
                             name='reshape')(x)
    x = keras.layers.Dense(rnn_units[0], activation='relu', name='fc_9')(x)
    rnn_1_forward = keras.layers.LSTM(rnn_units[0], kernel_initializer="he_normal",
                                      return_sequences=True, name='lstm_10')(x)
    rnn_1_back = keras.layers.LSTM(rnn_units[0], kernel_initializer="he_normal",
                                   go_backwards=True, return_sequences=True,
                                   name='lstm_10_back')(x)
    rnn_1_add = keras.layers.Add()([rnn_1_forward, rnn_1_back])
    rnn_2_forward = keras.layers.LSTM(rnn_units[1], kernel_initializer="he_normal",
                                      return_sequences=True, name='lstm_11')(rnn_1_add)
    rnn_2_back = keras.layers.LSTM(rnn_units[1], kernel_initializer="he_normal",
                                   go_backwards=True, return_sequences=True,
                                   name='lstm_11_back')(rnn_1_add)
    x = keras.layers.Concatenate()([rnn_2_forward, rnn_2_back])
    backbone = keras.models.Model(inputs=inputs, outputs=x)
    x = keras.layers.Dropout(dropout, name='dropout')(x)
    x = keras.layers.Dense(len(alphabet) + 1, kernel_initializer='he_normal',
                           activation='softmax', name='fc_12')(x)
    x = keras.layers.Lambda(lambda x: x[:, rnn_steps_to_discard:])(x)
    model = keras.models.Model(inputs=inputs, outputs=x)
    prediction_model = keras.models.Model(inputs=inputs,
                                          outputs=CTCDecoder()(model.output))
    return model, prediction_model

# +
build_params = DEFAULT_BUILD_PARAMS
alphabets = DEFAULT_ALPHABET
blank_index = len(alphabets)  # CTC blank = last softmax class

model, prediction_model = build_model(alphabet=alphabets, **build_params)

# ## Download and Load Weights

# +
def get_default_cache_dir():
    """Cache directory for downloaded weights (override via KERAS_OCR_CACHE_DIR)."""
    return os.environ.get('KERAS_OCR_CACHE_DIR',
                          os.path.expanduser(os.path.join('~', '.keras-ocr')))

def sha256sum(filename):
    """Compute the sha256 hash for a file."""
    h = hashlib.sha256()
    b = bytearray(128 * 1024)
    mv = memoryview(b)
    with open(filename, 'rb', buffering=0) as f:
        for n in iter(lambda: f.readinto(mv), 0):
            h.update(mv[:n])
    return h.hexdigest()

def download_and_verify(url, sha256=None, cache_dir=None, verbose=True, filename=None):
    """Download a file to a cache directory and verify it with a sha256 hash.

    Args:
        url: The file to download
        sha256: The sha256 hash to check. If the file already exists and the
            hash matches, we don't download it again.
        cache_dir: The directory in which to cache the file. The default is
            `~/.keras-ocr`.
        verbose: Whether to log progress
        filename: The filename to use for the file. By default, the filename
            is derived from the URL.

    Returns:
        The local path of the verified file.
    """
    if cache_dir is None:
        cache_dir = get_default_cache_dir()
    if filename is None:
        filename = os.path.basename(urllib.parse.urlparse(url).path)
    filepath = os.path.join(cache_dir, filename)
    os.makedirs(os.path.split(filepath)[0], exist_ok=True)
    if verbose:
        print('Looking for ' + filepath)
    if not os.path.isfile(filepath) or (sha256 and sha256sum(filepath) != sha256):
        if verbose:
            print('Downloading ' + filepath)
        urllib.request.urlretrieve(url, filepath)
    assert sha256 is None or sha256 == sha256sum(filepath), 'Error occurred verifying sha256.'
    return filepath

# +
weights_dict = PRETRAINED_WEIGHTS['kurapan']
model.load_weights(download_and_verify(url=weights_dict['weights']['top']['url'],
                                       filename=weights_dict['weights']['top']['filename'],
                                       sha256=weights_dict['weights']['top']['sha256']))

# ## Model Architecture

# +
model.summary()

# ## Convert to TFLite

# +
# Download and unzip the representative dataset (used for int8 calibration).
# %%bash
# wget https://github.com/tulasiram58827/ocr_tflite/raw/main/data/represent_data.zip
# unzip represent_data.zip

# +
dataset_path = '/content/represent_data/'

def representative_data_gen():
    """Yield preprocessed sample inputs for post-training int8 calibration."""
    for file in os.listdir(dataset_path):
        image_path = dataset_path + file
        input_data = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        input_data = cv2.resize(input_data, (200, 31))
        input_data = input_data[np.newaxis]
        input_data = np.expand_dims(input_data, 3)
        input_data = input_data.astype('float32')/255
        yield [input_data]

# +
def convert_tflite(quantization):
    """Convert `prediction_model` to TFLite and write `ocr_<quantization>.tflite`.

    `quantization` is one of 'dr' (dynamic range), 'float16', 'int8' or
    'full_int8' (int8 with int8 I/O).
    """
    converter = tf.lite.TFLiteConverter.from_keras_model(prediction_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
        tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
    ]
    if quantization == 'float16':
        converter.target_spec.supported_types = [tf.float16]
    elif quantization == 'int8' or quantization == 'full_int8':
        converter.representative_dataset = representative_data_gen
        if quantization == 'full_int8':
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.inference_input_type = tf.int8  # or tf.uint8
            converter.inference_output_type = tf.int8  # or tf.uint8
    tf_lite_model = converter.convert()
    open(f'ocr_{quantization}.tflite', 'wb').write(tf_lite_model)

# **Note** : Support for CTC Decoder is not available in TFLite yet. So while
# converting we removed CTCDecoder in model part. We need to run Decoder from
# the output of the model.
#
# Refer to this [issue](https://github.com/tensorflow/tensorflow/issues/33494)
# regarding CTC decoder support in TFLite.
#
# **Update** : CTC Decoder is supported in TFLite now by enabling Built-in-Ops
# in Tensorflow 2.4. Thanks to TensorFlow team for the support.

# +
quantization = 'dr' #@param ["dr", "float16"]
convert_tflite(quantization)

# +
# !du -sh ocr_dr.tflite

# +
quantization = 'float16' #@param ["dr", "float16"]
convert_tflite(quantization)

# +
# !du -sh ocr_float16.tflite

# +
quantization = 'int8' #@param ["dr", "float16", 'int8', 'full_int8']
convert_tflite(quantization)

# **Currently Integer Quantization is erroring out and informed to TFLite team**

# ## TFLite Inference

# +
def run_tflite_model(image_path, quantization):
    """Run `ocr_<quantization>.tflite` on the image at `image_path`.

    Preprocesses to a (1, 31, 200, 1) float32 tensor in [0, 1] and returns the
    interpreter's first output tensor (the CTC-decoded index sequence).
    """
    input_data = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    input_data = cv2.resize(input_data, (200, 31))
    input_data = input_data[np.newaxis]
    input_data = np.expand_dims(input_data, 3)
    input_data = input_data.astype('float32')/255
    path = f'ocr_{quantization}.tflite'
    interpreter = tf.lite.Interpreter(model_path=path)
    interpreter.allocate_tensors()
    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape']
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details[0]['index'])
    return output

# +
from google.colab.patches import cv2_imshow
image_path = '/content/demo_1.png'

# +
# Running Dynamic Range Quantization
tflite_output = run_tflite_model(image_path, 'dr')
final_output = "".join(alphabets[index] for index in tflite_output[0]
                       if index not in [blank_index, -1])
print(final_output)
cv2_imshow(cv2.imread(image_path))

# +
# Running Float16 Quantization
tflite_output = run_tflite_model(image_path, 'float16')
final_output = "".join(alphabets[index] for index in tflite_output[0]
                       if index not in [blank_index, -1])
print(final_output)
cv2_imshow(cv2.imread(image_path))

# +
# Running Integer Quantization
tflite_output = run_tflite_model(image_path, 'int8')
# FIX: was `decoded[0]` — an undefined name (NameError); should decode the
# interpreter output just like the dr/float16 cells above.
final_output = "".join(alphabets[index] for index in tflite_output[0]
                       if index not in [blank_index, -1])
print(final_output)
cv2_imshow(cv2.imread(image_path))

# **The above code snippet will error out as informed integer quantization is
# not yet supported**

# ## Dynamic Range Model benchmarks

# **Inference Time** : 0.2sec
#
# **Memory FootPrint** : 46.38MB
#
# **Model Size** : 8.5MB

# ## Float16 benchmarks

# **Inference** : 0.76sec
#
# **Memory FootPrint** : 128MB
#
# **Model Size** : 17MB

# **The above benchmarks are with respect to a Redmi K20 Pro with 4 threads.**
colabs/KERAS_OCR_TFLITE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # School DataFrame
#
# #### Version: 3
# #### Creation Date: 2/5/21
# #### File Names Generated: school_df_v3.csv
#
# #### Updates from previous version:
# * Adds in calculated attendance percent metrics
#
# #### Notes:
# * New columns produced by this version: 'students_with_absences_<year>',
#   'pct_of_absences_<band>_<year>' and 'pct_of_student_absences_<band>_<year>'
#   for school years SY1718 / SY1819 and absence bands 1-5, 6-10, 11-20, 20+,
#   plus the two 'school_capacity_pct_*' columns.
# ---
# <br><Br>

import pandas as pd
import numpy as np
import seaborn as sns

# Load the v2 dataframe produced by the previous notebook.
data = pd.read_excel('../../01_data/cleaned_data/school_df_v2.xlsx')

data.info()

data.head()

# <br>
#
# ### Generate capacity percent filled of school (SY1819 and SY1718)

data['school_capacity_pct_SY1819'] = data['school_enrollment_SY1819'] / data['school_capacity_SY1819']

data['school_capacity_pct_SY1718'] = data['school_enrollment_SY1718'] / data['school_capacity_SY1718']

# <br>
#
# ### Cast student absence count of <10 to 5 across dataframe
# Decision: values of <10 were reassigned as 5

data.replace(to_replace='n<10', value=5, inplace=True)
data.replace(to_replace='<10', value=5, inplace=True)

# <br>
#
# ### Generate count of students with absences (per school year)
# The per-column copy-pasted arithmetic of v2 is replaced by loops over the
# absence bands; columns are created in the same order as before so the
# exported CSV layout is unchanged.

bands = ['1-5', '6-10', '11-20', '20+']

for year in ['SY1819', 'SY1718']:
    absence_cols = [f'attnd_absence_{band}_{year}' for band in bands]
    # The '<10' replacement above leaves these columns as objects; make them
    # numeric before summing.
    data[absence_cols] = data[absence_cols].apply(pd.to_numeric)
    # Plain '+'-style summation (builtin sum over Series) so that a NaN in any
    # band propagates, exactly as the original column-by-column addition did.
    data[f'students_with_absences_{year}'] = sum(data[col] for col in absence_cols)

# <br>
#
# ### Generate percent of students with absences in a category
# 'pct_of_absences_*' is relative to all students with absences;
# 'pct_of_student_absences_*' is relative to total enrollment.

for year in ['SY1819', 'SY1718']:
    for band in bands:
        data[f'pct_of_absences_{band}_{year}'] = (
            data[f'attnd_absence_{band}_{year}'] / data[f'students_with_absences_{year}'])
    for band in bands:
        data[f'pct_of_student_absences_{band}_{year}'] = (
            data[f'attnd_absence_{band}_{year}'] / data[f'school_enrollment_{year}'])

data.head()

data.columns.sort_values()

data.to_csv('../data/cleaned/school_df_v3.csv', index=False)
02_notebooks/01_creating_dataframes/create_school_df_v3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dipbanik/Clustering-MeanShift/blob/master/Clustering_MeanShift.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="EonG_F8yLOwI" colab_type="code" colab={}
import pandas as pd

# + [markdown] id="Yrsls2H3VtLF" colab_type="text"
# We use the titanic dataset to try and figure out passengers with similar
# characteristics (clusters). The dataset can be downloaded from
# https://www.kaggle.com/c/3136/download/train.csv

# + id="J-Gz07e6UrZX" colab_type="code"
titanic_data = pd.read_csv("titanic.csv", quotechar='"')
titanic_data.head()

# + id="f1g9LLnaVf1_" colab_type="code"
# Drop identifier-like columns that carry no clustering signal.
titanic_data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], 'columns', inplace=True)
titanic_data.head()

# + id="ckPpQ6_XWUjs" colab_type="code"
from sklearn import preprocessing

# Encode 'Sex' as integers so it can be used as a numeric feature.
le = preprocessing.LabelEncoder()
titanic_data['Sex'] = le.fit_transform(titanic_data['Sex'].astype(str))
titanic_data.head()

# + id="-QdhZHocXBDH" colab_type="code"
# One-hot encode the port of embarkation.
titanic_data = pd.get_dummies(titanic_data, columns=['Embarked'])
titanic_data.head()

# + id="81p5jyisXSCG" colab_type="code"
titanic_data.isnull().any()

# + id="bdlZEwv8Ykrd" colab_type="code"
titanic_data[titanic_data.isnull().any(axis=1)]

# + id="_zr0ecRzXcrF" colab_type="code"
titanic_data.head()

# + id="CrtcEBXlX9XB" colab_type="code"
# Drop the remaining rows with NaNs (mostly missing 'Age').
titanic_data = titanic_data.dropna()

# + id="AfSfkkW_Y2E1" colab_type="code"
from sklearn.cluster import MeanShift

analyzer = MeanShift(bandwidth=50)
analyzer.fit(titanic_data)

# + id="Sb4-YMZ4ZAAO" colab_type="code"
from sklearn.cluster import estimate_bandwidth

estimate_bandwidth(titanic_data)

# + [markdown] id="7OsHcCeKeaZH" colab_type="text"
# estimate_bandwidth gives us a good idea of what should be set as the
# bandwidth. Here a guessed bandwidth is used first, followed by the
# bandwidth suggested by the estimate_bandwidth function.

# + id="La7FlQNTZQjY" colab_type="code"
labels = analyzer.labels_

# + id="InmrSSSaZxxQ" colab_type="code"
import numpy as np

np.unique(labels)

# + id="6gA3I354Z1I_" colab_type="code"
# Attach each row's cluster label as a new column.  This replaces the
# original per-row .iloc assignment loop with one vectorized assignment;
# the cast to float keeps the dtype the original np.nan-initialized
# column had, so the groupby index values are unchanged.
titanic_data['cluster_group'] = labels.astype(float)

# + id="hdgXvEC-aMru" colab_type="code"
titanic_data.head()

# + id="-WdkXCcRaUZj" colab_type="code"
titanic_data.describe()

# + id="NVAVLJ13aVxQ" colab_type="code"
titanic_cluster_data = titanic_data.groupby(['cluster_group']).mean()
titanic_cluster_data

# + id="B9H4CH5iatGd" colab_type="code"
titanic_cluster_data['Counts'] = pd.Series(titanic_data.groupby(['cluster_group']).size())
titanic_cluster_data

# + id="z4Kj49yla8du" colab_type="code"
titanic_data[titanic_data['cluster_group'] == 1].describe()

# + id="baBn-eRVbTrt" colab_type="code"
titanic_data[titanic_data['cluster_group'] == 1]

# + id="saBFZQqGbhdU" colab_type="code"
# Refit with the bandwidth suggested by estimate_bandwidth.
# NOTE(review): this second fit runs on titanic_data *including* the
# 'cluster_group' column added above — presumably unintended; confirm
# before relying on these clusters.
from sklearn.cluster import MeanShift

analyzer = MeanShift(bandwidth=30.44)
analyzer.fit(titanic_data)

# + id="O3ruNh0Ccy5H" colab_type="code"
labels = analyzer.labels_

# + id="5Ecnh6Khc4x1" colab_type="code"
np.unique(labels)

# + id="g198RrEkc99r" colab_type="code"
titanic_data['cluster_group'] = labels.astype(float)

# + id="fqxe3yZndEAx" colab_type="code"
titanic_cluster_data = titanic_data.groupby(['cluster_group']).mean()
titanic_cluster_data

# + id="bTavag7idJdU" colab_type="code"
titanic_cluster_data['Counts'] = pd.Series(titanic_data.groupby(['cluster_group']).size())
titanic_cluster_data

# + id="qD5mwtsedQXW" colab_type="code"

# + [markdown] id="j8GJI2m6eKRf" colab_type="text"
# This gives us some insight into how women and children were prioritized,
# and that people of the higher classes were prioritized in the saving effort.
Clustering_MeanShift.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 0 - Information # + # Re tester l'upload de l'entraiment du réseau de neurones avec les paramètres sans dropout # Test un benchmark avec le carré de la distance comme diviseur. # - # # 1 - Packages # + # Math packages import numpy as np # Dataset packages import pandas as pd import copy # Graphic package import matplotlib.pyplot as plt import seaborn as sns sns.set() # Os packages import os, sys # Time packages import time # Sklearn packages import sklearn.svm as svm import sklearn.linear_model as sklin import sklearn.model_selection as mod_selec import sklearn.metrics as metrics # Keras import keras from keras.models import Model, Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import Flatten, BatchNormalization from keras.optimizers import SGD from keras.utils import np_utils # Lightlgb import lightgbm as lgb # Progress bar import tqdm as tqdm # - # # 2 - Loading of the Data # ## 2.1 - Extrat all the data # + # Loading of the data X_train_df = pd.read_csv("Data/X_train.csv") Y_train_df = pd.read_csv("Data/Y_train.csv") X_test_df = pd.read_csv("Data/X_test.csv") # Display print("Columns names: ", X_train_df.columns) print("Shape: ", X_train_df.shape) X_train_df.head() # - # ## 2.2 - Replace Nan value by interpolation def fillNan(df, method="linear"): """Fill Nan values of X_train of X_test with an interpolation.""" # Interpolate the missing value thanks to interpolation for each station_id columns_to_interpolate = ["value_" + str(i) for i in range(10)] # Group samples by station_id g_stations_df = df.groupby(["station_id"]) # Loop over the different groups for station_id, g in g_stations_df: # Interpolate the value of columns_to_interpolate values = g[columns_to_interpolate] values.interpolate(inplace=True, 
method=method, limit_direction='forward', axis=0) values.interpolate(inplace=True, method=method, limit_direction='backward', axis=0) # Replace the values of df df.loc[g.index, columns_to_interpolate] = values.values return df # + # Check the number of Nan values of X_train_df print("Before Fill Nan: ", X_train_df.isna().sum()) # Fill Nan of X_train and X_test X_train_df = fillNan(X_train_df) X_test_df = fillNan(X_test_df) # Fill Nan with the mean # X_train_df.fillna(X_train_df.mean(), inplace=True) # X_test_df.fillna(X_test_df.mean(), inplace=True) # Fill Nan with 0 # X_train_df.fillna(0, inplace=True) # X_test_df.fillna(0, inplace=True) # Drop rows with NaN # X_train_df.dropna(inplace=True) # X_test_df.dropna(inplace=True) # Y_train_df = Y_train_df.loc[X_train_df.index, :] # print("New Shape: ", X_train_df.shape, Y_train_df.shape) # Check the number of Nan values of X_train_df X_train_df.isna().sum() # - # ## 2.3 - Add the inverse of the distance # + def inverse(x): """Compute the inverse of x if x not null.""" if x != 0: return 1 / x else: return 10e10 # Loop over all columns of distance for i in range(10): # Compute new columns X_train_df["inv_distance_" + str(i)] = X_train_df["distance_" + str(i)].apply( lambda x: inverse(x)) X_test_df["inv_distance_" + str(i)] = X_test_df["distance_" + str(i)].apply( lambda x: inverse(x)) # - # ## 2.4 - Add the Benchmark def addBenchmark(df): """Add benchmark to df.""" # Compute the inverse of the distance distance_inv = (1. / df.filter(regex='^distance*', axis = 1)).values # Extract the value at the nearest station values = df.filter(regex='value_*', axis = 1) # Compute the benchmark numer = (distance_inv * values).sum(axis = 1) denom = (distance_inv * (values != 0)).sum(axis = 1) # Compute the benchmark benchmark = numer / denom df["Benchmark"] = benchmark def addBenchmark2(df): """Add benchmark with the squares of the distances to df.""" # Compute the inverse of the distance distance_inv = (1. 
/ df.filter(regex='^distance*', axis = 1)).values # Extract the value at the nearest station values = df.filter(regex='value_*', axis = 1) # Compute the benchmark numer = (distance_inv ** 2 * values).sum(axis = 1) denom = (distance_inv ** 2 * (values != 0)).sum(axis = 1) # Compute the benchmark benchmark = numer / denom df["Benchmark2"] = benchmark return df # Add the benchmark to the dataFrames addBenchmark(X_train_df) addBenchmark(X_test_df) # ## 2.5 - Add the Squares of the values in df def addSquares(df): """Add the square of the values of all columns.""" # Extract all columns of df columns = df.filter(regex='^distance*', axis = 1).columns # Loop over all column in columns for col in columns: # Add the square df[col + str("^2")] = df[col] ** 2 return df # Add the benchmark to the dataFrames addSquares(X_train_df) addSquares(X_test_df) # ## 2.6 - Clipping of the value between 0 and 100 def clipping(df, lower=0, upper=100): """Clipping of the value between 0 and 100.""" # Extract all columns of df columns = df.filter(regex='^value*', axis = 1).columns # Loop over all column in columns df[columns].clip(lower=lower, upper=upper, inplace=True, axis=0) return df # Clip the value of the dataFrame X_train_df = clipping(X_train_df) X_test_df = clipping(X_test_df) # + [markdown] heading_collapsed=true # ## 2.7 - Remove High Value # + hidden=true def removeHighValue(x_df, y_df): """Remove high value from df.""" # Extract index of low value x_df["max"] = x_df.filter(regex='^value*', axis = 1).max(axis=1) index_low_values = x_df[x_df["max"] > 100].index # Extract only low value x_df = x_df.loc[index_low_values, :] y_df = y_df.loc[index_low_values, :] return x_df, y_df # + hidden=true # Remove high value removeHighValue(X_train_df, Y_train_df); # - # ## 2.* - Extract a same number of samples of each stations # Number of stations print("Number of stations: ", len(X_train_df["station_id"].unique())) print("Number of samples per stations: ", X_train_df.groupby(["station_id"]).\ 
apply(lambda g: len(g["hdres_100"]))) # + # Initialisation of the seed np.random.seed(42) # Proportion of samples to extract from each station p = 0.75 # Loop to compute the lines to extract lines_train = [] lines_test = [] g_stations_df = X_train_df.groupby(["station_id"]) # Loop over the different groups for station_id, g in g_stations_df: # Extract the index of the current group idx = np.array(g.index) # Shuffle of idx np.random.shuffle(idx) # Extract a random proportion p of idx n = len(idx) n_p = int(n * p) lines_train.extend(idx[:n_p]) lines_test.extend(idx[n_p:]) # Extract the samples in X_train_df and Y_train_df X_train_sub_train_df = X_train_df.loc[lines_train, :] Y_train_sub_train_df = Y_train_df.loc[lines_train, :] X_train_sub_test_df = X_train_df.loc[lines_train, :] Y_train_sub_test_df = Y_train_df.loc[lines_train, :] # New length print("Shape: ", X_train_sub_train_df.shape) # - # ## 2.* - Extract values a numpy array # + # Extract all tables as numpy array X_train = X_train_df.iloc[:, 2:].values X_test = X_test_df.iloc[:, 2:].values y_train = Y_train_df.loc[:, "value"].values # Extract sub tables as numpy array X_train_sub_train = X_train_sub_train_df.iloc[:, 2:].values y_train_sub_train = Y_train_sub_train_df.loc[:, "value"].values X_train_sub_test = X_train_sub_test_df.iloc[:, 2:].values y_train_sub_test = Y_train_sub_test_df.loc[:, "value"].values # + [markdown] heading_collapsed=true # # 3 - Model # + [markdown] hidden=true # ## 3.1 - SVM # + hidden=true # Hyper-parameter of the SVM hp_svm = {"kernel": ['poly', 'rbf'], "gamma": ['scale'], "C": [1], # [10 ** (i) for i in np.linspace(-5, 5, 10)] "epsilon": [0.1]} # [10 ** (i) for i in np.linspace(-5, 5, 10)] # SVM model svm_model = svm.SVR() # + [markdown] hidden=true # ## 3.2 - LASSO # + hidden=true # Hyper-parameter of the SVM hp_lasso = {"alpha": [10], "fit_intercept": [False], "normalize": [True] } # SVM model lasso_model = sklin.Lasso() # + [markdown] heading_collapsed=true # # 4 - GridSearch # 
+ [markdown] hidden=true # ## 4.1 - Apply the GridSearch # + hidden=true # Metrics loss = metrics.mean_squared_error # mean_squared_log_error scoring = metrics.make_scorer(loss, greater_is_better=False) # Apply the gridSearch model = lasso_model # svm_model model_name = "Lasso" # SVM hp = hp_lasso # hp_svm gds = mod_selec.GridSearchCV(model, hp, scoring=scoring, cv=5, error_score="raise", verbose=1, n_jobs=-1) # + hidden=true # Fiting of the GridSearch gds.fit(X_train, y_train) # Display best score and best params print("Display best score: ", gds.best_score_) print("Display best params: ", gds.best_params_) # + [markdown] hidden=true # ## 4.2 - Make Predictions # + hidden=true # Make predictions y_pred_values = gds.predict(X_test) # Clip the predictions to be positives y_pred_values = np.where(y_pred_values < 0, 0, y_pred_values) # Disaply the scores print("Score on the training set: ", metrics.mean_squared_log_error(y_train, gds.predict(X_train)) # Save predictions Y_pred_df = X_test_df.ID.to_frame() Y_pred_df["value"] = y_pred_values Y_pred_df.to_csv("./Results/Predictions_GridSearch_" + model_name + ".csv", index=False) # - # # 5 - Neural Network # ## 5.1 - Model class NN(object): def __init__(self, shape=1, dropout=0, batch_normalisation=False, nb_neurons_l=16, epochs=5, batch_size=32): """Initialisation of the neural network.""" # Save parameters of the compilator self.epochs = epochs self.batch_size = batch_size # Compute the input shape input_shape = (shape, ) # Extract numbers of neurons if type(nb_neurons_l) == int: nb_neurons_l = [nb_neurons_l for i in range(3)] # Definition of the model self.model = Sequential() # Dense Layer if batch_normalisation: self.model.add(BatchNormalization()) self.model.add(Dense(nb_neurons_l[0], activation="relu", input_shape=input_shape)) if dropout[0] > 0: self.model.add(Dropout(dropout[0])) self.model.add(Dense(nb_neurons_l[1], activation="relu")) if dropout[1] > 0: self.model.add(Dropout(dropout[1])) 
        self.model.add(Dense(nb_neurons_l[2], activation="relu"))
        if dropout[2] > 0:
            self.model.add(Dropout(dropout[2]))
        # Single-output regression head; relu keeps predictions non-negative.
        self.model.add(Dense(1, activation="relu"))

        # Definition of the loss function
        self.model.compile(loss='mean_squared_logarithmic_error', optimizer="adam")

    def fit(self, X_train, y_train, X_val=None, y_val=None):
        """Fit the model; validation data is used only if both X_val and y_val are given."""
        if (X_val is None) or (y_val is None):
            self.model.fit(X_train, y_train, epochs=self.epochs, batch_size=self.batch_size, verbose=1)
        else:
            self.model.fit(X_train, y_train, epochs=self.epochs, batch_size=self.batch_size, verbose=1, validation_data=(X_val, y_val))

    def predict(self, X):
        """Return the model predictions for the dataset given in argument."""
        # Make predictions
        y_pred = self.model.predict(X)
        return y_pred

    def score(self, X, y):
        """Return the mean squared log error between the predictions on X and the true y."""
        score = metrics.mean_squared_log_error(y, self.predict(X))
        return score

    def makePredictions(self, X_test_df, X_train, y_train):
        """Compute the predictions for X_test_df and save them to ./Results/Predictions_NN.csv."""
        # Make predictions (iloc[:, 2:] drops the ID and station_id columns)
        y_pred_values = self.predict(X_test_df.iloc[:, 2:].values)

        # Clip the predictions to be positives
        y_pred_values = np.where(y_pred_values < 0, 0, y_pred_values)

        # Display the scores
        print("Score on the training set: ", metrics.mean_squared_log_error(y_train, self.model.predict(X_train)))

        # Save predictions
        Y_pred_df = X_test_df.ID.to_frame()
        Y_pred_df["value"] = y_pred_values
        Y_pred_df.to_csv("./Results/Predictions_NN.csv", index=False)


# ## 5.2 - CrossValidation

def CrossValidation(X_train_df, Y_train_df, model_hp, cv=5, n_jobs=-1):
    """Run a cv-fold cross validation of NN(**model_hp) and return the mean fold score.

    NOTE(review): despite the n_jobs parameter and the 'parallelisation'
    comment below, the folds run sequentially.
    """
    # Initialisation of the time
    start = time.time()

    # Extract all tables as numpy array
    X_train = np.array(X_train_df.iloc[:, 2:].values)
    y_train = np.array(Y_train_df.loc[:, "value"].values).reshape((-1, 1))

    # Shape of data
    n = np.shape(X_train)[0]
    step = n // cv

    def oneFold(k, cv=cv):
        """Execute one fold of the cv (the last fold absorbs the remainder rows)."""
        # Index for the training set and testing set
        if k == cv - 1:
            idx_test = np.arange(k * step, n)
        else:
            idx_test = np.arange(k * step, (k + 1) * step)
        idx_train = np.delete(np.arange(0, n), idx_test)

        # Extract the kth X_train and X_test batch
        X_train_k = X_train[idx_train, :]
        y_train_k = y_train[idx_train, :]
        X_test_k = X_train[idx_test, :]
        y_test_k = y_train[idx_test, :]

        # Creation of the model
        model = NN(**model_hp)

        # Fitting of the model on this batch
        model.fit(X_train_k, y_train_k, X_test_k, y_test_k)

        # Compute the score for this fold
        score_k = model.score(X_test_k, y_test_k)
        print("Score k on Test: ", score_k)
        return score_k

    # Parallelisation of the cv (sequential in practice; see NOTE above)
    all_scores = [oneFold(k) for k in tqdm.tqdm(range(cv))]

    # Display the time required
    print("Time of the cross-validation: {:4f}, Score: {:4f}".format(
        time.time() - start, np.mean(all_scores)))
    return np.mean(all_scores)


# ## 5.3 - Architecture and Compilation

# +
# Definition of the model
model_hp = {
    "shape": len(X_train_df.iloc[0, 2:]),
    "dropout": [0, 0, 0],
    "epochs": 10,
    "nb_neurons_l": 16,
    "batch_size": 64
}

# Fitting of the model
CrossValidation(X_train_df, Y_train_df, model_hp)
# -

# ## 5.4 - Make predictions

# +
# Visualise how asymmetric the MSLE penalty is for two true values.
x = [10 ** (i) for i in np.linspace(-5, 2.99, 100)]
x_2 = [10 ** (i) for i in np.linspace(-5, 2, 100)]
y_300 = [(np.log((300 + 1) / (x_i + 1))) ** 2 for x_i in x]
y_10 = [(np.log((10 + 1) / (x_i + 1))) ** 2 for x_i in x_2]
fig, axs = plt.subplots(1, 2, figsize=(20, 8), dpi=150)
axs[0].plot(x, y_300)
axs[0].set_xlabel("y_pred")
axs[0].set_ylabel("MSLE")
axs[0].set_title("MSLE for y_true = 300")
axs[1].plot(x_2, y_10)
axs[1].set_xlabel("y_pred")
axs[1].set_ylabel("MSLE")
axs[1].set_title("MSLE for y_true = 10")
fig.savefig("./Results/MSLE_Effect.png")
# -

# Split the Training set into a Training and a Validation sets
X_train_split, X_val, y_train_split, y_val = mod_selec.train_test_split(X_train, y_train, test_size=0.3, random_state=42)

# +
# Definition of the model
model = Sequential()

# Dense Layer
# model.add(BatchNormalization())
model.add(Dense(16, activation="relu", input_shape=(len(X_train[0]), )))
# model.add(Dropout(0.25))
model.add(Dense(16, activation="relu"))
model.add(Dense(16, activation="relu"))
# # model.add(Dropout(0.25))
model.add(Dense(1, activation="relu"))

# Definition of the optimizer
# NOTE(review): sgd is defined but the compile call below uses "adam";
# confirm which optimizer was intended.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

# Definition of the loss function
model.compile(loss='mean_squared_logarithmic_error', optimizer="adam")

# Fitting of the model
model.fit(X_train, y_train, epochs=7, batch_size=64, verbose=1)
# -

# Inspect the worst per-row MSLE contributions on the training set.
errors_df = X_train_df.filter(regex='value_*', axis = 1)
errors_df["pred"] = model.predict(X_train)
errors_df["true"] = Y_train_df.loc[:, "value"]
errors_df["errors"] = (np.log((errors_df["pred"] + 1) / (errors_df["true"] + 1)) ** 2)
errors_df.sort_values("errors", ascending=False, inplace=True)
errors_df.head(10)

# +
# Make predictions
y_pred_values = model.predict(X_test)

# Clip the predictions to be positives
y_pred_values = np.where(y_pred_values < 0, 0, y_pred_values)

# Display the scores
print("Score on the training set: ", metrics.mean_squared_log_error(y_train, model.predict(X_train)))

# Save predictions
Y_pred_df = X_test_df.ID.to_frame()
Y_pred_df["value"] = y_pred_values
Y_pred_df.to_csv("./Results/Predictions_NN.csv", index=False)

# + [markdown] heading_collapsed=true
# # 6 - LightGbm

# + [markdown] hidden=true
# ## 6.1 - Cross-Validation of the model

# + hidden=true
# Group-wise CV: each fold holds out one fifth of the stations.
stats_id = np.unique(X_train_df["station_id"].values)
stats_groups = np.random.permutation(stats_id).reshape((5, -1))
l_train, l_val = [], []
for i in range(5):
    l_val.append(stats_groups[i])
    l_train.append(np.setdiff1d(stats_id, stats_groups[i]))

# + hidden=true
# Parameters of the model
params = {
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'root_mean_squared_error',
    'max_depth': 5,
    'num_leaves': 25,
    'learning_rate': 0.01,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5
}

# Cross-Validation
# NOTE(review): unlike the earlier iloc[:, 2:] extractions, .values here
# keeps the ID and station_id columns as features — confirm this is
# intended before comparing scores with the other models.
l_train_acc, l_val_acc = [], []
for i in range(5):
    X_train_split_df = X_train_df[X_train_df.station_id.isin(l_train[i])].reset_index(drop=True)
    X_val_df = X_train_df[X_train_df.station_id.isin(l_val[i])].reset_index(drop=True)
    X_train_split = X_train_split_df.values
    y_train_split = Y_train_df[X_train_df.station_id.isin(l_train[i])].reset_index(drop=True)["value"].values
    X_val = X_val_df.values
    y_val = Y_train_df[X_train_df.station_id.isin(l_val[i])].reset_index(drop=True)["value"].values
    lgb_train = lgb.Dataset(X_train_split, y_train_split)
    lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train)
    gbm = lgb.train(params, lgb_train, valid_sets = [lgb_eval], num_boost_round = 10000, early_stopping_rounds = 200)
    train_preds = gbm.predict(X_train_split)
    l_train_acc.append(metrics.mean_squared_log_error(y_train_split, train_preds))
    val_preds = gbm.predict(X_val)
    l_val_acc.append(metrics.mean_squared_log_error(y_val, val_preds))

# + hidden=true
# Display the scores obtained
print("Train acc: ", l_train_acc)
print("Val acc: ", l_val_acc)

# + [markdown] hidden=true
# ## 6.2 - Make Predictions

# + hidden=true
# Final parameters
model_params = {
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'max_depth': 5,
    'num_leaves': 25,
    'learning_rate': 0.01,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5
}

# Fitting of the model (on the ID-stripped X_train array this time)
lgb_train = lgb.Dataset(X_train, y_train)
gbm = lgb.train(model_params, lgb_train, num_boost_round = 500)

# Computation of the predictions
y_pred_values = gbm.predict(X_test)

# Clip the predictions to be positives
y_pred_values = np.where(y_pred_values < 0, 0, y_pred_values)

# Display the scores
print("Score on the training set: ", metrics.mean_squared_log_error(y_train, gbm.predict(X_train)))

# Save predictions
Y_pred_df = X_test_df.ID.to_frame()
Y_pred_df["value"] = y_pred_values
Y_pred_df.to_csv("./Results/Predictions_LightGBM.csv", index=False)

# + hidden=true
Code/Pierre_Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> # <tr><td align="right" style="background-color:#ffffff;"> # <img src="../images/logo.jpg" width="20%" align="right"> # </td></tr> # <tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;"> # Prepared by <NAME><br> # <NAME> | July 04, 2019 (updated) # </td></tr> # <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;"> # This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. # </td></tr> # </table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ 
\newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # <h2> <font color="blue"> Solution for </font>Entanglement and Superdense Coding</h2> # <a id="task1"></a> # <h3> Task 1</h3> # # Verify the correctness of the above protocol. # # For each pair of $ (x,y) \in \left\{ (0,0), (0,1), (1,0),(1,1) \right\} $: # <ul> # <li> Create a quantum curcuit with two qubits: Asja's and Balvis' qubits.</li> # <li> Both are initially set to $ \ket{0} $.</li> # <li> Apply h-gate (Hadamard) to the first qubit. </li> # <li> Apply cx-gate (CNOT) with parameters first-qubit and second-qubit. </li> # </ul> # # Assume that they are separated now. # # <ul> # <li> If $ x $ is 1, then apply z-gate to the first qubit. </li> # <li> If $ y $ is 1, then apply x-gate (NOT) to the first qubit. </li> # </ul> # # Assume that Asja sends her qubit to Balvis. # # <ul> # <li> Apply cx-gate (CNOT) with parameters first-qubit and second-qubit.</li> # <li> Apply h-gate (Hadamard) to the first qubit. </li> # <li> Measure both qubits, and compare the results with pair $ (x,y) $. </li> # </ul> # <h3> Solution </h3> # + # import all necessary objects and methods for quantum circuits from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer all_pairs = ['00','01','10','11'] for pair in all_pairs: # create a quantum curcuit with two qubits: Asja's and Balvis' qubits. # both are initially set to |0>. 
qreg = QuantumRegister(2) # quantum register with 2 qubits creg = ClassicalRegister(2) # classical register with 2 bits mycircuit = QuantumCircuit(qreg,creg) # quantum circuit with quantum and classical registers # apply h-gate (Hadamard) to the first qubit. mycircuit.h(qreg[0]) # apply cx-gate (CNOT) with parameters first-qubit and second-qubit. mycircuit.cx(qreg[0],qreg[1]) # they are separated now. # if a is 1, then apply z-gate to the first qubit. if pair[0]=='1': mycircuit.z(qreg[0]) # if b is 1, then apply x-gate (NOT) to the first qubit. if pair[1]=='1': mycircuit.x(qreg[0]) # Asja sends her qubit to Balvis. # apply cx-gate (CNOT) with parameters first-qubit and second-qubit. mycircuit.cx(qreg[0],qreg[1]) # apply h-gate (Hadamard) to the first qubit. mycircuit.h(qreg[0]) # measure both qubits mycircuit.measure(qreg,creg) # compare the results with pair (a,b) job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=100) counts = job.result().get_counts(mycircuit) for outcome in counts: reverse_outcome = '' for i in outcome: reverse_outcome = i + reverse_outcome print("(a,b) is",pair,": ",reverse_outcome,"is observed",counts[outcome],"times")
bronze/.ipynb_checkpoints/B50_Superdense_Coding_Solutions-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="s7bf1mZBKZL2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="7245528f-df45-4623-fe5d-b9916fd2e9ac" executionInfo={"status": "ok", "timestamp": 1585064245927, "user_tz": -60, "elapsed": 52353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GibCmJm4I5alAYk2fdaQo4e-Il9gszBYLnB_f8Z5w=s64", "userId": "16290332025867110084"}} import pickle import pandas as pd import nltk import nltk nltk.download('stopwords') print('------------') nltk.download('punkt') print('------------') nltk.download('wordnet') from nltk.corpus import stopwords from nltk.tokenize import punkt from nltk.corpus.reader import wordnet from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer import numpy as np # !pip install 'dash==0.36' import dash import dash_core_components as dcc import dash_html_components as html import dash_table from dash.dependencies import Input, Output, State import plotly.graph_objs as go import re #Read the csv file from drive # !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # Authenticate and create the PyDrive client. 
# Authenticate against Google Drive so the trained artifacts can be fetched.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# + id="q4ijOKYLK3Mv" colab_type="code" colab={}
def _load_drive_pickle(file_id, local_name):
    """Download a pickle from Google Drive by file id and unpickle it.

    NOTE(review): unpickling executes arbitrary code -- only load artifacts
    from a trusted Drive account.
    """
    downloaded = drive.CreateFile({'id': file_id})
    downloaded.GetContentFile(local_name)
    with open(local_name, 'rb') as data:
        return pickle.load(data)


# Load the best performing model (logistic regression classifier).
lrc_model = _load_drive_pickle("1zj05sWx5oUfQ1TCb2BUdL6UjHi2rfM3k", 'best_lrc.pickle')

# + id="_gKJBPYBMduL" colab_type="code" colab={}
# Fitted TF-IDF vectorizer (must match the one used at training time).
tfidf = _load_drive_pickle("1OmkdxXkvCw67pg4DjMMWaazBNKNi_F8j", 'tfidf.pickle')

# + id="Bskq27aoMr_x" colab_type="code" colab={}
# Numeric label produced by the model for each category name.
label_codes = {
    'none': 0,
    'soft': 1,
    'tech': 2,
}

# + id="IIyoomkcMwKQ" colab_type="code" colab={}
punctuation_signs = list("?:!.,;")
stop_words = list(stopwords.words('english'))


def create_features_from_text(text):
    """Turn one raw text into the TF-IDF feature row the classifier expects.

    Mirrors the training-time preprocessing: whitespace cleanup,
    lowercasing, punctuation removal, possessive ("'s") stripping,
    verb lemmatization and stop-word removal, then TF-IDF vectorization.

    Parameters
    ----------
    text : str
        Raw input text.

    Returns
    -------
    numpy.ndarray
        Dense array of shape (1, n_features).
    """
    df = pd.DataFrame(columns=['Content'])
    df.loc[0] = text

    # Normalize whitespace and strip double quotes.  regex=False keeps the
    # replacements literal.
    parsed = df['Content'].str.replace("\r", " ", regex=False)
    parsed = parsed.str.replace("\n", " ", regex=False)
    parsed = parsed.str.replace(" ", " ", regex=False)
    parsed = parsed.str.replace('"', '', regex=False)
    parsed = parsed.str.lower()

    # BUG FIX: the original relied on pandas' legacy regex=True default, so
    # punctuation was compiled as a regex pattern -- "?" is an invalid
    # pattern and "." matches every character.  Replace literally instead.
    for punct_sign in punctuation_signs:
        parsed = parsed.str.replace(punct_sign, '', regex=False)
    parsed = parsed.str.replace("'s", "", regex=False)

    # Lemmatize every token as a verb, as done during training.
    wordnet_lemmatizer = WordNetLemmatizer()
    text_words = parsed.loc[0].split(" ")
    lemmatized_text = " ".join(
        wordnet_lemmatizer.lemmatize(word, pos="v") for word in text_words
    )
    parsed = pd.Series([lemmatized_text])

    # Drop English stop words; these word-boundary patterns ARE regexes.
    for stop_word in stop_words:
        regex_stopword = r"\b" + stop_word + r"\b"
        parsed = parsed.str.replace(regex_stopword, '', regex=True)

    # BUG FIX: the original called Series.rename(columns=...), which is not
    # a valid Series.rename argument; the vectorizer only needs the Series.
    features = tfidf.transform(parsed).toarray()
    return features


# + id="aZGGXWvRN4VV" colab_type="code" colab={}
def get_category_name(category_id):
    """Map a numeric prediction back to its human-readable category name.

    Returns None for an id that is not in ``label_codes``.
    """
    for category, id_ in label_codes.items():
        if id_ == category_id:
            return category
    return None


# + id="j6DRRLqQOVvB" colab_type="code" colab={}
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div(
    [
        html.I("Try typing input and Press Enter "),
        html.Br(),
        dcc.Input(id="input1", type="text"),
        html.Div(id="output"),
    ]
)


@app.callback(
    Output("output", "children"),
    [Input("input1", "value")],
)
def update_output(input1):
    """Classify the typed text and render the predicted category.

    Returns None (no output) while the input is empty.
    """
    # BUG FIX: the original tested ``input1 is not ''`` -- an identity
    # comparison against a string literal, whose result is
    # implementation-defined.  Truthiness covers both None and ''.
    if not input1:
        return None
    try:
        # Compute the feature row once and reuse it for both model calls
        # (the original re-ran the whole preprocessing pipeline twice).
        features = create_features_from_text(input1)
        prediction_lrc = lrc_model.predict(features)[0]
        prediction_lrc_proba = lrc_model.predict_proba(features)[0]
        category_lrc = get_category_name(prediction_lrc)
        pred = prediction_lrc_proba.max() * 100
        return 'The predicted category using the LRC model is {} \n\nThe conditional probability is: {}'.format(category_lrc, pred)
    except ValueError:
        # BUG FIX: corrected the misspelled user-facing message
        # ("Error accured").
        return 'Error occurred'


if __name__ == '__main__':
    app.run_server(debug=True)

# +
id="gqOrbgpgUD2F" colab_type="code" colab={}
App/theApplication.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import mmh3 import numpy as np import random import string from bitarray import bitarray # <h1>Probabilistic Data Structures - Bloom Filter</h1> # <h2>Data Structure</h2> # <h4>Definition</h4> # # <p align="justify">In computer science, a data structure is a data organization, management, and storage format that enables efficient access and modification. More precisely, a data structure is a collection of data values, the relationships among them, and the functions or operations that can be applied to the data<sup>1</sup>. A data structure is a particular way of organizing data in a computer so that it can be used effectively<sup>2</sup>.</p></br> # # <h4>Usage</h4> # # <p align="justify">Data structures serve as the basis for abstract data types (ADT). The ADT defines the logical form of the data type. The data structure implements the physical form of the data type. Different types of data structures are suited to different kinds of applications, and some are highly specialized to specific tasks. Usually, efficient data structures are key to designing efficient algorithms. Some formal design methods and programming languages emphasize data structures, rather than algorithms, as the key organizing factor in software design. Data structures can be used to organize the storage and retrieval of information stored in both main memory and secondary memory.</p> # # <h4>Implementation</h4> # # <p align="justify">Data structures are generally based on the ability of a computer to fetch and store data at any place in its memory, specified by a pointer-a bit string, representing a memory address, that can be itself stored in memory and manipulated by the program. 
Thus, the array and record data structures are based on computing the addresses of data items with arithmetic operations, while the linked data structures are based on storing addresses of data items within the structure itself. # # The implementation of a data structure usually requires writing a set of procedures that create and manipulate instances of that structure. The efficiency of a data structure cannot be analyzed separately from those operations. This observation motivates the theoretical concept of an abstract data type, a data structure that is defined indirectly by the operations that may be performed on it, and the mathematical properties of those operations (including their space and time cost).</p> # # <h4>Data Structure Types</h4> # # <p align="justify">As data structures are used to store data in an organized form, and since data is the most crucial entity in computer science, the true worth of data structures is clear. No matter what problem are you solving, in one way or another you have to deal with data - whether it’s an employee’s salary, stock prices, a grocery list, or even a simple telephone directory. # Based on different scenarios, data needs to be stored in a specific format. We have a handful of data structures that cover our need to store data in different formats. 
# # Here are some of the data structure types<sup>3</sup>:</p> # <div id="container" style="white-space:nowrap"> # <div id="image" style="display:inline;"> # <img src='images/data_structure_types.png' alt="Data Structure Types" width=600 height=400 align='right'/> # </div> # <div id="texts" style="display:inline; white-space:nowrap;"> # <ol align='left'> # <li>Data types # <ul> # <li>Primitive types</li> # <li>Composite types or non-primitive type</li> # <li>Abstract data types</li> # </ul> # </li> # <li>Linear data structures # <ul> # <li>Arrays</li> # <li>Lists</li> # </ul> # </li> # <li>Trees (non-linear) # <ul> # <li>Binary trees</li> # <li>B-trees</li> # <li>Heaps</li> # <li>Trees</li> # <li>Multi-way trees</li> # <li>Space-partitioning trees</li> # <li>Application-specific trees</li> # </ul> # </li> # <li>Hash-based structures</li> # <li>Graphs</li> # <li>Other</li> # </ol> # </div> # </div> # # <p align="justify">Hundreds of books have been written on why data structures and algorithms are important. Particularly impressive are <NAME>'s four volumes, entitled <a href="https://en.wikipedia.org/wiki/The_Art_of_Computer_Programming">"The Art of Computer Programming"</a>, in which data structures and algorithms are discussed in over 2,500 pages. One author has even titled a book answering the question "why data structures are so important". This is <NAME>'s book <a href="https://en.wikipedia.org/wiki/Algorithms_%2B_Data_Structures_%3D_Programs">"Algorithms + Data Structures = Programs"</a>, which looks again at data structures and fundamental algorithms in programming<sup>4</sup>.</p> # # <h4>Algorhitms</h4> # # <p align="justify">Speaking of data structures, we can not pass the algorithms. In mathematics and computer science, an algorithm is a finite sequence of well-defined, computer-implementable instructions, typically to solve a class of problems or to perform a computation. 
Algorithms are always unambiguous and are used as specifications for performing calculations, data processing, automated reasoning, and other tasks. Data structures and algorithms are the basis of programming.</p> # # <p align="justify">We cannot talk about the efficiency of algorithms and data structures without using the term "algorithm complexity". The complexity of an algorithm is a rough estimate of the number of steps that the algorithm will take depending on the size of the input data. This is a rough estimate that is concerned with the order of magnitude of the number of steps, not the exact number. We will not dig deeper into that here, but if you are interested you can check more about <a href="https://en.wikipedia.org/wiki/Time_complexity">algorithm complexity</a></p>
Today, it is not uncommon to process terabyte or petabyte-sized corporation's data and gigabit-rate streams. # On the other hand, nowadays every company wants to fully # understand the data it has, in order to find value and act on it. This led # to the rapid growth in the Big Data Software market. However, # the traditional technologies which include data structures and # algorithms, become ineffective when dealing with Big Data. Therefore, # many software practitioners, again and again, refer to computer science # for the most appropriate solutions and one option is to use probabilistic # data structures and algorithms.</p> # # <p align='justify'>When processing large data sets, we often want to do some simple checks, such as number of unique items, most frequent items, and whether some items exist in the data set. The common approach is to use some kind of deterministic data structure like HashSet or Hashtable for such purposes. But when the data set we are dealing with becomes very large, such data structures are simply not feasible because the data is too big to fit in the memory. It becomes even more difficult for streaming applications which typically require data to be processed in one pass and perform incremental updates. # # Probabilistic data structures are a group of data structures that are extremely useful for big data and streaming applications. Generally speaking, these data structures use hash functions to randomize and compactly represent a set of items. Collisions are ignored but errors can be well-controlled under certain threshold. The more number of hash function the more accurate result. Comparing with error-free approaches, these algorithms use much less memory and have constant query time. 
They usually support union and intersection operations and therefore can be easily parallelized<sup>6</sup>.</p> # # <p align='justify'>By using this type of data structure, we can only safely assume that we have an approximate solution which may or may not be the exact answer, but it is in the right direction. These data structures are proven to use either fixed or sublinear memory and have constant execution time. The answers may not be exact and have some probability of error. # Any probabilistic data structure will rely on some form of probability, such as using randomness, hashing, etc., to reach an approximate solution. # Some of the data structures are rather proven alternative approaches for a data structure, but often they are needed for these cases<sup>7</sup>: # <ol> # <li>Analyzing / mining big data sets (more than what a deterministic data structure can handle).</li> # <li>Statistical analysis.</li> # <li>A stream of data that needs an answer afterwards.</li> # </ol> # </p> # # <p align='justify'>In the majority of cases these data structures use hash functions to randomize the items, as mentioned before. Because they ignore collisions they keep the size constant, but this is also a reason why they can't give you exact values.
The advantages they bring<sup>8</sup>: # <ol> # <li>Use small amount of memory (you can control how much)</li> # <li>Easily parallelizable (hashes are independent)</li> # <li>Constant query time (not even amortized constant like in dictionary)</li> # </ol> # # Here are some of the Probabilistic Data Structure Types: # <ol> # <li>Membership querying (Bloom filter, Counting Bloom filter, Quotient filter, Cuckoo filter).</li> # <li>Cardinality (Linear counting, probabilistic counting, LogLog, HyperLogLog, HyperLogLog++).</li> # <li>Frequency (Majority algorithm, Frequent, Count Sketch, Count-Min Sketch).</li> # <li>Rank (Random sampling, q-digest, t-digest).</li> # <li>Similarity (LSH, MinHash, SimHash).</li> # </ol> # </p> # # <p>We will now look at one of the most commonly used probabilistic data structures - it is called Bloom filter.</p> # <h3>Bloom filter<sup>9</sup></h3> # <h6>Bloom filters test if an element is part of a set, without needing to store the entire set.</h6> # # <p align='justify'>A Bloom filter is a space-efficient probabilistic data structure, conceived by <NAME> in 1970, that is used to test whether an element is a member of a set. False positive matches are possible, but false negatives are not – in other words, a query returns either "possibly in set" or "definitely not in set". Elements can be added to the set, but not removed (though this can be addressed with the counting Bloom filter variant); the more items added, the larger the probability of false positives. # # One simple way to think about Bloom filters is that they support insert and lookup in the same way the hash tables do, but using very little space, i.e., one byte per item or less. 
This is a significant saving when you have many items and each item takes up, say 8 bytes.</p> # # <h4>Algorithm description</h4> # # <img src='images/bloom_filter.png' alt="Bloom Filter" width=400 height=200 align='right'/> # <p align='justify'>An empty Bloom filter is a bit array of $m$ bits, all set to 0. There must also be $k$ different hash functions defined, each of which maps or hashes some set element to one of the $m$ array positions, generating a uniform random distribution. Typically, $k$ is a small constant which depends on the desired false error rate $\varepsilon$, while $m$ is proportional to $k$ and the number of elements to be added. # # To add an element, feed it to each of the $k$ hash functions to get $k$ array positions. Set the bits at all these positions to 1. To add/insert an item $x$ into the Bloom filter, we first compute the $k$ hash functions on $x$, and for each resulting hash, set the corresponding slot of A to 1 - see picture below<sup>10</sup>. # <img src='images/bloom_filter_insert.png' alt="Bloom Filter Insert" width=400 height=200 /> # Example of insert into Bloom filter. In this example, an initially empty Bloom filter has $m$=8, and $k$=2 (two hash functions). To insert an element $x$, we first compute the two hashes on $x$, the first one of which generates 1 and the second one generates 5. We proceed to set A[1] and A[5] to 1. To insert $y$, we also compute the hashes and similarly, set positions A[4] and A[6] to 1. # # To query for an element (test whether it is in the set), feed it to each of the $k$ hash functions to get $k$ array positions. If any of the bits at these positions is 0, the element is definitely not in the set; if it were, then all the bits would have been set to 1 when it was inserted. If all are 1, then either the element is in the set, or the bits have by chance been set to 1 during the insertion of other elements, resulting in a false positive. 
Similarly to insert, lookup computes $k$ hash functions on $x$, and the first time one of the corresponding slots of A equal to 0, the lookup reports the item as Not Present, otherwise it reports the item as Present. # <img src='images/bloom-filter_lookup.png' alt="Bloom Filter Lookup" width=400 height=200 /> # Example of a lookup on a Bloom filter. We take the resulting Bloom filter from picture above, where we inserted elements $x$ and $y$. To do a lookup on $x$, we compute the hashes (which are the same as in the case of an insert), and we return Found/Present, as both bits in corresponding locations equal 1. Then we do a lookup of an element $z$, which we never inserted, and its hashes are respectively 4 and 5, and the bits at locations A[4] and A[5] equal 1, thus we again return Found/Present. This is an example of a false positive, where two other items together set the bits of the third item to 1. An example of a negative (negative is always true), would be if we did a lookup on an element $w$, whose hashes are 2 and 5, (0 and 1), or 0 and 3 (0 and 0). If the Bloom filter reports an element as Not Found/Not Present, then we can be sure that this element was never inserted into a Bloom filter. # # Asymptotically, the insert operation on the Bloom filter costs $O(k)$. Considering that the number of hash functions rarely goes above 12, this is a constant-time operation. The lookup might also need $O(k)$, in case the operation has to check all the bits, but most unsuccessful lookups will give up way before; usually on average, an unsuccessful lookup takes about 1-2 probes before giving up. # # Removing an element from this simple Bloom filter is impossible because there is no way to tell which of the $k$ bits it maps to should be cleared. Although setting any one of those $k$ bits to zero suffices to remove the element, it would also remove any other elements that happen to map onto that bit. 
Since the simple algorithm provides no way to determine whether any other elements have been added that affect the bits for the element to be removed, clearing any of the bits would introduce the possibility of false negatives. # # It is often the case that all the keys are available but are expensive to enumerate (for example, requiring many disk reads). When the false positive rate gets too high, the filter can be regenerated; this should be a relatively rare event. # </p> # # <p align='justify'>For example, if we have inserted $\{x, y, z\}$ into the bloom filter, with $k=3$ hash functions like the picture above. Each of these three elements has three bits each set to 1 in the bit array. When we look up for $w$ in the set, because one of the bits is not set to 1, the bloom filter will tell us that it is not in the set. # # Bloom filter has the following properties: # <ul> # <li>False positive is possible when the queried positions are already set to 1. But false negative is impossible.</li> # <li>Query time is $O(k)$.</li> # <li>Union and intersection of bloom filters with same size and hash functions can be implemented with bitwise OR and AND operations.</li> # <li>Cannot remove an element from the set.</li> # </ul> # # Bloom filter requires the following inputs: # <ol> # <li>$m$: size of the bit array</li> # <li>$n$: estimated insertion</li> # <li>$p$: false positive probability</li> # </ol> # </p> # # <h4>Space and time advantages</h4> # # <p align='justify'>While risking false positives, Bloom filters have a substantial space advantage over other data structures for representing sets, such as self-balancing binary search trees, tries, hash tables, or simple arrays or linked lists of the entries. 
Most of these require storing at least the data items themselves, which can require anywhere from a small number of bits, for small integers, to an arbitrary number of bits, such as for strings (tries are an exception since they can share storage between elements with equal prefixes). However, Bloom filters do not store the data items at all, and a separate solution must be provided for the actual storage. Linked structures incur an additional linear space overhead for pointers. A Bloom filter with a 1% error and an optimal value of $k$, in contrast, requires only about 9.6 bits per element, regardless of the size of the elements. This advantage comes partly from its compactness, inherited from arrays, and partly from its probabilistic nature. The 1% false-positive rate can be reduced by a factor of ten by adding only about 4.8 bits per element. # # To understand its space efficiency, it is instructive to compare the general Bloom filter with its special case when $k = 1$. If $k = 1$, then in order to keep the false positive rate sufficiently low, a small fraction of bits should be set, which means the array must be very large and contain long runs of zeros. The information content of the array relative to its size is low. The generalized Bloom filter ($k$ greater than 1) allows many more bits to be set while still maintaining a low false positive rate; if the parameters ($k$ and $m$) are chosen well, about half of the bits will be set, and these will be apparently random, minimizing redundancy and maximizing information content. # # Let's detail a little bit on the space-efficiency. If you want to store a long list of items in a set, you could do in various ways. You could store that in a hashmap and then check existence in the hashmap which would allow you to insert and query very efficiently. However, since you will be storing the items as they are, it will not be very space efficient. 
# # If we want to also be space efficient, we could hash the items before putting into a set. We could use bit arrays to store hash of the items. Let's also allow hash collision in the bit array. That is pretty much how Bloom Filters work, they are under the hood bit arrays which allow hash collisions; that produces false positives. Hash collisions exist in the Bloom Filters by design. Otherwise, they would not be compact. Whenever a list or set is used, and space efficiency is important and significant, Bloom filter should be considered<sup>11</sup>. # # <b><i>Bloom filters are deterministic.</i></b> If we are using the same size and same number of hash functions as well as the hash function, bloom filter is deterministic on which items it gives positive response and which items it gives negative response. For an item $x$, if it gives it is probably in to that particular item, it will give the same response as 5 minutes later, 1 hour later, 1 day later and 1 week later. It was "probabilistic" so the response of the bloom filter should be somehow random, right? Not really. It is probabilistic in the sense that you cannot know which item it will say it is probably in. Otherwise, when it says that it is probably in, it keeps saying the same thing. # </p> # # <h4>Controlling accuracy with memory</h4> # # <p align='justify'>The more memory you give a bloom filter, the more accurate it will become. Why’s that? Simple: the more memory it has to store data in, the more information it can store, and the more accurate it can be. # But, of course, we use a Bloom filter to save memory, so you need to find a balance between memory usage and the number of false positives that are acceptable. # Given these facts, you may already get a feeling for when to use a Bloom filter. In general terms, you use them to reduce the load on a system by reducing expense lookups in some data tables at a moderate memory expense. This data table can be anything. 
Some examples: # <ul> # <li>A database</li> # <li>A filesystem</li> # <li>Some kind of key-value storage</li> # </ul> # </p> # # <h4>Probability of false positives</h4> # # <p align='justify'>While Bloom Filters can say "definitely not in" with confidence, they will also say possibly in for some number of items. Depending on the application, this could be a huge downside or it could be relatively okay. If it is okay to introduce false positives every now and then, you should definitely consider using Bloom Filters for membership existence for set operations. # Also note that if you are decreasing the false positive rate arbitrarily, you would increase the number of hash functions which would add latency to both insertion and membership existence. One more thing in this section is that, if the hash functions are independent each other and distribute the input space pretty uniformly, then the theoretic false positive rate can be satisfied. Otherwise, the false positive rate of the bloom filter will be worse than the theoretic false positive rate as hash functions correlate each other and hash collisions would occur more often than desired. When using a Bloom filter, we should consider the potential effects of false positives. # # Assume that a hash function selects each array position with equal probability. 
If $m$ is the number of bits in the array, the probability that a certain bit is not set to 1 by a certain hash function during the insertion of an element is # # $${\displaystyle 1-{\frac {1}{m}}}$$ # # If $k$ is the number of hash functions and each has no significant correlation between each other, then the probability that the bit is not set to 1 by any of the hash functions is # # $${\displaystyle \left(1-{\frac {1}{m}}\right)^{k}}$$ # # We can use the well-known identity for $e−1$ # # $${\displaystyle \lim _{m\to \infty }\left(1-{\frac {1}{m}}\right)^{m}={\frac {1}{e}}}$$ # to conclude that, for large $m$, # # $${\displaystyle \left(1-{\frac {1}{m}}\right)^{k}=\left(\left(1-{\frac {1}{m}}\right)^{m}\right)^{\frac {k}{m}}\approx e^{-\frac {k}{m}}}$$ # # If we have inserted $n$ elements, the probability that a certain bit is still 0 is # # $${\displaystyle \left(1-{\frac {1}{m}}\right)^{kn}\approx e^{-\frac {kn}{m}}}$$ # the probability that it is 1 is therefore # # $${\displaystyle 1-\left(1-{\frac {1}{m}}\right)^{kn}\approx 1-e^{-\frac {kn}{m}}}$$ # # Now test membership of an element that is not in the set. Each of the $k$ array positions computed by the hash functions is 1 with a probability as above. The probability of all of them being 1, which would cause the algorithm to erroneously claim that the element is in the set, is often given as # # $${\displaystyle \varepsilon =\left(1-\left[1-{\frac {1}{m}}\right]^{kn}\right)^{k}\approx \left(1-e^{-\frac {kn}{m}}\right)^{k}}$$ # # This is not strictly correct as it assumes independence for the probabilities of each bit being set. However, assuming it is a close approximation we have that the probability of false positives decreases as $m$ (the number of bits in the array) increases, and increases as $n$ (the number of inserted elements) increases. 
#
# The true probability of a false positive, without assuming independence, is
#
# $${\displaystyle {\frac {1}{m^{k(n+1)}}}\sum _{i=1}^{m}i^{k}i!{m \choose i}\left\{{kn \atop i}\right\}}$$
# where the {braces} denote Stirling numbers of the second kind.
#
# An alternative analysis arriving at the same approximation without the assumption of independence is given by Mitzenmacher and Upfal. After all $n$ items have been added to the Bloom filter, let $q$ be the fraction of the $m$ bits that are set to 0. (That is, the number of bits still set to 0 is $qm$.) Then, when testing membership of an element not in the set, for the array position given by any of the $k$ hash functions, the probability that the bit is found set to 1 is ${\displaystyle 1-q}$. So the probability that all $k$ hash functions find their bit set to 1 is ${\displaystyle (1-q)^{k}}$. Further, the expected value of $q$ is the probability that a given array position is left untouched by each of the $k$ hash functions for each of the $n$ items, which is (as above)
#
# $${\displaystyle E[q]=\left(1-{\frac {1}{m}}\right)^{kn}}$$
#
# It is possible to prove, without the independence assumption, that $q$ is very strongly concentrated around its expected value. In particular, from the Azuma–Hoeffding inequality, they prove that
#
# $${\displaystyle \Pr \left(\left|q-E[q]\right|\geq {\frac {\lambda }{m}}\right)\leq 2\exp \left({\frac {-2\lambda ^{2}}{kn}}\right)}$$
#
# Because of this, we can say that the exact probability of false positives is
#
# $${\displaystyle \sum _{t}\Pr(q=t)(1-t)^{k}\approx (1-E[q])^{k}=\left(1-\left[1-{\frac {1}{m}}\right]^{kn}\right)^{k}\approx \left(1-e^{-\frac {kn}{m}}\right)^{k}}$$
# as before.
# </p> # # <h4>Optimal number of hash functions</h4> # # <p align='justify'>The optimum number of hash functions $k$ can be determined using the formula: $${\displaystyle k={\frac {m}{n}}\ln 2}$$ # Given false positive probability $p$ and the estimated number of insertions $n$, the length of the bit array can be calculated as:$${\displaystyle m=-{\frac {n \ln p}{(\ln 2)^{2}}}}$$</p> # # <p align='justify'>The hash functions used for bloom filter should generally be faster than cryptographic hash algorithms with good distribution and collision resistance. Commonly used hash functions for bloom filter include Murmur hash, fnv series of hashes and Jenkins hashes. Murmur hash is the fastest among them. MurmurHash3 is used by Google Guava library's bloom filter implementation.</p> # # <h4>The sieve analogy<sup>12</sup></h4> # # <p align='justify'>We can compare Bloom filters with a sieve, specially formed to only let through certain elements: # <ul> # <li>The known elements will fit the holes in the sieve and fall through.</li> # <li>Even though they’ve never been seen before, some elements will fit the holes in the sieve too and fall through. These are our false positives.</li> # <li>Other elements, never seen before, won’t fall through: the negatives.</li> # </ul> # </p> # # <h4>Disadvantages</h4> # # <p align='justify'><b><i>The size of the Bloom Filters</i></b> need to be known a priori based on the number of items that you are going to insert. This is not so great if you do not know or cannot approximate the number of items. You could put an arbitrarily large size, but that would be a waste in terms of space which we are trying to optimize in the very first place and the reason why we adopt to choose Bloom Filter. This could be fixed to create a bloom filter dynamic to the list of items that you want to fit, but depending on the application, this may not be always possible. 
There is a variant called Scalable Bloom Filter which dynamically adjusts its size for different number of items. This could mitigate some of its shortcomings. # # <b><i>Constructing and Membership Existence in Bloom Filter</i></b> # # While using the Bloom Filters, you not only accept false positive rates, but also you are willing to have a little bit overhead in terms of speed. Comparing to an hashmap, there is definitely an overhead in terms of hashing the items as well as constructing the bloom filter. # # <b><i>Cannot give the items that you inserted</i></b> # # Bloom Filter cannot produce a list of items that are inserted, you could only check if an item is in it, but never get the full item list because of hash collisions and hash functions. This is due to arguably the most significant advantage over other data structures; its space efficiency which comes with this disadvantage. # # <b><i>Removing an element</i></b> # # Removing an element from the Bloom Filter is not possible, you cannot undo an insertion operation as hash results for different items can be indexed in the same position. If you want to do undo inserts, either you need to count the inserts for each index in the BloomFilter or you need to construct the BloomFilter from the start excluding a single item. Both methods involve an overhead and not straightforward. Depending on the application, one might want to try to reconstruct the bloom filter from the start instead of removing or deleting items from the Bloom Filter.</p> # # <h4>Use-cases</h4> # # <p align='justify'>Let’s look at an example use-case to get a better feeling for how Bloom filters in Python can help. # # Imagine a large, multi-machine, networked database. Each lookup for a record in that distributed database requires, at its worst, querying multiple machines at once. On each machine, a lookup means accessing large data structures stored on a disk. 
As you know, disk access, together with networking, is one of the slowest operations in computer science. # # Now imagine that each machine uses a bloom filter, trained with the records stored on disk. Before accessing any data structure on disks, the machine first checks the filter. If it gives a negative, we can be certain that this machine does not store that record, and we can return this result without accessing disks at all. # # If a bloom filter can prevent 80% of these disk lookups, in exchange for some extra memory usage, that may be well worth it! Even if the filter would saves only 30% of disk lookups, that may still be an enormous increase in speed and efficiency.</p> # # <h5>Google’s Webtable and Apache Cassandra</h5> # # <img src='images/google_apache_cassandra.png' alt="Google Apache Cassandra" width=300 height=400 align='left'/> # <p alignt='justify'>For instance, this is how Bloom filters are used in Google’s Webtable and Apache Cassandra that are among the most widely used distributed storage systems designed to handle massive amounts of data. Namely, these systems organize their data into a number of tables called Sorted String Tables (SSTs) that reside on the disk and are structured as key-value maps. In Webtable, keys might be website names, and values might be website attributes or contents. In Cassandra, the type of data depends on what system is using it, so for example, for Twitter, a key might be a User ID, and the value could be user’s tweets. # # When users query for data, a problem arises because we do not know which table contains the desired result. To help locate the right table without checking explicitly on the disk, we maintain a dedicated Bloom filter in RAM for each of the tables, and use them to route the query to the correct table, in the way described in picture on the left. # # Bloom filters in distributed storage systems. 
In this example, we have 50 sorted string tables (SSTs) on disk, and each table has a dedicated Bloom filter that can fit into RAM due to its much smaller size. When a user does a lookup, the lookup first checks the Bloom filters. In this example, the first Bloom filter that reports the item as Present is Bloom filter No.3. Then we go ahead and check in the SST3 on disk whether the item is present. In this case, it was a false alarm. We continue checking until another Bloom filter reports Present. Bloom filter No.50 reports present, we go to the disk and actually locate and return the requested item. # # Bloom filters are most useful when they are strategically placed in high-ingestion systems, in parts of the application where they can prevent expensive disk seeks. For example, having an application perform a lookup of an element in a large table on a disk can easily bring down the throughput of an application from hundreds of thousands ops/sec to only a couple of thousands ops/sec. Instead, if we place a Bloom filter in RAM to serve the lookups, this will deem the disk seek unnecessary except when the Bloom filter reports the key as Present. This way the Bloom filter can remove disk bottlenecks and help the application maintain consistently high throughput across its different components.</p> # # <h5>Bitcoin mobile app</h5> # # <img src='images/bitcoin_bloom_filter.png' alt="Bitcoin Mobile App" width=200 height=250 align='left'/> # <p align='justify'>Peer-to-peer networks use Bloom filters to communicate data, and a well-known example of that is Bitcoin. An important feature of Bitcoin is ensuring transparency between clients, i.e., each node should be able to see everyone’s transactions. However, for nodes that are operating from a smartphone or a similar device of limited memory and bandwidth, keeping the copy of all transactions is highly impractical. 
This is why Bitcoin offers the option of simplified payment verification (SPV), where a node can choose to be a light node by advertising a list of transactions it is interested in. This is in contrast to full nodes that contain all the data. # In Bitcoin, light clients can broadcast what transactions they are interested in, and thereby block the deluge of updates from the network. # Light nodes compute and transmit a Bloom filter of the list of transactions they are interested in to the full nodes. This way, before a full node sends information about a transaction to the light node, it first checks its Bloom filter to see whether a node is interested in it. If the false positive occurs, the light node can discard the information upon its arrival.</p> # # <h5>Bloom filter and Pokemon</h5> # # <p align='justify'>One really interesting implementation of Bloom filter is for Pokemon game. You can see it here if you're interesting in it - <a href='https://notebook.community/vprusso/youtube_tutorials/data_structures/bloom_filter/Bloom%20Filters%20and%20Pokemon'>Bloom filter and Pokemon</a>.</p> # # <p align='justify'>Now let's show some basic bloom filter in Python.</p> class BloomFilter: ''' Class for Bloom filter, using murmur3 hash function ''' def __init__(self, items_count: int, fp_prob: float): ''' items_count : int Number of items expected to be stored in bloom filter - n fp_prob : float False Positive probability in decimal - f ''' # False posible probability in decimal self.fp_prob = fp_prob # Size of bit array to use self.size = self.__get_size(items_count, fp_prob) # number of hash functions to use self.hash_count = self.__get_hash_count(self.size, items_count) # Bit array of given size self.bit_array = bitarray(self.size) # initialize all bits as 0 self.bit_array.setall(0) def add(self, item): ''' Add an item in the filter ''' digests = [] for i in range(self.hash_count): # create digest for given item. 
# i work as seed to mmh3.hash() function # With different seed, digest created is different digest = mmh3.hash(item, i) % self.size digests.append(digest) # set the bit True in bit_array self.bit_array[digest] = True def check(self, item): ''' Check for existence of an item in filter ''' for i in range(self.hash_count): digest = mmh3.hash(item, i) % self.size if self.bit_array[digest] == False: # if any of bit is False then,its not present # in filter # else there is probability that it exist return False return True @staticmethod def __get_size(n, p): ''' Return the size of bit array(m) to used using following formula m = -(n * lg(p)) / (lg(2)^2) n : int number of items expected to be stored in filter p : float False Positive probability in decimal ''' m = -(n * np.log(p))/(np.log(2)**2) return int(m) @staticmethod def __get_hash_count(m, n): ''' Return the hash function(k) to be used using following formula k = (m/n) * lg(2) m : int size of bit array n : int number of items expected to be stored in filter ''' k = (m/n) * np.log(2) return int(k) # + # Example 1 n = 20 # number of items to add p = 0.05 # false positive probability bloom_filter = BloomFilter(n,p) print(f"Size of bit array: {bloom_filter.size}") print(f"False positive Probability: {bloom_filter.fp_prob}") print(f"Number of hash functions: {bloom_filter.hash_count}") # Words to be added word_present = ['abound','abounds','abundance','abundant','accessable', 'bloom','blossom','bolster','bonny','bonus','bonuses', 'coherent','cohesive','colorful','comely','comfort', 'gems','generosity','generous','generously','genial' ] random.shuffle(word_present) # Word not added word_absent = ['bluff','cheater','hate','war','humanity', 'racism','hurt','nuke','gloomy','facebook', 'geeksforgeeks','twitter' ] random.shuffle(word_absent) # Add words to bloom filter for item in word_present: bloom_filter.add(item) test_words = word_present + word_absent random.shuffle(test_words) for word in test_words: if 
bloom_filter.check(word): if word in word_absent: print(f"{word.upper()} IS A FALSE POSITIVE!") else: print(f"{word.upper()} is probably present!") else: print(f"{word.upper()} definitely not present!") # + # Example 2 n = 10 # number of items to add p = 1e-4 # false positive probability bloom_filter = BloomFilter(n,p) animals = ["dog", "cat", "giraffe", "fly", "mosquito", "horse", "eagle", "bird", "bison", "boar", "butterfly", "ant", "anaconda", "bear", "chicken", "dolphin", "donkey", "crow", "crocodile" ] other_animals = ["badger", "cow", "pig", "sheep", "bee", "wolf", "fox", "whale", "shark", "fish", "turkey", "duck", "dove", "deer", "elephant", "frog", "falcon", "goat", "gorilla", "hawk" ] # Add animals into Bloom filter for animal in animals: bloom_filter.add(animal) # Print several statistics of the filter print(f"Size of bit array: {bloom_filter.size}") print(f"False positive Probability: {bloom_filter.fp_prob}") print(f"Number of hash functions: {bloom_filter.hash_count}") # Check whether an item is in the filter or not for animal in animals + other_animals: if bloom_filter.check(animal): if animal in other_animals: print( f'{animal.upper()} is a FALSE POSITIVE case (please adjust fp_prob to a smaller value).' ) else: print(f'{animal.upper()} is PROBABLY IN the filter.') else: print(f'{animal.upper()} is DEFINITELY NOT IN the filter as expected.') # - # <p align='justify'>Now let’s show the formula that determines the false positive rate as a function of $m$ = number of bits in a Bloom filter, $n$ = number of elements to insert and $k$ = number of hash functions visually: # $$f \approx \left(1-e^{-\frac {kn}{m}}\right)^{k}$$ # The graph below shows the plot of $f$ as a function of $k$ for different choices of $m/n$ (bits per element). In many real-life applications, fixing bits-per-element ratio is meaningful because we often have an idea of how many bits we can spend per element. 
Common values for the bits-per-element ratio are between 6 and 14, and such ratios allow us fairly low false positive rates as shown in the graph below:</p> # # + def cm_to_inch(value): # Figsize works with inches - converter to cm needed return value/2.54 def plot_fp_rate_vs_num_hash_functions(m, n, labels, linestyles): k = np.linspace(0, 20, 1000) plt.figure(figsize=(cm_to_inch(30), cm_to_inch(20))) plt.xlim(0, 21, 1) plt.ylim(0, 0.20) plt.xticks(np.arange(0, 21, 1)) plt.yticks(np.arange(0.00, 0.21, 0.01)) plt.xlabel("$k$ - number of hash functions", loc = 'center', rotation = 0) plt.ylabel("$f$ - false positive rate", loc = 'center', rotation = 90) plt.title("Flase Positive Rate vs. Number of Hash Functions", pad=20) for i in range(len(m)): f = (1 - np.e ** (-n[i] * k / m[i])) ** k plt.plot(k, f, label=labels[i], linestyle = linestyles[i]) plt.legend(loc='upper right') plt.grid() return plt.show() # + # Set "m" and "n" as list of int elements (with same lenght) with random numbers to get proper m/n ratios. m = [8, 10, 30, 80, 110, 60, 140] n = [2, 2, 5, 10, 11, 5, 10] # Set labels and linestyles for each m/n ratio as list of strings - should be same length as "m" and "n" lists. labels = ['$m/n$ = 4', '$m/n$ = 5', '$m/n$ = 6', '$m/n$ = 8', '$m/n$ = 10', '$m/n$ = 12', '$m/n$ = 14'] linestyles = ['solid', 'solid', 'solid', 'dashdot', 'dotted', 'dashed', 'dashed', 'dashed'] # Plot the data plot_fp_rate_vs_num_hash_functions(m, n, labels, linestyles) # - # <p align='justify'>The plot relating the number of hash functions $(k)$ and the false positive rate $(f)$ in a Bloom filter. The graph shows the false positive rate for a fixed bits-per-element ratio $(m/n)$, different curves corresponding to different ratios. Starting from the top to bottom, we have $m/n=4, 5, 6, 8, 10, 12, 14$. As the amount of allowed space per element increases (going from top to bottom), given the same number of hash functions, the false positive rate drops. 
Also, the curves show the trend that increasing $k$ up until some point (going from left to right), for a fixed $m/n$, reduces the error, but after some point, increasing $k$ increases the error rate. Note that the curves are fairly smooth, and for example, when $m/n$=8, i.e., we are willing to spend 1 byte per element, if we use anywhere between 4 and 8 hash functions, the false positive rate will not go above 3%, even though the optimal choice of $k$ is between 5 and 6. # # While increasing $m$ or reducing $n$ drops the false positive rate, i.e., more bits per element results in the overall lower false positive curve, the graph also shows the two-fold effect that $k$ has on false positives: up to a certain point, increasing $k$ helps reduce false positives, but there is a point at which it starts to worsen it; this is because having more hash functions allows a lookup more chance to find a zero, but also on an insert, sets more bits to 1. The minimum for each curve is the sweet spot that is the optimal $k$ for a particular bits-per-element. This leads to formula for optimum number of hash functions: # $${\displaystyle k={\frac {m}{n}}\ln 2}$$ # # For example, when $m/n=8$, $k_{opt} = 5.545$. We can use this formula to optimally configure the Bloom filter. Keep in mind that these calculations assume $k$ is a real number, but our $k$ has to be an integer. So if $k_{opt}$ is 5.546 when $m/n=8$ a non-integer, then we need to choose one of the two neighboring integers, which means that false positive rate also is not an exact anymore. Often it is better to choose the smaller of the two possible values of $k$, because it reduces the amount of computation we need to do. So in that case we can conclude that $k=5$ is the optimal number of hash functions. 
#
# Let's try a little bit different implementation of Bloom filter for spell checking<sup>13</sup>.</p>

class BloomFilterSpell:
    """Fixed-size Bloom filter for spell checking, using murmur3 (mmh3)."""

    def __init__(self, size, hash_count):
        # size : int - number of bits in the filter
        # hash_count : int - number of hash seeds used per word
        self.size = size
        self.hash_count = hash_count
        self.bit_array = bitarray(size)
        self.bit_array.setall(0)

    def add(self, string):
        """Insert a word: set one bit per hash seed."""
        for seed in range(self.hash_count):
            result = mmh3.hash(string, seed) % self.size
            self.bit_array[result] = 1

    def lookup(self, string):
        """Return "Definitely not" if the word is absent, "Probably" otherwise."""
        for seed in range(self.hash_count):
            result = mmh3.hash(string, seed) % self.size
            if self.bit_array[result] == 0:
                return "Definitely not"
        return "Probably"


# +
def fp_prob(num_hash_funcs, num_items, bit_vec_length):
    """Theoretical false positive rate (1 - e^(-kn/m))^k."""
    # Probability that a given bit is still unset after all insertions.
    prob_bit_unset = np.e**((-num_hash_funcs * float(num_items)) / bit_vec_length)
    return (1.0 - prob_bit_unset)**num_hash_funcs


def random_char(y):
    """Return a random string of y ASCII letters."""
    return ''.join(random.choice(string.ascii_letters) for x in range(y))


size = 1024000
hash_functions = 5
bloomfil = BloomFilterSpell(size, hash_functions)
# Context manager closes the dictionary file deterministically
# (the original left the handle open).
with open("data/words_alpha.txt") as dictionary_file:
    lines = dictionary_file.read().splitlines()
for line in lines:
    bloomfil.add(line)

prob_fp = fp_prob(hash_functions, len(lines), size)
print(f"Probability of False Positives: {prob_fp}")
random_word = random_char(10)
print(f"Randomly generated word is {random_word}")
print(f"{random_word} Spelling is {bloomfil.lookup(random_word)} correct")
# -

# <h2>Bloom filter vs. other data structures<sup>14, 15</sup></h2>
#
# <p align='justify'>Now after we show and describe what the Bloom filter is, let's compare this probabilistic data structure with other data structures.</p>
#
# <h4>BF vs. HashTable</h4>
#
# <img src='images/hash_table.png' alt="Hash Table Data Structure" height=300 width=500 align='left'/>
# <p align='justify'>Hashtable is designed to use a special function called the Hash function which is used to map a given value with a particular key for faster access of elements.
It is used where fast lookups are required.(Under reasonable assumptions, average time for element lookup in a hash table is $O(1)$). Dictionary in Python is implemented using HashTables. Java also implements HashTable class. Some applications of hashing can be found <a href='https://www.geeksforgeeks.org/applications-of-hashing/'>here.</a> # # HashTables and bloom filters are closely related to each other, therefore, it is wise to compare these two data structures and use them wisely as per your application/need demands. # # <table> # <tr> # <th style='text-align: center;'>Hash Table</th> # <th style='text-align: center;'>Bloom filter</th> # </tr> # <tr> # <td style='text-align: justify;'>In hash table the object gets stored to the bucket (index position in the hashtable) the hash function maps to.</td> # <td style='text-align: justify;'>Bloom filters doesn’t store the associated object. It just tells whether it is there in the bloom filter or not.</td> # </tr> # <tr> # <td style='text-align: justify;'>Hash tables are less space efficient. All (note: most) of the strings must map to a certain location.</td> # <td style='text-align: justify;'>Bloom filters are more space efficient. It’s size is even the less than the associated object which it is mapping - just an array of 0 and 1s according to the hash functions in the bloom.</td> # </tr> # <tr> # <td style='text-align: justify;'>Supports deletions - they can just mark that location of the object as NULL again.</td> # <td style='text-align: justify;'>It is not possible to delete elements from bloom filters. We have to reset a bloom filter entirely to restart.</td> # </tr> # <tr> # <td style='text-align: justify;'>Hashtables give accurate results.</td> # <td style='text-align: justify;'>Bloom filters have small false positive probability. 
(False positive means it might be in bloom filter but actually it is not.)</td> # </tr> # <tr> # <td style='text-align: justify;'>In a hashtable either we should implement multiple hash functions or have a strong hash function to minimize collisions.</td> # <td style='text-align: justify;'>A bloom filter uses many hash functions. There is no need to handle collisions.</td> # </tr> # <tr> # <td style='text-align: justify;'>Hashtables (hashmaps) are used in compiler operations, programming languages (hash table based data structures), password verification, etc.</td> # <td style='text-align: justify;'>Bloom filters find application in network routers, web browsers (to detect the malicious urls), in password checkers (to not a set a weak or guessable or list of forbidden passwords), existing username checkers etc.</td> # </tr> # </table> # # Let's compare with one simple <a href='https://softwareengineering.stackexchange.com/questions/252341/are-bloom-filters-actually-faster-than-hashes-even-taking-in-account-cache'>example</a>: # # Consider a simplified hash function $f(x) = x \% 2$. Now you input the following integers: 2, 3, 4, 5, 6, 7. # # Standard Hash: the given values will be hashed, and we end up with a lot of collisions due to $f(2) = f(4) = f(6) = 0$ and $f(3) = f(5) = f(7) = 1$. Nevertheless, the hash stores all of these values and it will be able to tell you that 8 is not stored in it. How does it do that? It keeps track of collisions and stores all values with the same hash-value, then when you query it, it additionally compares your query. So let's query the map for 8: $f(8) = 0$, so it'll look into a bucket where we have already inserted 2, 4, 6 and needs to make 3 comparisons in order to tell you that 8 was not part of the input. # # Bloom filter: Normally, each input value is hashed against $k$ different hash functions. Again, for simplicity, let's just assume we only use the single hash function $f$. 
We need an array of 2 values then and when we encounter the input 2 it means that due to $f(2) = 0$ we set the array value at position 0 to the value 1. The same happens for 4 and 6. Similarly, the inputs 3, 5, 7 each set the array position 1 to value 1. Now we query if 8 was part of the input: $f(8) = 0$ and the array at position 0 is 1, so the bloom filter will falsely claim that 8 was indeed part of the input. # # To get a bit more realistic, let's consider that we add a second hash function $g(x) = x \% 10$. With that, the input value 2 leads to two hash values $f(2) = 0$ and $g(2) = 2$ and the two corresponding array positions will be set to 1. Of course, the array now should be at least of size 10. But when we query for 8 we will check the array at position 8 due to $g(8) = 8$, and that position will still be 0. That's why additional hash functions decrease the false positives you'll get. # # Comparison: The bloom filter uses $k$ hash functions which means up to $k$ random array positions being accessed. But that figure is exact. The hash instead is only guaranteeing you an amortized constant access time, but may de-generate depending on the nature of your hash function and input data. So it is typically faster, except for the de-generated cases. # However, once you have a hash collision the standard hash will have to check equality of the stored values against the query value. This equality check may be arbitrarily expensive and will never occur with a bloom filter. # In terms of space, the bloom filter is constant, as there is never any need to use more memory than the designated array. On the other hand, the hash grows dynamically and may get much larger due to having to keep track of collisioned values. # </p> # # <h4>Comparing vs. 
Tries and BSTs</h4> # # <p align='justify'>Let's first define trie and BST.<br> # # <img src='images/trie.png' alt="Trie Data Structure" width=200 height=150 align='left'/> # <b><i>A trie</i></b>, also called digital tree or prefix tree, is a type of search tree, a tree data structure used for locating specific keys from within a set. These keys are most often strings, with links between nodes defined not by the entire key, but by individual characters. In order to access a key (to recover its value, change it, or remove it), the trie is traversed depth-first, following the links between nodes, which represent each character in the key. More about tries you can read <a href='https://en.wikipedia.org/wiki/Trie'>here</a>.</p> # # <p align='justify'> # <img src='images/binary_search_tree.png' alt="Binary Search Tree Data Structure" height=250 width=200 align='right'/> # <b><i>A binary search tree (BST)</i></b>, also called an ordered or sorted binary tree, is a rooted binary tree whose internal nodes each store a key greater than all the keys in the node's left subtree and less than those in its right subtree. A binary tree is a type of data structure for storing data such as numbers in an organized way. Binary search trees allow binary search for fast lookup, addition and removal of data items, and can be used to implement dynamic sets and lookup tables. The order of nodes in a BST means that each comparison skips about half of the remaining tree, so the whole lookup takes time proportional to the binary logarithm of the number of items stored in the tree. This is much better than the linear time required to find items by key in an (unsorted) array, but slower than the corresponding operations on hash tables. Several variants of the binary search tree have been studied. More about binary search trees you can read <a href='https://en.wikipedia.org/wiki/Trie'>here</a>. # # # Now let's go to the comparison. 
# <ul> # <li>Tries have the limitation of being useful only for strings. BSTs and Bloom filters can account for various other data types and miscellaneous structs.</li> # <li>Complexity for Insertion and Search: # <ul> # <li>Tries: $O(string length)$</li> # <li>BST: $O(string_length * height)$ [As the entire string is being stored at each node, we must compare the entire string $h$ number of times]</li> # <li>Bloom: $O(1)$ or $O(hash function)$ if the hash functions are very complex</li> # </ul></li> # <li>Deletion: Cannot occur in bloom filters but can in the other two</li> # <li>Space complexity: # <ul> # <li>Tries are better than BST as they do not store overlapping prefixes again and again.</li> # <li>Bloom filters work with hash functions so they are obviously the best of the lot.</li> # </ul> # </li> # <li>Tries and BSTs can store the entire string but the Bloom Filter can only tell the presence / absence of it, not recover it (limitation).</li> # </ul> # # The most significant advantage of Bloom filters over other data structures such as self-balancing trees, tries, HashMaps is in terms of space utilization. Any data structure implemented in order to store a set of elements (ordered or unordered) stores each element in its entirety. The storage requirement in this case can range from a few bits to several bytes. Furthermore, there are overhead costs associated with certain data structures. For eg: linked lists require additional linear space overhead for pointers. As mentioned earlier, the only caveat with Bloom filters is the possibility of a false positive. However with a relatively small error rate (choosing the optimal value for $k$), Bloom filters on an average require around 9.6 bits per element irrespective of the size of the elements. This is owed to its compact structure and nature of the approximation data structure. 
Therefore, if an error rate of $≤ 2\%$ is acceptable, and a few false postives are not harmful to the result, bloom filters are ideal. Lastly, in order to avoid expensive search operation over disk or network by checking probabilistically beforehand if the element possibly exists, bloom filter is the way to go!</p> # <h2>Conclusions</h2> # <p align='justify'>Bloom filter packs the space really well but are there, or are there better data structures? In other words, for the same amount of space, can we achieve a better false positive rate than the Bloom filter? To answer this question, we need to derive a lower bound that relates the space in the Bloom filter $(m)$ with the false positive rate $(f)$. This lower bound (available in some more theoretical resources on the subject) tells us that the amount of space the Bloom filter uses is $1.44x$ away from the minimum. There are, in fact, data structures that are closer to this lower bound than Bloom filter, but some of them are very complex to understand and implement. # # The basic Bloom filter data structure leaves a lot to be desired, and computer scientists have developed various modified versions of Bloom filters that address its various inefficiencies. For example, the standard Bloom filter does not handle deletions. There is a version of Bloom filter called counting Bloom filter that uses counters instead of individual bits in the cells. The insert operation in the counting Bloom filter increments the respective counters, and the delete operation decrements the corresponding counters. Counting Bloom filters use more space and can also lead to false negatives, when, for example, we repeatedly delete the same element thereby bringing down some other element counters to zero. # # Another issue with Bloom filters is their inability to be efficiently scaled. 
One of the problems with scaling in the way we are used to with hash tables, by rehashing and re-inserting, is that we do not store the items nor the fingerprints in the Bloom filter, so the original keys are effectively lost and rehashing is not an option. # # Also, Bloom filters are vulnerable when the queries are not drawn uniformly and randomly. Queries in real-life scenarios are rarely uniform random. Instead, many queries follow the Zipfian distribution, where a small number of elements is queried a large number of times, and a large number of elements is queried only once or twice. This pattern of queries can increase our effective false positive rate, if one of our “hot” elements, i.e., the elements queried often, results in the false positive. A modification to the Bloom filter called weighted Bloom filter addresses this issue by devoting more hashes to the “hot” elements, thus reducing the chance of the false positive on those elements. There are also new adaptations of Bloom filters that are adaptive, i.e. upon the discovery of a false positive, they attempt to correct it. # # The other vein of research has been focused on designing data structures functionally similar to the Bloom filter, but their design has been based on particular types of compact hash tables. Quotient filters are a viable alternative to Bloom filters, but they deserve to be represented separately.</p> # # <h4>Summary</h4> # # <p align='justify'> # <ol> # <li>Bloom filters have been widely applied in the context of distributed databases, networks, bioinformatics, and other domains where regular hash tables are too space-consuming.</li> # <li>Bloom filters trade accuracy for the savings in space, and there is a relationship between the space, false positive rate, the number of elements and the number of hash functions in the Bloom filter.</li> # <li>Bloom filters do not meet the space vs. 
accuracy lower bound, but they are simpler to implement than more space-efficient alternatives, and have been adapted over time to deal with deletes, different query distributions, etc.</li> # <li>Quotient filters are based on compact hash tables and are functionally equivalent to Bloom filters, with the benefit of the cache-efficient operations, and ability to delete, merge and resize.</li> # <li>Cuckoo filters are based on cuckoo hash tables, and promise the lookup of $O(1)$. Just like quotient filters, they store fingerprints instead of the actual keys.</li> # <li>Time Complexity : Adding a new element and testing for membership are both Constant Time - $O(1)$ operations</li> # <li>Space Complexity: A bloom filter with room for $n$ elements requires $O(n)$ space.</li> # </ol> # </p> # <h2>References</h2> # <ol> # <li><a href='https://en.wikipedia.org/wiki/Data_structure'>Data Structure Wiki</a></li> # <li><a href='https://www.geeksforgeeks.org/data-structures/'>Data Structure GeeksforGeeks</a></li> # <li><a href='https://en.wikipedia.org/wiki/List_of_data_structures#Abstract_data_types'>List of Data Structures Wiki</a></li> # <li><a href='https://introprogramming.info/intro-csharp-book/read-online/glava19-strukturi-ot-danni-supostavka-i-preporuki/'>Svetlin Nakov CSharp Book</a></li> # <li><a href='https://pdsa.gakhov.com/'><NAME> - Probabilistic Data Structures and Algorithms for Big Data Applications</a></li> # <li><a href='https://dzone.com/articles/introduction-probabilistic-0'>DZone Probabilistic Data Structures</a></li> # <li><a href='https://iq.opengenus.org/probabilistic-data-structures/'>Opengenus PDS</a></li> # <li><a href='https://stackoverflow.com/questions/27307169/what-are-probabilistic-data-structures'>StackOverflow PDS</a> # <li><a href='https://en.wikipedia.org/wiki/Bloom_filter'>Bloom filter Wiki</a></li> # <li><a href='https://freecontent.manning.com/all-about-bloom-filters/'>Freecontent Manning Bloom Filter</a></li> # <li><a 
href='https://www.kdnuggets.com/2016/08/gentle-introduction-bloom-filter.html'>KDNuggets Bloom filters</a></li> # <li><a href='https://python.land/bloom-filter'>Python Land Bloom Filter</a></li> # <li><a href='https://github.com/solo-rey/Bloom-filter'>Solo Rey Github Bloom Filter Spell Check</a></li> # <li><a href='https://www.geeksforgeeks.org/difference-between-bloom-filters-and-hashtable/'>GeeksforGeeks Bloom Filter Comparison</a></li> # <li><a href='https://iq.opengenus.org/comparison-of-bloom-filter/'>Opengenus Bloom Filter Comparison</a></li> # </ol>
exam_project_bloom_filter/Bloom Filter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.9 64-bit (''dsfs'': conda)'
#     name: python3
# ---

import numpy as np
import json
import tarfile
import matplotlib.pyplot as plt
import pandas as pd

# After importing the necessary libraries and packages, get the yelp reviews
# dataset from a tarfile.

# +
# Stream the first 50,000 review records (one JSON object per line) straight
# out of the tar archive without extracting it to disk.
tar = tarfile.open('yelp_dataset.tar')
members = tar.getmembers()
review_file = tar.extractfile('yelp_academic_dataset_review.json')

review_data = []
for _ in range(50000):
    review_data.append(json.loads(review_file.readline()))
# -

import datetime

tar.close()
review_file.close()

review_data[0]

datetime.datetime.strptime(review_data[0]['date'], '%Y-%m-%d %H:%M:%S')

datetime.datetime.fromisoformat(review_data[0]['date'])

# The date of the review is saved as a string. First converting it into a
# datetime object.

# +
import time

for rev in review_data:
    rev['timeStruct'] = datetime.datetime.fromisoformat(rev['date'])
    # rev['timeInt'] = time.mktime(rev['timeStruct'])

review_data[0]
# -

# NOTE: datetime.weekday() numbers the days Monday=0 ... Sunday=6.
datetime.datetime.fromisoformat(review_data[0]['date']).weekday()

# List of ratings for each day of the week.

# +
from collections import defaultdict

weekRatings = defaultdict(list)
for rev in review_data:
    day = rev['timeStruct'].weekday()
    weekRatings[day].append(rev['stars'])

# +
# Averages for all the weeks
weekAvgs = {}
for day, ratings in weekRatings.items():
    weekAvgs[day] = sum(ratings) / len(ratings)
weekAvgs
# -

X = list(weekAvgs.keys())  # days of the week
y = [weekAvgs[x] for x in weekAvgs]
X, y

plt.plot(X, y)
plt.show()

# x must be sorted before line-plotting, otherwise the line zig-zags in
# dict-insertion order.
weekAvgs = {key: val for key, val in sorted(weekAvgs.items(), key=lambda ele: ele[0])}
weekAvgs

X = list(weekAvgs.keys())  # days of the week
y = [weekAvgs[x] for x in weekAvgs]
X, y

plt.plot(X, y)

plt.bar(X, y)

plt.ylim(3.65, 3.9)
plt.bar(X, y)

plt.ylim(3.65, 3.9)
plt.xlabel('Weekday')
plt.ylabel('Avg rating of the weekday')
# BUG FIX: weekday() returns Monday=0 ... Sunday=6, so the tick labels must
# begin with 'Mon'. The original labels started at 'Sun' and therefore
# mislabelled every bar by one day.
plt.xticks(list(range(0, 7)), ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])
plt.bar(X, y)
dataProcessingnVisualization/visualization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Object Oriented Programming
#
# ## Basic Python
#
# 1. Variables;
# 2. Flow Control;
# 3. Functions;
# 4. Data structure.
#
# ## Basic Object Oriented Programming
#
# 1. Classes;
# 2. Attributes;
# 3. Methods;
# 4. Inheritance.
#
# ## Overview of Python
#
# What is Python?
#
# Python is a programming language.
# A programming language allows you to write a program which is a sequence of instructions that specifies how to perform a computation.
#
# When writing a program you need two things:
#
# 1. Something to save the code (a text editor for example)
# 2. Something to run the code
#
# We will be using a combination of these 2 things called notebooks.

print('ola')

2**3

# ## Variables
#
# ### Character variables:

string = "Hello world"
num = complex(2,-3) + complex(2,2)
print(num)
print(string)
print(string)
print(string)
print(string)

# ### Numeric variables:

num_1 = 2
num_2 = 3.0
num_1 + num_2

# ### String manipulation

#We define a variable called string
# (note that # allows me to comment my code)
string = "My name is Vince"

# Indexing a string: string[2] returns the THIRD character
# (Note that Python starts counting at 0):
string[2]

# Slicing: characters at indices 1, 2 and 3 (the end index is exclusive).
string[1:4]

index_of_v = string.index("V")
index_of_v

print(string[index_of_v:])
print(string[:index_of_v])

# ### Numeric manipulation

# +
num = 3
# The following two lines are equivalent
num = num + 1
num += 1
num
# -

num -= 2
num *= 3
num **= 2
num

# ## Flow control
#
# - In Python indentation is important!
# - In all languages indentation is good practice, in Python it is a requirement.
#
# ### If statements

n = 8
if n <= 5:
    value = 1
elif n % 2 == 0:
    value = 2
else:
    value = 3
value

# ### While loops

count = 0
total = 0
while count < 100:
    count += 1
    total += count
total

# ### For loops

for i in [1, 2, 3, 4]:
    print(i)

for subject in ["Queueing Theory", "Game Theory", "Inventory Theory", "Reliability Theory", "Project Management", "Decision Analysis"]:
    if "Theory" in subject:
        print(subject)

# ## Functions

# +
#To create a function we use the 'def' statement:
def hi():
    """
    This function simply prints a short statement.

    This is a shorter way of writing documentation, it is good practice to
    always include a description of what a function does.
    """
    print("Hello everybody!")

hi()
# -

def fibonacci(n):
    """
    This returns the nth Fibonacci number.

    Note: this naive recursion recomputes subproblems, so it is only
    suitable for small n (used here to demonstrate recursion).
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)

fibonacci(5)

# ## Data structures: Lists

my_list = list(range(6))

my_list[0]

my_list.append(100)
my_list

# # Object Oriented Programming
#
# This is similar to cellular structure:
#
# ![](resources/cellular_structure.png)
#
# We can create "things" with:
#
# - attributes: things those "things" have;
# - methods: things those "things" can do.
#
# ![](resources/oop.png)
#
# ## Defining a class

class Student():
    """We can create a simple empty class.

    This is a set of rules that says what a student is.
    """

# + tags=["nbval-ignore-output"]
vince = Student()  # Creating an instance
vince

# + tags=["nbval-ignore-output"]
zoe = Student()  # Creating a different instance
zoe
# -

# ## Attributes

class Student():
    # These are CLASS attributes: they are defined on the class itself and
    # shared by every instance until an instance assigns its own value.
    courses = ["Biology", "Mathematics", "English"]
    age = 5
    gender = "Male"

#Let us now create Vince again:
vince = Student()

# Accessing these attributes:
vince.courses

vince.age

vince.gender

# We can manipulate these attributes just like **any other** python variable:

# Appending mutates the shared class-level list, so the new course also
# appears on other instances (demonstrated by zoe below).
vince.courses.append("Photography")
print(vince.courses)
zoe = Student()
print(zoe.courses)

# Assignment, by contrast, creates an attribute on this instance only.
vince.age = 28
vince.age

vince.gender = "M"
vince.gender

# ## The self Parameter
#
# The self parameter is a reference to the current instance of the class, and is used to access variables that belong to the class.
#
# It does not have to be named self, you can call it whatever you like, but it has to be the first parameter of any function in the class:

# +
class Person:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def myfunc(self):
        print("Hello my name is " + self.name + " and I have "+ str(self.age) +" years.")

p1 = Person("John", 36)
p1.myfunc()

p2 = Person("Oliver", 42)
p2.myfunc()
# -

# ## Methods

class Student():
    courses = ["Biology", "Mathematics", "English"]
    age = 5
    sex = "Male"

    def __init__(self, age=5):
        self.age = age

    def have_a_birthday(self):
        """This method increments the age of our instance."""
        self.age += 1

vince = Student(6)
vince.age

vince.have_a_birthday()
print(vince.age)
zoe = Student()
print(zoe.age)

# ## The `__init__` method

class Student():
    def __init__(self, courses, age=None, sex='Male'):
        """
        What the class should do when it is used to create an instance
        """
        self.courses = courses
        self.age = age
        self.sex = sex

    def have_a_birthday(self):
        self.age += 1

vince = Student(["Biology","Math"],28,"Male")
vince.courses, vince.age, vince.sex

# ## Inheritance
#
# We can use a class to create new classes:

class Math_Student(Student):
    """
    A Math student: behaves exactly like a
    Student but also has a favourite class attribute.
    """
    favourite_class = "Mathematics"

becky = Math_Student(["Mathematics", "Biology"], 29, "Female")
becky.courses, becky.age, becky.sex, becky.favourite_class

#This class has the methods of the parent class:
becky.have_a_birthday()
becky.age

# ## Use the super() Function
#
# Python also has a super() function that will make the child class inherit all the methods and properties from its parent:

# +
class Person:
    def __init__(self, fname, lname):
        self.firstname = fname
        self.lastname = lname

    def printname(self):
        print(self.firstname, self.lastname)

class Student(Person):
    def __init__(self, fname, lname, year):
        # super() gives access to the parent class; here it reuses
        # Person.__init__ to set the name attributes.
        super().__init__(fname, lname)
        self.graduationyear = year

    def printname(self):
        # Overrides Person.printname to also show the graduation year.
        print(self.firstname, self.lastname, self.graduationyear)

x = Student("Mike", "Olsen", 2019)
x.printname()
x.printname()
# -

# ## Summary
#
# - Classes
# - Attributes
# - Methods
# - Inheritance
#
# ## Advantages
#
# - Simplicity
# - Modularity
# - Modifiability
# - Extensibility
# - Re-usability
#
# # Further resources
#
# 1. [Python 3 Classes](https://docs.python.org/3.7/tutorial/classes.html)
# 2. [W3 Python Classes and Objects](https://www.w3schools.com/python/python_iterators.asp)
00 oop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.8 64-bit
#     name: python3
# ---

# In this worksheet we show how to connect to a BaseX database containing XML documents representing source code. Data from individual models are stored in databases by the same names. The name of the database is the same as the name of the model. The name of the database to open is given in the model variable below:

# + dotnet_interactive={"language": "csharp"}
model = 'ApplicationFoundation'

# + dotnet_interactive={"language": "csharp"}
from BaseXClient import BaseXClient

# create session
session = BaseXClient.Session("localhost", 1984, "admin", "admin")

try:
    # XQuery that tallies the method-visibility counts per Class/Table and
    # then returns a single <Totals> element with the summed attributes.
    # Renamed from `input` so the builtin is no longer shadowed.
    query_text = """let $results :=
    <Results>
    {
        for $c in /Class | /Table
        let $allMethods := count($c/Method)
        let $privateMethods := count($c/Method[@IsPrivate = 'true'])
        let $protectedMethods := count($c/Method[@IsProtected = 'true']) (: Explicitly marked with protected keyword :)
        let $publicMethods := count($c/Method[@IsPublic = 'true'])
        let $internalMethods := count($c[@IsInternal = 'true'])
        (: Compensate for methods that lack a visibility keyword: They are protected. :)
        let $protectedMethods := $protectedMethods + ($allMethods - $privateMethods - $protectedMethods - $publicMethods - $internalMethods)
        return <Result Class='{$c/@Name}' PrivateMethodCount='{$privateMethods}' ProtectedMethodCount='{$protectedMethods}' PublicMethodCount='{$publicMethods}' InternalMethodCount='{$internalMethods}'/>
    }
    </Results>
    return <Totals PrivateMethodCount='{sum($results/Result/@PrivateMethodCount)}' ProtectedMethodCount='{sum($results/Result/@ProtectedMethodCount)}' PublicMethodCount='{sum($results/Result/@PublicMethodCount)}' InternalMethodCount='{sum($results/Result/@InternalMethodCount)}' />"""

    session.execute("open " + model)
    query = session.query(query_text)

    # There is only one result, an XML tag called Totals with the
    # attributes provided.
    res = query.execute()
    print(res)

    # close query object
    query.close()
finally:
    # close session
    if session:
        session.close()
# -

# As you can see, the result of running the query in the BaseX database is stored in the res variable. For this demonstration, we will take these results and show them graphically using the matplotlib library.

# + dotnet_interactive={"language": "csharp"}
import matplotlib.pyplot as plt

# Get the information from the XML using the lightweight Python XML library
import xml.etree.ElementTree as ET

tree = ET.fromstring(res)

# BUG FIX: Element.get() returns attribute values as strings, but
# plt.pie() needs numeric wedge sizes, so convert the counts to int.
privateCount = int(tree.get("PrivateMethodCount"))
protectedCount = int(tree.get("ProtectedMethodCount"))
publicCount = int(tree.get("PublicMethodCount"))
internalCount = int(tree.get("InternalMethodCount"))

fig = plt.figure(figsize=(5, 5))  # In inches(!)
piechart = fig.add_subplot(111)
piechart.pie([privateCount, protectedCount, internalCount, publicCount],
             labels=("Private", "Protected", "Internal", "Public"),
             shadow=True,
             startangle=90,
             explode=(0.1, 0, 0, 0),
             autopct='%1.1f%%')
piechart.set_title("Distribution of method visibility\nModel: " + model, fontsize=20)
piechart.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.tight_layout()
plt.show()

# + dotnet_interactive={"language": "csharp"}


# + dotnet_interactive={"language": "csharp"}
tools/Explorer/Jupyter notebooks/Method Visibility.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WESAD Dataset Analysis import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import pandas as pd df = pd.read_csv("../../WESAD/allchest.csv") df = df[df['ID'] == 2] df.reset_index(inplace=True, drop=True) df['label'].unique() sns.set_context("paper", rc={"lines.linewidth": 2.5}) sns.set_palette("binary_d") # Neutral sns.lineplot(data=df[df['label'] == 1].reset_index(drop=True)['chestResp']) plt.ylabel('RESP') plt.xlabel('Sequential Data-Points') plt.show() # Stress sns.lineplot(data=df[df['label'] == 2].reset_index(drop=True)['chestResp']) plt.ylabel('RESP') plt.xlabel('Sequential Data-Points') plt.show() # Neutral sns.lineplot(data=df[df['label'] == 1].reset_index(drop=True).iloc[400000:420000]['chestResp']) plt.ylabel('RESP') plt.xlabel('Sequential Data-Points') plt.show() # Stress sns.lineplot(data=df[df['label'] == 2].reset_index(drop=True).iloc[50000:70000]['chestResp']) plt.ylabel('RESP') plt.xlabel('Sequential Data-Points') plt.show()
WESAD/WESAD_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''rapids-0.18'': conda)'
#     name: python388jvsc74a57bd0db8c33e37b4d85990d3830e1b454a98f3634fc126ad97d7dcd45a1752ca4c3f7
# ---

# # Biopython store entire query to mongoDB
#
# Storing entire query in mongoDB database.

from Bio import Entrez
import json
from bs4 import BeautifulSoup as bs
import lxml
import json  # NOTE(review): duplicate of the import above; kept to preserve the file as-is.

# # Functions from API

def entrez_search_pubmed(query, records_per_query=10, email="<EMAIL>"):
    """Run an Entrez esearch against PubMed and return the parsed record.

    NOTE(review): `records_per_query` is never used — esearch's `retmax`
    is not set, so NCBI's default page size applies. Confirm intent.
    """
    from Bio import Entrez
    Entrez.email = email
    # Search
    handle = Entrez.esearch(db="pubmed", term=query, idtype="acc")
    record = Entrez.read(handle)
    handle.close()
    return record


def entrez_fetch_list_summary(uid_list, email):
    """Fetch the esummary record for each PubMed UID.

    Returns a list of {'uid': ..., 'summary': ...} dicts, one per UID.
    """
    from Bio import Entrez
    Entrez.email = email
    results = [
        Entrez.read(Entrez.esummary(db="pubmed", id=uid))
        for uid in uid_list
    ]
    # BUG FIX: the original had a second, unreachable `return results`
    # after this return statement; it has been removed.
    return [{'uid': a, 'summary': b} for a, b in zip(uid_list, results)]


def flatten_abstract(abstract_xml):
    """Collapse an <abstract> BeautifulSoup tag into one string.

    Labelled sections (e.g. BACKGROUND, METHODS) are prefixed with
    their label followed by ": ".
    """
    abstract = ''
    for abstractText in abstract_xml.find_all('abstracttext'):
        if abstractText.get('label') is not None:
            abstract = abstract + " " + abstractText.get('label') + ": "
        abstract = abstract + abstractText.text
    return abstract


def entrez_fetch_abstracts(uid, email):
    """Fetch Medline XML for the given UID(s) and return flattened abstracts."""
    from Bio import Entrez
    from bs4 import BeautifulSoup as bs
    Entrez.email = email
    handle = Entrez.efetch(db="pubmed", id=uid, rettype='Medline', retmode='xml')
    result = handle.readlines()
    result = b"".join(result)
    bs_content = bs(result, "lxml")
    abstracts = bs_content.find_all('abstract')
    handle.close()
    # Abstract
    return [flatten_abstract(abstract) for abstract in abstracts]


def entrez_construct_abstract_dict(uids, email):
    """Pair each UID with its flattened abstract text."""
    results = entrez_fetch_abstracts(uids, email)
    return [{'uid': a, 'abstract': b} for a, b in zip(uids, results)]


def entrez_fetch_full_text_linkout(uid_list):
    """Query NCBI elink for full-text provider URLs for each UID."""
    import requests
    query = ",".join(uid_list)
    # Renamed the response variable so the comprehension's loop variable
    # no longer shadows it (the original reused `result` for both).
    response = requests.get(
        "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id="
        + query + "&cmd=prlinks&retmode=json").json()
    results_parsed = response['linksets'][0]['idurllist']
    return [{'uid': entry['id'], 'objurls': entry['objurls']}
            for entry in results_parsed]

# ## Procedure to fetch

query_body = {
    "query": "<NAME>",
    'email': "<EMAIL>"
}

## Query search
results_query = entrez_search_pubmed(query=query_body['query'], email=query_body['email'])
uids = results_query['IdList']
uids

## Summary
results_summary = entrez_fetch_list_summary(uids, "<EMAIL>")
results_summary[0]

## Abstract
results_abstracts = entrez_construct_abstract_dict(uids, "<EMAIL>")
results_abstracts[0]

## Elink
results_elink = entrez_fetch_full_text_linkout(uids)
results_elink[0]

# # Store to mongoDB

from pymongo import MongoClient
from loguru import logger

#client = MongoClient('mongodb', 27017)
client = MongoClient('127.0.0.1', 27017)
logger.info(client.list_database_names())


def entrez_mongodb_create_dictionaries(uids):
    """Open a MongoDB connection in preparation for storing the records.

    BUG FIX: the original definition was syntactically invalid
    (`def entrez_mongodb_create_dictionaries(uid-):3`). Its body only
    duplicated the connection boilerplate above — the actual storage
    logic still needs to be written. TODO: insert the summary/abstract/
    linkout dictionaries for `uids` into a collection.
    """
    from pymongo import MongoClient
    from loguru import logger

    #client = MongoClient('mongodb', 27017)
    client = MongoClient('127.0.0.1', 27017)
    logger.info(client.list_database_names())
notebooks/biopython/03-store-query.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # SCF

# ## Imports

import numpy as np
import scipy.linalg as spla
import pyscf
from pyscf import gto, scf
import matplotlib.pyplot as plt
import time
# %matplotlib notebook

# ## Some useful resources:
# - Szabo and Ostlund Chapter 3 (for algorithm see page 146)
# - [Notes by <NAME>](http://vergil.chemistry.gatech.edu/notes/hf-intro/hf-intro.html)
# - [Notes by <NAME>](http://joshuagoings.com/2013/04/24/hartree-fock-self-consistent-field-procedure/)
# - [Programming notes by <NAME>](http://www.evangelistalab.org/wp-content/uploads/2013/12/Hartree-Fock-Theory.pdf)
# - [Psi4Numpy SCF page](https://github.com/psi4/psi4numpy/tree/master/Tutorials/03_Hartree-Fock)
# - [Crawdad programming notes](http://sirius.chem.vt.edu/wiki/doku.php?id=crawdad:programming:project3)

# ## The SCF algorithm from Szabo and Ostlund:
# 1. Specify a molecule (coordinates $\{R_A\}$, atomic numbers $\{Z_A\}$, number electrons $N$) and atomic orbital basis $\{\phi_\mu\}$.
# 2. Calculate molecular integrals over AOs ( overlap $S_{\mu\nu}$, core Hamiltonian $H^{\mathrm{core}}_{\mu\nu}$, and 2 electron integrals $(\mu \nu | \lambda \sigma)$ ).
# 3. Diagonalize the overlap matrix $S$ to obtain the transformation matrix $X$.
# 4. Make a guess at the original density matrix $P$.
# 5. Calculate the intermediate matrix $G$ using the density matrix $P$ and the two electron integrals $(\mu \nu | \lambda \sigma)$.
# 6. Construct the Fock matrix $F$ from the core hamiltonian $H^{\mathrm{core}}_{\mu\nu}$ and the intermediate matrix $G$.
# 7. Transform the Fock matrix $F' = X^\dagger F X$.
# 8. Diagonalize the Fock matrix to get orbital energies $\epsilon$ and molecular orbitals (in the transformed basis) $C'$.
# 9. Transform the molecular orbitals back to the AO basis $C = X C'$.
# 10. Form a new guess at the density matrix $P$ using $C$.
# 11. Check for convergence. (Are the changes in energy and/or density smaller than some threshold?) If not, return to step 5.
# 12. If converged, use the molecular orbitals $C$, density matrix $P$, and Fock matrix $F$ to calculate observables like the total Energy, etc.

# ## Quick note
# The reason we need to calculate the transformation matrix $X$ is because the atomic orbital basis is not orthonormal by default. This means without transformation we would need to solve a generalized eigenvalue problem $FC = ESC$. If we use scipy to solve this generalized eigenvalue problem we can simplify the SCF algorithm.

# ## Simplified SCF
# 1. Specify a molecule (coordinates $\{R_A\}$, atomic numbers $\{Z_A\}$, number electrons $N$) and atomic orbital basis $\{\phi_\mu\}$.
# 2. Calculate molecular integrals over AOs ( overlap $S_{\mu\nu}$, core Hamiltonian $H^{\mathrm{core}}_{\mu\nu}$, and 2 electron integrals $(\mu \nu | \lambda \sigma)$ ).
# 3. Make a guess at the original density matrix $P$.
# 4. Calculate the intermediate matrix $G$ using the density matrix $P$ and the two electron integrals $(\mu \nu | \lambda \sigma)$.
# 5. Construct the Fock matrix $F$ from the core hamiltonian $H^{\mathrm{core}}_{\mu\nu}$ and the intermediate matrix $G$.
# 6. Solve the generalized eigenvalue problem using the Fock matrix $F$ and the overlap matrix $S$ to get orbital energies $\epsilon$ and molecular orbitals.
# 7. Form a new guess at the density matrix $P$ using $C$.
# 8. Check for convergence. (Are the changes in energy and/or density smaller than some threshold?) If not, return to step 4.
# 9. If converged, use the molecular orbitals $C$, density matrix $P$, and Fock matrix $F$ to calculate observables like the total Energy, etc.

# # STEP 1 : Specify the molecule

# start timer
start_time = time.time()

# define molecule (water, STO-3G basis, neutral singlet)
mol = pyscf.gto.M(
    atom="O 0.0000000 0.0000000 0.0000000; H 0.7569685 0.0000000 -0.5858752; H -0.7569685 0.0000000 -0.5858752",
    basis='sto-3g',
    unit="Ang",
    verbose=0,
    symmetry=False,
    spin=0,
    charge=0
)

# get number of atomic orbitals
num_ao = mol.nao_nr()
# get number of electrons
num_elec_alpha, num_elec_beta = mol.nelec
num_elec = num_elec_alpha + num_elec_beta
# get nuclear repulsion energy
E_nuc = mol.energy_nuc()

# # STEP 2 : Calculate molecular integrals
#
# Overlap
#
# $$ S_{\mu\nu} = (\mu|\nu) = \int dr \phi^*_{\mu}(r) \phi_{\nu}(r) $$
#
# Kinetic
#
# $$ T_{\mu\nu} = (\mu\left|-\frac{\nabla}{2}\right|\nu) = \int dr \phi^*_{\mu}(r) \left(-\frac{\nabla}{2}\right) \phi_{\nu}(r) $$
#
# Nuclear Attraction
#
# $$ V_{\mu\nu} = (\mu|r^{-1}|\nu) = \int dr \phi^*_{\mu}(r) r^{-1} \phi_{\nu}(r) $$
#
# Form Core Hamiltonian
#
# $$ H = T + V $$
#
# Two electron integrals
#
# $$ (\mu\nu|\lambda\sigma) = \int dr_1 dr_2 \phi^*_{\mu}(r_1) \phi_{\nu}(r_1) r_{12}^{-1} \phi_{\lambda}(r_2) \phi_{\sigma}(r_2) $$
#

# +
# calculate overlap integrals
S = mol.intor('cint1e_ovlp_sph')
# calculate kinetic energy integrals
T = mol.intor('cint1e_kin_sph')
# calculate nuclear attraction integrals
V = mol.intor('cint1e_nuc_sph')
# form core Hamiltonian
H = T + V
# calculate two electron integrals
eri = mol.intor('cint2e_sph', aosym='s8')

# since we are using the 8 fold symmetry of the 2 electron integrals
# the functions below will help us when accessing elements
__idx2_cache = {}

def idx2(i, j):
    """Compound (triangular) index of the pair (i, j); memoised in __idx2_cache."""
    if (i, j) in __idx2_cache:
        return __idx2_cache[i, j]
    elif i >= j:
        __idx2_cache[i, j] = int(i*(i+1)/2+j)
    else:
        __idx2_cache[i, j] = int(j*(j+1)/2+i)
    return __idx2_cache[i, j]

def idx4(i, j, k, l):
    """Compound index of (ij|kl) in the 8-fold-symmetric eri array."""
    return idx2(idx2(i, j), idx2(k, l))

print(np.shape(eri))
# -

# # STEP 3 : Form guess density matrix

# set inital density matrix to zero
D = np.zeros((num_ao, num_ao))

# # STEPS 4 - 8 : SCF loop
#
# 4. Calculate the intermediate matrix $G$ using the density matrix $P$ and the two electron integrals $(\mu \nu | \lambda \sigma)$.
#
#     $$G_{\mu\nu} = \sum_{\lambda\sigma}^{\mathrm{num\_ao}} P_{\lambda \sigma}[2(\mu\nu|\lambda\sigma)-(\mu\lambda|\nu\sigma)]$$
#
# 5. Construct the Fock matrix $F$ from the core hamiltonian $H^{\mathrm{core}}_{\mu\nu}$ and the intermediate matrix $G$.
#
#     $$ F = H + G $$
#
# 6. Solve the generalized eigenvalue problem using the Fock matrix $F$ and the overlap matrix $S$ to get orbital energies $\epsilon$ and molecular orbitals.
#
#     $$F C = E S C $$
#
# 7. Form a new guess at the density matrix $P$ using $C$.
#
#     $$ P_{\mu\nu} = \sum_{i}^{\mathrm{num\_elec}/2} C_{\mu i} C_{\nu i} $$
#
# 8. Check for convergence. (Are the changes in energy and/or density smaller than some threshold?) If not, return to step 4.
#
#     $$ E_{\mathrm{elec}} = \sum^{\mathrm{num\_ao}}_{\mu\nu} P_{\mu\nu} (H_{\mu\nu} + F_{\mu\nu}) $$
#     $$ \Delta E = E_{\mathrm{new}} - E_{\mathrm{old}} $$
#     $$ |\Delta P| = \left[ \sum^{\mathrm{num\_ao}}_{\mu\nu} [P^{\mathrm{new}}_{\mu\nu} - P_{\mu\nu}^{\mathrm{old}}]^2 \right]^{1/2}$$
#
# 9. If converged, use the molecular orbitals $C$, density matrix $P$, and Fock matrix $F$ to calculate observables like the total Energy, etc.
#
#     $$ E_{\mathrm{total}} = V_{\mathrm{NN}} + E_{\mathrm{elec}} $$

# +
# 2 helper functions for printing during SCF
def print_start_iterations():
    """Print the header row for the per-iteration SCF progress table."""
    print("{:^79}".format("{:>4} {:>11} {:>11} {:>11} {:>11}".format(
        "Iter", "Time(s)", "RMSC DM", "delta E", "E_elec")))
    print("{:^79}".format("{:>4} {:>11} {:>11} {:>11} {:>11}".format(
        "****", "*******", "*******", "*******", "******")))

def print_iteration(iteration_num, iteration_start_time, iteration_end_time, iteration_rmsc_dm, iteration_E_diff, E_elec):
    """Print one row of the SCF progress table for the given iteration."""
    print("{:^79}".format("{:>4d} {:>11f} {:>.5E} {:>.5E} {:>11f}".format(iteration_num, iteration_end_time - iteration_start_time, iteration_rmsc_dm, iteration_E_diff, E_elec)))

# set stopping criteria
iteration_max = 100
convergence_E = 1e-9
convergence_DM = 1e-5

# loop variables
iteration_num = 0
E_total = 0
E_elec = 0.0
iteration_E_diff = 0.0
iteration_rmsc_dm = 0.0
converged = False
exceeded_iterations = False
# -

# NOTE: this is an exercise template — the loop will only run once the
# FILL IN sections below actually assign F and E_elec; until then the
# call to spla.eigh(F, S) raises a NameError by design.
print_start_iterations()
while (not converged and not exceeded_iterations):
    # store last iteration and increment counters
    iteration_start_time = time.time()
    iteration_num += 1
    E_elec_last = E_elec
    D_last = np.copy(D)
    # form G matrix
    G = np.zeros((num_ao, num_ao))
    #########################################################
    # FILL IN HOW TO MAKE THE G MATRIX HERE
    #########################################################
    # build fock matrix
    #########################################################
    # FILL IN HOW TO MAKE THE FOCK MATRIX HERE
    #########################################################
    # solve the generalized eigenvalue problem
    E_orbitals, C = spla.eigh(F, S)
    # compute new density matrix
    D = np.zeros((num_ao, num_ao))
    #########################################################
    # FILL IN HOW TO MAKE THE DENSITY MATRIX HERE
    #########################################################
    # calculate electronic energy
    #########################################################
    # FILL IN HOW TO CALCULATE THE ELECTRONIC ENERGY HERE
    #########################################################
    # calculate energy change of iteration
    iteration_E_diff = np.abs(E_elec - E_elec_last)
    # rms change of density matrix
    iteration_rmsc_dm = np.sqrt(np.sum((D - D_last)**2))
    iteration_end_time = time.time()
    print_iteration(iteration_num, iteration_start_time, iteration_end_time, iteration_rmsc_dm, iteration_E_diff, E_elec)
    if(np.abs(iteration_E_diff) < convergence_E and iteration_rmsc_dm < convergence_DM):
        converged = True
    if(iteration_num == iteration_max):
        exceeded_iterations = True

# # STEP 9 : Calculate Observables

# +
# calculate total energy
####################################################
# FILL IN HOW TO CALCULATE THE TOTAL ENERGY HERE
####################################################
# -

print("{:^79}".format("Total Energy : {:>11f}".format(E_total)))
02_SCF/basics/scf_pyscf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import soundfile as sf import random import os import matplotlib.pyplot as plt import textgrid import numpy as np # Get all dictories and file names of files with certain suffix def get_filelist(dir, Filelist, namelist, suffix): newDir = dir if os.path.isfile(dir): if dir.endswith(suffix): Filelist.append(dir) name = os.path.basename(dir) namelist.append(name[:(len(name)-len(suffix))]) elif os.path.isdir(dir): for s in os.listdir(dir): newDir=os.path.join(dir,s) get_filelist(newDir, Filelist, namelist,suffix) return Filelist, namelist # + # Some random chosen keywords KEYWORDS = ['was','his','which','from','any','she','people','without','little','about'] root_dir = './LibriSpeech/Librispeech' # Root directory of audios text_dir = './librispeech_MFA/Documents/aligned_librispeech' # Root directory of transcripts generated by MFA filelist, namelist = get_filelist(root_dir,[],[],'.wav') scriptlist, scriptname = get_filelist(text_dir,[],[],'.TextGrid') # Generate a folder for outputs if os.path.exists('./Outputs') == False: os.mkdir('./Outputs') # + for word_idx in range(len(KEYWORDS)): #loop over all selected keywords keyword = KEYWORDS[word_idx] filecount = 0 for file in range(len(filelist)): # loop over all audio files file_dir = filelist[file] file_name = namelist[file] script_dir = scriptlist[file] tg = textgrid.TextGrid() tg.read(script_dir.format(file_name)) segmentlist = np.array([[0,0]]) locations = np.array([[0,0]]) for idx in range(0,len(tg.tiers[0])): # loop over all words in an audio if tg.tiers[0][idx].mark == keyword: t_start = tg.tiers[0][idx].minTime t_end = tg.tiers[0][idx].maxTime duration = t_end - t_start # Generate 1-sec segments if (1-duration) >= t_start: rand_num = round(random.uniform(0,t_start),2) else: rand_num = 
round(random.uniform(0,1-duration),2) t_start_new = t_start - rand_num t_end_new = t_start_new + 1 # The array that contains all 1-sec segments that present the selected keyword segmentlist = np.concatenate((segmentlist,np.array([[t_start_new, t_end_new]]))) # The array that contains the location of each keyword in each segment locations = np.concatenate((locations,np.array([[t_start - t_start_new, t_end - t_start_new]]))) segmentlist = np.delete(segmentlist,0,axis=0) locations = np.delete(locations,0,axis=0) y, sr = sf.read(file_dir) # read the original audio if os.path.exists('./Outputs/{0}'.format(keyword)) == False: os.mkdir('./Outputs/{0}'.format(keyword)) # Generate and save the segments for i in range(np.shape(segmentlist)[0]): y_cut = y[int(segmentlist[i,0]*sr) : int(segmentlist[i,1]*sr)] sf.write('./Outputs/{0}/{1}-{2}.wav'.format(keyword, str(word_idx).zfill(4), str(filecount).zfill(4)), y_cut, sr) # generate the .wrd files with the location of each keyword with open('./Outputs/{0}/{1}-{2}.wrd'.format(keyword, str(word_idx).zfill(4), str(filecount).zfill(4)),'w') as file: file.write('{0} {1} {2}'.format(int(locations[i,0]*sr), int(locations[i,1]*sr), keyword)) filecount += 1 print('Files are generated successifully!') # + # This part can separate the audios into train, val, and test sets. 
# Split the keyword clips produced above into train / validation / test sets.
# For every keyword folder under ./Outputs the clips are shuffled and the
# (.wav, .wrd) pairs are copied into ./newOutputs/<split>/<keyword>/.
import shutil
from shutil import copy2

train_ratio = 0.7   # fraction of clips per keyword that go to training
test_ratio = 0.15   # fraction that goes to test; the remainder is validation

# os.makedirs(..., exist_ok=True) replaces the non-idiomatic
# `if os.path.exists(path) == False: os.mkdir(path)` boilerplate.
os.makedirs('./newOutputs', exist_ok=True)

train_dir_root = './newOutputs/train'
os.makedirs(train_dir_root, exist_ok=True)
val_dir_root = './newOutputs/validation'
os.makedirs(val_dir_root, exist_ok=True)
test_dir_root = './newOutputs/test'
os.makedirs(test_dir_root, exist_ok=True)

for keyword in KEYWORDS:
    file_dir = "./Outputs/{0}/".format(keyword)
    # Clip stems: .wav file names without the extension.
    name_list = [file for file in os.listdir(file_dir) if file.endswith('.wav')]
    name_list = [name[:-4] for name in name_list]
    num_audios = len(name_list)

    # Shuffle the clip order so the split is random.
    index_list = list(range(num_audios))
    random.shuffle(index_list)

    # Per-keyword destination folders.
    train_dir = os.path.join(train_dir_root, keyword)
    os.makedirs(train_dir, exist_ok=True)
    val_dir = os.path.join(val_dir_root, keyword)
    os.makedirs(val_dir, exist_ok=True)
    test_dir = os.path.join(test_dir_root, keyword)
    os.makedirs(test_dir, exist_ok=True)

    num = 0
    for i in index_list:
        audio_files = os.path.join(file_dir, name_list[i] + '.wav')
        wrd_files = os.path.join(file_dir, name_list[i] + '.wrd')
        # First train_ratio of the shuffled clips -> train,
        # last test_ratio -> test, everything in between -> validation.
        if num < num_audios*train_ratio:
            copy2(audio_files, train_dir)
            copy2(wrd_files, train_dir)
        elif num >= num_audios*(1-test_ratio):
            copy2(audio_files, test_dir)
            copy2(wrd_files, test_dir)
        else:
            copy2(audio_files, val_dir)
            copy2(wrd_files, val_dir)
        num += 1

print('Files are separated into train, validation, and test sets.')
Preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # ## Train model using PCA Components # ### Model is trained with XGBoost installed in notebook instance # Install xgboost in notebook instance. #### Command to install xgboost # !conda install -y -c conda-forge xgboost # %matplotlib inline import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import xgboost as xgb column_list_file = 'bike_train_column_list_pca.txt' train_file = 'bike_train_pca.csv' validation_file = 'bike_validation_pca.csv' test_file = 'bike_test_pca.csv' columns = '' with open(column_list_file,'r') as f: columns = f.read().split(',') columns # Specify the column names as the file does not have column header df_train = pd.read_csv(train_file,names=columns) df_validation = pd.read_csv(validation_file,names=columns) df_train.head(2) df_validation.head(2) df_train.iloc[:,1:-2].head(2) # + X_train = df_train.iloc[:,1:] # Features: 1st column onwards y_train = df_train.iloc[:,0].ravel() # Target: 0th column X_validation = df_validation.iloc[:,1:] y_validation = df_validation.iloc[:,0].ravel() # - # XGBoost Training Parameter Reference: # https://github.com/dmlc/xgboost/blob/master/doc/parameter.md regressor = xgb.XGBRegressor(max_depth=5,eta=0.1,subsample=0.7,num_round=150,n_estimators=150) regressor regressor.fit(X_train,y_train, eval_set = [(X_train, y_train), (X_validation, y_validation)]) eval_result = regressor.evals_result() training_rounds = range(len(eval_result['validation_0']['rmse'])) plt.scatter(x=training_rounds,y=eval_result['validation_0']['rmse'],label='Training Error') plt.scatter(x=training_rounds,y=eval_result['validation_1']['rmse'],label='Validation Error') plt.grid(True) plt.xlabel('Iteration') plt.ylabel('RMSE') plt.title('Training Vs Validation Error') plt.legend() 
# Feature importance of the fitted booster.
# NOTE: the original cell called xgb.plot_importance(regressor) twice in a
# row; the duplicate call only redrew the identical chart and was removed.
xgb.plot_importance(regressor)


def adjust_count(x):
    """Floor a predicted rental count at zero.

    The regressor can emit small negative values; counts cannot be
    negative, so those are clamped to 0.
    """
    if x < 0:
        return 0
    else:
        return x


# Prepare Data for Submission to Kaggle
df_test = pd.read_csv(test_file, parse_dates=['datetime'])

df_test.head(2)

X_test = df_test.iloc[:,1:]  # Exclude datetime for prediction

X_test.head(2)

result = regressor.predict(X_test)

result[:5]

# Convert result to actual count (model was trained on log1p counts,
# so expm1 inverts the transform).
df_test["count"] = np.expm1(result)

df_test.head()

# Inspect negatives before and after clamping.
df_test[df_test["count"] < 0]

df_test['count'] = df_test['count'].map(adjust_count)

df_test[df_test["count"] < 0]

df_test[['datetime','count']].to_csv('predicted_count_pca.csv', index=False)
6 PCA/BikeSharingRegression/sdk1.7/biketrain_xgboost_localmode_pca.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Local Lengthscale GP with PyMC # # Author: [<NAME>](https://patel-zeel.github.io/), [<NAME>](https://nipunbatra.github.io/) # In this chapter, we explore a non-stationary GP discussed by {cite}`LLSGP` with PyMC. # + import numpy as np import matplotlib.pyplot as plt from matplotlib import rc import pymc3 as pm from sklearn.cluster import KMeans import warnings from multiprocessing import Pool warnings.filterwarnings('ignore') rc('font', size=16) # - # Let us define both stationary and non-statioanry RBF kernels, # + ### Stationary GP def kernel(a, b, lenghtscale, std): """ Borrowed from <NAME>'s lecture code https://www.cs.ubc.ca/~nando/540-2013/lectures/gp.py """ sqdist = np.square(a - b.T) return std**2*np.exp(-.5 * (1/lenghtscale) * sqdist) ### LLS GP def global_kernel(x1, x2, l1, l2, std): sqdist = np.square(x1 - x2.T) l1l2meansqr = (np.square(l1)[:, np.newaxis, :] + np.square(l2)[np.newaxis, :, :]).squeeze()/2 # print(sqdist.shape, l1l2meansqr.shape) return std**2 * pm.math.matrix_dot(np.sqrt(l1),np.sqrt(l2.T)) * (1/np.sqrt(l1l2meansqr)) * np.exp(-sqdist/l1l2meansqr) def local_kernel(x1, x2, lengthscale): """ Borrowed from <NAME>'s lecture code https://www.cs.ubc.ca/~nando/540-2013/lectures/gp.py """ sqdist = np.square(x1 - x2.T) return np.exp(-.5 * (1/lengthscale) * sqdist) # - # We will test the efficacy of these models on a step function data. 
# + n_train = 21 np.random.seed(1234) # Generate data def f(X): # target function return np.sin(5*X) + np.sign(X) X = np.random.uniform(-1, 1, (n_train, 1)).reshape(-1,1) # data Y = f(X)[:, 0] + np.random.normal(0,0.2,n_train) plt.scatter(X, Y); plt.xlabel('x'); plt.ylabel('y'); # - # ### Stationary GP model # + basic_model = pm.Model() with basic_model: # Priors for unknown model parameters # Variance kernel_std = pm.Lognormal("kernel_std", 0, 0.1) # Length scale kernel_ls = pm.Lognormal("kernel_ls", 0, 1) noise_sigma = pm.Lognormal("noise_sigma", 0, 1) K = kernel(X, X, kernel_ls, kernel_std) K += np.eye(X.shape[0]) * np.power(noise_sigma, 2) y = pm.MvNormal("y", mu = 0, cov = K, observed = Y.squeeze()) # - pm.model_to_graphviz(basic_model.model) # Let us find MAP estimate of paramaters. map_estimate = pm.find_MAP(model=basic_model) map_estimate # Now, we will sample from the posterior. with basic_model: # draw 2000 posterior samples per chain trace = pm.sample(2000,return_inferencedata=False,tune=2000) # + import arviz as az az.plot_trace(trace); # - # Now, we will infer the $y$ values at new input locations. # + test_x = np.linspace(-1.5, 1.5, 100).reshape(-1, 1) train_x = X train_y = Y def post(train_x, train_y, test_x, kernel, kernel_ls, kernel_std, noise): N = len(train_x) K = kernel(train_x, train_x, kernel_ls, kernel_std)+noise**2*np.eye(len(train_x)) N_star = len(test_x) K_star = kernel(train_x, test_x, kernel_ls, kernel_std) K_star_star = kernel(test_x, test_x, kernel_ls, kernel_std) posterior_mu = K_star.T@np.linalg.inv(K)@(train_y) posterior_sigma = K_star_star - K_star.T@np.linalg.inv(K)@K_star # Instead of size = 1, we can also sample multiple times given a single length scale, kernel_std and noise return np.random.multivariate_normal(posterior_mu, posterior_sigma, size=1) # - # Make predictions at new locations. 
train_y = Y.squeeze()

# One posterior-predictive draw per posterior hyperparameter sample.
n_samples = 500
preds = np.stack([post(train_x, train_y, test_x=test_x, kernel=kernel,
                       kernel_ls=trace['kernel_ls'][b],
                       kernel_std=trace['kernel_std'][b],
                       noise=trace['noise_sigma'][b]) for b in range(n_samples)])

# +
# Empirical mean and 95% interval over the predictive draws.
ci = 95
ci_lower = (100 - ci) / 2
ci_upper = (100 + ci) / 2
preds_mean = preds.reshape(n_samples, len(test_x)).mean(0)
preds_lower = np.percentile(preds, ci_lower, axis=0)
preds_upper = np.percentile(preds, ci_upper, axis=0)

plt.plot(test_x, preds.reshape(n_samples, len(test_x)).mean(axis=0))
plt.scatter(train_x, train_y, c='black', zorder=3, label='data')
plt.fill_between(test_x.flatten(), preds_upper.flatten(), preds_lower.flatten(), alpha=.3, label='95% CI');
# -

# ### LLS GP

# We will learn local lengthscales at 3 input locations chosen by KMeans clustering.

# +
n_local = 3
lls_model = pm.Model()
# Latent lengthscale support points = KMeans centers of the inputs.
param_X = KMeans(n_local, random_state=0).fit(X).cluster_centers_

with lls_model:
    ### Local GP
    # local lengthscale
    local_ls = pm.Lognormal("local_ls", 0, 1)
    param_ls = pm.Lognormal("param_ls", 0, 1, shape=(n_local, 1))
    local_K = local_kernel(param_X, param_X, local_ls)
    local_K_star = local_kernel(X, param_X, local_ls)

    ### global GP
    # global lengthscales: GP-interpolate log-lengthscales from the
    # latent points to every training input, then exponentiate.
    global_ls = pm.math.exp(pm.math.matrix_dot(local_K_star, pm.math.matrix_inverse(local_K), pm.math.log(param_ls)))
    # global variance
    global_std = pm.Lognormal("global_std", 0, 1)
    # global noise
    global_noise_sigma = pm.Lognormal("global_noise_sigma", 0, 1)

    global_K = global_kernel(X, X, global_ls, global_ls, global_std)
    global_K += np.eye(X.shape[0])*global_noise_sigma**2
    y = pm.MvNormal("y", mu = 0, cov = global_K, observed = Y)
# -

pm.model_to_graphviz(lls_model.model)

# Let us find MAP estimate of parameters.
NSmap_estimate = pm.find_MAP(model=lls_model)
NSmap_estimate

with lls_model:
    # draw 4000 posterior samples per chain (2000 tuning steps)
    trace = pm.sample(4000, return_inferencedata=False, tune=2000)

import arviz as az
with lls_model:
    az.plot_trace(trace);

# Let us predict the values at new locations.

# +
test_x = np.linspace(-1.5, 1.5, 100).reshape(-1, 1)
train_x = X
train_y = Y

def post(local_ls, param_ls, global_std, global_noise):
    """Posterior-predictive mean of the LLS-GP at `test_x` for one
    posterior draw of the hyperparameters.

    local_ls / param_ls parametrize the latent lengthscale process;
    global_std / global_noise are signal std and observation noise.
    Reads train_x, train_y, test_x and param_X from the enclosing scope.
    """
    N = len(train_x)
    # Interpolate log-lengthscales from the latent points to train/test inputs.
    param_K_inv = np.linalg.inv(local_kernel(param_X, param_X, local_ls))
    local_K = local_kernel(train_x, param_X, local_ls)
    global_ls = np.exp(local_K@param_K_inv@param_ls)
    local_K_star = local_kernel(test_x, param_X, local_ls)
    global_ls_star = np.exp(local_K_star@param_K_inv@param_ls)
    # Standard GP predictive mean with the non-stationary kernel.
    K = global_kernel(train_x, train_x, global_ls, global_ls, global_std)+np.eye(N)*global_noise**2
    K_inv = pm.math.matrix_inverse(K)
    K_star = global_kernel(train_x, test_x, global_ls, global_ls_star, global_std)
    posterior_mu = pm.math.matrix_dot(K_star.T, K_inv, train_y)
    # BUG FIX: the original had a second, unreachable `return` after this one
    # that referenced an undefined `posterior_sigma`; it has been removed.
    return posterior_mu.eval()
# -

# +
# Make predictions at new locations.
n_samples = 100
preds = np.stack([post(local_ls=trace['local_ls'][b],
                       param_ls=trace['param_ls'][b],
                       global_std=trace['global_std'][b],
                       global_noise=trace['global_noise_sigma'][b]) for b in range(n_samples)])
# -

# Let us visualize predictive mean and variance.
# Empirical mean and 95% interval over the LLS-GP predictive draws.
ci = 95
ci_lower = (100 - ci) / 2
ci_upper = (100 + ci) / 2
preds_mean = preds.reshape(n_samples, len(test_x)).mean(0)
preds_lower = np.percentile(preds, ci_lower, axis=0)
preds_upper = np.percentile(preds, ci_upper, axis=0)

# +
plt.plot(test_x, preds.reshape(n_samples, len(test_x)).mean(axis=0))
plt.scatter(train_x, train_y, c='black', zorder=3, label='data')
plt.fill_between(test_x.flatten(), preds_upper.flatten(), preds_lower.flatten(), alpha=.3, label='95% CI');
for x_loc in param_X:
    plt.vlines(x_loc, -3, 3);
# Redraw the last vline with a label so the legend gets a single entry.
plt.vlines(x_loc, -3, 3, label='Latent locations');
plt.legend(bbox_to_anchor=(1,1));
plt.ylim(-2.5, 2.5);
plt.xlabel('x')
plt.ylabel('y');
# -

# Let us visualize the latent lengthscales.

# +
# Interpolate the MAP log-lengthscales over the test grid and plot them.
local_ls = NSmap_estimate['local_ls']
param_ls = NSmap_estimate['param_ls']
local_K = local_kernel(param_X, param_X, local_ls)
local_K_star = local_kernel(test_x, param_X, local_ls)
global_ls = pm.math.exp(pm.math.matrix_dot(local_K_star, pm.math.matrix_inverse(local_K), pm.math.log(param_ls)))

plt.plot(test_x, global_ls.eval(), label='Local lengthscales', color='r');
plt.vlines(param_X.squeeze(), *plt.ylim(), label='latent locations')
plt.xlabel('x')
plt.ylabel('lengthscales');
plt.legend(bbox_to_anchor=(1,1));
# -

# We see that lengthscales are low in the center region where the underlying function is having a sudden jump. Thus, LLS model is giving us sensible predictions.
explain-ml-book/notebooks/2021-03-17-lls-gp-pymc3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jrg94/doodles/blob/master/teaching-evals/visualizing_teaching_evaluations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="YKqBX5hkAtzV" colab_type="code" colab={}
# Load useful libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# + id="_WclUf_oGoOL" colab_type="code" outputId="a4fb8525-f3e6-4167-c352-63d0707c264a" colab={"base_uri": "https://localhost:8080/", "height": 166}
# Load data and take a peek
# (one row per term, columns q1..q10 holding mean evaluation scores)
df = pd.read_csv("https://raw.githubusercontent.com/jrg94/doodles/master/teaching-evals/mean-evals-by-term.csv")
df.head()

# + id="D6ADigArPQyI" colab_type="code" outputId="d8e169f0-3672-4b0b-d528-bf24409b130f" colab={"base_uri": "https://localhost:8080/", "height": 94}
# Load question labels (one human-readable label per question q1..q10)
labels = pd.read_csv("https://raw.githubusercontent.com/jrg94/doodles/master/teaching-evals/question-labels.csv")
labels.head()

# + id="YybIzf4Gyq7O" colab_type="code" outputId="95e7b4a7-3ca4-4667-e024-e7b2dda2fa00" colab={"base_uri": "https://localhost:8080/", "height": 195}
# Load question distributions (per question/term: % per Likert category)
dists = pd.read_csv("https://raw.githubusercontent.com/jrg94/doodles/master/teaching-evals/question-distributions.csv")
dists.head()

# + id="IDKMHbshGt7z" colab_type="code" outputId="6047f435-f597-40eb-b280-ba2d5d766eef" colab={"base_uri": "https://localhost:8080/", "height": 824}
# Plot time series of all questions over 4 terms
results = df.plot(
    subplots=True,
    x="term",
    y=["q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10"],
    figsize=(15, 15),
    ylim=(4,5),
    title=list(labels.values[0]),
    legend=False,
    sharex=True,
    sharey=True,
    layout=(5,2)
)

# + id="Aw38oTIX4jut" colab_type="code" outputId="4bc0a2b0-1fc2-44df-f99c-b3619ab4962f" colab={"base_uri": "https://localhost:8080/", "height": 576}
# Plot distributions of all four terms (question q1 only): rows become
# Likert categories, columns become terms after the transpose.
filt = dists[dists["question"] == "q1"][
    ["term", "strongly disagree", "disagree", "neutral", "agree", "strongly agree"]
].set_index("term").T
results = filt.plot(
    kind="bar",
    subplots=True,
    figsize=(12, 8),
    ylim=(0,100),
    legend=False
)

# + id="ZS0UQVl7KdDy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 585} outputId="e365e5d7-8df6-406a-9fcc-09fbedea4f79"
# Grouped bar chart per question: one subplot per question (5x2 grid),
# one bar group per term, one bar per Likert category.
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(12, 8), sharex=True, sharey=True)
width = .15
i = 1
for row in ax:
    for col in row:
        filt = dists[dists["question"] == f"q{i}" ][
            ["term", "strongly disagree", "disagree", "neutral", "agree", "strongly agree"]
        ].set_index("term").T
        col.set_title(labels.values[0][i - 1])
        for j in range(5):
            if j == 2:
                # centers the tick (middle category also sets the tick labels)
                col.bar(np.arange(4) + width * j, filt.iloc[j], width, label=filt.index[j], tick_label=filt.T.index, align="center")
            else:
                col.bar(np.arange(4) + width * j, filt.iloc[j], width, label=filt.index[j], align="center")
        handles, axes_labels = col.get_legend_handles_labels()
        i += 1
# One shared legend for the whole figure (handles from the last subplot).
fig.legend(handles, axes_labels, loc="lower right", bbox_to_anchor=(1.15, .8))
fig.tight_layout()
teaching-evals/visualizing_teaching_evaluations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0 # --- # + language="sh" # pygmentize train_function.R # + language="sh" # pygmentize serve_function.R # + language="sh" # pygmentize main.R # + language="sh" # pygmentize Dockerfile # + language="sh" # export REGION=eu-west-1 # aws ecr create-repository --repository-name r-custom --region $REGION # + language="sh" # # This cell will not run on SageMaker Studio # # The simplest option is to run these commands on your local machine # export REGION=eu-west-1 # export ACCOUNT_ID=`aws sts get-caller-identity --query Account --output text` # docker build -t r-custom:predictor -f Dockerfile . # export IMAGE_ID=`docker images -q r-custom:predictor` # docker tag $IMAGE_ID $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/r-custom:predictor # aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/r-custom:predictor # docker push $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/r-custom:predictor # + import sagemaker from sagemaker.estimator import Estimator print(sagemaker.__version__) sess = sagemaker.Session() account_id = sess.boto_session.client('sts').get_caller_identity()['Account'] region = sess.boto_session.region_name bucket = sess.default_bucket() prefix = 'r-boston-housing' training = sess.upload_data(path='housing.csv', key_prefix=prefix + "/training") output = 's3://{}/{}/output/'.format(bucket,prefix) role = sagemaker.get_execution_role() r_estimator = Estimator( image_uri=account_id+'.dkr.ecr.'+region+'.amazonaws.com/r-custom:predictor', role=role, instance_count=1, instance_type='ml.m5.large', output_path=output, hyperparameters={ 'normalize': False } ) 
r_estimator.fit({'training':training}) # - r_predictor = r_estimator.deploy( initial_instance_count=1, instance_type='ml.m5.large') # + import pandas as pd data = pd.read_csv('housing.csv') data.drop(['medv'], axis=1, inplace=True) data = data.to_csv(index=False) r_predictor.serializer = sagemaker.serializers.CSVSerializer() response = r_predictor.predict(data) # - print(response) r_predictor.delete_endpoint()
Chapter 08/R_custom/R on Boston Housing Dataset - Train and predict with Generic Estimator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import libraries

# +
import datetime
import itertools
import json
import pathlib
import warnings

from sklearn.ensemble import RandomForestClassifier

import eli5
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.linear_model as linear_model
from catboost import CatBoostRegressor, Pool
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ARDRegression, ElasticNet, LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import ParameterGrid, GridSearchCV
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm_notebook as tqdm
from tsfresh import extract_features, extract_relevant_features
from tsfresh.feature_extraction import feature_calculators as fc
from sklearn.model_selection import StratifiedShuffleSplit

# NOTE(review): seaborn and warnings are imported twice in this cell.
import seaborn as sns; sns.set()
import warnings; warnings.filterwarnings('ignore')

RS = 289475  # random seed used across the notebook
# -

# # Load data

# +
train_target = pd.read_csv('pet_target_train.csv', index_col='date', parse_dates=['date'])
test_target = pd.read_csv('pet_test_timestamps.csv', index_col='date', parse_dates=['date'])
daily = pd.read_csv('pet_daily.csv', index_col='date', parse_dates=['date'])
weekly = pd.read_csv('pet_weekly.csv', index_col='date', parse_dates=['date'])
tsf_features2 = pd.read_csv('tsf_features2.csv', index_col='date', parse_dates=['date'])

# Sanity print of the date coverage of each table.
print('train_target:', train_target.index.date.min(), '|', train_target.index.date.max())
print('test_target:', test_target.index.date.min(), '|', test_target.index.date.max())
print('daily:', daily.index.date.min(), '|', daily.index.date.max())
print('weekly:', weekly.index.date.min(), '|', weekly.index.date.max())
print('tsf_features2:', tsf_features2.index.date.min(), '|', tsf_features2.index.date.max())
# -

# # Features

# +
# Upsample both tables to daily frequency and merge.
dfts = daily.resample('D').ffill()
wfts = weekly.resample('D').ffill()
fts = dfts.join(wfts).drop('pta_NEA_USD', axis=1)
fts
# -

# ## Rolling

# +
# Rolling aggregates over every window length from 1 to 1095 days.
PERIODS = [f'{d}D' for d in range(1,1096)]
AGGREGATES = ['mean', 'median', 'std', 'max', 'min']

all_features = []
for period in tqdm(PERIODS):
    for agg in AGGREGATES:
        # A 1-day window only makes sense for 'mean'.
        if agg!='mean' and period=='1D':
            continue
        rolling_features = fts.rolling(period).aggregate(agg)
        rolling_features.rename(lambda x: '_'.join([x, period, agg]), axis=1, inplace=True)
        all_features.append(rolling_features)
#         rolling_features[rolling_features.index.day==9]

all_features = pd.concat(all_features, axis=1)
all_features
# -

# ## Tsfresh

all_features = all_features.join(tsf_features2)
all_features

# ## Reduce to one day per month and shift

# +
# Keep only the 9th of each month and stamp each row with the date it
# predicts for (start of the following month).
all_features = all_features[all_features.index.day==9]
all_features['prediction_date'] = (
    all_features.index + pd.TimedeltaIndex(all_features.index.days_in_month-8, unit='D'))
all_features
# -

# ## Drop useless columns

# +
df = train_target.join(all_features.ffill().set_index('prediction_date'), how='outer')['2005-02':] #2004-06
df.index.name = 'date'

# Columns with NaNs in the training window.
nulls = df[:'2015'].isna().sum().sort_values(ascending=False)
nulls_cols = nulls[nulls>0].index

# Columns containing infinities.
inf = (df == np.inf).sum().sort_values(ascending=False)
inf_cols = inf[inf>0].index

# Columns summing to zero over the training window.
zeros = df[:'2015'].sum().sort_values(ascending=False)
zeros_cols = zeros[zeros==0].index

# Constant columns.
non_unique_cols = []
for col in df.columns:
    if len(df[col].unique()) == 1:
        non_unique_cols.append(col)

FTS_COLS = [
    col for col in df.columns[1:]
    if col not in nulls_cols
    and col not in inf_cols
    and col not in zeros_cols
    and col not in non_unique_cols
]
TARGET = 'pet'
df = df[[TARGET] + FTS_COLS]
df
# -

# ## Compute correlation

# Absolute correlation of each feature with the target on the train window;
# used below to drop weakly-correlated features.
tr_df = df[:'2015']
corrs = tr_df[FTS_COLS].corrwith(tr_df[TARGET]).abs().sort_values(ascending=False)
corrs

# # ElasticNet

# + code_folding=[3]
def mape(y_true, y_pred): return round(np.mean(np.abs((y_pred-y_true)/y_true)) * 100, 4)

def line_model(model, df, sli, params, corr_rank=None):
    """Grid-search a linear model over `params`, pick the best by validation
    MAPE, plot predictions, and refit on the whole training window.

    model     -- dotted name evaluated via eval(), e.g. 'linear_model.ElasticNet'
                 (SECURITY NOTE: eval() on a string — only pass trusted names)
    df        -- frame with TARGET plus feature columns
    sli       -- last year (int) of the training slice; sli+1..2015 is validation
    params    -- iterable of keyword-argument dicts (e.g. ParameterGrid)
    corr_rank -- drop features whose |corr| with the target is <= this value
    Returns (validation-fitted model, full-train-fitted model, feature columns).
    Reads `corrs` and TARGET from module scope.
    """
    full_df, tr, cv = df[:'2015'].copy(), df[:str(sli)].copy(), df[str(sli+1):'2015'].copy()
    print('Start:', tr.shape, cv.shape)

    corrs_cols = corrs[corrs <= corr_rank].index
    print('Del corr cols:', len(corrs_cols))
    del_cols = list(corrs_cols) + [TARGET]
    FCOLS = [col for col in full_df.columns if col not in del_cols]
    print('After:', tr[FCOLS].shape, cv[FCOLS].shape)

    # Evaluate every hyperparameter combination on the validation slice.
    errors = []
    for param in tqdm(params):
        lmodel = eval(model)(**param)
        lmodel.fit(tr[FCOLS], tr[TARGET])
        ltr_preds = pd.Series(lmodel.predict(tr[FCOLS]), index=tr.index)
        lcv_preds = pd.Series(lmodel.predict(cv[FCOLS]), index=cv.index)
        cv_mape = mape(cv[TARGET], lcv_preds)
        errors.append({
            'mape': cv_mape,
            'param': param
        })

    errors = pd.DataFrame.from_records(errors)
    min_error = errors.iloc[errors['mape'].idxmin()]
    print(min_error['mape'], min_error['param'])

    # Refit the winning configuration and plot train/validation predictions.
    lmodel = eval(model)(**min_error['param'])
    lmodel.fit(tr[FCOLS], tr[TARGET])
    ltr_preds = pd.Series(lmodel.predict(tr[FCOLS]), index=tr.index)
    lcv_preds = pd.Series(lmodel.predict(cv[FCOLS]), index=cv.index)

    plt.figure(figsize=(18,6))
    sns.lineplot(data=df, x=df.index, y=TARGET, label='Full')
    sns.lineplot(x=ltr_preds.index, y=ltr_preds.values, label='Train_pred')
    sns.lineplot(x=lcv_preds.index, y=lcv_preds.values, label='Valid_pred', c='red')
    plt.title(model.split('.')[1])
    plt.xlabel('Date'); plt.ylabel('Prices')

    # Final model fitted on the entire training window (through 2015).
    f_lmodel = eval(model)(**min_error['param'])
    f_lmodel.fit(full_df[FCOLS], full_df[TARGET])

    return lmodel, f_lmodel, FCOLS
# -

# +
params = ParameterGrid({
    'alpha': [0.0001, 0.001, 0.01, 0.1, 0.5, 1, 2, 3, 4],
    'l1_ratio': [0.1, 0.5, 1, 3, 5],
    'positive': [True,False],
    'max_iter': [10, 15, 20, 50, 100, 200], # , 100, 200, 300, 500
    'precompute': [True,False],
    'warm_start': [True,False],
    #'normalize': [True,False],
    'selection': ['cyclic','random'],
    'tol': [0.01, 0.001, 0.0001],
    'fit_intercept': [True,False]
})

lmodel, f_lmodel, FCOLS = line_model(
    'linear_model.ElasticNet', df, 2014, params, corr_rank=0.6)
eli5.show_weights(f_lmodel, top=15, feature_names=FCOLS)
# -

# ## _Prediction

ts = df['2016':].copy()
predict_f = f_lmodel.predict(ts[FCOLS])
ts_preds = pd.DataFrame(predict_f, columns=['pet'], index=ts.index)

ts_preds.loc[test_target.index].to_csv('submit_model3')#('sub/pet_linear_35_11.csv')

# # Comparison

# +
# def mape(y_true, y_pred): return 100 * np.mean(np.abs((y_pred-y_true)/y_true))
# s_1 = pd.read_csv('sub\pet_linear_35.csv', index_col=0, parse_dates=["date"])
# s_2 = pd.read_csv('sub\pet_linear_35_10.csv', index_col=0, parse_dates=["date"])
# print('mape =',mape(s_1['pet'].values, s_2['pet'].values))
sibur-challenge-2019-petf-v2/model3_P4_7745_Pet_35.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <div class="contentcontainer med left" style="margin-left: -50px;"> # <dl class="dl-horizontal"> # <dt>Title</dt> <dd> Curve Element</dd> # <dt>Dependencies</dt> <dd>Bokeh</dd> # <dt>Backends</dt> <dd><a href='./Curve.ipynb'>Bokeh</a></dd> <dd><a href='../matplotlib/Curve.ipynb'>Matplotlib</a></dd> # </dl> # </div> import numpy as np import holoviews as hv hv.extension('bokeh') # ``Curve`` Elements are used to display quantitative values over a continuous interval or time span. They accept tabular data with one key dimension representing the samples along the x-axis and one value dimension of the height of the curve at for each sample. See the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays. # #### Simple Curve # # A ``Curve`` is a set of values provided for some set of keys from a [continuously indexable 1D coordinate system](Continuous_Coordinates.ipynb), where the plotted values will be connected up because they are assumed to be samples from a continuous relation. points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] hv.Curve(points) # #### Interpolation # The ``Curve`` also supports the ``interpolation`` plot option to determine whether to linearly interpolate the curve values or to draw discrete steps: # %%opts Curve [width=600] NdOverlay [legend_position='right'] hv.NdOverlay({interp: hv.Curve(points[::8]).opts(plot=dict(interpolation=interp)) for interp in ['linear', 'steps-mid', 'steps-pre', 'steps-post']}) # For full documentation and the available style and plot options, use ``hv.help(hv.Curve).``
examples/reference/elements/bokeh/Curve.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <font size="+3">Time-series Generative Adversarial Network (TimeGAN)</font> # # Imports & Settings # Adapted from the excellent paper by <NAME>, <NAME>, and <NAME>: # [Time-series Generative Adversarial Networks](https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks), # Neural Information Processing Systems (NeurIPS), 2019. # # - Last updated Date: April 24th 2020 # - [Original code](https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/master/alg/timegan/) author: <NAME> (<EMAIL>) import warnings warnings.filterwarnings('ignore') # + import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler import tensorflow as tf from pathlib import Path from tqdm import tqdm from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import GRU, Dense, RNN, GRUCell, Input from tensorflow.keras.losses import BinaryCrossentropy, MeanSquaredError from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.utils import plot_model import matplotlib.pyplot as plt import seaborn as sns # - gpu_devices = tf.config.experimental.list_physical_devices('GPU') if gpu_devices: print('Using GPU') tf.config.experimental.set_memory_growth(gpu_devices[0], True) else: print('Using CPU') sns.set_style('white') # # Experiment Path results_path = Path('time_gan') if not results_path.exists(): results_path.mkdir() experiment = 0 log_dir = results_path / f'experiment_{experiment:02}' if not log_dir.exists(): log_dir.mkdir(parents=True) hdf_store = results_path / 'TimeSeriesGAN.h5' # # Prepare Data # ## Parameters seq_len = 24 n_seq = 6 batch_size = 128 tickers = ['BA', 'CAT', 'DIS', 'GE', 'IBM', 'KO'] def select_data(): df = 
(pd.read_hdf('../data/assets.h5', 'quandl/wiki/prices') .adj_close .unstack('ticker') .loc['2000':, tickers] .dropna()) df.to_hdf(hdf_store, 'data/real') select_data() # ## Plot Series # + df = pd.read_hdf(hdf_store, 'data/real') axes = df.div(df.iloc[0]).plot(subplots=True, figsize=(14, 6), layout=(3, 2), title=tickers, legend=False, rot=0, lw=1, color='k') for ax in axes.flatten(): ax.set_xlabel('') plt.suptitle('Normalized Price Series') plt.gcf().tight_layout() sns.despine(); # - # ## Correlation sns.clustermap(df.corr(), annot=True, fmt='.2f', cmap=sns.diverging_palette(h_neg=20, h_pos=220), center=0); # ## Normalize Data scaler = MinMaxScaler() scaled_data = scaler.fit_transform(df).astype(np.float32) # ## Create rolling window sequences # + data = [] for i in range(len(df) - seq_len): data.append(scaled_data[i:i + seq_len]) n_windows = len(data) # - # ## Create tf.data.Dataset real_series = (tf.data.Dataset .from_tensor_slices(data) .shuffle(buffer_size=n_windows) .batch(batch_size)) real_series_iter = iter(real_series.repeat()) # ## Set up random series generator def make_random_data(): while True: yield np.random.uniform(low=0, high=1, size=(seq_len, n_seq)) # We use the Python generator to feed a `tf.data.Dataset` that continues to call the random number generator as long as necessary and produces the desired batch size. random_series = iter(tf.data.Dataset .from_generator(make_random_data, output_types=tf.float32) .batch(batch_size) .repeat()) # # TimeGAN Components # The design of the TimeGAN components follows the author's sample code. # ## Network Parameters hidden_dim = 24 num_layers = 3 # ## Set up logger writer = tf.summary.create_file_writer(log_dir.as_posix()) # ## Input place holders X = Input(shape=[seq_len, n_seq], name='RealData') Z = Input(shape=[seq_len, n_seq], name='RandomData') # ## RNN block generator # We keep it very simple and use a very similar architecture for all four components. 
For a real-world application, they should be tailored to the data. def make_rnn(n_layers, hidden_units, output_units, name): return Sequential([GRU(units=hidden_units, return_sequences=True, name=f'GRU_{i + 1}') for i in range(n_layers)] + [Dense(units=output_units, activation='sigmoid', name='OUT')], name=name) # ## Embedder & Recovery embedder = make_rnn(n_layers=3, hidden_units=hidden_dim, output_units=hidden_dim, name='Embedder') recovery = make_rnn(n_layers=3, hidden_units=hidden_dim, output_units=n_seq, name='Recovery') # ## Generator & Discriminator generator = make_rnn(n_layers=3, hidden_units=hidden_dim, output_units=hidden_dim, name='Generator') discriminator = make_rnn(n_layers=3, hidden_units=hidden_dim, output_units=1, name='Discriminator') supervisor = make_rnn(n_layers=2, hidden_units=hidden_dim, output_units=hidden_dim, name='Supervisor') # # TimeGAN Training # ## Settings train_steps = 10000 gamma = 1 # ## Generic Loss Functions mse = MeanSquaredError() bce = BinaryCrossentropy() # # Phase 1: Autoencoder Training # ## Architecture # + H = embedder(X) X_tilde = recovery(H) autoencoder = Model(inputs=X, outputs=X_tilde, name='Autoencoder') # - autoencoder.summary() plot_model(autoencoder, to_file=(results_path / 'autoencoder.png').as_posix(), show_shapes=True) # ## Autoencoder Optimizer autoencoder_optimizer = Adam() # ## Autoencoder Training Step @tf.function def train_autoencoder_init(x): with tf.GradientTape() as tape: x_tilde = autoencoder(x) embedding_loss_t0 = mse(x, x_tilde) e_loss_0 = 10 * tf.sqrt(embedding_loss_t0) var_list = embedder.trainable_variables + recovery.trainable_variables gradients = tape.gradient(e_loss_0, var_list) autoencoder_optimizer.apply_gradients(zip(gradients, var_list)) return tf.sqrt(embedding_loss_t0) # ## Autoencoder Training Loop for step in tqdm(range(train_steps)): X_ = next(real_series_iter) step_e_loss_t0 = train_autoencoder_init(X_) with writer.as_default(): tf.summary.scalar('Loss Autoencoder Init', 
step_e_loss_t0, step=step) # ## Persist model # + # autoencoder.save(log_dir / 'autoencoder') # - # # Phase 2: Supervised training # ## Define Optimizer supervisor_optimizer = Adam() # ## Train Step @tf.function def train_supervisor(x): with tf.GradientTape() as tape: h = embedder(x) h_hat_supervised = supervisor(h) g_loss_s = mse(h[:, 1:, :], h_hat_supervised[:, 1:, :]) var_list = supervisor.trainable_variables gradients = tape.gradient(g_loss_s, var_list) supervisor_optimizer.apply_gradients(zip(gradients, var_list)) return g_loss_s # ## Training Loop for step in tqdm(range(train_steps)): X_ = next(real_series_iter) step_g_loss_s = train_supervisor(X_) with writer.as_default(): tf.summary.scalar('Loss Generator Supervised Init', step_g_loss_s, step=step) # ## Persist Model # + # supervisor.save(log_dir / 'supervisor') # - # # Joint Training # ## Generator # ### Adversarial Architecture - Supervised # + E_hat = generator(Z) H_hat = supervisor(E_hat) Y_fake = discriminator(H_hat) adversarial_supervised = Model(inputs=Z, outputs=Y_fake, name='AdversarialNetSupervised') # - adversarial_supervised.summary() plot_model(adversarial_supervised, show_shapes=True) # ### Adversarial Architecture in Latent Space # + Y_fake_e = discriminator(E_hat) adversarial_emb = Model(inputs=Z, outputs=Y_fake_e, name='AdversarialNet') # - adversarial_emb.summary() plot_model(adversarial_emb, show_shapes=True) # ### Mean & Variance Loss X_hat = recovery(H_hat) synthetic_data = Model(inputs=Z, outputs=X_hat, name='SyntheticData') synthetic_data.summary() plot_model(synthetic_data, show_shapes=True) def get_generator_moment_loss(y_true, y_pred): y_true_mean, y_true_var = tf.nn.moments(x=y_true, axes=[0]) y_pred_mean, y_pred_var = tf.nn.moments(x=y_pred, axes=[0]) g_loss_mean = tf.reduce_mean(tf.abs(y_true_mean - y_pred_mean)) g_loss_var = tf.reduce_mean(tf.abs(tf.sqrt(y_true_var + 1e-6) - tf.sqrt(y_pred_var + 1e-6))) return g_loss_mean + g_loss_var # ## Discriminator # ### Architecture: Real 
Data Y_real = discriminator(H) discriminator_model = Model(inputs=X, outputs=Y_real, name='DiscriminatorReal') discriminator_model.summary() plot_model(discriminator_model, show_shapes=True) # ## Optimizers generator_optimizer = Adam() discriminator_optimizer = Adam() embedding_optimizer = Adam() # ## Generator Train Step @tf.function def train_generator(x, z): with tf.GradientTape() as tape: y_fake = adversarial_supervised(z) generator_loss_unsupervised = bce(y_true=tf.ones_like(y_fake), y_pred=y_fake) y_fake_e = adversarial_emb(z) generator_loss_unsupervised_e = bce(y_true=tf.ones_like(y_fake_e), y_pred=y_fake_e) h = embedder(x) h_hat_supervised = supervisor(h) generator_loss_supervised = mse(h[:, 1:, :], h_hat_supervised[:, 1:, :]) x_hat = synthetic_data(z) generator_moment_loss = get_generator_moment_loss(x, x_hat) generator_loss = (generator_loss_unsupervised + generator_loss_unsupervised_e + 100 * tf.sqrt(generator_loss_supervised) + 100 * generator_moment_loss) var_list = generator.trainable_variables + supervisor.trainable_variables gradients = tape.gradient(generator_loss, var_list) generator_optimizer.apply_gradients(zip(gradients, var_list)) return generator_loss_unsupervised, generator_loss_supervised, generator_moment_loss # ## Embedding Train Step @tf.function def train_embedder(x): with tf.GradientTape() as tape: h = embedder(x) h_hat_supervised = supervisor(h) generator_loss_supervised = mse(h[:, 1:, :], h_hat_supervised[:, 1:, :]) x_tilde = autoencoder(x) embedding_loss_t0 = mse(x, x_tilde) e_loss = 10 * tf.sqrt(embedding_loss_t0) + 0.1 * generator_loss_supervised var_list = embedder.trainable_variables + recovery.trainable_variables gradients = tape.gradient(e_loss, var_list) embedding_optimizer.apply_gradients(zip(gradients, var_list)) return tf.sqrt(embedding_loss_t0) # ## Discriminator Train Step @tf.function def get_discriminator_loss(x, z): y_real = discriminator_model(x) discriminator_loss_real = bce(y_true=tf.ones_like(y_real), 
y_pred=y_real) y_fake = adversarial_supervised(z) discriminator_loss_fake = bce(y_true=tf.zeros_like(y_fake), y_pred=y_fake) y_fake_e = adversarial_emb(z) discriminator_loss_fake_e = bce(y_true=tf.zeros_like(y_fake_e), y_pred=y_fake_e) return (discriminator_loss_real + discriminator_loss_fake + gamma * discriminator_loss_fake_e) @tf.function def train_discriminator(x, z): with tf.GradientTape() as tape: discriminator_loss = get_discriminator_loss(x, z) var_list = discriminator.trainable_variables gradients = tape.gradient(discriminator_loss, var_list) discriminator_optimizer.apply_gradients(zip(gradients, var_list)) return discriminator_loss # ## Training Loop step_g_loss_u = step_g_loss_s = step_g_loss_v = step_e_loss_t0 = step_d_loss = 0 for step in range(train_steps): # Train generator (twice as often as discriminator) for kk in range(2): X_ = next(real_series_iter) Z_ = next(random_series) # Train generator step_g_loss_u, step_g_loss_s, step_g_loss_v = train_generator(X_, Z_) # Train embedder step_e_loss_t0 = train_embedder(X_) X_ = next(real_series_iter) Z_ = next(random_series) step_d_loss = get_discriminator_loss(X_, Z_) if step_d_loss > 0.15: step_d_loss = train_discriminator(X_, Z_) if step % 1000 == 0: print(f'{step:6,.0f} | d_loss: {step_d_loss:6.4f} | g_loss_u: {step_g_loss_u:6.4f} | ' f'g_loss_s: {step_g_loss_s:6.4f} | g_loss_v: {step_g_loss_v:6.4f} | e_loss_t0: {step_e_loss_t0:6.4f}') with writer.as_default(): tf.summary.scalar('G Loss S', step_g_loss_s, step=step) tf.summary.scalar('G Loss U', step_g_loss_u, step=step) tf.summary.scalar('G Loss V', step_g_loss_v, step=step) tf.summary.scalar('E Loss T0', step_e_loss_t0, step=step) tf.summary.scalar('D Loss', step_d_loss, step=step) # ## Persist Synthetic Data Generator synthetic_data.save(log_dir / 'synthetic_data') # # Generate Synthetic Data generated_data = [] for i in range(int(n_windows / batch_size)): Z_ = next(random_series) d = synthetic_data(Z_) generated_data.append(d) len(generated_data) 
generated_data = np.array(np.vstack(generated_data)) generated_data.shape np.save(log_dir / 'generated_data.npy', generated_data) # ## Rescale generated_data = (scaler.inverse_transform(generated_data .reshape(-1, n_seq)) .reshape(-1, seq_len, n_seq)) generated_data.shape # ## Persist Data with pd.HDFStore(hdf_store) as store: store.put('data/synthetic', pd.DataFrame(generated_data.reshape(-1, n_seq), columns=tickers)) # ## Plot sample Series # + fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14, 7)) axes = axes.flatten() index = list(range(1, 25)) synthetic = generated_data[np.random.randint(n_windows)] idx = np.random.randint(len(df) - seq_len) real = df.iloc[idx: idx + seq_len] for j, ticker in enumerate(tickers): (pd.DataFrame({'Real': real.iloc[:, j].values, 'Synthetic': synthetic[:, j]}) .plot(ax=axes[j], title=ticker, secondary_y='Synthetic', style=['-', '--'], lw=1)) sns.despine() fig.tight_layout()
ml4trading-2ed/21_gans_for_synthetic_time_series/02_TimeGAN_TF2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # %matplotlib inline import astropy.coordinates as coord from astropy.table import Table import astropy.units as u import gala.coordinates as gc import gala.dynamics as gd from gala.dynamics import mockstream import gala.potential as gp from gala.units import galactic # - plt.style.use('notebook') t = Table.read('../data/stream_track.txt', format='ascii.commented_header', delimiter=',') tp = Table.read('../data/pvd_stream.dat', format='ascii.commented_header', delimiter=' ') # ### Rotate the galaxy to lie along z=0 pa = 154*u.deg # https://www.flickr.com/photos/dcrowson/35166799656 theta = 64*u.deg # + x = np.cos(theta)*t['x'] + np.sin(theta)*t['y'] z = -np.sin(theta)*t['x'] + np.cos(theta)*t['y'] xpvd = np.cos(theta)*tp['x_pvd_kpc'] + np.sin(theta)*tp['y_pvd_kpc'] zpvd = -np.sin(theta)*tp['x_pvd_kpc'] + np.cos(theta)*tp['y_pvd_kpc'] # - # progenitor as densest location on the stream xp_rot, zp_rot = -38.7, -2.3 xp_ = np.cos(theta)*xp_rot + np.sin(theta)*zp_rot zp_ = -np.sin(theta)*xp_rot + np.cos(theta)*zp_rot # + plt.plot(t['x'], t['y'], 'ko', alpha=0.1) plt.plot(x, z, 'ko') plt.plot(xp_, zp_, 'kx', ms=10, mew=2) plt.xlabel('x [kpc]') plt.ylabel('z [kpc]') plt.gca().set_aspect('equal') # - # ### Set up gravitational potential # most params from Martinez-Delgado paper, + tuned halo mass to reproduce Casertano measurement of max vcirc # https://ui.adsabs.harvard.edu/abs/2008ApJ...689..184M/abstract # adopted halo flattening of 0.95 to match the trailing tail curvature ham = gp.Hamiltonian(gp.MilkyWayPotential(nucleus=dict(m=0), halo=dict(c=1.1, m=1.17e12*u.Msun, r_s=26*u.kpc), bulge=dict(m=2.3e10*u.Msun, c=0.6*u.kpc), disk=dict(m=8.4e10*u.Msun, a=6.24*u.kpc, 
b=0.26*u.kpc))) # + xyz = np.zeros((3, 128)) xyz[0] = np.linspace(1, 100, 128) print('maximal circular velocity {:.0f}'.format(np.max(ham.potential.circular_velocity(xyz)))) plt.figure(figsize=(8,5)) plt.plot(xyz[0], ham.potential.circular_velocity(xyz)) plt.axhline(235, color='k') plt.axhline(215, color='k') plt.ylim(100,250) plt.xlabel('r [kpc]') plt.ylabel('$V_c$ [km s$^{-1}$]') plt.tight_layout() # - for d in [200,225,250,300]: print('{:.0f} kpc {:.2g}'.format(d, ham.potential.mass_enclosed(d*u.kpc)[0])) # ### Pick orbit for the satellite # + # trial progenitor 6D location xp = np.array([xp_, 0, zp_]) * u.kpc vp = np.array([30,65,225]) * u.km/u.s w0 = gd.PhaseSpacePosition(xp, vel=vp) dt = 0.5*u.Myr n_steps = 900 orbit_fwd = ham.integrate_orbit(w0, dt=dt, n_steps=n_steps) orbit_rr = ham.integrate_orbit(w0, dt=-dt, n_steps=n_steps) plt.plot(x, z, 'ko') for orbit in [orbit_fwd, orbit_rr]: plt.plot(orbit.cartesian.x, orbit.cartesian.z, '-', color='tab:blue') plt.xlabel('x [kpc]') plt.ylabel('z [kpc]') plt.gca().set_aspect('equal') # - # ### Create a stream model # + f = 3 prog_orbit = ham.integrate_orbit(w0, dt=-dt/f, n_steps=5200*f) prog_orbit = prog_orbit[::-1] n_times = np.size(prog_orbit.t) prog_mass = np.linspace(2e8, 0, n_times) # stream = mockstream.fardal_stream(ham, prog_orbit, prog_mass=prog_mass, release_every=1, seed=4359) # fardal values for particle release conditions k_mean = np.array([2., 0, 0, 0, 0.3, 0]) k_disp = np.array([0.5, 0, 0.5, 0, 0.5, 0.5]) # tweaks to reproduce smaller offset of tidal arms, trailing tail extension k_mean = np.array([1.2, 0, 0, 0.0, 0.1, 0]) k_disp = np.array([0.5, 0, 0.5, 0.02, 0.5, 0.5]) stream = mockstream.mock_stream(ham, prog_orbit, prog_mass=prog_mass, release_every=1, seed=4359, k_mean=k_mean, k_disp=k_disp) # + plt.figure(figsize=(10,10)) plt.plot(x, z, 'ko', ms=4, label='Dragonfly (Colleen)') plt.plot(xpvd, zpvd, 'ro', ms=4, label='Dragonfly (Pieter)') plt.plot(prog_orbit.cartesian.x, prog_orbit.cartesian.z, 
'-', color='tab:blue', label='Orbit', alpha=0.5) plt.plot(stream.cartesian.x, stream.cartesian.z, '.', color='0.3', ms=1, alpha=0.1, label='Stream model') plt.legend(fontsize='small', loc=1) plt.xlabel('x [kpc]') plt.ylabel('z [kpc]') plt.xlim(-40,130) plt.ylim(-60,70) plt.gca().set_aspect('equal') plt.savefig('../plots/trial_model_xz.png', dpi=200) # - Ns = np.size(stream.cartesian.x) Nsh = int(Ns/2) Nsq = int(Ns/4) xp, vp tout_stream = Table([stream.cartesian.x, stream.cartesian.z], names=('x', 'z')) tout_stream.write('../data/stream.fits', overwrite=True) tout_orbit = Table([prog_orbit.cartesian.x, prog_orbit.cartesian.z, prog_orbit.t], names=('x', 'z', 't')) tout_orbit.write('../data/orbit.fits', overwrite=True)
notebooks/orbit_massive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Limits # ## `Press Keys` on MacOS/Chrome # While SeleniumLibrary 3.3.0 added `Press Keys` which can target non-inputs, as of `chromedriver` version `2.45` the <kbd>⌘</kbd> key cannot be used. As this is the favored key for shortcuts, this means almost all of the client keyboard shortcuts just won't work if you are trying to test on MacOS. # # > **Workaround** # > # > _If you are trying to `Press Keys` where the <kbd>⌘</kbd> key would be used, try to find a combination of simpler key combinations and mouse clicks._
docs/LIMITS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def select_max_intervals(intervals):
    """Greedy interval scheduling.

    Sort intervals by right endpoint and keep each interval whose start
    is at or after the end of the last kept one. Returns a maximum-size
    list of pairwise non-overlapping (start, end) tuples, ordered by
    increasing end time. Returns [] for empty input (the original
    crashed on n == 0 with an IndexError).
    """
    chosen = []
    for start, end in sorted(intervals, key=lambda iv: iv[1]):
        # First interval is always taken; afterwards only compatible ones.
        if not chosen or start >= chosen[-1][1]:
            chosen.append((start, end))
    return chosen


def main():
    """Read n intervals from 'int.in', write the size of a maximum
    compatible subset to 'int.out'."""
    # Context managers guarantee the files are closed even if a parse
    # error occurs (the original open()/close() pairs leaked on error).
    with open('int.in') as fin:
        n = int(fin.readline())
        intervals = [tuple(map(int, fin.readline().split()))
                     for _ in range(n)]

    chosen = select_max_intervals(intervals)

    with open('int.out', "w") as fout:
        # Only the count is required; per-interval output stays omitted,
        # matching the original behavior.
        fout.write(str(len(chosen)))


if __name__ == '__main__':
    main()
# -
Python (Probleme)/Int.ipynb
# # 📝 Exercise M6.04 # # The aim of this exercise is to: # # * verify if a GBDT tends to overfit if the number of estimators is not # appropriate as previously seen for AdaBoost; # * use the early-stopping strategy to avoid adding unnecessary trees, to # get the best statistical performances. # # We will use the California housing dataset to conduct our experiments. # + from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split data, target = fetch_california_housing(return_X_y=True, as_frame=True) target *= 100 # rescale the target in k$ data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=0, test_size=0.5) # - # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">If you want a deeper overview regarding this dataset, you can refer to the # Appendix - Datasets description section at the end of this MOOC.</p> # </div> # Similarly to the previous exercise, create a gradient boosting decision tree # and create a validation curve to assess the impact of the number of trees # on the statistical performance of the model. # + # Write your code here. # - # Unlike AdaBoost, the gradient boosting model will always improve when # increasing the number of trees in the ensemble. However, it will reach a # plateau where adding new trees will just make fitting and scoring slower. # # To avoid adding new unnecessary tree, gradient boosting offers an # early-stopping option. Internally, the algorithm will use an out-of-sample # set to compute the statistical performance of the model at each addition of a # tree. Thus, if the statistical performance are not improving for several # iterations, it will stop adding trees. # # Now, create a gradient-boosting model with `n_estimators=1000`. This number # of trees will be too large. 
# Change the parameter `n_iter_no_change` so
# that fitting stops after the addition of 5 trees that fail to
# improve the overall statistical performance.

# +
# Write your code here.
notebooks/ensemble_ex_04.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

import numpy as np

# Load the saved evaluation bundle: index-encoded sentences, labels,
# vocabulary maps, and the two attention-weight arrays.
x = np.load('yahoo_examples_1.npz')
test_sents = x['test_sents']
test_labels = x['test_labels']
wordtoix = x['wordtoix']
ixtoword = x['ixtoword'].all()  # stored as a 0-d object array wrapping the dict
Att_h = x['Att_h']
Att_v = x['Att_v']

# Decode every sentence from word indices back to word strings.
test_sents_words = [[ixtoword[ix] for ix in sent_ids] for sent_ids in test_sents]

test_sents_words

# One HTML file per sentence (first 100): each word gets a blue background
# whose opacity encodes its max-normalised attention weight.
for i, (words, alphas_values) in enumerate(zip(test_sents_words, Att_v)):
    if i >= 100:
        break
    scaled = alphas_values / alphas_values.max()
    with open("visualization_{}.html".format(i), "w") as html_file:
        for word, alpha in zip(words, scaled):
            if word == ":START:":
                continue  # skip the sequence-start marker
            elif word == ":PAD:":
                break  # padding starts here; the sentence is over
            html_file.write('<font style="background: rgba(0, 0, 255, %f)">%s</font>\n' % (alpha*1.3, word))

# Tiny hand-made example used to sanity-check the rendering below.
words = ['a', 'b', 'c']
alphas_values = np.array([0,1,2])

# +
with open("visualization.html", "w") as html_file:
    normalised = alphas_values / alphas_values.max()
    for word, alpha in zip(words, normalised):
        if word == ":START:":
            continue
        elif word == ":PAD:":
            break
        html_file.write('<font style="background: rgba(255, 255, 255, %f)">%s</font>\n' % (alpha, word))

print('\nOpen visualization.html to checkout the attention coefficients visualization.')
# -
python_notebooks/attention plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.7 64-bit (''cc'': conda)'
#     language: python
#     name: python37764bitcccondaf9be1bff0edc45b89bc5baae44dcd3f4
# ---

# # Ejemplos de Bisección e Iteración de Punto Fijo

import numpy as np
import matplotlib.pyplot as plt

# Cálculo del error
#
# \begin{equation}
# error = |x - r|
# \end{equation}

error = lambda x, r: np.abs(x - r)

# ## Problema
#
# Obtener $\sqrt{2}$
#
# \begin{equation}
# \begin{split}
# x & = \sqrt{2} \quad /()^2\\
# x^2 & = 2
# \end{split}
# \end{equation}

# Si bien llegamos a una ecuación cuadrática la cual tiene dos soluciones, nos interesa el valor positivo por lo que tanto para la *bisección* como para la *IPF* debemos elegir convenientemente el intervalo y estimación inicial respectivamente. Para este problema debería tomarse en cuenta $x>0$.

# ## Resolución utilizando Bisección
#
# Se define $f(x)$
# \begin{equation}
# f(x)=x^2-2=0
# \end{equation}

def bisect(f, a, b, tol=1e-8):
    """Bisection on [a, b]; returns the array of midpoint iterates.

    Stops once the half-interval length drops below `tol`, or earlier if
    an exact root is hit. Prints a warning and returns None when
    f(a)*f(b) >= 0, i.e. the interval does not bracket a sign change.
    """
    fa = f(a)
    fb = f(b)
    x = []
    # Reuse the endpoint evaluations already stored in fa/fb instead of
    # calling f(a) and f(b) a second time (original re-evaluated both).
    if np.sign(fa * fb) >= 0:
        print('f(a)f(b)<0 not satisfied!')
        return None
    while (b - a) / 2 > tol:
        c = (a + b) / 2.
        x.append(c)
        fc = f(c)
        # Did we find the root?
        if fc == 0:
            break
        elif np.sign(fa * fc) < 0:
            # Sign change in [a, c]: shrink from the right.
            b = c
        else:
            # Sign change in [c, b]: shrink from the left.
            a = c
            fa = fc
    return np.array(x)

f = lambda x: x ** 2 - 2

x = np.linspace(0, 2)
plt.figure(figsize=(12, 6))
plt.plot(x, f(x))
plt.grid(True)
plt.xlabel(r"$x$")
plt.ylabel(r"$f(x)$")
plt.show()

# Buscamos el intervalo donde $f(a)\, f(b) < 0$. En este caso $[a, b]=[1,2]$ parece ser útil.
x_b = bisect(f, 1, 2)

n_i = np.arange(1, len(x_b) + 1)
er = error(x_b, np.sqrt(2))

plt.figure(figsize=(12, 6))
plt.plot(n_i, er, 'b.')
plt.grid(True)
plt.xlabel("# iteraciones")
plt.ylabel("Error")
plt.yscale('log')
plt.show()

# El error decae linealmente a medida que se aumenta el número de iteraciones.

# ## Resolución utilizando punto Fijo

def cobweb(x, g=None):
    """Draw the cobweb diagram of the iterates `x`.

    If `g` is given, the curve y = g(x) is drawn over the range spanned
    by the iterates as well.
    """
    min_x = np.amin(x)
    max_x = np.amax(x)
    plt.figure(figsize=(10, 10))
    ax = plt.axes()
    # Identity line y = x that the iterates "bounce" against.
    plt.plot(np.array([min_x, max_x]), np.array([min_x, max_x]), 'b-')
    for i in np.arange(x.size - 1):
        delta_x = x[i + 1] - x[i]
        # Shorten each arrow slightly so the head does not overshoot.
        head_length = np.abs(delta_x) * 0.04
        arrow_length = delta_x - np.sign(delta_x) * head_length
        ax.arrow(x[i], x[i], 0, arrow_length, head_width=1.5 * head_length,
                 head_length=head_length, fc='k', ec='k')
        ax.arrow(x[i], x[i + 1], arrow_length, 0, head_width=1.5 * head_length,
                 head_length=head_length, fc='k', ec='k')
    if g is not None:  # idiomatic identity test (was: g != None)
        y = np.linspace(min_x, max_x, 1000)
        plt.plot(y, g(y), 'r')
    plt.title('Cobweb diagram')
    plt.grid(True)
    plt.show()


def fpi(g, x0, k, tol=1e-16, flag_cobweb=False):
    """Fixed-point iteration x_{i+1} = g(x_i) starting from `x0`.

    Runs at most `k` iterations, stopping early when two consecutive
    iterates differ by at most `tol`. Returns the array of iterates
    WITHOUT the initial guess x0. Optionally draws the cobweb diagram.
    """
    x = np.empty(k + 1)
    x[0] = x0
    for i in range(k):
        # Bug fix: the original ran this test at i == 0 as well, which
        # compared x[0] against x[-1] — an *uninitialized* slot of
        # np.empty — and could terminate the iteration spuriously.
        if i > 0 and np.abs(x[i] - x[i - 1]) <= tol:
            x = x[:i]
            break
        x[i + 1] = g(x[i])
    if flag_cobweb:
        cobweb(x, g)
    return x[1:]

# ### Manejo algebraico:
#
# Debemos construir $g(x)$ para utilizar el algoritmo.
# # \begin{equation} # x=g(x) # \end{equation} # #### Version 1 # # \begin{equation} # \begin{split} # x^2 & = 2 \quad / \frac{1}{x}\\ # x & = \frac2x \\ # g_1(x) & = \frac2x # \end{split} # \end{equation} # # # #### Version 2 # # \begin{equation} # \begin{split} # x^2 & = 2 \quad / +x\\ # x^2 + x & = 2+x \\ # x & = 2 + x - x^2 \\ # g_2(x) & = 2 + x - x^2 # \end{split} # \end{equation} # # #### Version 3 # # \begin{equation} # \begin{split} # x^2 & = 2 \quad / +x^3\\ # x^3 + x^2 & = 2 + x^3 \\ # x(x^2 + x) & = 2 + x^3 \\ # x & = \frac{2+x^3}{x^2 + x} \\ # g_3(x) & = \frac{2+x^3}{x^2 + x} # \end{split} # \end{equation} # # #### Version 4 # # \begin{equation} # \begin{split} # x^2 & = 2 \quad / +x^2\\ # 2x^2 & = 2+x^2 \quad /\frac{1}{2x} \\ # x & = \frac{2 + x^2}{2x} \\ # x & = \frac1x + \frac{x}{2} \\ # g_4(x) & = \frac1x + \frac{x}{2} # \end{split} # \end{equation} # # Algunas funciones se pueden indeterminar para ciertos valores como en el caso de $g_3(x)$ y $g_4(x)$, por lo que hay que tener cuidado con la selección de $x_0$. g1 = lambda x: 2 / x g2 = lambda x: 2 + x - x ** 2 g3 = lambda x: (2 + x ** 3) / (x ** 2 + x) g4 = lambda x: 1 / x + x / 2 # ### Visualización de las funciones $g(x)$ x2 = np.linspace(0.1, 2, 100) plt.figure(figsize=(12, 6)) plt.plot(x2, g1(x2), label=r"$g_1(x)$") plt.plot(x2, g2(x2), label=r"$g_2(x)$") plt.plot(x2, g3(x2), label=r"$g_3(x)$") plt.plot(x2, g4(x2), label=r"$g_4(x)$") plt.plot(x2, x2, label=r"$y=x$") plt.ylim([0, 6]) plt.grid(True) plt.legend() plt.show() # ### Aplicación de IPF x_f_1 = fpi(g1, 1, 20, flag_cobweb=True) x_f_1 x_f_2 = fpi(g2, 1, 20, flag_cobweb=True) x_f_2 x_f_3 = fpi(g3, 1, 20, flag_cobweb=True) x_f_3 x_f_4 = fpi(g4, 1, 20, flag_cobweb=True) x_f_4 # Según la $g(x)$ y $x_0$ que se escoja, la *IPF* puede no converger o iterar alternando entre dos valores. 
er_f_1 = error(x_f_1, np.sqrt(2)) er_f_2 = error(x_f_2, np.sqrt(2)) er_f_3 = error(x_f_3, np.sqrt(2)) er_f_4 = error(x_f_4, np.sqrt(2)) plt.figure(figsize=(12, 6)) plt.plot(er_f_1, 'rx', label=r'$g_1(x)$') plt.plot(er_f_2, 'bs', label=r'$g_2(x)$') plt.plot(er_f_3, 'yo', label=r'$g_3(x)$') plt.plot(er_f_4, 'g.', label=r'$g_4(x)$') plt.grid(True) plt.xlabel("# iteraciones") plt.ylabel("Error") plt.yscale('log') plt.legend() plt.show() # Notar del gráfico que según el $g(x)$ utilizado podemos obtener una solución mucho más rápido como en el caso de $g_4(x)$. # ## Convergencia # ### Cota teórica # # \begin{equation} # \frac{\epsilon_{i+1}}{\epsilon_i} = \frac{|x_{i+1}-r|}{|x_{i}-r|} # \end{equation} # Veamos la convergencia de los resultados obtenidos conv_t = lambda x, r: error(x[1:], r) / error(x[:-1], r) # #### Para bisección... conv_b = conv_t(x_b, np.sqrt(2)) print("e_{i+1} \t e_{i} \t\t e_{i+1}/e_{i}") for i, (e1 ,e2) in enumerate(zip(error(x_b[1:], np.sqrt(2)), error(x_b[:-1], np.sqrt(2)))): print(("%.6f \t %.6f \t %.6f")%(e1, e2, conv_b[i])) plt.figure(figsize=(12, 6)) plt.plot(conv_b, 'bo') plt.grid(True) plt.xlabel("# de iteraciones") plt.ylabel(r"$\epsilon_{i+1}/\epsilon_i$") plt.show() # El error en la bisección puede diferir bastante entre cada iteración porque para $i$ podría estimar $x_c$ muy cerca de $r$ pero en $i+1$ se podría volver a alejar, dado que solo divide el intervalo a la mitad para generar la siguiente estimación. 
# #### Para IPF conv_f_1 = conv_t(x_f_1, np.sqrt(2)) conv_f_2 = conv_t(x_f_2, np.sqrt(2)) conv_f_3 = conv_t(x_f_3, np.sqrt(2)) conv_f_4 = conv_t(x_f_4, np.sqrt(2)) plt.figure(figsize=(12, 6)) plt.plot(conv_f_1, label=r'$g_1(x)$') plt.plot(conv_f_2, label=r'$g_2(x)$') plt.plot(conv_f_3, label=r'$g_3(x)$') plt.plot(conv_f_4, label=r'$g_4(x)$') plt.grid(True) plt.legend() plt.xlabel("# de iteraciones") plt.ylabel(r"$\epsilon_{i+1}/\epsilon_i$") plt.show() # $g_4(x)$ parece tener el mejor comportamiento aunque $g_3(x)$ tampoco sería una opción incorrecta. # ### Aproximación en cada iteracion # Podemos utilizar esta aproximación si no conocemos $r$. # # \begin{equation} # \frac{\epsilon_{i+1}}{\epsilon_i} \approx \frac{|x_{i+1}-x_{i}|}{|x_{i}-x_{i-1}|} # \end{equation} conv = lambda x: error(x[2:], x[1:-1]) / error(x[1:-1], x[:-2]) # #### Para bisección conv_b = conv(x_b) conv_b # #### Para IPF conv_f_1 = conv(x_f_1) conv_f_2 = conv(x_f_2) conv_f_3 = conv(x_f_3) conv_f_4 = conv(x_f_4) conv_f_1 conv_f_2 conv_f_3 conv_f_4 # Para que exista convergencia el cociente debe ser menor a $1$. plt.figure(figsize=(12, 6)) plt.plot(conv_b, 'bs', label="Bisección") plt.plot(conv_f_3, 'y.', label="IPF con " + r"$g_3(x)$") plt.plot(conv_f_4, 'go', label="IPF con " + r"$g_4(x)$") plt.yscale('log') plt.legend() plt.grid(True) plt.xlabel("# de iteraciones") plt.ylabel(r"$\epsilon_{i+1}/\epsilon_i$") plt.show() # Parece # ### Tasa $S$ # # Si $r=g(r)$ y $S = |g'(r)| < 1$. # En el caso de *IPF*: # * $g_1(x)=\frac{2}{x} \implies g_1'(x)=-\frac{2}{x^2}$, $S=|-1|=1$ # * $g_2(x)=2+x-x^2 \implies g_2'(x)=1-2x$, $S=|1-2\sqrt{2}|\approx 1.83$ # * $g_3(x)=\frac{2+x^3}{x^2 + x} \implies g_3'(x)=\frac{x^4+2 x^3-4x-2}{x^2 (1 + x)^2}$, $S\approx 0.172$ # * $g_4(x)=\frac1x + \frac{x}{2} \implies g_4'(x)=-\frac{1}{x^2} + \frac12$, $S=|-\frac{1}{2}+\frac12| =0$ # Teóricamente hay mejores elecciones de $g(x)$, ¿se valida con los resultados experimentales? 
# # Otro problema # # Resolver: # # \begin{equation} # f(x)=x^2-x-1$ # \end{equation} # ## Bisección # # Simplemente aplicar el algoritmo en el intervalo adecuado. f = lambda x: x ** 2 - x - 1 x = np.linspace(-2, 2, 100) plt.figure(figsize=(12, 6)) plt.plot(x, f(x)) plt.grid(True) plt.xlabel(r"$x$") plt.ylabel(r"$f(x)$") plt.show() # Como existen $2$ raíces, hay que seleccionar el intervalo según la raíz que necesitemos. Para $x_1$ se utilizará $[-2,0]$ y $[0,2]$ para $x_2$. x_b_1 = bisect(f, -2, 0) x_b_2 = bisect(f, 0, 2) x_b_1[-1], x_b_2[-1] # ## Solución analítica: # # \begin{equation} # x_1 = \frac{1-\sqrt{5}}{2}, \quad x_2 = \frac{1+\sqrt{5}}{2} # \end{equation} # ### Error de las aproximaciones n_i_1 = np.arange(1, len(x_b_1) + 1) n_i_2 = np.arange(1, len(x_b_2) + 1) er_1 = error(x_b_1, (1 - np.sqrt(5)) / 2) er_2 = error(x_b_2, (1 + np.sqrt(5)) / 2) plt.figure(figsize=(12, 6)) plt.plot(n_i_1, er_1, 'b.') plt.plot(n_i_2, er_2, 'r.') plt.grid(True) plt.xlabel("# iteraciones") plt.ylabel("Error") plt.yscale('log') plt.show() # Como era de esperarse con *bisección*, este decae lineal con respecto al número de iteraciones. # ## Iteración de Punto Fijo # ### Definición de funciones $g(x)$ # # 1. $f(x)=0$ # 2. $x = g(x) \implies f(x)=x-g(x)$ # # #### Versión 1 # \begin{equation} # \begin{split} # x & = x^2-1 \\ # g_1(x) & = x^2-1 # \end{split} # \end{equation} # # #### Versión 2 # \begin{equation} # \begin{split} # x^2 &= x + 1 \quad / \frac1x \\ # x &= 1 + \frac1x \\ # g_2(x) &= 1 + \frac1x # \end{split} # \end{equation} # # #### Versión 3 # \begin{equation} # \begin{split} # x^2-x &= 1 \\ # x(x-1) &= 1 \\ # x &= \frac{1}{x-1} \\ # g_3(x) &= \frac{1}{x-1} # \end{split} # \end{equation} # # #### Versión 4 # \begin{equation} # \begin{split} # x^2 &= x + 1 \quad +x^2\\ # 2x^2 &= x^2 +x + 1 \quad \frac{1}{2x} \\ # x &= \frac{x}{2} + \frac{1}{2x} + \frac12 \\ # g_4(x) &= \frac{x}{2} + \frac{1}{2x} + \frac12 # \end{split} # \end{equation} # ### ¿Cuál nos servirá? 
# # * $S_1=|g_1'(x)|=|2x|$ # * $S_2=|g_2'(x)|=\left|-\frac{1}{x^2}\right|$ # * $S_3=|g_3'(x)|=\left|-\frac{1}{(x-1)^2}\right|$ # * $S_4=|g_4'(x)|=\left|\frac12 - \frac{1}{2x^2}\right|$ g1 = lambda x: x ** 2 - 1 g2 = lambda x: 1 + 1 / x g3 = lambda x: 1 / (x - 1) g4 = lambda x: 1/2 *(x + 1/x + 1) g1p = lambda x: np.abs(2 * x) g2p = lambda x: np.abs(- 1 / x ** 2) g3p = lambda x: np.abs(-1 / (x - 1) ** 2) g4p = lambda x: np.abs(1 / 2 * (1 - 1 / x**2)) # Se puede realizar un análisis de forma visual, pero hay que evitar los problemas de indeterminación de las funciones $g'(x)$. # x para evitar indeterminación... xa = np.linspace(-1+1e-8, -1e-8) xb = np.linspace(1e-8, 1-1e-8) xc = np.linspace(1+1e-8, 2) plt.figure(figsize=(12, 6)) plt.plot(xa, g1p(xa), 'b-', label=r"$|g_1'(x)|$") plt.plot(xb, g1p(xb), 'b-') plt.plot(xa, g2p(xa), 'r-', label=r"$|g_2'(x)|$") plt.plot(xb, g2p(xb), 'r-') plt.plot(xc, g2p(xc), 'r-') plt.plot(xa, g3p(xa), 'g-', label=r"$|g_3'(x)|$") plt.plot(xb, g3p(xb), 'g-') plt.plot(xc, g3p(xc), 'g-') plt.plot(xa, g4p(xa), 'k-', label=r"$|g_4'(x)|$") plt.plot(xb, g4p(xb), 'k-') plt.plot(xc, g4p(xc), 'k-') plt.axvline(x=(1-np.sqrt(5))/2, color='m', linestyle='--') plt.axvline(x=(1+np.sqrt(5))/2, color='m', linestyle='--') plt.ylim([0., 1.1]) plt.xlim([-1, 2]) plt.legend() plt.grid(True) plt.show() # Otra alternativa es analizar el valor de $|g'(x)|$ en $r$. Si no se conoce $r$ podemos buscar en un vecindario, por ejemplo $(r-\varepsilon, r+\varepsilon)$. $\varepsilon$ debe ser lo suficientemente pequeño, ya que segun las características de $g'(x)$ se puede concluir algo completamente erróneo. g1p(-0.6), g1p(3/2) # Se descarta $g_1(x)$ g2p(-0.6), g2p(3/2) # $g_2(x)$ nos sirve para la segunda raíz. g3p(-0.6), g3p(3/2) # $g_3(x)$ nos sirve para la primera raíz. g4p(-0.6), g4p(3/2) # $g_4(x)$ nos sirve para ambas raíces, pero para la primera converge un poco más lento. # ### Experimentos # Recordar que debemos escoger $x_0$ cercano a la raíz que necesitamos. 
# $g_2(x)$ x_2 = fpi(g2, 1, 20) x_2 # $g_3(x)$ x_1 = fpi(g3, -1, 20) x_1 conv_x_1 = conv(x_1) conv_x_2 = conv(x_2) plt.figure(figsize=(12, 6)) plt.plot(conv_x_1) plt.plot(conv_x_2) plt.ylim([0.36, 0.4]) plt.xlabel("# de iteraciones") plt.ylabel("S") plt.grid(True) plt.show() # Evaluando en $r$... g3p((1-np.sqrt(5))/2), g2p((1+np.sqrt(5))/2) # $g_4(x)$ xx_1 = fpi(g4, -1, 20) xx_1 xx_2 = fpi(g4, 1, 20) xx_2 conv_xx_1 = conv(xx_1) conv_xx_2 = conv(xx_2) plt.figure(figsize=(12, 6)) plt.plot(conv_xx_1) plt.plot(conv_xx_2) plt.xlabel("# de iteraciones") plt.ylabel("S") plt.grid(True) plt.show() g4p((1-np.sqrt(5))/2), g4p((1+np.sqrt(5))/2) # ## Conclusión # # Según el $g(x)$ seleccionado vamos a obtener o no convergencia para distintos $x_0$. Incluso, para una buena elección de $g(x)$ podemos encontrar más de una raíz.
material/03_raices_1D/ejemplo_biseccion_ipf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="FhGuhbZ6M5tl" colab_type="text" # ##### Copyright 2018 The TensorFlow Authors. # + id="AwOEIRJC6Une" colab_type="code" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + id="KyPEtTqk6VdG" colab_type="code" colab={} #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] id="EIdT9iu_Z4Rb" colab_type="text" # # 回归模型:房价预测 # + [markdown] id="bBIlTPscrIT9" colab_type="text" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/basic_regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] id="AHp3M9ZmrIxj" colab_type="text" # 在一个**回归**问题中,我们希望预测一个连续的值,比如说价格或概率。而**分类**问题中,我们预测的是一个离散的标签(例如某个图片包含的是苹果还是橘子)。 # # 本 notebook 构建了一个模型来预测波士顿郊区在上世纪七十年代中期的房价中位数。为此,我们会给模型送入此郊区的一些特征数据,其中包括犯罪率、当地房产税税率等。 # # 本例使用了 `tf.keras` API,请参见[此指南](https://www.tensorflow.org/guide/keras)了解更多细节。 # + id="1rRo8oNqZ-Rj" colab_type="code" colab={} from __future__ import absolute_import, division, print_function import tensorflow as tf from tensorflow import keras import numpy as np print(tf.__version__) # + [markdown] id="F_72b0LCNbjx" colab_type="text" # ## 波士顿房价数据集 # # 可以在 TensorFlow 中直接访问此[数据集](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html)。通过以下方式下载及打乱训练集: # + id="p9kxxgzvzlyz" colab_type="code" colab={} boston_housing = keras.datasets.boston_housing (train_data, train_labels), (test_data, test_labels) = boston_housing.load_data() # Shuffle the 
training set order = np.argsort(np.random.random(train_labels.shape)) train_data = train_data[order] train_labels = train_labels[order] # + [markdown] id="PwEKwRJgsgJ6" colab_type="text" # ### 样例与特征 # # 这个数据集比我们其它的数据集要小的多:它共有 506 个样例,在分割后有 404 个样例放入训练集中,有 102 个样例作为测试集: # + id="Ujqcgkipr65P" colab_type="code" colab={} print("Training set: {}".format(train_data.shape)) # 404 examples, 13 features print("Testing set: {}".format(test_data.shape)) # 102 examples, 13 features # + [markdown] id="0LRPXE3Oz3Nq" colab_type="text" # 这个数据集包含了 13 个不同的特征: # # 1. 人均犯罪率; # 2. 占地面积超过 25,000 平方英尺以上住宅用地所占比例; # 3. 城镇平均非零售商业占地面积所占比例; # 4. Charles 河虚拟变量 (如果地段靠近 Charles 河,则值为 1,否则为 0); # 5. 一氧化氮浓度(单位为千万分之一); # 6. 每栋住所的平均房间数; # 7. 1940 年前建造的自住房占比; # 8. 到 5 个波士顿工作中心的加权距离; # 9. 辐射式高速公路的可达性指数; # 10. 每万美元全额房产税税率; # 11. 城镇学生-教师比例; # 12. 1000 * (Bk - 0.63) ** 2 函数中, Bk 为城镇黑人所占比例; # 13. 底层人口所占百分比。 # # 输入数据的每个特征维度都分别用不同的量纲进行存储。一些特征用 0-1 的比例来表示,还有一些特征用 1-12 的范围来表示,另外还有一些特征用 0-100 的范围表示等。这是因为它们来自于真实世界,在开发时,了解如何探索并清洗这些数据是开发中的一项重要技能。 # # 请注意:作为一名建模者及开发者,需要思考该如何使用这些数据,明白模型的预测会带来哪些潜在的益处或危害。一个模型可能会加大社会的不公平与偏见。一个与问题有关的特征在你手上会被用来解决不公平还是制造不公平呢?关于更多这方面的信息,请阅读:[机器学习的公平性](https://developers.google.com/machine-learning/fairness-overview/)。 # + id="8tYsm8Gs03J4" colab_type="code" colab={} print(train_data[0]) # Display sample features, notice the different scales # + [markdown] id="Q7muNf-d1-ne" colab_type="text" # 用 [pandas](https://pandas.pydata.org) 库来对数据集的前几行进行格式优美的展示: # + id="pYVyGhdyCpIM" colab_type="code" colab={} import pandas as pd column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT'] df = pd.DataFrame(train_data, columns=column_names) df.head() # + [markdown] id="wb9S7Mia2lpf" colab_type="text" # ### 标签 # # 标签是以千美元为单位的房价。(请注意这是上世纪七十年代中期的价格。) # + id="I8NwI2ND2t4Y" colab_type="code" colab={} print(train_labels[0:10]) # Display first 10 entries # + [markdown] id="mRklxK5s388r" colab_type="text" # ## 特征标准化(Normalize) # # 
推荐对使用不同量纲和范围的特征进行标准化。我们对每个特征都减去各自的均值,并除以标准差(即 z-score 标准化): # + id="ze5WQP8R1TYg" colab_type="code" colab={} # Test data is *not* used when calculating the mean and std mean = train_data.mean(axis=0) std = train_data.std(axis=0) train_data = (train_data - mean) / std test_data = (test_data - mean) / std print(train_data[0]) # First training sample, normalized # + [markdown] id="BuiClDk45eS4" colab_type="text" # 虽然在没有进行特征标准化的情况下模型**可能会**收敛,但会让训练过程更加困难,并且会导致模型更加依赖于输入数据选用的单位。 # + [markdown] id="SmjdzxKzEu1-" colab_type="text" # ## 创建模型 # # 现在开始构建模型。我们在此处使用顺序(`Sequential`)模型,用两个全连接层作为隐藏层,并定义一个输出层,输出单个的、连续的数值。模型构建的步骤包裹在一个 `build_model` 函数中,因为稍后我们还要另外构建一个模型。 # + id="c26juK7ZG8j-" colab_type="code" colab={} def build_model(): model = keras.Sequential([ keras.layers.Dense(64, activation=tf.nn.relu, input_shape=(train_data.shape[1],)), keras.layers.Dense(64, activation=tf.nn.relu), keras.layers.Dense(1) ]) optimizer = tf.train.RMSPropOptimizer(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae']) return model model = build_model() model.summary() # + [markdown] id="0-qWCsh6DlyH" colab_type="text" # ## 训练模型 # # 将模型训练 500 个迭代,并将训练与验证准确率记录在 `history` 对象中。 # + id="sD7qHCmNIOY0" colab_type="code" colab={} # Display training progress by printing a single dot for each completed epoch class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') EPOCHS = 500 # Store training stats history = model.fit(train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[PrintDot()]) # + [markdown] id="tQm3pc0FYPQB" colab_type="text" # 使用存储在 `history` 对象中的状态对模型的训练过程进行可视化。我们希望用这些数据来决定模型在准确率停止提高前,何时终止训练。 # + id="B6XriGbVPh2t" colab_type="code" colab={} import matplotlib.pyplot as plt def plot_history(history): plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [1000$]') plt.plot(history.epoch, np.array(history.history['mean_absolute_error']), label='Train Loss') 
plt.plot(history.epoch, np.array(history.history['val_mean_absolute_error']), label = 'Val loss') plt.legend() plt.ylim([0, 5]) plot_history(history) # + [markdown] id="AqsuANc11FYv" colab_type="text" # 根据这个图的显示,模型在大约 200 个 epoch 后提升就很小了。现在我们更新 `model.fit` 方法,让模型在验证评分不再提升时自动停止训练。我们将在每个迭代中使用 *callback* 来测试训练条件。如果在一系列迭代中都不再有提升,就自动停止训练。 # # 你可以阅读[此指南](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping)来了解更多有关这种 callback 的信息。 # + id="fdMZuhUgzMZ4" colab_type="code" colab={} model = build_model() # The patience parameter is the amount of epochs to check for improvement early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) history = model.fit(train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) # + [markdown] id="3St8-DmrX8P4" colab_type="text" # 这个图显示了平均误差大约在 \\$2,500 美元。这个值够好吗?并不。\$2,500 美元在部分标签仅为 $15,000 的数据中并不是微不足道的误差。 # # 让我们看看模型在测试集上表现如何: # + id="jl_yNr5n1kms" colab_type="code" colab={} [loss, mae] = model.evaluate(test_data, test_labels, verbose=0) print("Testing set Mean Abs Error: ${:7.2f}".format(mae * 1000)) # + [markdown] id="ft603OzXuEZC" colab_type="text" # ## 预测 # # 最后,对测试集中的一些数据预测其房价: # + id="Xe7RXH3N3CWU" colab_type="code" colab={} test_predictions = model.predict(test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [1000$]') plt.ylabel('Predictions [1000$]') plt.axis('equal') plt.xlim(plt.xlim()) plt.ylim(plt.ylim()) _ = plt.plot([-100, 100], [-100, 100]) # + id="f-OHX4DiXd8x" colab_type="code" colab={} error = test_predictions - test_labels plt.hist(error, bins = 50) plt.xlabel("Prediction Error [1000$]") _ = plt.ylabel("Count") # + [markdown] id="vgGQuV-yqYZH" colab_type="text" # ## 总结 # # 本 notebook 介绍了几种用于处理回归问题的技术。 # # * 均方差(MSE)是一种针对回归问题(区别于分类问题)通用的损失函数。 # * 与此类似,回归问题的评价指标也与分类问题不同。平均绝对误差(MAE)是针对回归问题的一种通用评价指标。 # * 当输入数据的特征有着不同范围的值时,每个特征都要独立进行缩放。 # * 
如果没有足够的训练数据,使用隐藏层较少的小型网络可以避免过拟合。 # * 尽早停止训练是一种很有用的阻止过拟合的技术。
tutorials/keras/basic_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # import tensorflow.compat.v1 as tf # from tensorflow.compat.v1 import keras from os import listdir import matplotlib.pyplot as plt from matplotlib.pyplot import imread, imsave, imshow import numpy as np import pandas as pd import math from scipy.signal import convolve2d from skimage import img_as_ubyte, img_as_float from skimage.transform import resize # from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split # from keras import Sequential # from keras import layers import copy from skimage.color import rgb2gray from sklearn.metrics import mean_squared_error as MSE import gc import os.path from skimage.metrics import structural_similarity as SSIM from skimage.metrics import peak_signal_noise_ratio as PSNR from scipy import interpolate import pickle gc.enable() from lib import * # + import copy # from cv2 import rotate import gc import matplotlib.pyplot as plt import math import numpy as np import os from PIL import Image from skimage.transform import radon import scipy.signal as ss from scipy import ndimage, interpolate import time tmp = [] counter = 0 def make_directory(dirname): if (not os.path.exists(dirname)): os.mkdir(dirname) return def FixImage(image): ''' Returns image with values in [0, 1] segment for normal output with possible negative elements ''' min_value = image.min() max_value = image.max() if min_value < 0: image -= min_value return image / (max_value - min_value) def images_out(class_elem): ''' Relatively normal output of _cur_image and _init_image in element of FunctionalMinimisation class ''' plt.figure(figsize=(35,35)) plt.subplot(1,2,1) plt.imshow(FixImage(class_elem._cur_image), cmap='gray') plt.subplot(1,2,2) plt.imshow(FixImage(class_elem._init_image), cmap='gray') 
DEBUG = True temp_dir = 'temp/' def save_img(class_elem, p='my_stuff.png', p_b='blurred.png', dir_to_save=temp_dir): plt.imsave(os.path.join(dir_to_save, p), class_elem._cur_image, cmap='gray') plt.imsave(os.path.join(dir_to_save, p_b), class_elem._init_image, cmap='gray') def rgb2gray(rgb): r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2] gray = 0.299 * r + 0.5870 * g + 0.1140 * b return gray def blend_images(orig, four, alpha = 0.8, colH = 10, w=1): orig_img = Image.open(orig).convert('RGBA') fourier_img = Image.open(four).convert('RGBA') orig_img = orig_img.resize(((fourier_img.size[0], fourier_img.size[1])), Image.BICUBIC) img = fourier_img A = img.getchannel('A') # Convert to HSV and save the V (Lightness) channel V = img.convert('RGB').convert('HSV').getchannel('V') # Synthesize new Hue and Saturation channels using values from colour picker colpickerH, colpickerS = colH, 255 newH=Image.new('L',img.size,(colpickerH)) newS=Image.new('L',img.size,(colpickerS)) # Recombine original V channel plus 2 synthetic ones to a 3 channel HSV image HSV = Image.merge('HSV', (newH, newS, V)) # Add original Alpha layer back in R,G,B = HSV.convert('RGB').split() RGBA = Image.merge('RGBA',(R,G,B,A)) new_img = Image.blend(orig_img, RGBA, alpha) return new_img def calculate_log(picture,threshold=0.5): log = np.log(np.abs(np.fft.fft2(Cepstrum.hamming(picture)))) fourier_abs = np.abs(log) return fourier_abs def wiener_filter(img, kernel, K=1): if np.shape(kernel)[0] == 0: return img kernel /= np.sum(kernel) dummy = np.copy(img) dummy = np.fft.fft2(dummy) kernel = np.fft.fft2(kernel, s = img.shape) kernel = np.conj(kernel) / (np.abs(kernel) ** 2 + K) dummy = dummy * kernel dummy = np.abs(np.fft.ifft2(dummy)) return dummy def get_blur_len(img, angle, weight, w=2): # img[img == img.max()] = 0 # img[img < 0.7 * img.max()] = 0 # cur_img = FixImage(img) # rotated_img = ndimage.rotate(cur_img, -angle * 180/math.pi) # blur_len = 0 # max_val = rotated_img[rotated_img.shape[0] // 2 - w : 
rotated_img.shape[0] // 2 + w].max(axis=0) # wid = rotated_img.shape[1] // 2 # for i in range(wid): # if (max_val[i] > 0.05): # blur_len = wid - i # break rotated_img = ndimage.rotate(img, -angle * 180/math.pi) rotated_img[rotated_img < 4/255 * rotated_img.max()] = 0 max_val = rotated_img[rotated_img.shape[0] // 2 - w : rotated_img.shape[0] // 2 + w].max(axis=0) r = max_val # r = radon(rotated_img, theta=[90], circle=False) # r[r > 0.6 * r.max()] = 0 r *= 1./max(r) for i in range(len(r)): if (r[i] > 0.03): blur_len = len(r) // 2 - 1 - i # if (blur_len > 2 * img.shape[0] // 5): # blur_len = 0 break global counter plt.imsave('temp/' + str(counter) + 'rotated_ceps.png', rotated_img) counter += 1 if (DEBUG): h = img.shape[0] q = h // 2 - 1 k = -math.tan(angle) b = (1 - k) * q new_blur_len = blur_len * 6 l = [] if abs(abs(angle * 180/math.pi) - 90) > 10: for old_x in range(q - new_blur_len, q + new_blur_len): old_y = round(k * old_x+b) old_y = int((old_y if old_y >= 0 else 0) if old_y <= h-1 else h-1) if (old_y <= 1 or old_y >= h-2 or old_x <= 1 or old_x >= h-2): continue for i in range(-w, w+1): for j in range(-w, w+1): x = old_x y = old_y y += i y = (y if y >= 0 else 0) if y <= h-1 else h-1 x += j x = (x if x >= 0 else 0) if x <= h-1 else h-1 if (y, x) not in l: l.append((y, x)) else: for y in range(q - new_blur_len, q + new_blur_len): for i in range(-w, w+1): if (y, q + i) not in l: l.append((y, q + i)) p = np.zeros((h, h)) for t in l: try: p[t] = weight except Exception as e: print(e) return (int(abs(blur_len)), p) else: return int(abs(blur_len)) def find_best_line(template_picture, dif=180): h = template_picture.shape[0] q = h // 2 - 1 theta = np.linspace(0., 180, dif, endpoint=False) sinogram = radon(template_picture, theta=theta, circle=True) max_values = sinogram.max(axis=0) if DEBUG: tmp.append(sinogram) return (max(max_values), theta[np.argmax(max_values)] * math.pi/180 - math.pi/2) def make_ker(ker_len, ker_angle): # h = ker_len * 2 h = ker_len ker_len = 
ker_len // 2 ker_angle = math.pi/180 * ker_angle ker = np.zeros((h, h), dtype='float') k = -math.tan(ker_angle) b = (1 - k) * ker_len if abs(abs(ker_angle * 180/math.pi) - 90) > 10: for x in range(h): y = round(k * x + b) y = int((y if y >= 0 else 0) if y <= h-1 else h-1) if (y == 0 or y == h - 1): continue ker[y, x] = 1 else: for y in range(h): ker[y, ker_len] = 1 if ker.sum() > 0: ret_value = ker/ker.sum() return ret_value else: return [] # if np.isnan(np.sum(ret_value)): # return [] # else: # return ret_value class Cepstrum: def __init__(self, picture, batch_size=256, step=0.5, dir_to_save=temp_dir): gc.enable() self.batch_size = batch_size self.step = step self.dir_to_save = dir_to_save make_directory(dir_to_save) self.x_batches = int(picture.shape[1] // int(batch_size * step) - 1) self.y_batches = int(picture.shape[0] // int(batch_size * step) - 1) self.picture = copy.deepcopy(picture) self.squared_image = [0] * self.x_batches * self.y_batches self.MainProcess() if (DEBUG): plt.imsave(os.path.join(self.dir_to_save, 'orig_img.png'), self.picture, cmap='gray') def get_square(self): pixel_step = int(self.batch_size * self.step) for y in range(self.y_batches): for x in range(self.x_batches): square = self.picture[y * pixel_step : y * pixel_step + self.batch_size, x * pixel_step : x * pixel_step + self.batch_size] self.squared_image[y * self.x_batches + x] = square orig_ceps = Cepstrum.calculate_cepstrum(square) self.orig_cepstrums.append(self.swap_quarters(orig_ceps)) self.batch_slices.append((y * pixel_step, y * pixel_step + self.batch_size, x * pixel_step, x * pixel_step + self.batch_size)) yield self.swap_quarters(Cepstrum.get_k_bit_plane(orig_ceps)) def ft_array(self): # CALCULATE CEPSTRUMS t = time.time() self.count_ft() if (DEBUG): print("Counted cepstrums: ", time.time() - t) self.count_angles() if (DEBUG): print("Counted angles: ", time.time() - t) self.count_lengths() if (DEBUG): print("Counted lengths: ", time.time() - t) self.make_kernels() if (DEBUG): 
print("Counted kernels: ", time.time() - t) self.weight = self.weight.reshape((self.y_batches, self.x_batches)) self.angle = self.angle.reshape((self.y_batches, self.x_batches)) self.blur_len = self.blur_len.reshape((self.y_batches, self.x_batches)) self.batch_slices = np.array(self.batch_slices).reshape((self.y_batches, self.x_batches, len(self.batch_slices[0]))) if (np.max(self.blur_len) == 0) : self.angle_value = 0 print("Unable to calculate blur lengths") return self.blur_len_value, self.angle_value = self.get_common_ker_len_angle() self.kernel_image = make_ker(self.blur_len_value, self.angle_value) self.squared_image = np.reshape(self.squared_image, (self.y_batches, self.x_batches, self.batch_size, self.batch_size)) if (DEBUG): self.save_vector_field() print("Total time: ", time.time() - t) def MainProcess(self): self.ft_array() try: temp2 = [ 0 ] * self.y_squares for y in range(self.y_squares): temp2[y] = np.hstack(self.restored_image[y, :, :, :]) self.restored_image_full = np.vstack(temp2) except AttributeError as error: return def hamming(picture): hm_len = picture.shape[0] bw2d = np.outer(ss.hamming(hm_len), np.ones(hm_len)) bw2d = bw2d * bw2d.T return picture * bw2d def calculate_cepstrum(picture, threshold=0.5): log = np.log(1 + np.abs(np.fft.fft2(Cepstrum.hamming(picture)))) fourier_abs = np.abs(np.fft.ifft2(log)) # fourier_abs[fourier_abs >= threshold * fourier_abs.max()] = 0 # fourier_abs[fourier_abs >= threshold * fourier_abs.max()] = 0 return fourier_abs def get_k_bit_plane(img, k_list = [4, 5], width=8): lst = [] img = (FixImage(img) * 255).astype(int) for i in range(img.shape[0]): for j in range(img.shape[1]): lst.append(np.binary_repr(img[i][j], width=width)) # width = no. 
of bits out_img = np.zeros_like(img) for k in k_list: assert(k <= width) out_img += (np.array([int(i[k]) for i in lst],dtype = np.uint8) * 2**(width-k)).reshape(img.shape[0],img.shape[1]) return out_img def swap_quarters(self, picture): out_pict = copy.deepcopy(picture) batch_size = picture.shape[0] temp_pict = copy.deepcopy(out_pict[: batch_size//2, : batch_size//2]) out_pict[: batch_size//2, : batch_size//2] = out_pict[batch_size//2 :, batch_size//2 :] out_pict[batch_size//2 :, batch_size//2 :] = temp_pict temp_pict = copy.deepcopy(out_pict[: batch_size//2, batch_size//2 :]) out_pict[: batch_size//2, batch_size//2 :] = out_pict[batch_size//2 :, : batch_size//2] out_pict[batch_size//2 :, : batch_size//2] = temp_pict[:] return out_pict def get_common_ker_len_angle(self): w = self.weight / self.weight.sum() return (int(np.ceil(np.multiply(w, self.blur_len).sum())), np.median(self.angle)) def count_ft(self): self.orig_cepstrums = list() self.batch_slices = [] self.cepstrum_picture = np.array(list(self.get_square())) self.conc_cepstrum_picture = self.cepstrum_picture.reshape((self.y_batches, self.x_batches, self.batch_size, self.batch_size)) temp = [ 0 ] * self.y_batches for y in range(self.y_batches): temp[y] = np.hstack(self.conc_cepstrum_picture[y, :, :, :]) self.conc_cepstrum_picture = np.vstack(temp) plt.imsave(os.path.join(self.dir_to_save, 'big_img.png'), self.conc_cepstrum_picture, cmap='gray') def count_angles(self): self.weight = np.ndarray((self.y_batches * self.x_batches), dtype='float') self.angle = np.ndarray((self.y_batches * self.x_batches), dtype='float') if (DEBUG): self.lines_img = np.zeros_like(self.cepstrum_picture, dtype=float) for idx, q in enumerate(self.cepstrum_picture): self.weight[idx], self.angle[idx] = find_best_line(q) self.weight /= self.weight.max() def count_lengths(self): self.blur_len = np.ndarray((self.y_batches * self.x_batches), dtype='int') for idx, q in enumerate(self.orig_cepstrums): if (DEBUG): self.blur_len[idx], 
self.lines_img[idx] = get_blur_len(q, self.angle[idx], self.weight[idx]) self.conc_lines_img = self.lines_img.reshape((self.y_batches, self.x_batches, self.batch_size, self.batch_size)) temp = [ 0 ] * self.y_batches for y in range(self.y_batches): temp[y] = np.hstack(self.conc_lines_img[y, :, :, :]) self.conc_lines_img = np.vstack(temp) plt.imsave(os.path.join(self.dir_to_save, 'lines_img.png'), self.conc_lines_img, cmap='gray') else: self.blur_len[idx] = get_blur_len(q, self.angle[idx], self.weight[idx]) def make_kernels(self): self.kernels = [0] * self.y_batches * self.x_batches for idx, q in enumerate(self.cepstrum_picture): self.kernels[idx] = (self.blur_len[idx], self.angle[idx]) def make_pixel_map(self): self.pixel_map = [0] * self.picture.shape[0] for i in range(self.picture.shape[0]): self.pixel_map[i] = [[]] * self.picture.shape[1] for yb in range(self.y_batches): for xb in range(self.x_batches): cur_slice = self.batch_slices[yb][xb] for y in range(cur_slice[0], cur_slice[1]): for x in range(cur_slice[2], cur_slice[3]): self.pixel_map[y][x].append((self.blur_len[yb][xb], self.angle[yb][xb])) for y in range(self.picture.shape[0]): for x in range(self.picture.shape[1]): lengths = [a[0] for a in self.pixel_map[y][x]] angles = [a[1] for a in self.pixel_map[y][x]] self.pixel_map[y][x] = (np.mean(lengths) if len(lengths) > 0 else 0, np.mean(angles) if len(angles) > 0 else 0) def save_vector_field(self): self.make_pixel_map() # s = self.angle.shape s = self.picture.shape x = np.zeros(s[0] * s[1]) y = np.zeros(s[0] * s[1]) u = np.zeros(s[0] * s[1]) v = np.zeros(s[0] * s[1]) for idx0 in range(s[0]): for idx1 in range(s[1]): cur_idx = idx0 * s[1] + idx1 y[cur_idx] = s[0] - 1 - idx0 x[cur_idx] = idx1 # u[cur_idx] = self.blur_len[idx0][idx1] * np.cos(self.angle[idx0][idx1]) # v[cur_idx] = self.blur_len[idx0][idx1] * np.sin(self.angle[idx0][idx1]) u[cur_idx] = self.pixel_map[idx0][idx1][0] * np.cos(self.pixel_map[idx0][idx1][1]) v[cur_idx] = 
self.pixel_map[idx0][idx1][0] * np.sin(self.pixel_map[idx0][idx1][1]) k = 10 yy = np.linspace(0, s[0] - 1, k) xx = np.linspace(0, s[1] - 1, k) xx, yy = np.meshgrid(xx, yy) points = np.transpose(np.vstack((x, y))) u_interp = interpolate.griddata(points, u, (xx, yy), method='cubic') v_interp = interpolate.griddata(points, v, (xx, yy), method='cubic') m = s[0] / s[1] l = 15 plt.figure(figsize=(15, 15 * m)) plt.quiver(xx, yy, u_interp, v_interp) plt.savefig(os.path.join(self.dir_to_save, 'vector_fielld.png')) # - with open('params.pickle', 'rb') as handle: pars = pickle.load(handle) print(len(pars)) new_params = [] img_dir = "/home/vados/Stuff/Images/generated/april10/blurred/" imgs = sorted(listdir(img_dir)) for idx, fname in enumerate(imgs): tmp = [] img = img_dir + fname img = plt.imread(img) counter = 0 c = Cepstrum(rgb2gray(img)) new_params.append((c.blur_len_value, c.angle_value, idx)) img = '/home/vados/Stuff/Images/mb.jpg' img = plt.imread(img) tmp = [] counter = 0 c = Cepstrum(rgb2gray(img), batch_size=256, step=0.5) new_params_processed = [(a[0], a[1] * 180 / math.pi) for a in new_params] new_params_processed = [(a[0], a[1] + 180 if a[1] < 0 else a[1]) for a in new_params_processed] angles_dif = [] len_dif = [] for idx in range(len(new_params_processed)): angles_dif.append(np.abs(new_params_processed[idx][1] - pars[idx][1])) len_dif.append(new_params_processed[idx][0] - pars[idx][0]) plt.hist(angles_dif) plt.savefig('temp/results/angle_hist.png') pars new_params_processed # + s = c.angle.shape x = np.zeros(s[0] * s[1]) y = np.zeros(s[0] * s[1]) u = np.zeros(s[0] * s[1]) v = np.zeros(s[0] * s[1]) for idx0 in range(s[0]): for idx1 in range(s[1]): cur_idx = idx0 * s[1] + idx1 print(cur_idx) y[cur_idx] = s[0] - 1 - idx0 x[cur_idx] = idx1 u[cur_idx] = c.blur_len[idx0][idx1] * np.cos(c.angle[idx0][idx1]) v[cur_idx] = -c.blur_len[idx0][idx1] * np.sin(c.angle[idx0][idx1]) k = 10 yy = np.linspace(0, s[0] - 1, k) xx = np.linspace(0, s[1] - 1, k) xx, yy = 
np.meshgrid(xx, yy) points = np.transpose(np.vstack((x, y))) u_interp = interpolate.griddata(points, u, (xx, yy), method='cubic') v_interp = interpolate.griddata(points, v, (xx, yy), method='cubic') # plt.figure(figsize=(s[0]*2,s[1]*2)) # plt.figure(figsize=) plt.quiver(xx, yy, u_interp, v_interp) plt.savefig('temp/vector_fielld.png') # -
np_deconv copy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Toy (counter-)example for anomaly decomposition
#
# This is a carefully crafted example to demonstrate two possibly counter-intuitive results in anomaly decomposition study:
# - ROC AUC < 0.5
# - AUC for 'all good' is lower than for some other 'subsystems'.

# +
import numpy as np

# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# -

# Introducing 3 real subsystems and generating labels for them (1 - good, 0 - anomaly).

# +
### Good 80% of the time
rd1 = np.random.binomial(1, p=0.8, size=1000)

### Good 80% of the time
rd2 = np.random.binomial(1, p=0.8, size=1000)

### This detector is anti-correlated with the second detector
rd3 = np.where(rd2 == 1, np.random.binomial(1, p=0.7, size=1000), 1)

### 1&2 Good
rd12 = rd1 * rd2

### 1&3 Good
rd13 = rd1 * rd3

### 2&3 good
rd23 = rd2 * rd3

### All good
rd123 = rd12 * rd3
# -

np.mean(rd123)

# $\mathrm{score}_i$ variables represent classifier's prediction for each of the subdetectors.
# By construction $\mathrm{score}_i$ has high discriminative power against $i$-th detector.

### just some noise introduced into the true labels.
score1 = np.random.normal(scale=0.2, size=1000) + rd1
score2 = np.random.normal(scale=0.3, size=1000) + rd2
score3 = np.random.normal(scale=0.4, size=1000) + rd3

from sklearn.metrics import roc_auc_score

# +
# ROC AUC of every score against every (single or combined) true state.
# np.empty replaces the np.ndarray(...) constructor call: same uninitialized
# buffer, but the documented/idiomatic way to get one.
roc_aucs = np.empty(shape=(3, 7))

for i, score in enumerate([score1, score2, score3]):
    for j, rd in enumerate([rd1, rd2, rd3, rd12, rd13, rd23, rd123]):
        roc_aucs[i, j] = roc_auc_score(rd, score)
# -

plt.figure(figsize=(12, 6))
plt.imshow(roc_aucs)
plt.yticks(np.arange(3), ['score 1', 'score 2', 'score 3'], fontsize=16)
plt.xticks(
    np.arange(7),
    ['1 good', '2 good', '3 good', '1 & 2 good', '1 & 3 good', '2 & 3 good', 'all good'],
    fontsize=14,
    rotation=45
)
plt.title('ROC AUC')
plt.xlabel('True state of the subsystems', fontsize=18)
plt.ylabel('Predicted state', fontsize=18)
plt.colorbar()

# Observations:
#
# - $\mathrm{score}_i$, indeed, have high ROC AUC against corresponding true states;
# - $\mathrm{score}_2$ has ROC AUC < 0.5 against subsystem 3;
# - the same for $\mathrm{score}_3$ vs subsystem 2;
# - 'all good' has relatively low score in spite of each score having high ROC AUC against corresponding subsystem.

# +
def _safe_ratio(num, den):
    """Return num / den, or NaN for a degenerate confusion-matrix split (den == 0)."""
    return num / den if den else float('nan')


# Threshold the scores at 0.5 and collect precision / recall / specificity /
# negative predictive value for every (score, true state) pair.
# (A previously computed-but-unused per-pair ROC AUC was dropped; the full
# ROC AUC grid is already available in `roc_aucs` above.)
metrics = np.empty(shape=(3, 7, 4))

for i, score in enumerate([score1, score2, score3]):
    pred = score > 0.5
    for j, rd in enumerate([rd1, rd2, rd3, rd12, rd13, rd23, rd123]):
        tp = float(np.sum((pred == 1) & (rd == 1)))
        fp = float(np.sum((pred == 1) & (rd == 0)))
        tn = float(np.sum((pred == 0) & (rd == 0)))
        fn = float(np.sum((pred == 0) & (rd == 1)))

        precision = _safe_ratio(tp, tp + fp)
        recall = _safe_ratio(tp, tp + fn)
        spc = _safe_ratio(tn, tn + fp)
        p = _safe_ratio(tn, tn + fn)

        metrics[i, j] = (precision, recall, spc, p)
# -

for j, rd in enumerate(['1', '2', '3', '1&2', '1&3', '2&3', 'all good']):
    plt.figure()
    plt.imshow(metrics[:, j, :], vmin=0, vmax=1)
    plt.title('subsystems: %s' % rd)
    plt.xticks(np.arange(4), ['precision', 'recall', 'SPC', 'negative\npredictive value'])
    plt.yticks(np.arange(3), ['score 1', 'score 2', 'score 3'], fontsize=16)
    plt.colorbar()
    plt.show()
notebooks/Decomposition_counter-examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tuple # ### Tuple Creation # + #Example t1=(1,2,3,4,5,6) print(t1) print(type(t1)) t2=1,2,3,4,5,6 print(t2) print(type(t2)) print("\n") #Single element tuple x1=1 print(x1) print(type(x1)) x1=1, print(x1) print(type(x1)) # - # ### Tuple Manipulation # #### Tuple are immutable t1=(5,4,2,6,7) print(t1) t1[2]=4 # #### Tuple elements can be accessed by indexing and slicing methods # + t2=(5,4,2,6,7,8,9) print(t2[2]) print(t2[-1]) print(t2[2:5]) print(t2[2:5:2]) print(t2[:2]) print(t2[2:]) # - # ### Tuple Unpacking # #### Unpacking allows assigning multiple values at a time # + t1=(5,'b',4,2,('x',6)) i1,i2,i3,i4,i5=t1 print(i1,i2,i3,i5) (i6,i7)=i5 print(i6,i7) # - # #### Tuples unpacking also applicable to loops t1=((2,3),(4,5),(6,7)) for x,y in t1: print(x,y) # #### Below code generates error for x,y in (2,3): print(x,y) # ### Nested Tuples and Zip function # #### Tuples can be nested also # + t1=(5,'b',4,2,('x',6)) print(t1) for i in t1: print(i) print(type(i)) # - # #### Zip() takes two list inputs and combine them into tuple object # + a=[30,40,50,60] #Extra element is ignored b=['a','b','c'] c=zip(a,b) print(c) #prints the address of zipped tuple print(type(c)) for i in c: print(i) # - # # Assignment # # ## (Tuple sort by second element) WAP to get a list, sorted in increasing order by the last element in each tuple from a given list of non empty tuples # # ### Sample list [(2,5),(1,2),(4,4),(2,3),(2,1)] # # ### Expected result [(2,1),(1,2),(2,3),(4,4),(2,5)] # + #Other assignments #swap two numbers #case study of zoo #concatenate two tuples # + a=[(2,5),(1,2),(4,4),(2,3),(2,1)] for j in range(len(a)-1): for i in range(len(a)-1): if a[i][1]>a[i+1][1]: temp=a[i] a[i]=a[i+1] a[i+1]=temp print(a) print("By method 2") temp1=[] temp2=[] for (x,y) in 
a: temp1.append((y,x)) temp1.sort() #In below loop, each Tuple is unpacked as two individual integers x and y, which are then passsed as a for (x,y) in temp1: temp2.append((y,x)) del(temp1) del(a) #Note original list of tuples deleted print(temp2) # - # # SET # + # Doesnt support sort operation # Set is unordered list of elements identified by curly braces # It is mutable like List. # It can contain only unique elements x={'xyz','abc','abc','sadsad'} print(x) print(type(x)) cset={11,11,12} print(cset) # - # ### Q: Eliminate duplicates from list # + L=[1,2,3,3,4,5,6,4] print(L) s=set(L) L=list(s) print(L) # - # #### Set doesn't support indexing # + myset={'Bananas','Apples','Oranges'} print(myset[0]) #Note ERROR # - # ### Set class methods # + s2=set() print(s2) s1=set((2,3,4)) print(s1) s1={2,3,4} print(s1) print("\n") print("Use of s1.clear()") s1.clear() print(s1) # s={}- Not possible to create empty set! # - # #### You can add single element, but not multiple elements, However you can use update to add more than 2 elements from a list # + s1={2,3,4} print(s1) s1.add(20) print(s1) # + # We can also add one tuple at a time s1={2,3,4} print(s1) s1.add((20,30)) print(s1) # + s1={2,3,4} print(s1) s1.add(20,30) print(s1) #Cannot add multiple elements # + s1={2,3,4} print(s1) s1.update([20,30]) print(s1) # + #Trying to add tuple s1={2,3,4} print(s1) s1.update([20,30,(40,5)]) print(s1) # - # #### Discard- Removes element and Doesn't return error if element is not present in set # + s1={2,3,4} print(s1) s1.discard(2) print(s1) # + s1={2,3,4} print(s1) s1.discard(6) print(s1) # - # #### Remove- Removes element and returns error if element is not present in set # + s1={2,3,4} print(s1) s1.remove(2) print(s1) # - # #### Pop- Removes random element # + s1={2,3,4,7,5,3,6,23,43,9} print(s1) s1.pop() print(s1) s1.pop() print(s1) s1.pop() print(s1) print("Popped element in next set is",s1.pop()) print(s1) # + #RUN help(set) to see all functions # - # ### Boolean Operation on 
# ### Boolean Operation on Sets

aset = {11, 22, 33}
best = {12, 23, 33}

# #### Union

print(aset | best)

# #### Intersection

# print(aset.intersection(best))  # alternative way
# NOTE: '&' is the intersection operator; the original cell printed the
# union ('|') here, contradicting the heading.
print(aset & best)

# #### Difference

print(aset - best)

# #### Symmetric Difference

# union(a,b) - intersection(a,b)
print(aset ^ best)

# # Dictionary
# * Pairs of keys and values are specified in a dictionary by using the notation
# <br/> d={key1:value1,key2:value2}
# * Unordered
# * Class dict
#
# #### Creation Methods

# +
d1 = {}  # create empty dictionary - Use of curly brackets creates blank dictionary
print("method1 ", d1)

d2 = dict()  # Create dictionary using dict function
print("method2 ", d2)

d3 = {3}  # NOTE: a non-empty brace literal without ':' is a set, not a dict
print("method3 ", d3)

d4 = {'a': 3, 'b': 4}
print("method4 ", d4)
# -

# #### List of tuple, List of list

# +
d1 = dict([('a', 3), ('b', 5)])  # List of tuple
print(d1)

d1 = dict([['a', 3], ['b', 5]])  # List of list
print(d1)

# +
# dict() accepts any iterable of key/value pairs, so a tuple of tuples
# works as well, e.g. dict((('a', 3), ('b', 5))).
# -

# #### New key:value pair

# +
d = {1: 'a', 2: 'b'}
print(d)

print("Changing value of particular key")
d[1] = 'g'
print(d)
# -

# #### Key has unique value

d = {1: 'a', 1: 'b', 1: 'c'}
print(d)  # Observed behaviour: latest pair of the repeated key:value is used

# ### Key has to be immutable data type only

# #### Tuple can be key

d = {(1, 3): 'a', 2: 'b'}
print(d)

# #### List cannot be key

# d={[1,3]:'a',2:'b'}  # would raise TypeError: unhashable type: 'list'
print(d)

# ### Dictionary Basic Methods

# #### Len() [Number of pairs in dictionary] and del()

# +
d = {1: 'a', 2: 'b'}
print(d)

x = len(d)
print(x)

del d[1]
print(d)

del d  # Deletes complete dictionary
# -

# #### Other Operations

# ##### keys()

d = {1: 'a', 2: 'b', 3: 'c'}
k = d.keys()
print(k)
print(type(k))
for i in k:
    print(i)

# ##### values()

d = {1: 'a', 2: 'b', 3: 'c'}
k = d.values()
print(k)
print(type(k))
for i in k:
    print(i)

# ##### items()

# +
d = {1: 'a', 2: 'b', 3: 'c'}
k = d.items()
print(k)  # Returned as Tuples
print(type(k))
for i in k:
    print(i)
# -

# #### Deep and Shallow Copy

# Shallow copy: 'k = d' only binds a second name to the same dict object,
# so mutating k is visible through d as well.
d = {1: 'a', 2: 'b', 3: 'c'}
k = d
print("k", k)
print("d", d)
k[1] = 'c'
print("\n")
print("k", k)
print("d", d)

# Copy: d.copy() creates an independent dict, so d is unaffected by changes to k
d = {1: 'a', 2: 'b', 3: 'c'}
k = d.copy()
print("k", k)
print("d", d)
k[1] = 'c'
print("\n")
print("k", k)
print("d", d)
print("Note: d doesnt change when we change k")

# +
# dict(d) also produces an independent (shallow) copy.
# (The original cell mutated and printed k instead of d1, so it never
# exercised the dict(d) copy it had just created.)
d = {1: 'a', 2: 'b', 3: 'c'}
d1 = dict(d)
print("d1", d1)
print("d", d)
d1[1] = 'c'
print("\n")
print("d1", d1)
print("d", d)
# -

# ##### Copying keys only

# Copying only keys from old dictionary to new dictionary (values become None)
d = {1: 'a', 2: 'b', 3: 'c'}
d1 = dict.fromkeys(d)
print(d1)

# ##### Copying keys with new value (passed as parameter to function)

# Copying keys with new value (passed as parameter to function)
d = {1: 'a', 2: 'b', 3: 'c'}
d1 = dict.fromkeys(d, 10)
print(d1)

# +
# A function with a bare 'return' (or no return) yields None.
def test():
    return

print(test())
print(type(test()))
# -
ITWS 2/4_Data Structure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import tensorflow as tf m1 = tf.constant([[2,2]]) m2 = tf.constant([[3],[3]]) product = tf.multiply(m1, m2) # how to use session # method 1 sess = tf.Session() sess.run(product) # method 2 with tf.Session() as sess: print sess.run(product) # how to use variable state = tf.Variable(0, name="counter") print(state.name) one = tf.constant(1) new_value = tf.add(state, one) update = tf.assign(state, new_value) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for i in range(5): sess.run(update) print(sess.run(state)) # how to use placeholder input1 = tf.placeholder(tf.float32) input2 = tf.placeholder(tf.float32) output = tf.multiply(input1, input2) with tf.Session() as sess: print(sess.run(output, feed_dict={'input1': 1, 'input2': 10}))
tf_mofan/2_tf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ISubpr_SSsiM" # ##### Copyright 2020 The TensorFlow Authors. # # + cellView="form" id="3jTMb1dySr3V" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="6DWfyNThSziV" # # 模块、层和模型简介 # # <table class="tfo-notebook-buttons" align="left"> # <td><a target="_blank" href="https://tensorflow.google.cn/guide/intro_to_modules"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a></td> # <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/intro_to_modules.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td> # <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/intro_to_modules.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 Github 上查看源代码</a></td> # <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/guide/intro_to_modules.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td> # </table> # + [markdown] id="v0DdlfacAdTZ" # 要进行 TensorFlow 机器学习,您可能需要定义、保存和恢复模型。 # # 抽象地说,模型是: # # - 一个在张量上进行某些计算的函数(**前向传递**) # - 一些可以更新以响应训练的变量 # # 在本指南中,您将深入学习 Keras,了解如何定义 TensorFlow 
模型。本文着眼于 TensorFlow 如何收集变量和模型,以及如何保存和恢复它们。 # # 注:如果您想立即开始使用 Keras,请参阅 [Keras 指南集合](./keras/)。 # # + [markdown] id="VSa6ayJmfZxZ" # ## 设置 # + id="goZwOXp_xyQj" import tensorflow as tf from datetime import datetime # %load_ext tensorboard # + [markdown] id="yt5HEbsYAbw1" # ## 在 TensorFlow 中定义模型和层 # # 大多数模型都由层组成。层是具有已知数学结构的函数,可以重复使用并且具有可训练的变量。在 TensorFlow 中,层和模型的大多数高级实现(例如 Keras 或 Sonnet)都在以下同一个基础类上构建:`tf.Module`。 # # 下面是一个在标量张量上运行的非常简单的 `tf.Module` 示例: # # + id="alhYPVEtAiSy" class SimpleModule(tf.Module): def __init__(self, name=None): super().__init__(name=name) self.a_variable = tf.Variable(5.0, name="train_me") self.non_trainable_variable = tf.Variable(5.0, trainable=False, name="do_not_train_me") def __call__(self, x): return self.a_variable * x + self.non_trainable_variable simple_module = SimpleModule(name="simple") simple_module(tf.constant(5.0)) # + [markdown] id="JwMc_zu5Ant8" # 模块和引申而来的层是“对象”的深度学习术语:它们具有内部状态以及使用该状态的方法。 # # `__call__` 并无特殊之处,只是其行为与 [Python 可调用对象](https://stackoverflow.com/questions/111234/what-is-a-callable)类似;您可以使用任何函数来调用模型。 # # 您可以出于任何原因开启和关闭变量的可训练性,包括在微调过程中冻结层和变量。 # # 注:tf.Module 是 tf.keras.layers.Layer 和 tf.keras.Model 的基类,因此您在此处看到的一切内容也适用于 Keras。出于历史兼容性的原因,Keras 层不会从模块收集变量,因此您的模型应仅使用模块或仅使用 Keras 层。不过,下面给出的用于检查变量的方法相同在这两种情况下相同。 # # 通过将 `tf.Module` 子类化,将自动收集分配给该对象属性的任何 `tf.Variable` 或 `tf.Module` 实例。这样,您可以保存和加载变量,还可以创建 `tf.Module` 的集合。 # + id="CyzYy4A_CbVf" # All trainable variables print("trainable variables:", simple_module.trainable_variables) # Every variable print("all variables:", simple_module.variables) # + [markdown] id="nuSFrRUNCaaW" # 下面是一个由模块组成的两层线性层模型的示例。 # # 首先是一个密集(线性)层: # + id="Efb2p2bzAn-V" class Dense(tf.Module): def __init__(self, in_features, out_features, name=None): super().__init__(name=name) self.w = tf.Variable( tf.random.normal([in_features, out_features]), name='w') self.b = tf.Variable(tf.zeros([out_features]), name='b') def __call__(self, x): y = tf.matmul(x, self.w) + self.b return tf.nn.relu(y) # + 
[markdown] id="bAhMuC-UpnhX" # 随后是完整的模型,此模型将创建并应用两个层实例。 # + id="QQ7qQf-DFw74" class SequentialModule(tf.Module): def __init__(self, name=None): super().__init__(name=name) self.dense_1 = Dense(in_features=3, out_features=3) self.dense_2 = Dense(in_features=3, out_features=2) def __call__(self, x): x = self.dense_1(x) return self.dense_2(x) # You have made a model! my_model = SequentialModule(name="the_model") # Call it, with random results print("Model results:", my_model(tf.constant([[2.0, 2.0, 2.0]]))) # + [markdown] id="d1oUzasJHHXf" # `tf.Module` 实例将以递归方式自动收集分配给它的任何 `tf.Variable` 或 `tf.Module` 实例。这样,您可以使用单个模型实例管理 `tf.Module` 的集合,并保存和加载整个模型。 # + id="JLFA5_PEGb6C" print("Submodules:", my_model.submodules) # + id="6lzoB8pcRN12" for var in my_model.variables: print(var, "\n") # + [markdown] id="hoaxL3zzm0vK" # ### 等待创建变量 # # 您在这里可能已经注意到,必须定义层的输入和输出大小。这样,`w` 变量才会具有已知的形状并且可被分配。 # # 通过将变量创建推迟到第一次使用特定输入形状调用模块时,您将无需预先指定输入大小。 # + id="XsGCLFXlnPum" class FlexibleDenseModule(tf.Module): # Note: No need for `in+features` def __init__(self, out_features, name=None): super().__init__(name=name) self.is_built = False self.out_features = out_features def __call__(self, x): # Create variables on first call. 
if not self.is_built: self.w = tf.Variable( tf.random.normal([x.shape[-1], self.out_features]), name='w') self.b = tf.Variable(tf.zeros([self.out_features]), name='b') self.is_built = True y = tf.matmul(x, self.w) + self.b return tf.nn.relu(y) # + id="8bjOWax9LOkP" # Used in a module class MySequentialModule(tf.Module): def __init__(self, name=None): super().__init__(name=name) self.dense_1 = FlexibleDenseModule(out_features=3) self.dense_2 = FlexibleDenseModule(out_features=2) def __call__(self, x): x = self.dense_1(x) return self.dense_2(x) my_model = MySequentialModule(name="the_model") print("Model results:", my_model(tf.constant([[2.0, 2.0, 2.0]]))) # + [markdown] id="49JfbhVrpOLH" # 这种灵活性是 TensorFlow 层通常仅需要指定其输出的形状(例如在 `tf.keras.layers.Dense` 中),而无需指定输入和输出大小的原因。 # + [markdown] id="JOLVVBT8J_dl" # ## 保存权重 # # 您可以将 `tf.Module` 保存为[检查点](./checkpoint.ipynb)和 [SavedModel](./saved_model.ipynb)。 # # 检查点即是权重(即模块及其子模块内部的变量集的值)。 # + id="pHXKRDk7OLHA" chkp_path = "my_checkpoint" checkpoint = tf.train.Checkpoint(model=my_model) checkpoint.write(chkp_path) checkpoint.write(chkp_path) # + [markdown] id="WXOPMBR4T4ZR" # 检查点由两种文件组成---数据本身以及元数据的索引文件。索引文件跟踪实际保存的内容和检查点的编号,而检查点数据包含变量值及其特性查找路径。 # + id="jBV3fprlTWqJ" # !ls my_checkpoint* # + [markdown] id="CowCuBTvXgUu" # 您可以查看检查点内部,以确保整个变量集合已由包含这些变量的 Python 对象保存并排序。 # + id="o2QAdfpvS8tB" tf.train.list_variables(chkp_path) # + [markdown] id="4eGaNiQWcK4j" # 在分布式(多机)训练期间,可以将它们分片,这就是要对它们进行编号(例如 '00000-of-00001')的原因。不过,在本例中,只有一个分片。 # # 重新加载模型时,将重写 Python 对象中的值。 # + id="UV8rdDzcwVVg" new_model = MySequentialModule() new_checkpoint = tf.train.Checkpoint(model=new_model) new_checkpoint.restore("my_checkpoint") # Should be the same result as above new_model(tf.constant([[2.0, 2.0, 2.0]])) # + [markdown] id="BnPwDRwamdfq" # 注:由于检查点处于长时间训练工作流的核心位置,因此 `tf.checkpoint.CheckpointManager` 是一个可使检查点管理变得更简单的辅助类。有关更多详细信息,请参阅[指南](./checkpoint.ipynb)。 # + [markdown] id="pSZebVuWxDXu" # ## 保存函数 # # TensorFlow 可以在不使用原始 Python 对象的情况下运行模型,如 [TensorFlow 
Serving](https://tensorflow.org/tfx) 和 [TensorFlow Lite](https://tensorflow.org/lite) 中所见,甚至当您从 [TensorFlow Hub](https://tensorflow.org/hub) 下载经过训练的模型时也是如此。 # # TensorFlow 需要了解如何执行 Python 中描述的计算,但**不需要原始代码**。为此,您可以创建一个**计算图**,如上一篇[指南](./intro_to_graphs.ipynb)中所述。 # # 此计算图中包含实现函数的*运算*。 # # 您可以通过添加 `@tf.function` 装饰器在上面的模型中定义计算图,以指示此代码应作为计算图运行。 # + id="WQTvkapUh7lk" class MySequentialModule(tf.Module): def __init__(self, name=None): super().__init__(name=name) self.dense_1 = Dense(in_features=3, out_features=3) self.dense_2 = Dense(in_features=3, out_features=2) @tf.function def __call__(self, x): x = self.dense_1(x) return self.dense_2(x) # You have made a model with a graph! my_model = MySequentialModule(name="the_model") # + [markdown] id="hW66YXBziLo9" # 您创建的模块的工作原理与之前完全相同。传递给函数的每个唯一签名都会创建一个单独的计算图。有关详细信息,请参阅[计算图指南](./intro_to_graphs.ipynb)。 # + id="H5zUfti3iR52" print(my_model([[2.0, 2.0, 2.0]])) print(my_model([[[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]])) # + [markdown] id="lbGlU1kgyDo7" # 您可以通过在 TensorBoard 摘要中跟踪计算图来将其可视化。 # + id="zmy-T67zhp-S" # Set up logging. stamp = datetime.now().strftime("%Y%m%d-%H%M%S") logdir = "logs/func/%s" % stamp writer = tf.summary.create_file_writer(logdir) # Create a new model to get a fresh trace # Otherwise the summary will not see the graph. new_model = MySequentialModule() # Bracket the function call with # tf.summary.trace_on() and tf.summary.trace_export(). tf.summary.trace_on(graph=True, profiler=True) # Call only one tf.function when tracing. 
z = print(new_model(tf.constant([[2.0, 2.0, 2.0]]))) with writer.as_default(): tf.summary.trace_export( name="my_func_trace", step=0, profiler_outdir=logdir) # + [markdown] id="gz4lwNZ9hR79" # 启动 Tensorboard 以查看生成的跟踪: # + id="V4MXDbgBnkJu" #docs_infra: no_execute # %tensorboard --logdir logs/func # + [markdown] id="Gjattu0AhYUl" # ![A screenshot of the graph, in tensorboard](https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/images/tensorboard_graph.png?raw=true) # + [markdown] id="SQu3TVZecmL7" # ### 创建 `SavedModel` # # 共享经过完全训练的模型的推荐方式是使用 `SavedModel`。`SavedModel` 包含函数集合与权重集合。 # # 您可以保存刚刚创建的模型。 # + id="Awv_Tw__WK7a" tf.saved_model.save(my_model, "the_saved_model") # + id="SXv3mEKsefGj" # Inspect the in the directory # !ls -l the_saved_model # + id="vQQ3hEvHYdoR" # The variables/ directory contains a checkpoint of the variables # !ls -l the_saved_model/variables # + [markdown] id="xBqPop7ZesBU" # `saved_model.pb` 文件是一个描述函数式 `tf.Graph` 的[协议缓冲区](https://developers.google.com/protocol-buffers)。 # # 可以从此表示加载模型和层,而无需实际构建创建该表示的类的实例。在您没有(或不需要)Python 解释器(例如大规模应用或在边缘设备上),或者在原始 Python 代码不可用或不实用的情况下,这样做十分理想。 # # 您可以将模型作为新对象加载: # + id="zRFcA5wIefv4" new_model = tf.saved_model.load("the_saved_model") # + [markdown] id="-9EF3mT7i3qN" # 通过加载已保存模型创建的 `new_model` 是 TensorFlow 内部的用户对象,无需任何类知识。它不是 `SequentialModule` 类型的对象。 # + id="EC_eQj7yi54G" isinstance(new_model, SequentialModule) # + [markdown] id="-OrOX1zxiyhR" # 此新模型​​适用于已定义的输入签名。您不能向以这种方式恢复的模型添加更多签名。 # + id="_23BYYBWfKnc" print(my_model([[2.0, 2.0, 2.0]])) print(my_model([[[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]])) # + [markdown] id="qSFhoMtTjSR6" # 因此,利用 `SavedModel`,您可以使用 `tf.Module` 保存 TensorFlow 权重和计算图,随后再次加载它们。 # + [markdown] id="Rb9IdN7hlUZK" # ## Keras 模型和层 # # 请注意,到目前为止,还没有提到 Keras。您可以在 `tf.Module` 上构建自己的高级 API,而我们已经拥有这些 API。 # # 在本部分中,您将研究 Keras 如何使用 `tf.Module`。可在 [Keras 指南](keras/sequential_model.ipynb)中找到有关 Keras 模型的完整用户指南。 # # + [markdown] id="uigsVGPreE-D" # ### Keras 层 # # `tf.keras.layers.Layer` 
是所有 Keras 层的基类,它继承自 `tf.Module`。 # # 您只需换出父项,然后将 `__call__` 更改为 `call` 即可将模块转换为 Keras 层: # + id="88YOGquhnQRd" class MyDense(tf.keras.layers.Layer): # Adding **kwargs to support base Keras layer arguemnts def __init__(self, in_features, out_features, **kwargs): super().__init__(**kwargs) # This will soon move to the build step; see below self.w = tf.Variable( tf.random.normal([in_features, out_features]), name='w') self.b = tf.Variable(tf.zeros([out_features]), name='b') def call(self, x): y = tf.matmul(x, self.w) + self.b return tf.nn.relu(y) simple_layer = MyDense(name="simple", in_features=3, out_features=3) # + [markdown] id="nYGmAsPrws--" # Keras 层有自己的 `__call__`,它会进行下一部分中所述的某些簿记,然后调用 `call()`。您应当不会看到功能上的任何变化。 # + id="nIqE8wOznYKG" simple_layer([[2.0, 2.0, 2.0]]) # + [markdown] id="tmN5vb1K18U1" # ### `build` 步骤 # # 如上所述,在您确定输入形状之前,等待创建变量在许多情况下十分方便。 # # Keras 层具有额外的生命周期步骤,可让您在定义层时获得更高的灵活性。这是在 `build()` 函数中定义的。 # # `build` 仅被调用一次,而且是使用输入的形状调用的。它通常用于创建变量(权重)。 # # 您可以根据输入的大小灵活地重写上面的 `MyDense` 层。 # # + id="4YTfrlgdsURp" class FlexibleDense(tf.keras.layers.Layer): # Note the added `**kwargs`, as Keras supports many arguments def __init__(self, out_features, **kwargs): super().__init__(**kwargs) self.out_features = out_features def build(self, input_shape): # Create the state of the layer (weights) self.w = tf.Variable( tf.random.normal([input_shape[-1], self.out_features]), name='w') self.b = tf.Variable(tf.zeros([self.out_features]), name='b') def call(self, inputs): # Defines the computation from inputs to outputs return tf.matmul(inputs, self.w) + self.b # Create the instance of the layer flexible_dense = FlexibleDense(out_features=3) # + [markdown] id="Koc_uSqt2PRh" # 此时,模型尚未构建,因此没有变量。 # + id="DgyTyUD32Ln4" flexible_dense.variables # + [markdown] id="-KdamIVl2W8Y" # 调用该函数会分配大小适当的变量。 # + id="IkLyEx7uAoTK" # Call it, with predictably random results print("Model results:", flexible_dense(tf.constant([[2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]))) # + id="Swofpkrd2YDd" 
flexible_dense.variables # + [markdown] id="7PuNUnf0OIpF" # 由于仅调用一次 `build`,因此如果输入形状与层的变量不兼容,输入将被拒绝。 # + id="caYWDrHSAy_j" try: print("Model results:", flexible_dense(tf.constant([[2.0, 2.0, 2.0, 2.0]]))) except tf.errors.InvalidArgumentError as e: print("Failed:", e) # + [markdown] id="YnporXiudF1I" # Keras 层具有许多额外的功能,包括: # # - 可选损失 # - 对指标的支持 # - 对可选 `training` 参数的内置支持,用于区分训练和推断用途 # - `get_config` 和 `from_config` 方法,允许您准确存储配置以在 Python 中克隆模型 # # 在自定义层的[完整指南](./keras/custom_layers_and_models.ipynb)中阅读关于它们的信息。 # + [markdown] id="L2kds2IHw2KD" # ### Keras 模型 # # 您可以将模型定义为嵌套的 Keras 层。 # # 但是,Keras 还提供了称为 `tf.keras.Model` 的全功能模型类。它继承自 `tf.keras.layers.Layer`,因此 Keras 模型是一种 Keras 层,支持以同样的方式使用、嵌套和保存。Keras 模型还具有额外的功能,这使它们可以轻松训练、评估、加载、保存,甚至在多台机器上进行训练。 # # 您可以使用几乎相同的代码定义上面的 `SequentialModule`,再次将 `__call__` 转换为 `call()` 并更改父项。 # + id="Hqjo1DiyrHrn" class MySequentialModel(tf.keras.Model): def __init__(self, name=None, **kwargs): super().__init__(**kwargs) self.dense_1 = FlexibleDense(out_features=3) self.dense_2 = FlexibleDense(out_features=2) def call(self, x): x = self.dense_1(x) return self.dense_2(x) # You have made a Keras model! 
my_sequential_model = MySequentialModel(name="the_model") # Call it on a tensor, with random results print("Model results:", my_sequential_model(tf.constant([[2.0, 2.0, 2.0]]))) # + [markdown] id="8i-CR_h2xw3z" # 所有相同的功能都可用,包括跟踪变量和子模块。 # # 注:为了强调上面的注意事项,嵌套在 Keras 层或模型中的原始 `tf.Module` 将不会收集其变量以用于训练或保存。相反,它会在 Keras 层内嵌套 Keras 层。 # + id="hdLQFNdMsOz1" my_sequential_model.variables # + id="JjVAMrAJsQ7G" my_sequential_model.submodules # + [markdown] id="FhP8EItC4oac" # 重写 `tf.keras.Model` 是一种构建 TensorFlow 模型的极 Python 化方式。如果要从其他框架迁移模型,这可能非常简单。 # # 如果要构造的模型是现有层和输入的简单组合,则可以使用[函数式 API](./keras/functional.ipynb) 节省时间和空间,此 API 附带有关模型重构和架构的附加功能。 # # 下面是使用函数式 API 构造的相同模型: # + id="jJiZZiJ0fyqQ" inputs = tf.keras.Input(shape=[3,]) x = FlexibleDense(3)(inputs) x = FlexibleDense(2)(x) my_functional_model = tf.keras.Model(inputs=inputs, outputs=x) my_functional_model.summary() # + id="kg-xAZw5gaG6" my_functional_model(tf.constant([[2.0, 2.0, 2.0]])) # + [markdown] id="s_BK9XH5q9cq" # 这里的主要区别在于,输入形状是作为函数构造过程的一部分预先指定的。在这种情况下,不必完全指定 `input_shape` 参数;您可以将某些维度保留为 `None`。 # # 注:您无需在子类化模型中指定 `input_shape` 或 `InputLayer`;这些参数和层将被忽略。 # + [markdown] id="qI9aXLnaHEFF" # ## 保存 Keras 模型 # # 可以为 Keras 模型创建检查点,这看起来和 `tf.Module` 一样。 # # Keras 模型也可以使用 `tf.saved_models.save()` 保存,因为它们是模块。但是,Keras 模型具有更方便的方法和其他功能。 # + id="SAz-KVZlzAJu" my_sequential_model.save("exname_of_file") # + [markdown] id="C2urAeR-omns" # 同样地,它们也可以轻松重新加载。 # + id="Wj5DW-LCopry" reconstructed_model = tf.keras.models.load_model("exname_of_file") # + [markdown] id="EA7P_MNvpviZ" # Keras `SavedModels` 还可以保存指标、损失和优化器状态。 # # 可以使用此重构模型,并且在相同数据上调用时会产生相同的结果。 # + id="P_wGfQo5pe6T" reconstructed_model(tf.constant([[2.0, 2.0, 2.0]])) # + [markdown] id="xKyjlkceqjwD" # 有关保存和序列化 Keras 模型,包括为自定义层提供配置方法来为功能提供支持的更多信息,请参阅[保存和序列化指南](keras/save_and_serialize)。 # + [markdown] id="kcdMMPYv7Krz" # # 后续步骤 # # 如果您想了解有关 Keras 的更多详细信息,可以在[此处](./keras/)查看现有的 Keras 指南。 # # 在 `tf.module` 上构建的高级 API 的另一个示例是 DeepMind 的 
Sonnet,[其网站](https://github.com/deepmind/sonnet)上有详细介绍。
site/zh-cn/guide/intro_to_modules.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Apache Toree - Scala
#     language: scala
#     name: apache_toree_scala
# ---

# ## Prepare Datasets for Joins
# Let us prepare datasets to join.
#
# * Make sure airport-codes is in HDFS.
# * We will also use airlines data for the month of January 2008. We have used that data set in the past as well.

# +
import sys.process._

// List the top-level airlines dataset directory.
Seq("hdfs", "dfs", "-ls", "/public/airlines_all").!

// +
import sys.process._

// Confirm the airport-codes dataset is present.
Seq("hdfs", "dfs", "-ls", "/public/airlines_all/airport-codes").!

// +
import sys.process._

// Inspect the January 2008 partition of the airlines data.
Seq("hdfs", "dfs", "-ls", "/public/airlines_all/airlines-part/flightmonth=200801").!
06_joining_data_sets/02_preparing_data_sets_for_joins.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] papermill={} tags=["naas"]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>

# + [markdown] papermill={} tags=[]
# # HTML - Create a website
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/HTML/HTML_Create_a_website.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>

# + [markdown] papermill={} tags=[]
# **Tags:** #html #css #website #page #landing #custom #snippet

# + [markdown] papermill={} tags=["naas"]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)

# + [markdown] papermill={} tags=[]
# The objective of this notebook is to create an end-to-end website in 5min.

# + [markdown] papermill={} tags=[]
# ## Input

# + [markdown] papermill={} tags=[]
# ### Import libraries

# + papermill={} tags=[]
from urllib.request import urlopen
from IPython.display import IFrame
import naas

# + [markdown] papermill={} tags=[]
# ## Model

# + [markdown] papermill={} tags=[]
# ### Get example

# + papermill={} tags=[]
# FIX: use a context manager so the HTTP response is always closed,
# instead of leaving the connection dangling after .read().
with urlopen("http://www.example.com/") as response:
    html = response.read().decode('utf-8')
print(html)

# + [markdown] papermill={} tags=[]
# ### Save file on your file system
# Click right to open + edit the file downloaded

# + papermill={} tags=[]
# FIX: `with` guarantees the file handle is closed even if the write
# fails; the original open()/write()/close() sequence could leak it.
with open("site.html", "w") as html_file:
    html_file.write(html)

# + [markdown] papermill={} tags=[]
# ### Learn about HTML with this Cheat Sheet

# + papermill={} tags=[]
IFrame("https://web.stanford.edu/group/csp/cs21/htmlcheatsheet.pdf", width=900, height=600)

# + [markdown] papermill={} tags=[]
# ### Manually change the content of the file to make it your own.
#
# Use Google search to go further in the customization (I recommend using https://stackoverflow.com/ + https://www.w3schools.com/html/)

# + [markdown] papermill={} tags=[]
# ## Output

# + [markdown] papermill={} tags=[]
# Use Naas asset formula to generate a shareable URL.

# + papermill={} tags=[]
naas.asset.add("site.html", {"inline": True})

# + [markdown] papermill={} tags=[]
# Nb: if you want to use your own domain name, we will cover that in another version of this template.
# Contact us → <EMAIL>
HTML/HTML_Create_a_website.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] cell_id="f7d60a22a16b4bc89b1a1f66427c1252" deepnote_cell_height=323.1875 deepnote_cell_type="markdown" tags=[] # # Tech Career Salary Analysis Part 2 # > "A continuation of the previous post exploring salaries in the tech sector." # # - toc: false # - branch: master # - badges: true # - permalink: /career-salary-comparisons/ # - comments: false # - hide: false # - categories: [Beginner, AdHoc Analysis] # + [markdown] cell_id="5106bdc4c4b740f7a90365296701f8b3" deepnote_cell_height=470.796875 deepnote_cell_type="markdown" tags=[] # This is a continuation of my [previous post]({% post_url 2022-05-02-salaryInfo %}), an exploration of what the data from [Levels.fyi](https://levels.fyi) says about the salary associated with various paths in the tech sector. In short, I built a [quick and dirty webscraper](https://github.com/borsboomT/levels_scraper/blob/main/levels_scraper.py) to collect all the salary data available on [Levels.fyi](https://levels.fyi) for the US tech sector. In my [previous post]({% post_url 2022-05-02-salaryInfo %}) I showed how the data was [aggregated](https://github.com/borsboomT/levels_scraper/blob/main/track_aggregator.py) and cleaned, and then made a brief observation that software engineers and data scientists make approximately the same amount of money. # # There is so much more information that can be collected from the scraped data though. We have information on locations, companies, time spent with a given company, and more. We also have data for the following career tracks: # - Data Scientist # - Product Manager # - Recruiter # - Sales # - Software Engineer # - Software Engineering Manager # - Technical Program Manager # # The objective of this post will be to explore the available data and see what conclusions can be drawn from it. 
We'll start by using the function we developed in my [previous post]({% post_url 2022-05-02-salaryInfo %}) to import and clean all the available data. For a thorough explanation of the thought process behind that function, check out that post! # + cell_id="b5c6739bff1a4c38bac3fcc7cd5bf538" deepnote_cell_height=153 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=0 execution_start=1651618257812 source_hash="bfdb2999" tags=[] # hide import warnings warnings.simplefilter(action="ignore", category=FutureWarning) import pandas as pd pd.options.mode.chained_assignment = None # default='warn' # + cell_id="ab8a5bae-41e1-488a-bf6c-ab5a4765d22c" deepnote_cell_height=345.796875 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1499 execution_start=1651618257813 is_output_hidden=true source_hash="8fdb68a9" tags=[] # hide_output import pandas as pd import numpy as np import plotly.graph_objects as go import seaborn as sns DS_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/DataScientist_completeCSV.csv" PM_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/ProductManager_completeCSV.csv" REC_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/Recruiter_completeCSV.csv" SAL_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/Sales_completeCSV.csv" SWE_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/SoftwareEngineer_completeCSV.csv" SWEM_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/SoftwareEngineeringManager_completeCSV.csv" TPM_FILE_LOCATION = "https://raw.githubusercontent.com/borsboomT/levels_scraper/main/data/TechnicalProgramManager_completeCSV.csv" # + cell_id="91c54655e3654d90890f1dcd6fd99fce" deepnote_cell_height=1305 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=2362 
execution_start=1651618259313 source_hash="3dd4ac0e" tags=[] def clean_salary_data(raw_df): # This ensures that the numerical columns are interpreted as numeric columns by # converting all N/A values to NaN. numeric_cols = [ "Total Compensation", "Stock Comp", "Base Comp", "Bonus Comp", "YOE Total", "YOE At Company", ] raw_df[numeric_cols] = raw_df[numeric_cols].apply( pd.to_numeric, errors="coerce" ) # We also round and convert the experience columns to integer values for easier grouping later on. exp_cols = ["YOE Total", "YOE At Company"] raw_df.replace([np.inf, -np.inf], np.nan, inplace=True) raw_df.dropna(subset=exp_cols, how="all", inplace=True) raw_df[exp_cols] = raw_df[exp_cols].apply(np.round) raw_df[exp_cols] = raw_df[exp_cols].astype(int) YOE_vals = raw_df["YOE Total"].unique() cleaned_df_list = [] for YOE in YOE_vals: # We filter the dataframe based on the total years of experience. df = raw_df[raw_df["YOE Total"] == YOE] # We get the upper and lower quantile data for the numeric columns. 
lower_quant = df.quantile(0.25) upper_quant = df.quantile(0.75) IQR = upper_quant - lower_quant cut_off = IQR * 1.5 lower_cut_off, upper_cut_off = ( lower_quant - cut_off, upper_quant + cut_off, ) # We filter the data based on those quantiles quant_filter = (df[numeric_cols] > (lower_cut_off)) & ( df[numeric_cols] < (upper_cut_off) ) df = df[quant_filter.any(axis=1)] # We fill the NaN cells with the imputed median values df.fillna(df.median(), inplace=True) # We save the cleaned dataframe by adding it to a list cleaned_df_list.append(df) cleaned_df = pd.concat(cleaned_df_list) cleaned_df.reset_index(inplace=True, drop=True) return cleaned_df cleaned_DS_df = clean_salary_data( pd.read_csv(DS_FILE_LOCATION) ) # Data Scientist cleaned_PM_df = clean_salary_data( pd.read_csv(PM_FILE_LOCATION) ) # Product Manager cleaned_REC_df = clean_salary_data(pd.read_csv(REC_FILE_LOCATION)) # Recruiter cleaned_SAL_df = clean_salary_data(pd.read_csv(SAL_FILE_LOCATION)) # Sales cleaned_SWE_df = clean_salary_data( pd.read_csv(SWE_FILE_LOCATION) ) # Software Engineer cleaned_SWEM_df = clean_salary_data( pd.read_csv(SWEM_FILE_LOCATION) ) # Software Engineering Manager cleaned_TPM_df = clean_salary_data( pd.read_csv(TPM_FILE_LOCATION) ) # Technical Program Manager clean_df_dict = { "DS": cleaned_DS_df, "PM": cleaned_PM_df, "REC": cleaned_REC_df, "SAL": cleaned_SAL_df, "SWE": cleaned_SWE_df, "SWEM": cleaned_SWEM_df, "TPM": cleaned_TPM_df, } focused_df_dict = { "DS": cleaned_DS_df, "SWE": cleaned_SWE_df, "SWEM": cleaned_SWEM_df, } # + [markdown] cell_id="4b0e0e5bacad4828993da758a559089f" deepnote_cell_height=142.796875 deepnote_cell_type="markdown" tags=[] # # Raw Salary Comparison # # We'll answer the simplest question first, which of these career paths makes the most money? The easiest way to answer this question is to plot the median values for each career path as a function of the total years of experience. 
# + cell_id="b0a3d901a43042649ca6faf229ee5411" deepnote_cell_height=1020 deepnote_cell_type="code" deepnote_output_heights=[527] deepnote_to_be_reexecuted=false execution_millis=225 execution_start=1651618327862 owner_user_id="e86de109-443a-4032-b815-9af35793c0f6" source_hash="75263782" tags=[] fig = go.Figure() for df_key in clean_df_dict: df = clean_df_dict[df_key].copy(deep=True) df_grouped = df.groupby(["YOE Total"]).agg( ["mean", "count", "std", "median"] ) fig.add_trace( go.Scatter( name=df_key, x=df_grouped.index, y=df_grouped["Total Compensation"]["median"], mode="lines", ) ) fig.update_layout( title="Career Path Median Salaries", xaxis_title="Total Years of Experience", yaxis_title="Median Total Yearly Compensation (USD)", showlegend=True, ) fig.show() # + [markdown] cell_id="c7c467233f784aeca442d8d903b93e0f" deepnote_cell_height=498.390625 deepnote_cell_type="markdown" tags=[] # A few things immediately jump out of this plot. The total compensation for recruiters falls far below the rest of the pack. Additionally, the total compensation for software engineering managers is far above the rest of the pack. Sales appears to lag behind the more technical roles to some extent until 9 years of experience, at which point it's fairly comparable to the bulk of the career paths. In the time that it takes to reach that point, however, a salesperson would had missed out on hundreds of thousands of dollars in income in comparison to a data scientist or software engineer. # # Looking closely, you can also see that the product manager and technical program manager roles lag slightly behind the data scientist and software engineer roles right up to about the 9 year mark. Most trends disappear beyond the 9 year mark for the bulk of the roles. # # The remainder of this analysis will focus on the DS, and SWE roles for no reason other than those are the career paths I'm interested in. # # # How Does FAANG compare? 
# # Breaking the salary data down by company can offer some insight as to which companies offer the best compensation, whether that compensations comes in the form of base pay/stock/bonus, and whether or not the companies reward job hopping or tenure. A pair plot offers a good initial overview of the trends in the available data. The results of the pair plots actually looked the same for the career paths we're analysing, so only the SWE one is displayed below since that path has the largest amount of available data. I've also seen some stir about Microsoft entering the fray to be considered amongst the FAANG companies, so we'll throw them in to our FAANG analysis as well. # + cell_id="4dda845f5f8a4e72bc89bd77b6ffb5b5" deepnote_cell_height=924 deepnote_cell_type="code" deepnote_output_heights=[611, 611] deepnote_to_be_reexecuted=false execution_millis=25187 execution_start=1651618262245 source_hash="1079a9f4" tags=[] # hide_output faang_companies = [ "Facebook", "Amazon", "Apple", "Netflix", "Google", "Microsoft", ] df = focused_df_dict["SWE"].copy(deep=True) df_top = df[df["Company"].isin(faang_companies)] df_top_grouped = df_top.groupby(["Company", "YOE Total"]).median() df_top_grouped.reset_index(inplace=True) ax = sns.pairplot( df_top_grouped, vars=[ "YOE Total", "Total Compensation", "YOE At Company", "Base Comp", "Stock Comp", "Bonus Comp", ], kind="reg", hue="Company", ) # + [markdown] cell_id="765cee6256d344919117533800792f98" deepnote_cell_height=1059.453125 deepnote_cell_type="markdown" tags=[] # ![image](https://github.com/borsboomT/fastpagesBlogBuilder/blob/master/images/faang_salary_pairplot.png?raw=true) # # Immediately we see that Netflix appears to compensate far better than the other members of FAANG, and that most of this extra compensation comes in the form of increased base pay. 
The data also shows that Facebook offers more of its total compensation as stock than the other members, and that both Google and Facebook offer a significant portion of their total compensation as a bonus. Another thing we can see is that Microsoft appears to offer the lowest salary amongst the group, and they appear to have people staying on the longesst. We'll take a closer look at the relationship between YOE at a company and total compensation in a moment. # # Let's take a closer look at how the median total compensation looks for each of the FAANG companies as a function of time for both DS and SWE. # + cell_id="5cebae0c380c4b1a80e4fff81b19466f" deepnote_cell_height=1740 deepnote_cell_type="code" deepnote_output_heights=[527, 527] deepnote_to_be_reexecuted=false execution_millis=284 execution_start=1651618287148 source_hash="2d540080" tags=[] def plot_median_salaries_companies(career_path, company_list, plot_title): df = focused_df_dict[career_path].copy(deep=True) df_top = df[df["Company"].isin(company_list)] df_top_grouped = df_top.groupby(["Company", "YOE Total"]).agg( ["mean", "count", "std", "median"] ) df_top_grouped.reset_index(inplace=True) fig = go.Figure() for company in company_list: company_grouped_df = df_top_grouped[ df_top_grouped["Company"] == company ] fig.add_trace( go.Scatter( name="{}".format(company), x=company_grouped_df["YOE Total"], y=company_grouped_df["Total Compensation"]["median"], mode="lines", ) ) fig.update_layout( title=plot_title, xaxis_title="Total Years of Experience", yaxis_title="Median Total Yearly Compensation (USD)", showlegend=True, ) fig.show() plot_median_salaries_companies( "DS", faang_companies, "DS FAANG Median Salaries" ) plot_median_salaries_companies( "SWE", faang_companies, "SWE FAANG Median Salaries" ) # + [markdown] cell_id="31e3693182ce4c59ad3d5c6b0ca8af66" deepnote_cell_height=178.390625 deepnote_cell_type="markdown" tags=[] # Netflix is the clear leader in terms of total compensation for both career 
paths, just like we saw in the pair plot. It appears that for the remainder of the FAANG companies, the competition is fierce on the data science side while there appears to be a clear pecking order on the software engineering side. # # One major criticism of FAANG is that much of the total compensation comes in the form of stock and bonuses, and that in a bear market the total compensation of these companies could tank. Lets take a look at just how much of the total compensation is provided through non-guaranteed means at FAANG companies. # # # + cell_id="22b1725af03f4e82b624a9a59ff2ee8d" deepnote_cell_height=3443 deepnote_cell_type="code" deepnote_output_heights=[606.1875, 606.1875, 606.1875, 606.1875] deepnote_to_be_reexecuted=false execution_millis=382 execution_start=1651618287258 source_hash="5ba17740" tags=[] def plot_stock_bonus_ratio_companies( career_path, company_list, plot_title, plot_type ): df = focused_df_dict[career_path].copy(deep=True) df_top = df[df["Company"].isin(company_list)] df_top_grouped = df_top.groupby(["Company", "YOE Total"]).agg( ["mean", "count", "std", "median"] ) df_top_grouped.reset_index(inplace=True) fig = go.Figure() for company in company_list: company_grouped_df = df_top_grouped[ df_top_grouped["Company"] == company ] company_stock_ratio = ( company_grouped_df["Stock Comp"]["median"] / company_grouped_df["Total Compensation"]["median"] ) company_bonus_ratio = ( company_grouped_df["Bonus Comp"]["median"] / company_grouped_df["Total Compensation"]["median"] ) if plot_type == "stock": y_vals = company_stock_ratio axis_title = "Stock" elif plot_type == "bonus": y_vals = company_bonus_ratio axis_title = "Bonus" fig.add_trace( go.Scatter( name="{}".format(company), x=company_grouped_df["YOE Total"], y=y_vals, mode="lines", ) ) fig.update_layout( title=plot_title, xaxis_title="Total Years of Experience", yaxis_title="Proportion of Total Compensation Contributed by {}".format( axis_title ), showlegend=True, ) fig.show() 
plot_stock_bonus_ratio_companies( "DS", faang_companies, "DS FAANG Stock Salary Proportion", "stock" ) plot_stock_bonus_ratio_companies( "SWE", faang_companies, "SWE FAANG Stock Salary Proportion", "stock" ) plot_stock_bonus_ratio_companies( "DS", faang_companies, "DS FAANG Bonus Salary Proportion", "bonus" ) plot_stock_bonus_ratio_companies( "SWE", faang_companies, "SWE FAANG Bonus Salary Proportion", "bonus" ) # + [markdown] cell_id="7b50d9e1aedc4674b38fced5a4171880" deepnote_cell_height=291.1875 deepnote_cell_type="markdown" tags=[] # Here we highlight just how heavily skewed towards stock many of the FAANG companies can become, especially later in your career, for both data scientists and software engineers. That said, Netflix appears to do the best job at providing the bulk of an employee's salary through base pay. # # # Job Hopping vs Company Tenure # # People constantly make the argument that the fastest way to increase your salary in the tech sector is to jump between companies, and that staying with one company isn't rewarded. Let's take a look at what the data says about that claim. In order to evaluate the statement we'll plot total compensation as a function of total years of experience, but split the data by the number of years of experience at a given company. When plotting this data, using all available values for YOE at a company made the plot unreadable. The plot below is restricted to 5 years of experience and below, but highlights the same trends. 
# + cell_id="e6b2e714730641f9b61624f53b1488ce" deepnote_cell_height=1830 deepnote_cell_type="code" deepnote_output_heights=[606.1875, 606.1875] deepnote_to_be_reexecuted=false execution_millis=167 execution_start=1651618287643 source_hash="2951fe8" tags=[]
def plot_YOE_tenure_comparison(career_path, plot_title):
    """Plot median total compensation vs. total YOE, with one trace per
    number of years of tenure at the current company (0 through 5).

    Args:
        career_path: Key into ``focused_df_dict`` (e.g. "DS" or "SWE").
        plot_title: Title for the resulting plotly figure.
    """
    df = focused_df_dict[career_path].copy(deep=True)
    df_top_grouped = df.groupby(["YOE At Company", "YOE Total"]).agg(
        ["mean", "count", "std", "median"]
    )
    df_top_grouped.reset_index(inplace=True)

    # We only plot data for company tenure 5 years and below to improve
    # the readability of the plot. Iterating range(0, 6) enforces that
    # directly, so no extra YOE <= 5 check is needed inside the loop.
    YOE_vals = range(0, 6)

    fig = go.Figure()

    for YOE in YOE_vals:
        YOE_grouped_df = df_top_grouped[
            df_top_grouped["YOE At Company"] == YOE
        ]
        fig.add_trace(
            go.Scatter(
                name="{}".format(YOE),
                x=YOE_grouped_df["YOE Total"],
                y=YOE_grouped_df["Total Compensation"]["median"],
                mode="lines",
            )
        )

    fig.update_layout(
        title=plot_title,
        xaxis_title="Total Years of Experience",
        yaxis_title="Median Total Yearly Compensation (USD)",
        legend_title_text="YOE at Company",
        showlegend=True,
    )

    fig.show()


plot_YOE_tenure_comparison("DS", "DS Company Tenure Comparison")
plot_YOE_tenure_comparison("SWE", "SWE Company Tenure Comparison")

# + [markdown] cell_id="ac10f4d521484fe09332fa74f6c21477" deepnote_cell_height=439.59375 deepnote_cell_type="markdown" tags=[]
# It appears there is some truth to the claim that job hopping results in higher salary. The claim is especially true for the software engineering track, as the trace for 0 years of tenure at a company is frequently seen above the other traces in the plot. The truth is a little less clear for the data scientist career track, however there does appear to be an inclination for higher salaries at 0 years of tenure.
#
# One surprising thing about this plot is the data corresponding to people with more years of tenure at a company than they have experience in a particular job. 
These data points would indicate people transitioning between career paths while staying within the same company. It appears that if you already work for a tech company and are trying to make a career transition to data science or software engineering, the best move you have is to transition into your new role while working for the same company, and begin job hopping afterwards. # # # Which Companies Pay the Most? # # Some people are more interested in total compensation than the clout that goes along with working for a FAANG company. We should take a look at which companies pay the most regardless of clout, and see if FAANG is really all that when it comes to total compensation. We'll take a look at the top 10 companies for both data science and software engineering careers paths. We'll also be limiting this analysis to companies that have 30 or more entries in our datasets to ensure we have enough data to make some claims. Admittedly, the value of 30 is somewhat arbitrary but I will discuss that in a moment. 
# + cell_id="2aa5f448b1294c11a2176fa8835a4848" deepnote_cell_height=1628 deepnote_cell_type="code" deepnote_output_heights=[606.1875, 606.1875] deepnote_to_be_reexecuted=false execution_millis=240 execution_start=1651618287809 source_hash="a417675" tags=[]
def get_top_compensation_companies(career_path, num_companies, min_entries=30):
    """Return the ``num_companies`` companies with the highest median total
    compensation for the given career path.

    Args:
        career_path: Key into ``focused_df_dict`` (e.g. "DS" or "SWE").
        num_companies: How many top companies to return.
        min_entries: Minimum number of data points a company must have to
            be considered, so a handful of outlier entries can't put a
            company on top. Defaults to 30 (admittedly somewhat arbitrary,
            as discussed in the surrounding text).

    Returns:
        A pandas Series of company names, sorted by descending median
        total compensation.
    """
    df = focused_df_dict[career_path].copy(deep=True)
    top_companies = df.groupby(["Company"]).agg(
        ["mean", "count", "std", "median"]
    )
    top_companies.reset_index(inplace=True)
    # Drop companies with too little data to support a robust median.
    top_companies = top_companies[
        top_companies["Total Compensation"]["count"] >= min_entries
    ]
    top_companies.sort_values(
        ("Total Compensation", "median"), ascending=False, inplace=True
    )
    top_companies = top_companies["Company"][:num_companies]

    return top_companies


top_DS_companies = get_top_compensation_companies("DS", 10)
plot_median_salaries_companies(
    "DS", top_DS_companies, "DS Top Company Median Salaries"
)

top_SWE_companies = get_top_compensation_companies("SWE", 10)
plot_median_salaries_companies(
    "SWE", top_SWE_companies, "SWE Top Company Median Salaries"
)

# + [markdown] cell_id="c2e02a3215af4d54abaa22b3b842b565" deepnote_cell_height=372.390625 deepnote_cell_type="markdown" tags=[]
# These plots are very messy, but what I wanted to highlight with them is that while for data scientists each of FAANG can be found in the top ten companies that is not the case for software engineers. FAANG is very poorly represented amongst the top paying companies for software engineers, with only Netflix appearing in the top ten. Also worth noting is that amongst the best compensating companies for software engineers, Netflix is definitely competitive but it's not like they're dominating the competition.
#
# This just goes to show that while FAANG might seem shiny, the intensive hiring processes and increased stress load may not be worth it if your main goal is total compensation.
#
#
# # What Role Does Location Play?
#
# The last topic I'll address with this data is location. 
People talk a lot about where you need to move if you want a career in tech, and while remote work is becoming more and more normalized it's still worth looking at how employment location has historically affected total compensation. Let's group our data by location and sort by median salary to see which locations come out on top. We'll be restricting the plot to cities that have at least 10 entries to avoid any odd entries or outliers. # + cell_id="267c897a3ab848d8876125f2cd49bfdc" deepnote_cell_height=1974 deepnote_cell_type="code" deepnote_output_heights=[606.1875, 606.1875] deepnote_to_be_reexecuted=false execution_millis=271 execution_start=1651618288054 source_hash="b5e59cfe" tags=[] def get_top_compensation_locations(career_path, num_locations): df = focused_df_dict[career_path].copy(deep=True) top_locations = df.groupby(["Location"]).agg( ["mean", "count", "std", "median"] ) top_locations.reset_index(inplace=True) top_locations = top_locations[ top_locations["Total Compensation"]["count"] >= 10 ] top_locations.sort_values( ("Total Compensation", "median"), ascending=False, inplace=True ) top_locations = top_locations["Location"][:num_locations] return top_locations def plot_median_salaries_locations(career_path, location_list, plot_title): df = focused_df_dict[career_path].copy(deep=True) df_top = df[df["Location"].isin(location_list)] df_top_grouped = df_top.groupby(["Location"]).agg( ["mean", "count", "std", "median"] ) df_top_grouped.reset_index(inplace=True) fig = go.Figure() for location in location_list: location_grouped_df = df_top_grouped[ df_top_grouped["Location"] == location ] fig.add_trace( go.Bar( name="{}".format(location), x=location_grouped_df["Location"], y=location_grouped_df["Total Compensation"]["median"], ) ) fig.update_layout( title=plot_title, yaxis_title="Median Total Yearly Compensation (USD)", showlegend=False, ) fig.show() top_DS_locations = get_top_compensation_locations("DS", 10) plot_median_salaries_locations( "DS", 
top_DS_locations, "DS Top Location Median Salaries" ) top_SWE_locations = get_top_compensation_locations("SWE", 10) plot_median_salaries_locations( "SWE", top_SWE_locations, "SWE Top Location Median Salaries" ) # + [markdown] cell_id="9714af8d24794163b1f64b5f1286f091" deepnote_cell_height=97.1875 deepnote_cell_type="markdown" tags=[] # It's no surprise that in the top ten locations for both data scientists and software engineers we're mostly seeing Silicon Valley, the Greater Seattle Area. Los Gatos clearly takes the cake in terms of total compensation, however that's where the main headquarters of Netflix can be found. Let's do this again, but aggregate the Silicon Valley and Greater Seattle Area locations. # + cell_id="d155b16358e04e03956bd384c688bbde" deepnote_cell_height=2604 deepnote_cell_type="code" deepnote_output_heights=[606.1875, 606.1875] deepnote_to_be_reexecuted=false execution_millis=413 execution_start=1651618288332 source_hash="28cd0b80" tags=[] def aggregate_locations(df): silicon_valley_cities = "|".join( [ "San Jose, CA", "Menlo Park, CA", "Palo Alto, CA", "Mountain View, CA", "Cupertino, CA", "Santa Clara, CA", "Redwood City, CA", "Sunnyvale, CA", "Los Gatos, CA", "San Mateo, CA", "San Bruno, CA", ] ) san_francisco_cities = "|".join(["South San Francisco, CA"]) seattle_area_cities = "|".join( ["Seattle, WA", "Kirkland, WA", "Redmond, WA", "Bellevue, WA"] ) df["Location"] = df["Location"].str.replace( silicon_valley_cities, "Silicon Valley" ) df["Location"] = df["Location"].str.replace( seattle_area_cities, "Greater Seattle Area" ) df["Location"] = df["Location"].str.replace( san_francisco_cities, "San Francisco, CA" ) return df def get_top_compensation_locations_aggregated(career_path, num_locations): df = focused_df_dict[career_path].copy(deep=True) df = aggregate_locations(df) top_locations = df.groupby(["Location"]).agg( ["mean", "count", "std", "median"] ) top_locations.reset_index(inplace=True) top_locations = top_locations[ 
top_locations["Total Compensation"]["count"] >= 10 ] top_locations.sort_values( ("Total Compensation", "median"), ascending=False, inplace=True ) top_locations = top_locations["Location"][:num_locations] return top_locations def plot_median_salaries_aggregated_locations( career_path, location_list, plot_title ): df = focused_df_dict[career_path].copy(deep=True) df = aggregate_locations(df) df_top = df[df["Location"].isin(location_list)] df_top_grouped = df_top.groupby(["Location"]).agg( ["mean", "count", "std", "median"] ) df_top_grouped.reset_index(inplace=True) fig = go.Figure() for location in location_list: location_grouped_df = df_top_grouped[ df_top_grouped["Location"] == location ] fig.add_trace( go.Bar( name="{}".format(location), x=location_grouped_df["Location"], y=location_grouped_df["Total Compensation"]["median"], ) ) fig.update_layout( title=plot_title, yaxis_title="Median Total Yearly Compensation (USD)", showlegend=False, ) fig.show() top_DS_locations_aggregated = get_top_compensation_locations_aggregated( "DS", 10 ) plot_median_salaries_aggregated_locations( "DS", top_DS_locations_aggregated, "DS Top Aggregated Location Median Salaries", ) top_SWE_locations_aggregated = get_top_compensation_locations_aggregated( "SWE", 10 ) plot_median_salaries_aggregated_locations( "SWE", top_SWE_locations_aggregated, "SWE Top Aggregated Location Median Salaries", ) # + [markdown] cell_id="eee2c97e261e479dad9497358b5cfed4" deepnote_cell_height=245.59375 deepnote_cell_type="markdown" tags=[] # San Francisco and Silicon valley top the charts for both data scientist and software engineering career paths. Interestingly enough the Greater Seattle Area appears to be a great spot for data scientists, but just barely squeaks into the top ten for software engineers. Beyond those observations we get an interesting mishmash of cities from CA, MA, NJ, and NY. 
# # One very important thing to note here is that based on this plot the median salaries for software engineers appear to be quite a bit higher than for data scientists. That is an odd observation given that the analysis from my [previous post]({% post_url 2022-05-02-salaryInfo %}) indicated that they shouldn't be that different. The thing that this plot isn't taking into account is that having more years of experience results in higher salary, and there are much more data points for software engineers at later in the careers than there are for data scientists. That makes this plot appear to show a higher salary range for software engineers, but that's not actually an inference we can make from this plot. # + cell_id="b41eb8fe6ac849ccb65d550bdb2fe57f" deepnote_cell_height=348 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=86 execution_start=1651618288752 source_hash="a01b2fd7" tags=[] def count_later_career_datapoints(career_path, start_year): df = focused_df_dict[career_path].copy(deep=True) df_grouped = df.groupby(["YOE Total"]).agg(["count"]) num_data_points = df_grouped[df_grouped.index >= start_year][ "Total Compensation" ]["count"].sum() return num_data_points num_late_career_DS_datapoints = count_later_career_datapoints("DS", 15) num_late_career_SWE_datapoints = count_later_career_datapoints("SWE", 15) print( "Number of late career DS data points: {}".format( num_late_career_DS_datapoints ) ) print( "Number of late career SWE data points: {}".format( num_late_career_SWE_datapoints ) ) # + [markdown] cell_id="f57114e8445b4740ac03c29568c4d404" deepnote_cell_height=165.1875 deepnote_cell_type="markdown" tags=[] # # Wrapping Up # Anyway, that's all I've got for this time. The data I used for this analysis can all be found [on my github](https://github.com/borsboomT/levels_scraper/tree/main/data). I encourage everyone who reads this to download and and see what you can find for yourself. 
If you think I've gotten something wrong here, prove it to me! I'm always happy to be proven wrong; it gives me an opportunity to learn. :D
_notebooks/2022-05-03-salaryInfo2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import librosa import librosa.display import matplotlib.pyplot as plt import numpy as np import scipy as sp import IPython.display as ipd # - plt.rcParams['figure.figsize'] = (15, 10) # + def novidade_energia(x, fs, w, log=False): x_quadrado = x**2 E = np.convolve(x_quadrado, w, 'same') if log: E = np.log(E) E_diff = np.diff(E) E_diff = np.concatenate((E_diff, np.array([0.]))) E_diff[E_diff < 0] = 0 return E_diff def novidade_espectro(x, fs, N, H,compressao_log=False, gamma=100): w = sp.signal.hann(N) X = librosa.stft(x, n_fft=N, win_length=N, hop_length=H, window=w) Y = np.abs(X) if compressao_log: Y = np.log(1 + gamma*Y) Y_diff = np.diff(Y) Y_diff[Y_diff < 0] = 0 fc_novidade = np.sum(Y_diff, axis=0) fc_novidade = np.concatenate((fc_novidade, np.array([0.]))) return fc_novidade def novidade_fase(x, fs, N, H): w = sp.signal.hann(N) X = librosa.stft(x, n_fft=N, win_length=N, hop_length=H, window=w) phi = np.angle(X)/(2*np.pi) # phase unwrapping (aplicação da função de argumento principal) argumento_principal = lambda x: np.mod(x + 0.5 , 1) - 0.5 # primeira derivada phi_diff = np.diff(argumento_principal(phi)) # segunda derivada phi_diff_2 = np.diff(argumento_principal(phi_diff)) fc_novidade = np.sum(np.abs(phi_diff_2), axis=0) # a concatenação nesse caso tem que ter duas posições porque # aplicamos o diff duas vezes fc_novidade = np.concatenate((fc_novidade, np.array([0., 0.]))) return fc_novidade def novidade_complexo(x, fs, w, compressao_log=False): w = sp.signal.hann(N) X = librosa.stft(x, n_fft=N, win_length=N, hop_length=H, window=w) phi = np.angle(X)/(2*np.pi) # phase unwrapping (aplicação da função de argumento principal) argumento_principal = lambda x: np.mod(x + 0.5 , 1) - 0.5 phi_diff = np.diff(argumento_principal(phi), 
axis=1) # completando o vetor pra continha ficar mais fácil phi_diff = np.concatenate(phi_diff, np.zeros((phi.shape[0], 1))) X_hat = np.abs(X)*np.e**(2j*np.pi*(phi + phi_diff)) return "ops" # - x, fs = librosa.load(librosa.util.example('trumpet')) x2, fs2 = librosa.load(librosa.util.example('choice')) librosa.util.list_examples() dest_path = "/home/giovana/Documentos/personal/giovana-morais.github.io/assets/images/2021/" librosa.display.waveplot(x) #plt.title("Trumpet loop") #plt.savefig(os.path.join(dest_path, 'waveform.png')) librosa.display.waveplot(x2) plt.title("Choice (drum+bass)") plt.savefig(os.path.join(dest_path, 'waveform_choice.png')) # # Energia # + N = 1024 w = sp.signal.hann(N) nov_energia = novidade_energia(x, fs, w) nov_energia_log = novidade_energia(x, fs, w, log=True) # - plt.plot(x/np.linalg.norm(x), 'gray', alpha=0.3, label="sinal original (normalizado)") plt.plot(nov_energia, label='$\Delta_{energia}$') plt.plot(nov_energia_log, label='$\Delta_{\log{(energia)}}$') plt.legend() plt.title('Função novidade baseada em energia') plt.savefig(os.path.join(dest_path, 'nov_energia.png')) # + nov_energia2 = novidade_energia(x2, fs2, w) nov_energia_log2 = novidade_energia(x2, fs2, w, log=True) plt.plot((x2/np.linalg.norm(x2)), 'gray', alpha=0.3, label="sinal original (normalizado)") plt.plot(nov_energia2, label='$\Delta_{energia}$') plt.plot(nov_energia_log2, label='$\Delta_{\log{(energia)}}$') plt.legend() plt.title('Função novidade baseada em energia') plt.savefig(os.path.join(dest_path, 'nov_energia_choice.png')) # - # # Espectro # + N = 1024 H = 256 nov_espectro = novidade_espectro(x, fs, N, H, False) nov_espectro_1 = novidade_espectro(x, fs, N, H, True, 1) nov_espectro_10 = novidade_espectro(x, fs, N, H, True, 10) nov_espectro_100 = novidade_espectro(x, fs, N, H, True, 100) # - plt.plot(nov_espectro[0:100], label='$\Delta_{espectro}$') plt.plot(nov_espectro_1[0:100], label='$\Delta_{espectro} \gamma=1$') plt.plot(nov_espectro_10[0:100], 
label='$\Delta_{espectro} \gamma=10$') plt.plot(nov_espectro_100[0:100], label='$\Delta_{espectro} \gamma=100$') plt.title('Função baseada em espectro') plt.legend() #plt.savefig(os.path.join(dest_path, 'nov_espectro.png')) # # Fase X = librosa.stft(x, n_fft=N, win_length=N, window=w, hop_length=H) phi = np.angle(X) phi_2pi = phi/(2*np.pi) diff = np.diff(phi) diff_2pi = np.diff(phi_2pi) plt.plot(diff[0], label="sem intervalo definido") plt.plot(diff_2pi[0]/(2*np.pi), label="com intervalo definido") plt.legend() nov_fase = novidade_fase(x, fs, N, H) plt.plot(nov_fase) plt.plot(nov_espectro) # # Complexo # + w = sp.signal.hann(N) X = librosa.stft(x, n_fft=N, win_length=N, hop_length=H, window=w) phi = np.angle(X)/(2*np.pi) # phase unwrapping (aplicação da função de argumento principal) argumento_principal = lambda x: np.mod(x + 0.5 , 1) - 0.5 phi_diff = np.diff(argumento_principal(phi), axis=1) # + # completando o vetor pra continha ficar mais fácil phi_diff = np.concatenate((phi_diff, np.zeros((phi.shape[0], 1))), axis=1) X_hat = np.abs(X)*np.e**(2j*np.pi*(phi + phi_diff)) X_hat = np.abs(X_hat - X) # - X_hat.shape # o numpy não faz a cópia de verdade de um array a não ser que # a gente force. caso contrário, ao alterarmos X_plus estaríamos # alterando também X_hat X_plus = X_hat.copy() for n in range(1, X_plus.shape[0]): idx = np.where(np.abs(X)[n,:] < np.abs(X)[n-1,:]) X_plus[n, idx] = 0 novidade_complexo = np.sum(X_plus, axis=1) plt.plot(novidade_complexo)
onset_detection/funcoes_novidade.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZrwVQsM9TiUw" # ##### Copyright 2019 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + cellView="form" id="CpDUTVKYTowI" #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="ltPJCG6pAUoc" # # A Tour of TensorFlow Probability # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/probability/examples/A_Tour_of_TensorFlow_Probability"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/A_Tour_of_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/A_Tour_of_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a 
href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/A_Tour_of_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="WRVR-tGTR31S" # In this Colab, we explore some of the fundamental features of TensorFlow Probability. # + [markdown] id="uiR4-VOt9NFX" # ### Dependencies & Prerequisites # # + id="5UYdUIGU5KJ6" #@title Import { display-mode: "form" } from pprint import pprint import matplotlib.pyplot as plt import numpy as np import seaborn as sns import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_probability as tfp sns.reset_defaults() sns.set_context(context='talk',font_scale=0.7) plt.rcParams['image.cmap'] = 'viridis' # %matplotlib inline tfd = tfp.distributions tfb = tfp.bijectors # + id="di_gCffY43PT" #@title Utils { display-mode: "form" } def print_subclasses_from_module(module, base_class, maxwidth=80): import functools, inspect, sys subclasses = [name for name, obj in inspect.getmembers(module) if inspect.isclass(obj) and issubclass(obj, base_class)] def red(acc, x): if not acc or len(acc[-1]) + len(x) + 2 > maxwidth: acc.append(x) else: acc[-1] += ", " + x return acc print('\n'.join(functools.reduce(red, subclasses, []))) # + [markdown] id="ZLkia-fRu6Qh" # ## Outline # # * TensorFlow # * TensorFlow Probability # * Distributions # * Bijectors # * MCMC # * ...and more! # + [markdown] id="3GMqBNxpp5Jk" # ## Preamble: TensorFlow # # # TensorFlow is a scientific computing library. # # --- # # It supports # * lots of mathematical operations # * efficient vectorized computation # * easy hardware acceleration # * automatic differentiation # + [markdown] id="rC_qZlx10LQF" # ### Vectorization # # * Vectorization makes things fast! 
# * It also means we think a lot about shapes # + id="V2t6mr5Wp4Qp" mats = tf.random.uniform(shape=[1000, 10, 10]) vecs = tf.random.uniform(shape=[1000, 10, 1]) def for_loop_solve(): return np.array( [tf.linalg.solve(mats[i, ...], vecs[i, ...]) for i in range(1000)]) def vectorized_solve(): return tf.linalg.solve(mats, vecs) # Vectorization for the win! # %timeit for_loop_solve() # %timeit vectorized_solve() # + [markdown] id="0i0EJp2i0Nfa" # ### Hardware Acceleration # + id="HZR6I98GtY3B" # Code can run seamlessly on a GPU, just change Colab runtime type # in the 'Runtime' menu. if tf.test.gpu_device_name() == '/device:GPU:0': print("Using a GPU") else: print("Using a CPU") # + [markdown] id="5rfAomye0QGb" # ### Automatic Differentiation # + id="NCLkCcWpuKKh" a = tf.constant(np.pi) b = tf.constant(np.e) with tf.GradientTape() as tape: tape.watch([a, b]) c = .5 * (a**2 + b**2) grads = tape.gradient(c, [a, b]) print(grads[0]) print(grads[1]) # + [markdown] id="tS9F_zwnxe8c" # ## TensorFlow Probability # # *TensorFlow Probability is a library for probabilistic reasoning and statistical analysis in TensorFlow.* # # We support *modeling*, *inference*, and *criticism* through composition of low-level modular components. # # ### Low-level building blocks # * Distributions # * Bijectors # # ### High(er)-level constructs # * Markov chain Monte Carlo # * Probabilistic Layers # * Structural Time Series # * Generalized Linear Models # * Optimizers # + [markdown] id="3a-IYDxm5aXg" # ## Distributions # A `tfp.distributions.Distribution` is a class with two core methods: `sample` and `log_prob`. # # TFP has a lot of distributions! # + id="9MEFOWRX2InJ" print_subclasses_from_module(tfp.distributions, tfp.distributions.Distribution) # + [markdown] id="JoRByxnrir-u" # ### A simple scalar-variate `Distribution` # + id="nPh4HEwuCnRr" # A standard normal normal = tfd.Normal(loc=0., scale=1.) 
print(normal) # + id="tIylCtMDCwnl" # Plot 1000 samples from a standard normal samples = normal.sample(1000) sns.distplot(samples) plt.title("Samples from a standard Normal") plt.show() # + id="Wyn6XnmSh_iI" # Compute the log_prob of a point in the event space of `normal` normal.log_prob(0.) # + id="q67IaJ-qie1u" # Compute the log_prob of a few points normal.log_prob([-1., 0., 1.]) # + [markdown] id="ikAl1XxmI5Ek" # ### Distributions and Shapes # # Numpy `ndarrays` and TensorFlow `Tensors` have *shapes*. # # TensorFlow Probability `Distributions` have *shape semantics* -- we partition shapes into semantically distinct pieces, even though the same chunk of memory (`Tensor`/`ndarray`) is used for the whole everything. # # * **Batch shape** denotes a *collection* of `Distribution`s with distinct parameters # * **Event shape** denotes the shape of *samples* from the `Distribution`. # # We always put batch shapes on the "left" and event shapes on the "right". # + [markdown] id="Bt7D7EgrEdYo" # ### A *batch* of scalar-variate `Distributions` # # Batches are like "vectorized" distributions: independent instances whose computations happen in parallel. # + id="vfY1B4GqDDJo" # Create a batch of 3 normals, and plot 1000 samples from each normals = tfd.Normal([-2.5, 0., 2.5], 1.) # The scale parameter broadacasts! print("Batch shape:", normals.batch_shape) print("Event shape:", normals.event_shape) # + id="iNzTGgKqE5CT" # Samples' shapes go on the left! samples = normals.sample(1000) print("Shape of samples:", samples.shape) # + id="hIRbVOS7OP-g" # Sample shapes can themselves be more complicated print("Shape of samples:", normals.sample([10, 10, 10]).shape) # + id="nqXQ6DEDFsLU" # A batch of normals gives a batch of log_probs. print(normals.log_prob([-2.5, 0., 2.5])) # + id="W1uSOXO8GQB4" # The computation broadcasts, so a batch of normals applied to a scalar # also gives a batch of log_probs. 
print(normals.log_prob(0.)) # + id="nxAGVtnPGkQ4" # Normal numpy-like broadcasting rules apply! xs = np.linspace(-6, 6, 200) try: normals.log_prob(xs) except Exception as e: print("TFP error:", e.message) # + id="ieF2lRhPHxgd" # That fails for the same reason this does: try: np.zeros(200) + np.zeros(3) except Exception as e: print("Numpy error:", e) # + id="nS9qPHdeH0gz" # But this would work: a = np.zeros([200, 1]) + np.zeros(3) print("Broadcast shape:", a.shape) # + id="9w64N4YQH2r5" # And so will this! xs = np.linspace(-6, 6, 200)[..., np.newaxis] # => shape = [200, 1] lps = normals.log_prob(xs) print("Broadcast log_prob shape:", lps.shape) # + id="rfsG1F0FFAWS" # Summarizing visually for i in range(3): sns.distplot(samples[:, i], kde=False, norm_hist=True) plt.plot(np.tile(xs, 3), normals.prob(xs), c='k', alpha=.5) plt.title("Samples from 3 Normals, and their PDF's") plt.show() # + [markdown] id="E05ut7jxix6C" # ### A vector-variate `Distribution` # + id="2zEm7UpFi2S_" mvn = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag = [1., 1.]) print("Batch shape:", mvn.batch_shape) print("Event shape:", mvn.event_shape) # + id="YfT5qzyRjGfg" samples = mvn.sample(1000) print("Samples shape:", samples.shape) # + id="eKdi0DDqjRiA" g = sns.jointplot(samples[:, 0], samples[:, 1], kind='scatter') plt.show() # + [markdown] id="Hh53LM89PQei" # ### A matrix-variate `Distribution` # + id="fHOIJipjItGG" lkj = tfd.LKJ(dimension=10, concentration=[1.5, 3.0]) print("Batch shape: ", lkj.batch_shape) print("Event shape: ", lkj.event_shape) # + id="NkOqJiT6PcOT" samples = lkj.sample() print("Samples shape: ", samples.shape) # + id="BYvY7_2xPecj" fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6, 3)) sns.heatmap(samples[0, ...], ax=axes[0], cbar=False) sns.heatmap(samples[1, ...], ax=axes[1], cbar=False) fig.tight_layout() plt.show() # + [markdown] id="uwJVF06EQ0SK" # ### Gaussian Processes # + id="Y_IpyZlUTlBr" kernel = tfp.math.psd_kernels.ExponentiatedQuadratic() xs = 
np.linspace(-5., 5., 200).reshape([-1, 1]) gp = tfd.GaussianProcess(kernel, index_points=xs) print("Batch shape:", gp.batch_shape) print("Event shape:", gp.event_shape) # + id="mgI2dtZERCSX" upper, lower = gp.mean() + [2 * gp.stddev(), -2 * gp.stddev()] plt.plot(xs, gp.mean()) plt.fill_between(xs[..., 0], upper, lower, color='k', alpha=.1) for _ in range(5): plt.plot(xs, gp.sample(), c='r', alpha=.3) plt.title(r"GP prior mean, $2\sigma$ intervals, and samples") plt.show() # *** Bonus question *** # Why do so many of these functions lie outside the 95% intervals? # + [markdown] id="MVj2wH_wSkwg" # ### GP Regression # + id="n0xgOdM2XstI" # Suppose we have some observed data obs_x = [[-3.], [0.], [2.]] # Shape 3x1 (3 1-D vectors) obs_y = [3., -2., 2.] # Shape 3 (3 scalars) gprm = tfd.GaussianProcessRegressionModel(kernel, xs, obs_x, obs_y) # + id="tqICnZOcUX8y" upper, lower = gprm.mean() + [2 * gprm.stddev(), -2 * gprm.stddev()] plt.plot(xs, gprm.mean()) plt.fill_between(xs[..., 0], upper, lower, color='k', alpha=.1) for _ in range(5): plt.plot(xs, gprm.sample(), c='r', alpha=.3) plt.scatter(obs_x, obs_y, c='k', zorder=3) plt.title(r"GP posterior mean, $2\sigma$ intervals, and samples") plt.show() # + [markdown] id="W6GCupiD5r5C" # ## Bijectors # # Bijectors represent (mostly) invertible, smooth functions. They can be used to transform distributions, preserving the ability to take samples and compute log_probs. They can be in the `tfp.bijectors` module. # # Each bijector implements at least 3 methods: # * `forward`, # * `inverse`, and # * (at least) one of `forward_log_det_jacobian` and `inverse_log_det_jacobian`. # # With these ingredients, we can transform a distribution and still get samples and log probs from the result! 
# # ### In Math, somewhat sloppily # # * $X$ is a random variable with pdf $p(x)$ # * $g$ is a smooth, invertible function on the space of $X$'s # * $Y = g(X)$ is a new, transformed random variable # * $p(Y=y) = p(X=g^{-1}(y)) \cdot |\nabla g^{-1}(y)|$ # # ### Caching # Bijectors also cache the forward and inverse computations, and log-det-Jacobians, which allows us to save # repeating potentially very expensive operations! # + id="OUKjqlHi5BQm" print_subclasses_from_module(tfp.bijectors, tfp.bijectors.Bijector) # + [markdown] id="z9dPFaOVWTPr" # ### A Simple `Bijector` # + id="ChhpOmKiYres" normal_cdf = tfp.bijectors.NormalCDF() xs = np.linspace(-4., 4., 200) plt.plot(xs, normal_cdf.forward(xs)) plt.show() # + id="DfLTIp12UGjn" plt.plot(xs, normal_cdf.forward_log_det_jacobian(xs, event_ndims=0)) plt.show() # + [markdown] id="0Lc38b8uWnzT" # ### A `Bijector` transforming a `Distribution` # + id="m4DeJGmlCJ7j" exp_bijector = tfp.bijectors.Exp() log_normal = exp_bijector(tfd.Normal(0., .5)) samples = log_normal.sample(1000) xs = np.linspace(1e-10, np.max(samples), 200) sns.distplot(samples, norm_hist=True, kde=False) plt.plot(xs, log_normal.prob(xs), c='k', alpha=.75) plt.show() # + [markdown] id="A9MiFjxcW9C_" # ### Batching `Bijectors` # + id="SrbhJ9uhZry9" # Create a batch of bijectors of shape [3,] softplus = tfp.bijectors.Softplus( hinge_softness=[1., .5, .1]) print("Hinge softness shape:", softplus.hinge_softness.shape) # + id="qk_wOqJKXUaS" # For broadcasting, we want this to be shape [200, 1] xs = np.linspace(-4., 4., 200)[..., np.newaxis] ys = softplus.forward(xs) print("Forward shape:", ys.shape) # + id="UazXAovhXHF-" # Visualization lines = plt.plot(np.tile(xs, 3), ys) for line, hs in zip(lines, softplus.hinge_softness): line.set_label("Softness: %1.1f" % hs) plt.legend() plt.show() # + [markdown] id="X2IK1UP7qeIJ" # ### Caching # + id="g20uziHzqdJw" # This bijector represents a matrix outer product on the forward pass, # and a cholesky decomposition on 
the inverse pass. The latter costs O(N^3)! bij = tfb.CholeskyOuterProduct() size = 2500 # Make a big, lower-triangular matrix big_lower_triangular = tf.eye(size) # Squaring it gives us a positive-definite matrix big_positive_definite = bij.forward(big_lower_triangular) # Caching for the win! # %timeit bij.inverse(big_positive_definite) # %timeit tf.linalg.cholesky(big_positive_definite) # + [markdown] id="lm6nizQNbTOx" # ## MCMC # # TFP has built in support for some standard Markov chain Monte Carlo algorithms, including Hamiltonian Monte Carlo. # + [markdown] id="Ol1vILKog0tf" # ### Generate a data set # + id="5lZhXdbgbSwP" # Generate some data def f(x, w): # Pad x with 1's so we can add bias via matmul x = tf.pad(x, [[1, 0], [0, 0]], constant_values=1) linop = tf.linalg.LinearOperatorFullMatrix(w[..., np.newaxis]) result = linop.matmul(x, adjoint=True) return result[..., 0, :] num_features = 2 num_examples = 50 noise_scale = .5 true_w = np.array([-1., 2., 3.]) xs = np.random.uniform(-1., 1., [num_features, num_examples]) ys = f(xs, true_w) + np.random.normal(0., noise_scale, size=num_examples) # + id="AmaqGeiraFSj" # Visualize the data set plt.scatter(*xs, c=ys, s=100, linewidths=0) grid = np.meshgrid(*([np.linspace(-1, 1, 100)] * 2)) xs_grid = np.stack(grid, axis=0) fs_grid = f(xs_grid.reshape([num_features, -1]), true_w) fs_grid = np.reshape(fs_grid, [100, 100]) plt.colorbar() plt.contour(xs_grid[0, ...], xs_grid[1, ...], fs_grid, 20, linewidths=1) plt.show() # + [markdown] id="pvBO5CMVg30Z" # ### Define our joint log-prob function # # The unnormalized posterior is the result of closing over the data to form a # [partial application](https://en.wikipedia.org/wiki/Partial_application) of the joint log prob. # + id="UY56HpEaduUV" # Define the joint_log_prob function, and our unnormalized posterior. 
def joint_log_prob(w, x, y): # Our model in maths is # w ~ MVN([0, 0, 0], diag([1, 1, 1])) # y_i ~ Normal(w @ x_i, noise_scale), i=1..N rv_w = tfd.MultivariateNormalDiag( loc=np.zeros(num_features + 1), scale_diag=np.ones(num_features + 1)) rv_y = tfd.Normal(f(x, w), noise_scale) return (rv_w.log_prob(w) + tf.reduce_sum(rv_y.log_prob(y), axis=-1)) # + id="lgL8c1nKjSi8" # Create our unnormalized target density by currying x and y from the joint. def unnormalized_posterior(w): return joint_log_prob(w, xs, ys) # + [markdown] id="45TMHGcUhS9l" # ### Build HMC TransitionKernel and call sample_chain # + id="T9Myqb0Yjph3" # Create an HMC TransitionKernel hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=unnormalized_posterior, step_size=np.float64(.1), num_leapfrog_steps=2) # + id="JBuIs-IbedWo" # We wrap sample_chain in tf.function, telling TF to precompile a reusable # computation graph, which will dramatically improve performance. @tf.function def run_chain(initial_state, num_results=1000, num_burnin_steps=500): return tfp.mcmc.sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=initial_state, kernel=hmc_kernel, trace_fn=lambda current_state, kernel_results: kernel_results) # + id="cR_fZpvY0dUJ" initial_state = np.zeros(num_features + 1) samples, kernel_results = run_chain(initial_state) print("Acceptance rate:", kernel_results.is_accepted.numpy().mean()) # + [markdown] id="o2CKVvkq0uLi" # *That's not great! We'd like an acceptance rate closer to .65.* # # (see ["Optimal Scaling for Various Metropolis-Hastings Algorithms"](https://projecteuclid.org/euclid.ss/1015346320), Roberts & Rosenthal, 2001) # # + [markdown] id="WUb36P8JhXSw" # ### Adaptive step sizes # # We can wrap our HMC TransitionKernel in a `SimpleStepSizeAdaptation` "meta-kernel", which will apply some (rather simple heuristic) logic to adapt the HMC step size during burnin. 
We allot 80% of burnin for adapting step size, and then let the remaining 20% go just to mixing. # + id="QzAocJeU0wib" # Apply a simple step size adaptation during burnin @tf.function def run_chain(initial_state, num_results=1000, num_burnin_steps=500): adaptive_kernel = tfp.mcmc.SimpleStepSizeAdaptation( hmc_kernel, num_adaptation_steps=int(.8 * num_burnin_steps), target_accept_prob=np.float64(.65)) return tfp.mcmc.sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=initial_state, kernel=adaptive_kernel, trace_fn=lambda cs, kr: kr) # + id="oZwdWBF114E2" samples, kernel_results = run_chain( initial_state=np.zeros(num_features+1)) print("Acceptance rate:", kernel_results.inner_results.is_accepted.numpy().mean()) # + id="di0E8pYy0UOD" # Trace plots colors = ['b', 'g', 'r'] for i in range(3): plt.plot(samples[:, i], c=colors[i], alpha=.3) plt.hlines(true_w[i], 0, 1000, zorder=4, color=colors[i], label="$w_{}$".format(i)) plt.legend(loc='upper right') plt.show() # Histogram of samples for i in range(3): sns.distplot(samples[:, i], color=colors[i]) ymax = plt.ylim()[1] for i in range(3): plt.vlines(true_w[i], 0, ymax, color=colors[i]) plt.ylim(0, ymax) plt.show() # + [markdown] id="wgBUkL1OKJn4" # ### Diagnostics # # Trace plots are nice, but diagnostics are nicer! # # First we need to run multiple chains. This is as simple as giving a batch of # `initial_state` tensors. # + id="IqPcDhD7H3Dw" # Instead of a single set of initial w's, we create a batch of 8. num_chains = 8 initial_state = np.zeros([num_chains, num_features + 1]) chains, kernel_results = run_chain(initial_state) r_hat = tfp.mcmc.potential_scale_reduction(chains) print("Acceptance rate:", kernel_results.inner_results.is_accepted.numpy().mean()) print("R-hat diagnostic (per latent variable):", r_hat.numpy()) # + [markdown] id="TjCHHvEL78by" # ### Sampling the noise scale # + id="_WLdLT875OpQ" # Define the joint_log_prob function, and our unnormalized posterior. 
def joint_log_prob(w, sigma, x, y): # Our model in maths is # w ~ MVN([0, 0, 0], diag([1, 1, 1])) # y_i ~ Normal(w @ x_i, noise_scale), i=1..N rv_w = tfd.MultivariateNormalDiag( loc=np.zeros(num_features + 1), scale_diag=np.ones(num_features + 1)) rv_sigma = tfd.LogNormal(np.float64(1.), np.float64(5.)) rv_y = tfd.Normal(f(x, w), sigma[..., np.newaxis]) return (rv_w.log_prob(w) + rv_sigma.log_prob(sigma) + tf.reduce_sum(rv_y.log_prob(y), axis=-1)) # Create our unnormalized target density by currying x and y from the joint. def unnormalized_posterior(w, sigma): return joint_log_prob(w, sigma, xs, ys) # Create an HMC TransitionKernel hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=unnormalized_posterior, step_size=np.float64(.1), num_leapfrog_steps=4) # Create a TransformedTransitionKernl transformed_kernel = tfp.mcmc.TransformedTransitionKernel( inner_kernel=hmc_kernel, bijector=[tfb.Identity(), # w tfb.Invert(tfb.Softplus())]) # sigma # Apply a simple step size adaptation during burnin @tf.function def run_chain(initial_state, num_results=1000, num_burnin_steps=500): adaptive_kernel = tfp.mcmc.SimpleStepSizeAdaptation( transformed_kernel, num_adaptation_steps=int(.8 * num_burnin_steps), target_accept_prob=np.float64(.75)) return tfp.mcmc.sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=initial_state, kernel=adaptive_kernel, seed=(0, 1), trace_fn=lambda cs, kr: kr) # Instead of a single set of initial w's, we create a batch of 8. 
num_chains = 8 initial_state = [np.zeros([num_chains, num_features + 1]), .54 * np.ones([num_chains], dtype=np.float64)] chains, kernel_results = run_chain(initial_state) r_hat = tfp.mcmc.potential_scale_reduction(chains) print("Acceptance rate:", kernel_results.inner_results.inner_results.is_accepted.numpy().mean()) print("R-hat diagnostic (per w variable):", r_hat[0].numpy()) print("R-hat diagnostic (sigma):", r_hat[1].numpy()) # + id="fkQVcO1O9YSA" w_chains, sigma_chains = chains # Trace plots of w (one of 8 chains) colors = ['b', 'g', 'r', 'teal'] fig, axes = plt.subplots(4, num_chains, figsize=(4 * num_chains, 8)) for j in range(num_chains): for i in range(3): ax = axes[i][j] ax.plot(w_chains[:, j, i], c=colors[i], alpha=.3) ax.hlines(true_w[i], 0, 1000, zorder=4, color=colors[i], label="$w_{}$".format(i)) ax.legend(loc='upper right') ax = axes[3][j] ax.plot(sigma_chains[:, j], alpha=.3, c=colors[3]) ax.hlines(noise_scale, 0, 1000, zorder=4, color=colors[3], label=r"$\sigma$".format(i)) ax.legend(loc='upper right') fig.tight_layout() plt.show() # Histogram of samples of w fig, axes = plt.subplots(4, num_chains, figsize=(4 * num_chains, 8)) for j in range(num_chains): for i in range(3): ax = axes[i][j] sns.distplot(w_chains[:, j, i], color=colors[i], norm_hist=True, ax=ax, hist_kws={'alpha': .3}) for i in range(3): ax = axes[i][j] ymax = ax.get_ylim()[1] ax.vlines(true_w[i], 0, ymax, color=colors[i], label="$w_{}$".format(i), linewidth=3) ax.set_ylim(0, ymax) ax.legend(loc='upper right') ax = axes[3][j] sns.distplot(sigma_chains[:, j], color=colors[3], norm_hist=True, ax=ax, hist_kws={'alpha': .3}) ymax = ax.get_ylim()[1] ax.vlines(noise_scale, 0, ymax, color=colors[3], label=r"$\sigma$".format(i), linewidth=3) ax.set_ylim(0, ymax) ax.legend(loc='upper right') fig.tight_layout() plt.show() # + [markdown] id="AvPb53zUkVlh" # ## There's a lot more! 
# # Check out these cool blog posts and examples: # # * Structural Time Series support [blog](https://medium.com/tensorflow/structural-time-series-modeling-in-tensorflow-probability-344edac24083) [colab](https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Structural_Time_Series_Modeling_Case_Studies_Atmospheric_CO2_and_Electricity_Demand.ipynb) # * Probabilistic Keras Layers (input: Tensor, output: Distribution!) [blog](https://medium.com/tensorflow/regression-with-probabilistic-layers-in-tensorflow-probability-e46ff5d37baf) [colab](https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb) # * Another Layers example: VAEs [blog](https://medium.com/tensorflow/variational-autoencoders-with-tensorflow-probability-layers-d06c658931b7) [colab](https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb) # * Gaussian Process Regression [colab](https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb) and Latent Variable Modeling [colab](https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb) # # More examples and notebooks on our GitHub [here](https://github.com/tensorflow/probability/tree/master/tensorflow_probability/examples)!
site/en-snapshot/probability/examples/A_Tour_of_TensorFlow_Probability.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (ox) # language: python # name: python3 # --- # # Routing, speed imputation, and travel times # # Including parallelized shortest-path solving via built-in multiprocessing in OSMnx. # # Author: [<NAME>](https://geoffboeing.com/) # # - [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/) # - [GitHub repo](https://github.com/gboeing/osmnx) # - [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples) # - [Documentation](https://osmnx.readthedocs.io/en/stable/) # - [Journal article/citation](http://geoffboeing.com/publications/osmnx-complex-street-networks/) # + import numpy as np import osmnx as ox # %matplotlib inline np.random.seed(0) ox.config(use_cache=True, log_console=False) ox.__version__ # - place = "Piedmont, California, USA" G = ox.graph_from_place(place, network_type="drive") Gp = ox.project_graph(G) # ## 1. Fast nearest node/edge search with OSMnx # # The nearest_nodes and nearest_edges functions take arrays of x and y (or lng/lat) coordinates and return the nearest node/edge to each. # randomly sample n points spatially-constrained to the network's geometry points = ox.utils_geo.sample_points(ox.get_undirected(Gp), n=100) X = points.x.values Y = points.y.values X0 = X.mean() Y0 = Y.mean() # find each nearest node to several points, and optionally return distance nodes, dists = ox.nearest_nodes(Gp, X, Y, return_dist=True) # or, find the nearest node to a single point node = ox.nearest_nodes(Gp, X0, Y0) node # find each nearest edge to several points, and optionally return distance edges, dists = ox.nearest_edges(Gp, X, Y, return_dist=True) # find the nearest edge to a single point edge = ox.nearest_edges(Gp, X0, Y0) edge # ## 2. Basic routing by distance # # Pick two nodes. 
Then find the shortest path between origin and destination, using weight='length' to find the shortest path by minimizing distance traveled (otherwise it treats each edge as weight=1). # find the shortest path (by distance) between these nodes then plot it orig = list(G)[0] dest = list(G)[120] route = ox.shortest_path(G, orig, dest, weight="length") fig, ax = ox.plot_graph_route(G, route, route_color="y", route_linewidth=6, node_size=0) # Or get *k* shortest paths, weighted by some attribute: routes = ox.k_shortest_paths(G, orig, dest, k=30, weight="length") fig, ax = ox.plot_graph_routes(G, list(routes), route_colors="y", route_linewidth=4, node_size=0) # ## 3. Imputing travel speeds and times # # The `add_edge_speeds` function add edge speeds (km per hour) to graph as new `speed_kph` edge attributes. Imputes free-flow travel speeds for all edges based on mean `maxspeed` value of edges, per highway type. This mean-imputation can obviously be imprecise, and the caller can override it by passing in `hwy_speeds` and/or `fallback` arguments that correspond to local speed limit standards. See docstring for details. 
# + # impute speed on all edges missing data G = ox.add_edge_speeds(G) # calculate travel time (seconds) for all edges G = ox.add_edge_travel_times(G) # - # see mean speed/time values by road type edges = ox.graph_to_gdfs(G, nodes=False) edges["highway"] = edges["highway"].astype(str) edges.groupby("highway")[["length", "speed_kph", "travel_time"]].mean().round(1) # same thing again, but this time pass in a few default speed values (km/hour) # to fill in edges with missing `maxspeed` from OSM hwy_speeds = {"residential": 35, "secondary": 50, "tertiary": 60} G = ox.add_edge_speeds(G, hwy_speeds) G = ox.add_edge_travel_times(G) # calculate two routes by minimizing travel distance vs travel time orig = list(G)[1] dest = list(G)[120] route1 = ox.shortest_path(G, orig, dest, weight="length") route2 = ox.shortest_path(G, orig, dest, weight="travel_time") # plot the routes fig, ax = ox.plot_graph_routes( G, routes=[route1, route2], route_colors=["r", "y"], route_linewidth=6, node_size=0 ) # compare the two routes route1_length = int(sum(ox.utils_graph.get_route_edge_attributes(G, route1, "length"))) route2_length = int(sum(ox.utils_graph.get_route_edge_attributes(G, route2, "length"))) route1_time = int(sum(ox.utils_graph.get_route_edge_attributes(G, route1, "travel_time"))) route2_time = int(sum(ox.utils_graph.get_route_edge_attributes(G, route2, "travel_time"))) print("Route 1 is", route1_length, "meters and takes", route1_time, "seconds.") print("Route 2 is", route2_length, "meters and takes", route2_time, "seconds.") # The yellow route minimizes travel time, and is thus longer but faster than the red route. # # For more examples of travel time, see the [isochrones example](13-isolines-isochrones.ipynb). # # For more examples of routing, including using elevation as an impedance, see the [elevations example](12-node-elevations-edge-grades.ipynb). # ## 4. 
Multiprocessing # # Calculating lots of shortest paths can be slow, but OSMnx has built-in shortest path solver parallelization and multiprocessing. With the `shortest_path` function, you can pass in a single origin-destination pair to solve the one shortest path, or you can pass in lists of origins and destinations to solve each shortest path between the pairs. If you're solving shortest paths for multiple origins/destinations, the `cpus` argument determines how many CPU cores to utilize for parallelized solving. Multiprocessing adds some overhead, so it's only faster if you're solving a lot of paths. It also has substantial RAM requirements (as it must copy the graph into each sub-process), so be carefuly with your RAM when setting the `cpus` argument. # calculate 100,000 shortest-path routes using random origin-destination pairs n = 100000 origs = np.random.choice(G.nodes, size=n, replace=True) dests = np.random.choice(G.nodes, size=n, replace=True) # %%time # it takes 3.2 seconds to solve all the routes using all the cores on my computer # I have a 24-thread AMD 5900x: performance will depend on your specific CPU routes = ox.shortest_path(G, origs, dests, weight="travel_time", cpus=None) # %%time # it takes 43 seconds to solve all the routes using just 1 core on my computer routes = ox.shortest_path(G, origs, dests, weight="travel_time", cpus=1) # + # how many total results did we get print(len(routes)) # and how many were solvable paths # some will be unsolvable due to directed graph perimeter effects routes_valid = [r for r in routes if r is not None] print(len(routes_valid)) # - # ## 5. 
Miscellaneous routing notes # The routing correctly handles one-way streets: G2 = ox.graph_from_address( "<NAME>., Chandler, Arizona", dist=800, network_type="drive", truncate_by_edge=True, ) origin = (33.307792, -111.894940) destination = (33.312994, -111.894998) origin_node = ox.distance.nearest_nodes(G2, origin[1], origin[0]) destination_node = ox.distance.nearest_nodes(G2, destination[1], destination[0]) route = ox.shortest_path(G2, origin_node, destination_node) fig, ax = ox.plot_graph_route(G2, route, route_color="c", node_size=0) # Also, when there are parallel edges between nodes in the route, OSMnx picks the shortest edge to plot: location_point = (33.299896, -111.831638) G2 = ox.graph_from_point(location_point, dist=400, truncate_by_edge=True) origin = (33.301821, -111.829871) destination = (33.301402, -111.833108) origin_node = ox.distance.nearest_nodes(G2, origin[1], origin[0]) destination_node = ox.distance.nearest_nodes(G2, destination[1], destination[0]) route = ox.shortest_path(G2, origin_node, destination_node) fig, ax = ox.plot_graph_route(G2, route, route_color="c", node_size=0)
osmnx-examples/02-routing-speed-time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Langages de script - Python # ## Cours 3a - classes, objets, bidules # ### M2 Ingénierie Multilingue - INaLCO # # - <NAME> <<EMAIL>> # - <NAME> <<EMAIL>> # # + [markdown] slideshow={"slide_type": "slide"} # # Au commencement # Au commencement étaient les variables # + slideshow={"slide_type": "-"} x = 27 # + [markdown] slideshow={"slide_type": "subslide"} # Elles représentaient parfois des concepts sophistiqués # + slideshow={"slide_type": "-"} import math point_1 = (27, 13) point_2 = (19, 84) def distance(p1, p2): return math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2) distance(point_1, point_2) # + [markdown] slideshow={"slide_type": "-"} # Et c'était pénible à écrire et à comprendre # + [markdown] slideshow={"slide_type": "subslide"} # Pour simplifier, on peut nommer les données contenues dans variables, par exemple avec un `dict` # + slideshow={"slide_type": "-"} point_1 = {'x': 27, 'y': 13} point_2 = {'x': 19, 'y': 84} def distance(p1, p2): return math.sqrt((p2['x']-p1['x'])**2+(p2['y']-p1['y'])**2) distance(point_1, point_2) # + [markdown] slideshow={"slide_type": "-"} # Et c'est toujours aussi pénible à écrire mais un peu moins à lire # + [markdown] slideshow={"slide_type": "subslide"} # On peut avoir une syntaxe plus agréable en utilisant des tuples nommés # + slideshow={"slide_type": "-"} from collections import namedtuple Point = namedtuple('Point', ('x', 'y')) point_1 = Point(27, 13) point_2 = Point(19, 84) def distance(p1, p2): return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2) distance(point_1, point_2) # + [markdown] slideshow={"slide_type": "-"} # Voilà, le cours est fini, bonnes vacances. 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Peut mieux faire
# - Les trucs créés via `namedtuple` sont ce qu'on appelle des *enregistrements* (en C des *struct*s)
# - Ils permettent de regrouper de façon lisibles des données qui vont ensemble
#   - Abscisse et ordonnée d'un point
#   - Année, mois et jour d'une date
#   - ~~Signifiant et signifié~~ Prénom et nom d'une personne

# + [markdown] slideshow={"slide_type": "subslide"}
# - Leur utilisation (comme tout le reste d'ailleurs) est **facultative** : on vit très bien en assembleur
# - Mais ils permettent de rendre le code bien plus lisible (et écrivable)
# - Et ils sont rétrocompatibles avec les tuples normaux

# +
def mon_max(lst):
    """Renvoie le maximum d'une liste et son indice."""
    res, arg_res = lst[0], 0
    # start=1: we skip lst[0], so indices of the slice are shifted by one
    # with respect to lst — without it the returned index is off by one.
    for i, e in enumerate(lst[1:], start=1):
        if e > res:
            res = e
            arg_res = i
    return res, arg_res

def bidule(lst1, lst2):
    return lst2[mon_max(lst1)[1]]

bidule([2,7,1,3], [1,2,4,8])

# + [markdown] slideshow={"slide_type": "subslide"}
# Si on convertit `mon_max` pour renvoyer un tuple nommé, on peut continuer à utiliser `bidule`

# +
MaxRet = namedtuple('MaxRet', ('value', 'indice'))

def mon_max(lst):
    """Renvoie le maximum d'une liste et son indice."""
    res, arg_res = lst[0], 0
    # Same off-by-one caveat as above: enumerate from 1 to index into lst.
    for i, e in enumerate(lst[1:], start=1):
        if e > res:
            res = e
            arg_res = i
    return MaxRet(res, arg_res)

def bidule(lst1, lst2):
    """Renvoie la valeur de lst2 à l'indice où lst1 atteint son max"""
    return lst2[mon_max(lst1)[1]]

# Demo: bidule still works unchanged with the namedtuple-returning mon_max.
# (The previous call referenced `conjugue`, which is defined nowhere — a
# paste error; the narrative above says we keep using `bidule`.)
bidule([2,7,1,3], [1,2,4,8])
# -

# Vous êtes **fortement** encouragé⋅e⋅s à utiliser des tuples nommés quand vous écrivez une fonction qui renvoie plusieurs valeurs.
# + slideshow={"slide_type": "subslide"} Vecteur = namedtuple('Vecteur', ('x', 'y')) v1 = Vecteur(27, 13) v2 = Vecteur(1, 0) def norm(v): return math.sqrt(v.x**2 + v.y**2) def is_unit(v): return norm(v) == 1 print(is_unit(v1)) print(is_unit(v2)) # + [markdown] slideshow={"slide_type": "-"} # C'est plutôt lisible # + [markdown] slideshow={"slide_type": "subslide"} # Mais si je veux pouvoir faire aussi de la 3d # + slideshow={"slide_type": "-"} Vecteur3D = namedtuple('Vecteur3D', ('x', 'y', 'z')) u1 = Vecteur3D(27, 13, 6) u2 = Vecteur3D(1, 0, 0) def norm3d(v): return math.sqrt(v.x**2 + v.y**2 + v.z**2) def is_unit3d(v): return norm3d(v) == 1 print(is_unit3d(u1)) print(is_unit3d(u2)) # - # C'est affreusement pénible de réécrire comme ça le même code. # + [markdown] slideshow={"slide_type": "subslide"} # Une autre solution # + def norm(v): if isinstance(v, Vecteur3D): return math.sqrt(v.x**2 + v.y**2 + v.z**2) elif isinstance(v, Vecteur): return math.sqrt(v.x**2 + v.y**2) else: raise ValueError('Type non supporté') def is_unit(v): return norm(v) == 1 print(is_unit(v1)) print(is_unit(u2)) # + [markdown] slideshow={"slide_type": "-"} # C'est un peu mieux mais pas top. (Même si on aurait pu trouver une solution plus intelligente) # + [markdown] slideshow={"slide_type": "slide"} # ## Ces fameux objets # Une des solutions pour faire mieux c'est de passer à la vitesse supérieure : les objets. # # Ça va d'abord être un peu plus désagréable, pour ensuite être beaucoup plus agréable. 
# + slideshow={"slide_type": "-"} class Vecteur: def __init__(self, x, y): self.x = x self.y = y def norm(self): return math.sqrt(self.x**2 + self.y**2) v1 = Vecteur(27, 13) v2 = Vecteur(1, 0) print(v1.norm()) print(v2.norm()) # + slideshow={"slide_type": "subslide"} class Vecteur3D: def __init__(self, x, y, z): self.x = x self.y = y self.z = z def norm(self): return math.sqrt(self.x**2 + self.y**2 + self.z**2) u1 = Vecteur3D(27, 13, 6) u2 = Vecteur3D(1, 0, 0) print(u1.norm()) print(u2.norm()) # + slideshow={"slide_type": "subslide"} def is_unit(v): return v.norm() == 1 print(is_unit(v1)) print(is_unit(u2)) # + [markdown] slideshow={"slide_type": "-"} # Le choix de la bonne fonction `norme` se fait automagiquement # + [markdown] slideshow={"slide_type": "subslide"} # Résumons # - Un objet, c'est un bidule qui regroupe # - Des données (on dit *attributs* ou *propriétés*) # - Des fonctions (on dit des *méthodes*) # - Ça permet d'organiser son code de façon plus lisible et plus facilement réutilisable (croyez moi sur parole) # # Et vous en avez déjà rencontré plein # - print(type('abc')) print('abc'.islower()) # Car en Python, tout est objet. Ce qui ne veut pas dire qu'on est obligé d'y faire attention… # + [markdown] slideshow={"slide_type": "slide"} # ## POO # # La programmation orientée objet (POO) est une manière de programmer différente de la programmation procédurale vue jusqu'ici. 
# # - Les outils de base sont les objets et les classes # - Un concept → une classe, une réalisation concrète → un objet # # C'est une façon particulière de résoudre les problèmes, on parle de *paradigme*, et il y en a d'autres # # - Fonctionnel : les outils de base sont les fonctions # - Impérative : les outils de base sont les structures de contrôle (boucles, tests…) # # Python fait partie des langages multi-paradigmes : on utilise le plus pratique, ce qui n'est pas sans déplaire aux puristes mais # # « *We are all consenting adults here* » # + [markdown] slideshow={"slide_type": "slide"} # ## Classes # * On définit une classe en utilisant le mot-clé `class` # * Par conventions, les noms de classe s'écrivent avec des majuscules (CapWords convention) # # + slideshow={"slide_type": "-"} class Word: """ Classe Word : définit un mot de la langue """ pass # + [markdown] slideshow={"slide_type": "subslide"} # Pour créer un objet, on appelle simplement sa classe comme une fonction # + slideshow={"slide_type": "-"} word1 = Word() print(type(word1)) # renvoie la classe qu'instancie l'objet # + [markdown] slideshow={"slide_type": "-"} # On dit que `word1` est une *instance* de la classe `Word` # + [markdown] slideshow={"slide_type": "subslide"} # Et il a déjà des attributs et des méthodes # - word1.__doc__ print(dir(word1)) # Et aussi un identifiant unique id(word1) word2 = Word() id(word2) # + [markdown] slideshow={"slide_type": "subslide"} # # ## Constructeur et attributs # # * Il existe une méthode spéciale `__init__()` qui automatiquement appelée lors de la création d'un objet. 
C'est le constructeur # # * Le constructeur permet de définir un état initial à l'objet, lui donner des attributs par exemple # # * Les attributs dans l'exemple ci-dessous sont des variables propres à un objet, une instance # # + slideshow={"slide_type": "-"} class Word: """ Classe Word : définit un mot de la langue """ def __init__(self, form, lemma, pos): self.form = form self.lemma = lemma self.pos = pos word = Word('été', 'être', 'V') word.lemma # - word2 = Word('été', 'été', 'NOM') word2.lemma # + [markdown] slideshow={"slide_type": "slide"} # ## Méthodes # # * Les méthodes d'une classe sont des fonctions. Elles indiquent quelles actions peut mener un objet, elles peuvent donner des informations sur l'objet ou encore le modifier. # * Par convention, on nomme `self` leur premier paramètre, qui fera référence à l'objet lui-même. # # + slideshow={"slide_type": "-"} class Word: """ Classe Word : définit un mot simple de la langue """ def __init__(self, form, lemma, pos): self.form = form self.lemma = lemma self.pos = pos def is_inflected(self): if self.form != self.lemma: return True else: return False w = Word('orientales', 'oriental', 'adj') w.is_inflected() # + [markdown] slideshow={"slide_type": "subslide"} # Pourquoi `self` ? Parce que écrire `w.is_inflected()` c'est du sucre pour # - Word.is_inflected(w) # + [markdown] slideshow={"slide_type": "slide"} # # Héritage # # + slideshow={"slide_type": "-"} class Cake: """ un beau gâteau """ def __init__(self, farine, oeuf, beurre): self.farine = farine self.oeuf = oeuf self.beurre = beurre def is_trop_gras(self): if self.farine + self.beurre > 500: return True else: return False def cuire(self): return self.beurre / self.oeuf # + [markdown] slideshow={"slide_type": "-"} # Cake est la classe mère. # # Les classes enfants vont hériter de ses méthodes et de ses attributs. # # Cela permet de factoriser le code, d'éviter les répétitions et les erreurs qui en découlent. 
# # + slideshow={"slide_type": "subslide"} class CarrotCake(Cake): """ pas seulement pour les lapins hérite de Cake """ carotte = 3 def cuire(self): return self.carotte * self.oeuf class ChocolateCake(Cake): """ LE gâteau hérite de Cake """ def is_trop_gras(self): return False gato_1 = CarrotCake(200, 3, 150) gato_1.is_trop_gras() >>> False gato_1.cuire() >>> 9 gato_2 = ChocolateCake(200, 6, 300) gato_2.is_trop_gras() >>> False
slides/3a-oop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory analysis of the house-price dataset.
import pandas
from pandas.plotting import scatter_matrix

# Load the spreadsheet and peek at the first ten rows.
df = pandas.read_excel('house_price.xlsx')
df[:10]

# Summary statistics for every numeric column.
df.describe()

# Per-column histograms.
df.hist(figsize=(20, 20))

# Pairwise scatter plots with KDE on the diagonal.
scatter_matrix(df, figsize=(20, 20), diagonal='kde')

# Row counts per house type.
df.groupby('house_type').count()

# Column means per house type.
df.groupby('house_type').mean()

# Price and views distributions, broken down by house type.
df.groupby('house_type').hist(figsize=(12, 4), column=['price', 'views'])

# Price against construction year.
df.plot.scatter(x='built_in', y='price')

# Derive a price-per-area column, then plot its mean by construction year.
df['unit_price'] = df['price'] / df['area']
mean_price = df.groupby('built_in').mean()
mean_price.plot.line(y='unit_price')
Lab 2 Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/banila1007/AppIntro/blob/master/ECAI_2020_KGE_Tutorial_Hands_on_Session.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="hTkVsbcKegOD" # --- # # **Knowledge Graph Embeddings: From Theory to Practice** # ### ECAI 2020 Tutorials, September 4th 2020, Friday, 13:45-17:00 CEST # ###(Hands-on Session) # # <u>**Contents**</u> # - Loading a KG and creating train/test splits # - Training and evaluating a KGE Model # - Testing user hypothesis # - Early stopping and types of evaluation # - Choosing model hyperparameters # - Discovering facts using trained model # - Visualizing embeddings and Clustering # # + [markdown] id="17mJcCLovIkx" # # 1. Introduction and Preliminaries # # For this hands-on tutorial, we will be using the open-source library [AmpliGraph](https://github.com/Accenture/AmpliGraph). # # Let's start by installing the library and it's dependencies, and then importing the libraries used in this tutorial. # + id="NoUV52ke_3fz" # If running on local system execute this cell # Install CUDA # #! conda install -y cudatoolkit=10.0 # Install cudnn libraries # This library improves the performance of tensorflow, however, we need to give up determinism over speed. # #! conda install cudnn=7.6 # Install tensorflow GPU # #! 
pip install tensorflow-gpu==1.15.3 # + id="4dcvCAfyDc5S" # If using Google Colab run this cell # select tensorflow version for colab # %tensorflow_version 1.x # + [markdown] id="zR0IriwbE63d" # Let us check if tensorflow is correctly installed and if we can access the GPU # + id="vs4xn9CWE5Yx" import tensorflow as tf print('TensorFlow version: {}'.format(tf.__version__)) # Get the GPU name device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) # + [markdown] id="UM6Awy5WFUVA" # Let's install AmpliGraph and other dependencies # + id="Lgs8cTcu9hUM" # %%capture # Install AmpliGraph library # ! pip install ampligraph # Required to visualize embeddings with tensorboard projector, comment out if not required! # ! pip install --user tensorboard # Required to plot text on embedding clusters, comment out if not required! # ! pip install --user git+https://github.com/Phlya/adjustText # + id="KqGjJ_SYFxIH" # All imports used in this tutorial # %tensorflow_version 1.x import ampligraph import numpy as np import pandas as pd import tensorflow as tf from ampligraph.datasets import load_fb15k_237 from ampligraph.evaluation import train_test_split_no_unseen, evaluate_performance, mr_score, mrr_score, hits_at_n_score from ampligraph.discovery import query_topn, discover_facts, find_clusters from ampligraph.latent_features import TransE, ComplEx, HolE, DistMult, ConvE, ConvKB from ampligraph.utils import save_model, restore_model def display_aggregate_metrics(ranks): print('Mean Rank:', mr_score(ranks)) print('Mean Reciprocal Rank:', mrr_score(ranks)) print('Hits@1:', hits_at_n_score(ranks, 1)) print('Hits@10:', hits_at_n_score(ranks, 10)) print('Hits@100:', hits_at_n_score(ranks, 100)) print('Ampligraph version: {}'.format(ampligraph.__version__)) # + [markdown] id="E2zQO7QqjYQW" # --- # # 2. 
Loading a Knowledge Graph dataset # # To begin with we're going to need a knowledge graph, so let's load a standard knowledge graph called ***Freebase-15k-237***. # # Ampligraph provides a set of APIs to [load standard knowledge graphs](https://docs.ampligraph.org/en/1.3.1/ampligraph.datasets.html#benchmark-datasets-loaders). # # Also provided are a set of APIs load csv, ntriples and rdf formats. Details can be found [here](https://docs.ampligraph.org/en/1.3.1/ampligraph.datasets.html#loaders-for-custom-knowledge-graphs) # # + id="IYLn3NXKegOm" from ampligraph.datasets import load_fb15k_237, load_wn18rr, load_yago3_10 # + [markdown] id="MKrD7K04egPS" # # For this tutorial we have remapped the IDs of freebase 237 and created a csv file containing human readable names instead of IDs. # + id="cHzhvBhbegPX" import pandas as pd URL = 'https://ampgraphenc.s3-eu-west-1.amazonaws.com/datasets/freebase-237-merged-and-remapped.csv' dataset = pd.read_csv(URL, header=None) dataset.columns = ['subject', 'predicate', 'object'] dataset.head(5) # + id="4pgpcidHegQC" print('Total triples in the KG:', dataset.shape) # + [markdown] id="flR0xOXmegP9" # # ![KG](https://user-images.githubusercontent.com/39597669/90747195-9fc44c80-e2c8-11ea-9f70-097993581bac.png) # # + [markdown] id="PQJkziMCegQL" # # ## 2.1 Create training, validation and test splits # # Let's use the [`train_test_split_no_unseen`](https://docs.ampligraph.org/en/1.3.1/generated/ampligraph.evaluation.train_test_split_no_unseen.html?#train-test-split-no-unseen) function provided by Ampligraph to create the training, validation and test splits. # # This API ensures that the test and validation splits contains triples whose entities are "seen" during training. 
# # + id="ltijAdQtegQN" from ampligraph.evaluation import train_test_split_no_unseen # get the validation set of size 500 test_train, X_valid = train_test_split_no_unseen(dataset.values, 500, seed=0) # get the test set of size 1000 from the remaining triples X_train, X_test = train_test_split_no_unseen(test_train, 1000, seed=0) print('Total triples:', dataset.shape) print('Size of train:', X_train.shape) print('Size of valid:', X_valid.shape) print('Size of test:', X_test.shape) # + id="qyLU1262mAJL" # + [markdown] id="4Q38cXnYHHI6" # ##**Key Takeaways** # # - `train_test_split_no_unseen` API can be used to generate train/test splits such that test set contains only entities 'seen' during training # + [markdown] id="fgAcQ1g3egQe" # --- # # 3. Model Training # Now that we have split the dataset, let's dive directly into model training. # # Let us create a TransE model and train it on the training split using the `fit` function. # # **TransE** is one of the first embedding models which set the platform for the KGE research. It uses simple vector algebra to score the triples. It has very low number of trainable parameters compared to most models. 
# # <center>$f = - || s + p - o ||_{n}$</center> # # + id="ap1Yd4LEegQg" from ampligraph.latent_features import TransE model = TransE(k=150, # embedding size epochs=100, # Num of epochs batches_count= 10, # Number of batches eta=1, # number of corruptions to generate during training loss='pairwise', loss_params={'margin': 1}, # loss type and it's hyperparameters initializer='xavier', initializer_params={'uniform': False}, # initializer type and it's hyperparameters regularizer='LP', regularizer_params= {'lambda': 0.001, 'p': 3}, # regularizer along with its hyperparameters optimizer= 'adam', optimizer_params= {'lr': 0.001}, # optimizer to use along with its hyperparameters seed= 0, verbose=True) model.fit(X_train) from ampligraph.utils import save_model, restore_model save_model(model, 'TransE-small.pkl') # + [markdown] id="CHtntuS1Jzf8" # You can refer [this link](https://docs.ampligraph.org/en/latest/api.html) for detailed explaination of the parameters and their values. # + [markdown] id="JaJeVr-megQq" # ## 3.1 Compute the evaluation metrics # # ### Per triple metrics: # This is a metric that is computed for each test set triple: # # - **score**: This is the value assigned to a triple, by the model, by applying the scoring function. # # Let's look at how we can get the score for a triple of interest and how to interpret it. # # + id="edjJcTReegQs" test_triple = ['harrison ford', '/film/actor/film./film/performance/film', 'star wars'] triple_score = model.predict(test_triple) print('Triple of interest:\n', test_triple) print('Triple Score:\n', triple_score) # + [markdown] id="mQwswE4qFArO" # But what does this score tell you? Nothing! It is just a value. In order to interpret the score we have 2 options: # # 1. We can create a list of hypothesis that we want to test, score them and then choose the top n hypothesis as True statements. # # 2. As described earlier in the theory section, unlike classification task, we are doing a learning to rank task. 
# In order to interpret the score we can generate the corruptions and compare the triple score
# against the scores of corruptions to see how well the model ranks the test triple against them.
#
#
# Let's look at the first option. Let us create a list of hypothesis and score them.

# + id="zKewmQmp-1od"
import numpy as np

# Candidate subjects for the hypothesis <actor, performed-in, star wars>.
list_of_actors = ['<NAME>',
                  '<NAME>',
                  '<NAME>',
                  '<NAME>',
                  '<NAME>',
                  '<NAME>',
                  '<NAME>'
                  ]

# stack it horizontally to create s, p, o
hypothesis = np.column_stack([list_of_actors,
                              ['/film/actor/film./film/performance/film'] * len(list_of_actors),
                              ['star wars'] * len(list_of_actors),
                              ])

# score the hypothesis
triple_scores = model.predict(hypothesis)

# append the scores column
# NOTE: np.column_stack promotes every column to strings, so the appended score
# column must NOT be sorted as text. Sorting on the string column would order
# lexicographically (and ascending), which is wrong for negative/mixed-magnitude
# scores. Sort on the numeric scores instead, descending (higher = more plausible).
scored_hypothesis = np.column_stack([hypothesis, triple_scores])

# sort by score in descending order
scored_hypothesis = scored_hypothesis[np.argsort(triple_scores)[::-1]]
scored_hypothesis

# + [markdown] id="4UUNCR-p_EmC"
#
# - **rank**: For a triple, this metric is computed by generating corruptions and then scoring them and computing the rank (position) of the triple score against the corruptions. The pseudocode and the example illustrate how to compute rank on the test set.
#
#     for each test set triple <s, p, o>:
#         a. Compute the score of the test triple (hypothesis)
#            hypothesis_score = score(<s, p, o>)
#
#         b. Generate the subject corruptions
#            sub_corr = <?, p, o>
#         c. Compute the score of the subject corruptions
#            sub_corr_score = score(sub_corr)
#         d. Find the position of hypothesis_score in sub_corr_score to get the sub_rank
#
#         e. Generate the object corruption
#            obj_corr = <s, p, ?>
#         f. Compute the score of the object corruptions
#            obj_corr_score = score(obj_corr)
#         g. Find the position of hypothesis_score in obj_corr_score to get the obj_rank
#
#         h.
Return rank = [sub_rank, obj_rank] # # # # ![rank example](https://user-images.githubusercontent.com/281477/90627614-14897f00-e214-11ea-8f8e-d57da9888606.png) # # # # # + [markdown] id="do2yT94Yagds" # ### Illustrative Example # + [markdown] id="dImehz68LHQh" # **Compute the score of the test triple** # + id="dpRVBVn_K-_S" test_triple = ['harrison ford', '/film/actor/film./film/performance/film', 'star wars'] triple_score = model.predict(test_triple) print('Triple of interest:\n', test_triple) print('Triple Score:\n', triple_score) # + [markdown] id="SlLprqLE_Hby" # Before generating the corruptions, let us look at the number of unique entities present in our dataset # + id="PMEBkOW1VoOS" print('The number of unique entities:', len(model.ent_to_idx)) # + [markdown] id="UXBikfbWkuK6" # **Generate the subject *corruptions* and compute rank** # > ```sub_corr = <?, p, o>``` # + id="WE6lw5FB_I_4" subj_corr = np.column_stack([list(model.ent_to_idx.keys()), [test_triple[1]] * len(model.ent_to_idx), [test_triple[2]] * len(model.ent_to_idx)]) print('Subject corruptions:\n', subj_corr) print('\nSize of subject corruptions:\n', subj_corr.shape) # + [markdown] id="vO1HXo4x7t2R" # **Compute the score of the subject corruptions** # + id="N65INkWR_LFx" sub_corr_score = model.predict(subj_corr) # + [markdown] id="Vu7vlKdg_OoM" # Now that we have a score, let us compute the rank as follows: # # <center>$COUNT ( corruption_{score} >= triple_{score} )$</center> # # Find the position of hypothesis_score in sub_corr_score to get the sub_rank # + id="V070u2N2_NBM" sub_rank_worst = np.sum(np.greater_equal(sub_corr_score, triple_score[0])) + 1 print('Assigning the worst rank (to break ties):', sub_rank_worst) # + [markdown] id="ADOTNTGakqAO" # **Generate the object *corruptions* and compute rank** # # > ``` obj_corr = <s, p, ?> ``` # # + id="1MBRrCM0_RRB" obj_corr = np.column_stack([ [test_triple[0]] * len(model.ent_to_idx), [test_triple[1]] * len(model.ent_to_idx), 
list(model.ent_to_idx.keys())]) print('Object corruptions:\n', obj_corr) print('\nSize of object corruptions:\n', obj_corr.shape) # f. Compute the score of the object corruptions obj_corr_score = model.predict(obj_corr) # g. Find the position of hypothesis_score in obj_corr_score to get the obj_rank obj_rank_worst = np.sum(np.less_equal(triple_score[0], obj_corr_score)) + 1 print('Assigning the worst rank (to break ties):', obj_rank_worst) # + id="F2OPFtvC_TJL" print('Subject corruption rank:', sub_rank_worst) print('Object corruption rank:', obj_rank_worst) # + [markdown] id="V-d0JAxjC6ie" # **Computing the (Unfiltered) rank using evaluate_performance API** # # We can use the [evaluate_performance](https://docs.ampligraph.org/en/latest/generated/ampligraph.evaluation.evaluate_performance.html) API to compute the ranks. By default, `evaluate_performance` API computes the unfiltered ranks i.e. if any true positives are present in corruptions, they will not be removed before ranking. However, usually for evaluation, we follow a filtered evaluation as described in the next section. # # + id="<KEY>" from ampligraph.evaluation import evaluate_performance ranks = evaluate_performance(np.array([test_triple]), model=model, ranking_strategy='worst') print('\nRanks:', ranks) # + [markdown] id="KgkpA_BWk_uo" # There are multiple strategies to compute ranks especially when there are ties. Lets look at each of them in detail with an example. # # Assume there are only 10 corruptions, and assume that all the corruptions get the same score as the test triple. The ranks are as follows # - Assign the **worst rank** i.e. the test set triple gets a rank of 11. This is followed by most papers in the literature. This is the strictest approach and it drives down the mrr by a large margin if there are many ties. We employ this strategy in AmpliGraph. # # <center> $rank = COUNT( corruption_{score} \ge hypothesis_{score} )$ + 1</center> # # - Assign the **middle rank** i.e. 
the test set triple gets a rank of 6. We found this strategy being used by [ICLR 2020 paper](https://openreview.net/pdf?id=BkxSmlBFvr). This approach seems to be fair towards the model in resolving the ties as it assigns the middle rank to break ties. # # <center> $rank = COUNT( corruption_{score} \gt hypothesis_{score} ) + \dfrac{COUNT( corruption_{score} == hypothesis_{score} )}{2}$ + 1</center> # # - Assign the **best rank** i.e. the test set triple gets a rank of 1. This approach is followed by [ConvKB paper](https://arxiv.org/pdf/1712.02121.pdf). This approach is overly biased and helps the model achieve a very good mrr in case of ties. # # <center> $rank = COUNT( corruption_{score} \gt hypothesis_{score} )$ + 1</center> # # We recommend the usage of the **worst** strategy (default). # + [markdown] id="PuJpqTDklDux" # ## 3.2 Filtered evaluation # While evaluating ([as described earlier](#Compute-the-evaluation-metrics)), we generate all the corruptions (using all the unique entities in our dataset) per test triple, score and rank them. While doing so, we are not filtering the true positives - in other words, some of the corruptions may not really be corruptions and may be ground truth triples observed during training. Training triples usually get a high score as they are "observed" by the model. Hence a test triple would get a lower rank if such triples appear in corruptions. To filter out the True Positives (after step b. and e.), one can pass all the True Positive triples to `filter_triples` parameter of the `evaluate_performance` API. This will perform a **"filtered" evaluation** and return the **"filtered" ranks** adjusted by removing the True Positives from the corruptions. More details for `evaluate_performance` API can be found [here](https://docs.ampligraph.org/en/latest/generated/ampligraph.evaluation.evaluate_performance.html#ampligraph.evaluation.evaluate_performance). 
# # + id="YLD9MgxkC5Oo" from ampligraph.evaluation import evaluate_performance print('Size of X_test:', X_test.shape) X_filter = np.concatenate([X_train, X_valid, X_test], 0) ranks = evaluate_performance(np.array([test_triple]), model=model, filter_triples=X_filter) print(ranks) # + [markdown] id="CdAw7EUMlYon" # One obvious question is why do we append the Valid and Test set to the X_filter. The model has not "observed" them during training. We do so because, we would like to evaluate a test triple against it's corruptions and not against known facts. If we know that the Validation triples and Test triples are facts (and not queries), we need to filter these triples out of the generated corruptions. This is the standard procedure that is used to compute the metrics to compete on the leadership board. # + [markdown] id="W_YEKhYglae3" # ## 3.3 Aggregate metrics # # # Once we have the ranks for all the test set triples, we can compute the following aggregate metrics: **MR**, **MRR**, **Hits@N**. These metrics indicate the overall quality of the model on a test set. These metrics come from Information Retrieval domain and are always computed on a set of **True Statements**. To illustrate each of these metric let us first create a small test set of 5 triples and compute their ranks. 
# + id="ZXe0FgPeC_Si" X_test_small = np.array( [['doctorate', '/education/educational_degree/people_with_this_degree./education/education/major_field_of_study', 'computer engineering'], ['star wars', '/film/film/estimated_budget./measurement_unit/dated_money_value/currency', 'united states dollar'], ['harry potter and the chamber of secrets', '/film/film/estimated_budget./measurement_unit/dated_money_value/currency', 'united states dollar'], ['star wars', '/film/film/language', 'english language'], ['harrison ford', '/film/actor/film./film/performance/film', 'star wars']]) X_filter = np.concatenate([X_train, X_valid, X_test], 0) ranks = evaluate_performance(X_test_small, model=model, filter_triples=X_filter, corrupt_side='s,o') print(ranks) # + [markdown] id="KKy0b0XVlgRY" # Now let us look at each aggregate metrics in detail: # # - **Mean rank (MR)**, as the name indicates, is the average of all the ranks of the triples. The value ranges from 1 (ideal case when all ranks equal to 1) to number of corruptions (where all the ranks are last). # # ![mr formula](https://user-images.githubusercontent.com/281477/90627586-105d6180-e214-11ea-84d4-c5d3e4b089f4.png) # + id="7aaCDgkFldn6" from ampligraph.evaluation import mr_score print('MR :', mr_score(ranks)) # + [markdown] id="bDyhtqxYYMmv" # - **Mean reciprocal rank (MRR)**, is the average of the reciprocal ranks of all the triples. The value ranges from 0 to 1; higher the value better is the model. # # ![mrr formula](https://user-images.githubusercontent.com/281477/90627604-12272500-e214-11ea-9777-5d30b23f0d6f.png) # + id="8El8j03AYL5d" from ampligraph.evaluation import mrr_score print('MRR :', mrr_score(ranks)) # + [markdown] id="Wcw8Sgc3lmPc" # MRR is an indicator of mean rank after removing the effect of outliers. 
# + id="cbWTp59JlnaH" print('Mean rank after removing the outlier effect: ', np.ceil(1/mrr_score(ranks))) # + [markdown] id="3d_YGuEJlpxw" # - **hits@n** is the percentage of computed ranks that are greater than (in terms of ranking) or equal to a rank of n. The value ranges from 0 to 1; higher the value better is the model. # # ![hits formula](https://user-images.githubusercontent.com/281477/90627565-09365380-e214-11ea-81c8-292a3de016d0.png) # + id="u5YHZs9elojV" from ampligraph.evaluation import hits_at_n_score print('hits@1 :', hits_at_n_score(ranks, 1)) print('hits@10 :', hits_at_n_score(ranks, 10)) # + id="IzvhHy6XIHXI" # print unique entities print('Number of unique entities:', len(model.ent_to_idx)) # + [markdown] id="EzUfheLiOfwU" # **What if, for a model, you observe that on a test set, the MRR score is 0.01? Is it a good model?** # # It is not very straightforward. What the above value means is that if you remove the outlier effect, on an average the ranks are around 100 (1/0.01). It may be a good/bad value. It depends on number of corruptions that you have used for the computation. Say you had 1 million corruptions and yet the mrr score was 0.01. The model, in general, was quite good at ranking against 1 million corruption because on an average it gave a rank of close to 100. But say if the corruptions were only 100 and we had an mrr of 0.01, it means that the model did a very bad task at ranking the test triples against just 100 corruptions. # # On a real life dataset, on should take a closer look at **hits@n** values and decide whether the model is a good model or not. ***The choice of n should depend on the number of corruptions that are being generated per test triple***. If a large percentage of ranks computed on the test set triple falls within the n ranks, then the model can be considered as a good model. 
# + id="GLxF6F1Blxf1"
def display_aggregate_metrics(ranks):
    """Print the aggregate ranking metrics (MR, MRR, Hits@1/10/100) for the given ranks."""
    # Table-driven so the label/metric pairing is visible at a glance.
    metrics = [
        ('Mean Rank:', mr_score(ranks)),
        ('Mean Reciprocal Rank:', mrr_score(ranks)),
        ('Hits@1:', hits_at_n_score(ranks, 1)),
        ('Hits@10:', hits_at_n_score(ranks, 10)),
        ('Hits@100:', hits_at_n_score(ranks, 100)),
    ]
    for label, value in metrics:
        print(label, value)


display_aggregate_metrics(ranks)

# + [markdown] id="HM0ax6rPpbpe"
#
# ## 3.4. Training with early stopping
#
# While training a model, we would like to make sure that the model does not overfit or underfit on the data. If we train a model for a fixed number of epochs, we will not know whether the model has underfit or overfit the training data. Hence it is necessary to test the model performance on a held-out set at regular intervals to decide when to stop training. This is called "Early stopping", i.e. we don't let the model run for a long time but stop much before the performance on the held-out set starts to degrade.
#
# However we also do not want the model to overfit on the held-out set and limit the generalization capabilities of the model. Hence we should create both a validation set and a test set to verify the generalization capability of the model, and to make sure that we don't overfit and underfit on the data.
'corrupt_side':'s,o' # Which sides to corrupt furing early stopping evaluation (default both subject and obj as described earlier) } # create a model as earlier model = TransE(k=100, epochs=10000, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.0001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 1, verbose=True) # call model.fit by passing early stopping params model.fit(X_train, # training set early_stopping=True, # set early stopping to true early_stopping_params=early_stopping_params) # pass the early stopping params # evaluate the model with filter X_filter = np.concatenate([X_train, X_valid, X_test], 0) ranks = evaluate_performance(X_test, model=model, filter_triples=X_filter) # display the metrics display_aggregate_metrics(ranks) # + [markdown] id="Ic7r20ScpS78" # # ## Summary so far # # + id="hBXrmiA1lzjO" # ---------------------- # Generate train/test data # create train/test/valid splits, train the model and evaluate using train_test_split_no_unseen API from ampligraph.evaluation import train_test_split_no_unseen # get the validation set of size 500 test_train, X_valid = train_test_split_no_unseen(dataset.values, 500, seed=0) # get the test set of size 1000 from the remaining triples X_train, X_test = train_test_split_no_unseen(test_train, 1000, seed=0) # ---------------------- # Training: print('Training set:', X_train.shape) # Train a KGE model model = TransE(k=300, epochs=100, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.0001}, seed= 0, batches_count= 10, verbose=True) model.fit(X_train) # ---------------------- # Evaluate: # Filtered evaluation with ranking strategy assigning worst rank to break ties from ampligraph.utils import save_model, restore_model 
save_model(model, 'TransE.pkl') model = restore_model('TransE.pkl') # create the filter X_filter = np.concatenate([X_train, X_valid, X_test], 0) # compute ranks ranks = evaluate_performance(X_test, model=model, filter_triples=X_filter) # ranks are computed per triple print('Test set:', X_test.shape) print('Size of ranks:', ranks.shape) # Aggregate metrics show the aggregate performance of the model on the test set using a single number display_aggregate_metrics(ranks) # ---------------------- # + [markdown] id="gP2K-mHgQ54K" # ##**Key Takeaways** # - `train_test_split_no_unseen` API can be used to generate train/test splits such that test set contains only entities 'seen' during training # - Once a model is trained, one can use `model.predict` to choose from a set of hypothesis based on the scores returned by the model. # - One can access the quality of model on a **test set of True Facts** by using metrics such as MR, MRR and hits@n # - We can use early stopping to prevent model from over/under fitting by using a Validation Set. # + [markdown] id="Yjm3VWG8dzna" # --- # ##Q&A # --- # + [markdown] id="E0NmjxkhiMV5" # # 4. Practical evaluation protocols # + [markdown] id="1dTaNLGeiXCL" # Standard protocols, as described earlier, follow a very strict way of evaluating the test set. We corrupt both the subject and object sides with all the entities present in the KG. Also, when the KG is huge with millions of entities, the standard protocol is not feisible. Due to the large number of corruptions, some of which may be semantically incorrect, it becomes a difficult task for the model while ranking and it may lead to misleading metrics. Hence the standard protocol is not recommended for large KGs. # # Let's now look at some practical ways of evaluating for large KGs. 
# + [markdown] id="7Ibg5h02pYnb" # # # ## 4.1 Evaluating by corrupting specific sides # Let's assume that our test set is made up of triples of type <movie, film_language, language_category> and we want to find if our model can correctly find the language of the movie. # + id="emb5qFfmpZRW" X_test_movie_languages = X_test[X_test[:, 1] == '/film/film/language'] X_test_movie_languages # + [markdown] id="FsBcVe5qpnGU" # With the evaluation shown below, we are using all entities in our dataset and corrupting both subject and object sides of the test triple and returning 2 ranks. # + id="_PAAByzfpZ9c" ranks = evaluate_performance(X_test_movie_languages, model=model, filter_triples=X_filter) display_aggregate_metrics(ranks) print('\nSize of test set:', X_test_movie_languages.shape) print('Size of ranks:', ranks.shape) # + [markdown] id="kN6cFPOxprzp" # This is because [evaluate performance](https://docs.ampligraph.org/en/latest/generated/ampligraph.evaluation.evaluate_performance.html#ampligraph.evaluation.evaluate_performance) with default protocol does the following: # - computes rank by corrupting the subject side (`'s'`) # - computes rank by corrupting the object side (`'o'`) # - returns both the ranks per triple. # # The metrics (such as mrr, mr, hits@n) are computed by flattening and averaging the ranks. # # This is the standard protocol that is usually followed while doing graph completion and is usually adopted for computing the metrics (on traditional datasets like freebase or wordnet) while competing on the leadership board. # # If we want to corrupt specific sides (to suit our use-case), we can do so by passing `corrupt_side` parameter to `evaluate_performance`. It can take on the following values: # - `s` for subject corruption only # - `o` for object corruption only # - `s+o` for subject and object corruption. Returns a single rank. # - `s,o` for subject and object corruption separately (default). Returns 2 ranks. 
This is equivalent to calling `evaluate_performance` twice with `s` and `o`. # # + id="rH_hLOgippsP" ranks = evaluate_performance(X_test_movie_languages, model=model, filter_triples=X_filter, corrupt_side='o') display_aggregate_metrics(ranks) print('\nSize of test set:', X_test_movie_languages.shape) print('Size of ranks:', ranks.shape) # + [markdown] id="JEodiLjtpvrl" # As you see, only 1 rank is returned per triple, and this rank is the rank obtained by corrupting only the specified side with all the entities in the KG. # + [markdown] id="Z4CTkEakpx_Z" # ## 4.2 Evaluating against a subset of entities # # Depending on the use case or size of the graph, you may want to evaluate the test set by generating corruptions only from a subset of entities. This can be done by using `entities_subset` argument. For example, let's say we are doing a genetic study using KG. The graph may have different entity types like patient, diseases, genes, mutations, co-morbidities,ect. Say we want to find out what mutations cause disease i.e. `< ?, causes, disease_name>`. For this use case it doesnt make sense to replace the placeholder with all the entities. A logical replacement would be by using all the mutations. # # Similarly for our use case, we are interested in finding the language of the movie. So it makes sense to use only language categories to generate the corruptions for the object side. It also makes the task easier for the model. 
# + id="zd9_hhfApuGt" print('The number of corruptions generated per triple is:', len(model.ent_to_idx)) # + id="nUHoxk1op1B9" unique_languages = set(X_train[X_train[:, 1] == '/film/film/language'][:, 2]) print('Number of languages in KG:', len(unique_languages)) print('\n', unique_languages) # + id="wqqfHUeGp1t9" ranks = evaluate_performance(X_test_movie_languages, model=model, filter_triples=X_filter, corrupt_side='o', entities_subset=list(unique_languages)) display_aggregate_metrics(ranks) # + [markdown] id="we5ChCYAp4CU" # Usually, we can see a drastic increase in the metric values mainly because we are using fewer **semantically "valid" corruptions**. # # When we have a schema for our KG, and the focus is not just graph completion but a specific use case (Eg: similar job search, product recommendation, gene discovery to target a disease, etc), we would recommend using semantically "valid" corruptions by looking at the schema to do performance evaluation. # + [markdown] id="4dU-E_bUSoGm" # ##**Key Takeaways** # - During default evaluation, the `evaluate_performance` API corrupts both subject and object side with ALL the entities in the KG and returns 2 ranks. This is a hard task for the model and the resulting metrics may mislead the user. # - Depending on use case, we can corrupt specific sides of the triples by specifying the `corrupt_side` argument; and we can also provide semantically valid entities to be used for generating corruptions by using the `entities_subset` argument in `evaluate_performance` API # # + [markdown] id="wHGc1fulp28L" # --- # # 5. Comparision of Models # + id="k8eyZE5hqEkt" from ampligraph.latent_features import TransE, ComplEx, HolE, DistMult, ConvE, ConvKB # + [markdown] id="dlXFlocUqGQX" # ## 5.1 Traditional models : # # These models take as input vector representation of embeddings of entities and predicates of a triple. The embeddings are combined using a scoring function to generate a score. 
Ranking protocol is followed to train/evaluate the model. # + [markdown] id="VV2-4pS_l54F" # ### TransE # This is one of the first embedding models which set the platform for the KGE research. It uses simple vector algebra to score the triples. It has very low number of trainable parameters compared to most models. # # <center>$f = - || s + p - o ||_{n}$</center> # # + id="NPRuQ93iqIwV" model = TransE(k=150, epochs=50, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.0001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 1, verbose=True) model.fit(X_train) ranks = evaluate_performance(X_test, model=model, filter_triples=X_filter, corrupt_side='s,o', ranking_strategy='worst') display_aggregate_metrics(ranks) # + id="3wEGdaVtqN00" print('The number of unique entities:', len(model.ent_to_idx)) print('The number of unique relations:', len(model.rel_to_idx)) # + id="1EO10ZV9qOsX" print('Size of entity embeddings:', model.ent_emb.shape) print('Size of entity embeddings:', model.rel_emb.shape) # + [markdown] id="ZTE3qGJ-qPcX" # ###DistMult # This model is similar to TransE. Instead of additive operations, it does multiplication of vectors to compute the score. DistMult also has same number of parameters as TransE. This model is **quite bad at differenciating anti-symmetric relations** (especially during knowledge discovery), because <s,p,o> and <o,p,s> would get the same score. 
# # <center>$f = \sum s * p * o$</center> # # Example: \< Jack Likes VideoGames \> \< VideoGames Likes Jack \> # + id="d9TSQcLRqRJ9" model = DistMult(k=150, epochs=50, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.0001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 1, verbose=True) model.fit(X_train) ranks = evaluate_performance(X_test, model=model, filter_triples=X_filter, corrupt_side='s,o', ranking_strategy='worst') display_aggregate_metrics(ranks) # + id="Z4-Y-I8eqSpu" print('Size of entity embeddings:', model.ent_emb.shape) print('Size of entity embeddings:', model.rel_emb.shape) # + [markdown] id="AgFKn4CRqTzX" # ###Complex # This model can be thought of as performing DistMult like operations but in Complex space. The number of parameters is twice as that of TransE and DistMult (k for real part and k for imaginary part). The scoring function can handle symmetry and anti-symmetry quite well. # + id="5pPOqJ1-qV46" model = ComplEx(k=150, epochs=50, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.0001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 1, verbose=True) model.fit(X_train) ranks = evaluate_performance(X_test, model=model, filter_triples=X_filter, corrupt_side='s,o', ranking_strategy='worst') display_aggregate_metrics(ranks) # + id="KIrjD-dPqWoN" print('Size of entity embeddings:', model.ent_emb.shape) print('Size of entity embeddings:', model.rel_emb.shape) # + [markdown] id="4u_YxZGRqYWk" # ## 5.2 Convolutional models # # These are convolutional models. They converts embeddings to an "image" like representation, and performs convolutions on them. 
# Instead of embedding vectors(for s, p and o) as inputs to the model, you can think of the inputs to be like a 2 or 3-channel image where each channel represents s, p and o features.
#
# Both models are similar in terms of their architecture, that is while extracting feature representation of inputs; but the main difference lies in the way in which the corruptions are generated and how the loss is computed.
#

# + [markdown] id="1xP_BA1Gmto3"
# ### ConvKB
#
# ConvKB generates eta corruptions per training triple and computes feature matrix for the triples/corruptions (s,p,o) using shared layers. It uses margin based losses like other KGE models.

# + id="0f5wQM8Lqapv"
# Train a ConvKB model (convolution over the stacked s/p/o embeddings).
model = ConvKB(k=150,
               epochs=50,
               eta=1,
               loss='multiclass_nll',
               initializer='xavier', initializer_params={'uniform': False},
               regularizer='LP', regularizer_params={'lambda': 0.0001, 'p': 3},
               optimizer='adam', optimizer_params={'lr': 0.001},
               seed=0,
               batches_count=5,  # Goes OOM (ResourceExhaustedError) if batch count is 1
               verbose=True)

model.fit(X_train)

ranks = evaluate_performance(X_test,
                             model=model,
                             filter_triples=X_filter,
                             corrupt_side='s,o',
                             ranking_strategy='worst')

display_aggregate_metrics(ranks)

# + id="65mg1RtdqcsW"
print('Size of entity embeddings:', model.ent_emb.shape)
print('Size of relation embeddings:', model.rel_emb.shape)

# + [markdown] id="d62IQMi4m95u"
# ### ConvE
# ConvE on the other hand does a "target" prediction. It uses s and p embeddings and tries to predict all the o's (multi-hot encoding) in the graph. In one way, it is equivalent to treating all the unknown entities during training as a negative. Due to this approach, one needs to use the standard "target-based" losses for training ConvE models.
#
#
# The output layer of ConvE is extremely huge (since it is equal to the number of unique entities in the graph), which in turn results in scalability issues when working with practical graphs.
# + id="jev0TITPqfFg"
# Train a ConvE model. Note: 'bce' (target-based) loss is required for ConvE,
# and evaluation corrupts the object side only, matching its target-prediction design.
model = ConvE(k=150,
              epochs=2,
              loss='bce',
              initializer='xavier', initializer_params={'uniform': False},
              regularizer='LP', regularizer_params={'lambda': 0.001, 'p': 3},
              optimizer='adam', optimizer_params={'lr': 0.001},
              seed=0,
              batches_count=20,
              verbose=True)

model.fit(X_train)

ranks = evaluate_performance(X_test,
                             model=model,
                             filter_triples=X_filter,
                             corrupt_side='o',
                             ranking_strategy='worst')

display_aggregate_metrics(ranks)

# + id="75svVDO5qgMo"
print('Size of entity embeddings:', model.ent_emb.shape)
print('Size of relation embeddings:', model.rel_emb.shape)

# + [markdown] id="9GskySRoZ-dG"
# ### **Key Takeaways**
# - Traditional models use vector representation of embeddings as inputs, whereas Convolutional models use image-like representations and perform convolutions on them.
# - Convolutional models are extremely good when it comes to performance on standard datasets, however they don't scale well as the dataset size increases.
# - ComplEx model uses 2*k embeddings internally (for real and imaginary parts)

# + [markdown] id="oaQu5Im9q0ng"
# ---
# # 6. Hyperparameter Selection
#

# + [markdown] id="mIKd1v3xodV7"
# ## 6.1 Role of Hyperparameters
#
# A large value of ***k*** may result in overfitting, and the size of the embeddings on the disk would also be large. It may also happen that the embedding matrix may not even fit on the GPU.

# + id="YpQi2r6Oq4on"
from ampligraph.latent_features import TransE

# Deliberately oversized k to illustrate overfitting / memory pressure.
model = TransE(k=1000,
               epochs=20,
               eta=1,
               loss='multiclass_nll',
               initializer='xavier', initializer_params={'uniform': False},
               regularizer='LP', regularizer_params={'lambda': 0.001, 'p': 3},
               optimizer='adam', optimizer_params={'lr': 0.001},
               seed=0,
               batches_count=5,
               verbose=True)

model.fit(X_train)

# Evaluate on every 10th test triple to keep the demo fast.
ranks = evaluate_performance(X_test[::10],
                             model=model,
                             filter_triples=X_filter,
                             corrupt_side='s,o')

display_aggregate_metrics(ranks)

# + [markdown] id="7oANdethq7OX"
# A small ***k*** may result in underfitting.
# + id="kVSMp1_xq8-h" model = TransE(k=10, epochs=20, eta=1, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 5, verbose=True) model.fit(X_train) ranks = evaluate_performance(X_test[::10], model=model, filter_triples=X_filter, corrupt_side='s,o') display_aggregate_metrics(ranks) # + [markdown] id="UQ1_Hqj4rBV4" # Ideally, you should choose a ***k*** which is large enough, along with a proper choice of ***eta***. # # It has been observed that the performance of model increases with increase in ***eta*** up to a certain point and then saturates. Usually a good value for eta is between 20-30. [add reference]() # + id="SEK2TAYJrDxP" model = TransE(k=250, epochs=20, eta=20, loss='multiclass_nll', initializer='xavier', initializer_params={'uniform': False}, regularizer='LP', regularizer_params= {'lambda': 0.001, 'p': 3}, optimizer= 'adam', optimizer_params= {'lr': 0.001}, seed= 0, batches_count= 10, verbose=True) model.fit(X_train) ranks = evaluate_performance(X_test[::10], model=model, filter_triples=X_filter, corrupt_side='s,o') display_aggregate_metrics(ranks) # + [markdown] id="1j8UBMtLxQ5L" # # ## 6.2 Grid Search and Random Search # # Ampligraph provides an API to perform model selection and to run experimental campaigns on datasets. One can use [select_best_model_ranking](https://docs.ampligraph.org/en/latest/generated/ampligraph.evaluation.select_best_model_ranking.html) to perform model selection. It supports grid search and random search. 
# # + id="OvNf4pJWrIxA" from ampligraph.evaluation import select_best_model_ranking model_class = TransE param_grid = { "batches_count": [5], "seed": 0, "epochs": [50], "k": [150, 50], "eta": [1, 5], "loss": ["multiclass_nll"], "loss_params": {}, "embedding_model_params": {}, "regularizer": ["LP"], "regularizer_params": { "p": [3], "lambda": [1e-3] }, "optimizer": ["adam"], "optimizer_params":{ "lr": 0.001 #lambda: np.random.uniform(0.00001, 0.01) }, "verbose": False } best_model, best_params, best_mrr_train, ranks_test, mrr_test, experimental_history = \ select_best_model_ranking(model_class, X_train, X_valid, X_test, param_grid, # max_combinations=2, # performs random search-executes 2 models by randomly choosing params use_filter=True, verbose=True, early_stopping=True) # + id="pT2aa3DyrMB0" print('MRR of the best model:', best_mrr_train) # + id="zgBRSXN7rM-P" # params of the best model best_params # + [markdown] id="Di_zQzkjrPJ5" # You can also look at the experimental history and check the various combinations tested during the model selection, along with the results for each combination, using the experimental history. # + id="1pOHp0UNrP6Y" experimental_history # + [markdown] id="GfKJdvRpb9co" # ##**Key Takeaways** # - Large value of k may increase the model performance slightly, however it may result in OOM on GPU and it will need larger storage space on disk. # - A small k would result in model underfitting the data # - Ideal option is to choose an "in-between" k and an appropriate value for eta. A good choice will result is similar mrr as that obtained with larger k. # - Rather than running multiple models manually for hyperparameter selection, one can use the `select_best_model_ranking` API provided by AmpliGraph to do model selection using either grid search or random search. # - You can use a callable in the search param grid and specify `max_combinations` parameter in the `select_best_model_ranking` API. 
AmpliGraph will perform random search, by randomly choosing values from the callable. It will evaluate `max_combinations` number of models and return the best one. # + [markdown] id="7uK5o7lrrQ1R" # --- # # 7. Model Calibration # + id="cj0IIXk-rTRq" model = restore_model('TransE.pkl') X_test_small = np.array([['star wars', '/film/film/language', 'english language'], ['star wars', '/film/film/language', 'java']]) model.predict(X_test_small) # + [markdown] id="IC92WqR8rVNR" # As described earlier, model.predict returns a score which doesn't signify anything because the score is not bound for most of the models. It is just a value and to interpret it we use the ranking protocol. # # However, one can also [calibrate](https://docs.ampligraph.org/en/latest/generated/ampligraph.latent_features.EmbeddingModel.html?#ampligraph.latent_features.EmbeddingModel.calibrate) the scores of a model, so that one can get a bounded confidence estimate which ranges from 0 to 1. This is done by performing a logistic regression on the score of triples. One can use the `calibrate` API to do this. It takes an argument `X_Pos` which should be True Positives (Eg. training set). If a list of True Negatives are available, then this can be passed to `X_Neg`. The model would calibrate the scores by tuning a logistic regressor. One can then use `predict_proba` API to get a bounded score. # # If a list of True Negatives are not available, then the calibration can be performed on synthetic corruptions. However, while doing so, one must pass the `positive_base_rate` argument which specifies the base rate of positive statements. Choosing this value is a challenging task and it affects the value predicted by `predict_proba`. 
# # # + id="4jteRatPrWPI" model.calibrate(X_train, X_neg=None, positive_base_rate=0.5, batches_count=100, epochs=100) # + id="8v8qG-3MrXQZ" model.predict_proba(X_test_small) # + [markdown] id="zJ-7cLsBrbTm" # For more details on calibration refer [this paper](https://arxiv.org/abs/1912.10000). # + [markdown] id="GbWs92Bxoe_E" # **Key Takeaways:** # - Models usually return a score between +inf and -inf (depending on the type). These scores can be calibrated to the range [0, 1] using `model.calibrate` # + [markdown] id="pdX1lwK4rX_Y" # --- # # 8. Knowledge Discovery # # In Ampligraph we provide a number of high-level convenience functions for performing knowledge discovery using graph embeddings: # # > ***query_topn***: which when given two elements of a triple will return the top_n results of all possible completions ordered by predicted score. # # > ***discover_facts***: generate a set of candidate statements using one of several defined strategies and return triples that perform well when evaluated against corruptions. # # > ***find_clusters***: perform link-based cluster analysis on graph embeddings. # # > ***find_duplicates***: which will find duplicate entities in a graph based on their embeddings. # # # + [markdown] id="N3dmkTgHrdB0" # ## 8.1 Triple completion # # Sometimes you may have either a relation and entity (head or tail) pair, or just two entities, and you want to see what the top n results returned by the model are that completes the triple. # # ``` # <head, relation, ?> # <head, ?, tail> # <?, relation, tail> # ``` # # Specify ```rels_to_consider``` or ```ents_to_consider``` lists to return triples where the missing element is filled only from that list. 
# # + id="7U1Wur_hravz" from ampligraph.discovery import query_topn # restore the previously saved model to save time model = restore_model('TransE.pkl') triples, scores = query_topn(model, top_n=10, head='<NAME>', relation='/people/person/profession', tail=None, ents_to_consider=None, rels_to_consider=None) for triple, score in zip(triples, scores): print('Score: {} \t {} '.format(score, triple)) # + id="6i8J-rFvIPuB" triples, scores = query_topn(model, top_n=10, head='the departed', relation=None, tail='/m/086k8', ents_to_consider=None, rels_to_consider=None) for triple, score in zip(triples, scores): print('Score: {} \t {} '.format(score, triple)) # + [markdown] id="WguBtOZBXnZy" # --- # ## 8.2 Clustering # # Once the model is trained, we can use the embeddings and perform downstream tasks like clustering or classification. Here we will illustrate how to do node clustering. Ampligraph provides an api [find_clusters](https://docs.ampligraph.org/en/latest/generated/ampligraph.discovery.find_clusters.html) which takes in model, the concepts to cluster, and the clustering model (sklearn based). It performs clustering and returns the cluster indices for the concepts. # + id="Jjk5MQfFXleu" from ampligraph.discovery import find_clusters from sklearn.cluster import KMeans # restore the previously saved model to save time model = restore_model('TransE.pkl') # Get the entities that we want to cluster. Here we use all unique entities all_entities = np.array(list(set(dataset.values[:, 0]).union(dataset.values[:, 2]))) print('Size of the subset being used for subset generation:', len(all_entities)) # create the clustering algorithm from sklearn kmeans = KMeans(n_clusters=3, n_init=100, max_iter=500) # call find_clusters to get the cluster assignments of the entities clusters = find_clusters(all_entities, model, kmeans, mode='entity') # + [markdown] id="X3m3yAuBX-ZN" # Now that we have the cluster assignments, let us plot it in a 2d space. 
Let us use PCA to reduce the dimensions of the embeddings from k=150 to 2 dimensions. # + id="H77UV5aJX1RY" from sklearn.decomposition import PCA from matplotlib import pyplot as plt import seaborn as sns import pandas as pd # Get the embeddings (150 dims) for all the entities of interest jobs_embeddings = model.get_embeddings(all_entities, embedding_type='entity') # Perform PCA and reduce the dims to 2 embeddings_2d = PCA(n_components=2).fit_transform(np.array([emb for emb in jobs_embeddings])) # Create a dataframe to plot the embeddings using scatterplot df = pd.DataFrame({"entities": all_entities, "clusters": "cluster" + pd.Series(clusters).astype(str), "embedding1": embeddings_2d[:, 0], "embedding2": embeddings_2d[:, 1]}) plt.figure(figsize=(20, 20)) plt.title("Cluster embeddings") ax = sns.scatterplot(data=df, x="embedding1", y="embedding2", hue="clusters") # Print only a few labels, to avoid clutter, using adjust_text from adjustText import adjust_text texts = [] for i, point in df.iterrows(): # randomly choose a few labels to be printed if np.random.uniform() < 0.003: texts.append(plt.text(point['embedding1']+.1, point['embedding2'], str(point['entities']))) adjust_text(texts) plt.show() # + [markdown] id="I3I9459RrtED" # ## 8.3 Hypothesis Generation # # Other times you may wish to discover **any** potential new facts from an existing knowledge graph. # # With a knowledge graph containing millions of entities the space of possible facts is huge, and evaluating all of them can take a very long time. In order to speed up this task we have implemented a number of sampling strategies. # # The strategies implemented include: # > ```entity_frequency```, ```graph_degree```, ```cluster_coefficient```, ```cluster_triangles```, ```cluster_squares```, ```random_uniform```, ```exhaustive```. 
# # Entities in all strategies excluding ```random_uniform```, ```exhaustive``` are sorted in ascending fashion, on the assumption that frequent or densely connected entities are less likely to have missing true statements. # # The general procedure is to generate a set of candidate statements, and then rank them against a set of corruptions using the ```ampligraph.evaluation.evaluate_performance()``` function. # # A sampling weight is calculated for each entity using the specified strategy, and ```max_candidates``` are sampled to produce the candidate triple set. # # Candidates are then evaluated to obtain a rank, and triples that appear in the ```top_n``` ranked statements of evaluation procedure are returned as potentially true statements. # # # + id="gRf_W0ZnrvxC" from ampligraph.discovery import discover_facts triples, ranks = discover_facts(dataset.values, model, top_n=500, max_candidates=500, strategy='cluster_triangles', target_rel='/people/person/profession', seed=42) for triple, rank in zip(triples, ranks): print('Rank: {} \t {} '.format(rank, triple)) # + [markdown] id="RQS5xvBzrwmy" # --- # # 9. Visualizing embeddings using Tensorboard # # Tensorboard Projector allows us to visualize high dimensional embeddings in a graphical interface. This can be useful to examine and understand embedded concepts. # # Ampligraph provides a single function for creating the Tensorboard files, [create_tensorboard_visualizations](https://docs.ampligraph.org/en/latest/generated/ampligraph.utils.create_tensorboard_visualizations.html#ampligraph.utils.create_tensorboard_visualizations), as demonstrated below. # # + id="o3_zf28Zry0k" from ampligraph.utils import create_tensorboard_visualizations model = restore_model('TransE.pkl') create_tensorboard_visualizations(model, 'embeddings_transe') # + [markdown] id="fr46N0ANJhTQ" # Run the cell below to run tensorboard, and it will open in a different browser window with the tensorboard interface. 
# + id="UrGF8tcBr5YK" # This will not work in google colab - only uncomment and run if using jupyter notebook # # ! tensorboard --logdir='./embeddings_transe' # + [markdown] id="VMLanHsMsDpN" # # Appendix # # # **Assume you are incrementally building knowledge graphs and training models in phases. Can you compare the models trained on these incremental datasets?** # # Depends. You should have the same test set and same number of corruptions when you want to compare models. If you are only adding new links incrementally, it does not matter. The models would be comparable. However, if you are also adding new concepts, then you must be careful while evaluating the models. You must make sure that the number of corruptions generated are the same throughout. You can choose a set of entities that would be used as corruptions in all the phases and then use `entities_subset` argument of `evaluate_performance` API. # # # **What if my embedding matrices are not fitting on the GPU?** # # Sometime while running the model, you may run into ResourceExhausted error on the GPU, especially because the batches may not fit in memory. This can usually be solved by increasing the batch count. # # There are times when you may have millions of entities in the graph, and you may not be able to allocate the embedding matrix on the GPU. In this case, you can use the [large graph mode](https://docs.ampligraph.org/en/latest/dev_notes.html#dealing-with-large-graphs). It gets activated automatically when number of entities is >500000. You can also use `set_entity_threshold` and change this threshold manually. # ``` # from ampligraph.latent_features import set_entity_threshold # set_entity_threshold(100000) # # ... # ``` # # In this mode, ampligraph creates the embedding matrix on the CPU and loads only the embeddings of the entities of the batch being trained on the GPU. This mode is much faster than training just on CPU as it can use the GPU cores to speed up computations. 
More details can be found in the link. #
ECAI_2020_KGE_Tutorial_Hands_on_Session.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plotting Position vs Time # In this notebook you will plot a position vs time graph of the data you just saw. # # First, I will demonstrate such a plot by following these steps: # # 1. Importing `pyplot`, Python's most popular plotting library. # 2. Storing data to be plotted in variables named `X` and `Y` # 3. Creating a scatter plot of this data using pyplot's `scatter()` function. # 4. Adding a line connecting two data points using pyplot's `plot()` function. # 4. Adding axis labels and a title to the graph. # + # Step 1. # we import pyplot as plt so we can refer to the pyplot # succinctly. This is a standard convention for this library. from matplotlib import pyplot as plt # - # Initially, I only told you the mileage at 2:00 and 3:00. The data looked like this. # # | Time | Odometer <br>(miles) | # |:----:|:--------------------------------:| # | 2:00 | 30 | # | 3:00 | 80 | # # I'd like to make a scatter plot of this data and I want my **horizontal** axis to show time and my **vertical** axis to show mileage. # # In this notebook (and those that follow), we are going to use a capital `X` to store horizontal axis data and a capital `Y` to store vertical axis data. In this case: # + # Step 2. # get the data into variables called X and Y. This naming pattern # is a convention. You could use any variables you like. X = [2,3] Y = [30,80] # + # Step 3. # create a scatter plot using plt.scatter. Note that you NEED # to call plt.show() to actually see the plot. Forgetting to # call plt.show() is a common source of problems for people # new to this library plt.scatter(X,Y) plt.show() # - # This isn't a very exciting scatter plot since it only has two data points. Let's add a line connecting these data points as well. # + # Step 4. 
# add lines connecting adjacent points
plt.scatter(X, Y)
plt.plot(X, Y)
plt.show()
# -

# Let's add a title and labels to the X and Y axes

plt.scatter(X, Y)
plt.plot(X, Y)
plt.title("Position vs. Time on a Roadtrip")
plt.xlabel("Time (in hours)")
plt.ylabel("Odometer Reading (in miles)")
plt.show()

# ## Twenty minute resolution
# When looking at the odometer every *20* minutes, the data looks like this:
#
# | Time | Odometer <br>(miles) |
# |:----:|:--------------------------------:|
# | 2:00 | 30 |
# | 2:20 | 40 |
# | 2:40 | 68 |
# | 3:00 | 80 |
#
# But a better way to think about it for plotting is like this (note the difference in how time is represented):
#
# | Time | Odometer <br>(miles) |
# |:----:|:--------------------------------:|
# | 2.000 | 30 |
# | 2.333 | 40 |
# | 2.667 | 68 |
# | 3.000 | 80 |
#
# ### EXERCISE 1 - Make a position vs time graph of the data shown above with lines connecting adjacent dots.
#
# Reproduce the demonstration from before using the data shown above.

# +
# TODO - your code for exercise 1 here
# One list per AXIS, not one list per table row: X holds all the times
# (horizontal axis) and Y holds all the odometer readings (vertical axis).
# Note: plt.scatter's third positional argument is the marker SIZE, so
# plt.scatter(X, Y, Z) with data in Z would silently misuse it.
X = [2.000, 2.333, 2.667, 3.000]
Y = [30, 40, 68, 80]

plt.scatter(X, Y)
plt.plot(X, Y)
plt.title("Position vs. Time on a Roadtrip")
plt.xlabel("Time (in hours)")
plt.ylabel("Odometer Reading (in miles)")
plt.show()
# -

# #### Exercise 1 - Solution Check (full solution code at end of notebook)
# You'll know you're correct when you've generated a plot that looks something like the following:
#
# ![](https://d17h27t6h515a5.cloudfront.net/topher/2017/December/5a2ee74f_vmc-l1-20-min-plot/vmc-l1-20-min-plot.png)
#
# ### EXERCISE 2 - Reflect
# Look at the graph above and think about the following questions (we will talk about them more in the video that follows)
#
# 1. How can you tell which of these time intervals had the highest average speed (without looking at the actual data?)
#
# 2. If the car stopped from 3:00 - 4:00 and you were to plot that data, what would the **slope** of that line look like?
# + # # # # SOLUTION CODE BELOW # # # # Exercise 1 - Solution X = [ 2.000, 2.333, 2.667, 3.000 ] Y = [ 30, 40, 68, 80 ] plt.scatter(X,Y) plt.plot(X,Y) plt.title("Position vs. Time on a Roadtrip") plt.xlabel("Time (in hours)") plt.ylabel("Odometer Reading (in miles)") plt.show() # -
src/1. Odometers, Speedometers and Derivatives/1. Plotting Position vs Time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/Callysto_Notebook-Banner_Top_06.06.18.jpg"> # + language="html" # # <script src="https://cdn.geogebra.org/apps/deployggb.js"></script> # - # # Reflections of Graphs # # <img src="images/cat_fight.jpg" width=960 height=640> # ## Introduction # # In the photo above, a kitten is looking at its reflection in a mirror. # There are a few obvious but important observations to make about the kitten and its reflection. # Firstly, the kitten and its reflection appear to be on opposite sides of the mirror. # Secondly, the kitten and its reflection appear to be equally as far from the mirror's surface. # Thirdly, the kitten can see its reflection because it is looking at the mirror straight on -- # the photographer can't see her reflection in the mirror because she's looking at it at an angle. # # Now we let's see how the reflection of a point across a line is just like a reflection in a mirror. # The applet below shows a point $P$ and its reflection $P'$ across a line. # Try moving $P$ and the line and see how $P'$ changes. # + language="html" # # <div id="ggb-point"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 400, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableRightClick": false, # "enableShiftDragZoom": true, # "useBrowserForJS": false, # "filename": "geogebra/reflection-point.ggb" # }, 'ggb-point'); # # ggbApp.inject(); # </script> # - # Whichever side of the line $P$ is on, its reflection $P'$ is on the opposite side. # The point $P$ is as far from the line as $P'$ is. 
# If we were to draw a line from $P$ to $P'$, this line would intersect the line we are reflecting across at a right angle; to "see" $P'$, $P$ has to look at the line straight on. # # The applet shows the reflection of a point across any line. # In this notebook, we will learn how to reflect a point across three particular lines: # the $x$-axis, the $y$-axis, and the line $y = x$. # We will also learn how to reflect functions and graphs of functions across these lines. # Reflecting across other lines will be outside the scope of this notebook. # ## Reflections across the $x$-axis # # ### Points # # The easiest line to reflect across is the $x$-axis. # After toying with the above applet, # you might already have an idea of where the reflection of a point across the $x$-axis ought to be. # If not, you may want to try playing with the applet above some more. # In particular, try making the line horizontal, then try dragging the point $P$ around. # # So let's test your intuition. # In the applet below, there are three blue points, $A$, $B$, and $C$. # There are three more red points, $A'$, $B'$, and $C'$ that are supposed to be their reflections, # but they are in the wrong place. # Try moving $A'$, $B'$, and $C'$ to where you think they belong. # You will see a message if you got it right. # You can also keep reading and come back to this exercise later. # + language="html" # # <div id="ggb-exercise1"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-exercise1.ggb" # }, 'ggb-exercise1'); # # ggbApp.inject(); # </script> # - # If you were able to solve the exercise, you might already have guessed the following facts: # # * A point and its reflection across the $x$-axis have the same $x$-coordinate. 
# # This is because the line from a point to its reflection across the $x$-axis # has to intersect the $x$-axis at a right angle. # Since the $x$-axis is perfectly horizontal, # this line from point to point has to be perfectly vertical, # which means the points are directly above one another, so they have the same $x$-coordinate. # # * A point and its reflection across the $x$-axis have equal but opposite $y$-coordinates. # # What is meant by that is if a point has a $y$-coordinate of, say, 17, # its reflection has the $y$-coordinate -17. # More generally, if a point has a $y$-coordinate of $a$, its reflection has the $y$-coordinate $-a$. # This follows from the fact that the two points are on opposite sides of the $x$-axis # (so one is positive and the other negative, unless the points are *on* the $x$-axis) # and the fact that the two points are equally distant from the $x$-axis. # # Putting these two facts together, we get the following rule: # # **Rule:** For an arbitrary point $(x, y)$, its reflection across the $x$-axis is the point $(x, -y)$. # # **Example:** Consider the point $P = (1, 3)$. # Let's call its reflection $P'$. # Then $P'$ has the same $x$-coordinate as $P$, but its $y$-coordinate is the negative of $P$'s. # This means $P' = (1, -3)$. # # **Example:** Suppose we have instead the point $P = (2, 0)$. # This point is *on* the $x$-axis. # Since $-0 = 0$, its reflection is $P' = (2, 0)$. # If we have a point on the $x$-axis and we reflect it across the $x$-axis, we get the same point back. # It is its own reflection. # ### Graphs # # In the previous exercise, not only did we reflect three points, # but we also plotted the reflection of a triangle. # We can reflect points, triangles, and many other shapes and objects. # Now we will see how to reflect the graph of a function. # # The graph of a function is just a bunch of points -- # so many points packed closely together that it looks like a single curve. 
# To reflect the graph, we just have to reflect all of the points! # # Below, in blue, is the graph of some function $y = f(x)$ # and a few of the points making up its graph. # The reflection of these points is in red. # Use the slider to see what happens when we take and reflect more and more points on the graph. # + language="html" # # <div id="ggb-slider1"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableRightClick": false, # "enableShiftDragZoom": true, # "useBrowserForJS": false, # "filename": "geogebra/reflection-slider1.ggb" # }, 'ggb-slider1'); # # ggbApp.inject(); # </script> # - # If we only reflect a few points, the red dots don't look like much, # but as we reflect more and more points, the red dots start to resemble the blue curve but flipped upside-down. # This is the reflection of the graph across the $x$-axis. # Or more accurately, if we had the time to sample and reflect infinitely many points, we would get the reflection of the graph. # Usually, it will suffice to sample and reflect a few points and connect the dots with a curve. # (Even computer programs that graph functions typically just plot a bunch of points and connect them by straight lines, but they plot so many points that it looks accurate.) # # **Example:** Let's reflect the graph of $y = \log_2(x)$ across the $x$-axis. # We start by identifying a few points on the graph of $y = \log_2(x)$. # We know, for example, that $(1,0)$, $(2,1)$, $(4,2)$, and $(8,3)$ are points on the graph. # Their reflections are $(1, 0)$, $(2,-1)$, $(4,-2)$, and $(8,-3)$, respectively. # Then we connect these points by a curve. 
# +
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from math import log

def draw():
    """Plot y = log2(x), its reflection across the x-axis, and labelled sample points."""
    def g(x):
        return log(x, 2)
    f = np.vectorize(g)

    # Sample x just above 0 (log2 is undefined at 0) up to 10.
    xmin, xmax = 0.01, 10
    nsamples = 100
    x = np.linspace(xmin, xmax, nsamples)

    # Draw the coordinate axes and clamp the view.
    plt.axhline(color="black", linewidth=1)
    plt.axvline(color="black", linewidth=1)
    plt.ylim(-5, 5)

    # Raw strings so LaTeX backslashes (e.g. \log) are not treated as escapes.
    plt.plot(x, f(x), label=r"$y = \log_2(x)$")
    plt.plot(x, -f(x), label=r"Reflection of $y = \log_2(x)$", color="red", sketch_params=0.8)

    # A few points on the curve (blue/magenta) and their reflections (red).
    pts = [(1, 0), (2, 1), (4, 2), (8, 3), (2, -1), (4, -2), (8, -3)]
    fmts = ["mo", "bo", "bo", "bo", "ro", "ro", "ro"]
    for i in range(0, len(pts)):
        plt.plot(pts[i][0], pts[i][1], fmts[i])
        plt.annotate("$({0},{1})$".format(pts[i][0], pts[i][1]),
                     xy=(pts[i][0], pts[i][1]),
                     xytext=(4, 4),           # offset the label slightly from the point
                     textcoords="offset points")

    plt.legend(loc='upper center', bbox_to_anchor=(1.45, 0.8))

draw()
# -

# ### Functions
#
# We have reflected points and graphs across the $x$-axis.
# These were both geometric ideas.
# Now we will reflect functions themselves,
# and we start with the observation that the reflection of the graph $y = \log_2(x)$ in the last example
# is precisely the graph of the function $y = -\log_2(x)$.
#
# If we have an arbitrary function $y = f(x)$,
# its graph is all of the points of the form $(x, f(x))$.
# To reflect these points across the $x$-axis, we negate their $y$-coordinates,
# so the reflection of the graph is all of the points of the form $(x, -f(x))$.
# But this is just the graph of the function $y = -f(x)$!
#
# **Rule:** The reflection of a function $y = f(x)$ across the $x$-axis is the function $y = -f(x)$.
#
# **Example:** Suppose we have the function $y = x^2$ and we want to reflect it across the $x$-axis.
# All we have to do is negate the right hand side of the equation.
# The reflection is simply
# $$ y = -(x^2) = -x^2. $$
#
# **Example:** Suppose we have the function $y = \sin(x) - x$ instead.
# Again, we just negate the right hand side of the equation, # but make sure to negate *all* of the terms and be careful with double negatives. # This time, the reflection across the $x$-axis is # $$ y = -(\sin(x) - x) = -\sin(x) + x. $$ # # The interactive graph below allows you to enter an arbitrary function $f(x)$ and see its graph and its reflection across the $x$-axis. # + language="html" # # <div id="ggb-interactive1"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": true, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-interactive1.ggb" # }, 'ggb-interactive1'); # # ggbApp.inject(); # </script> # - # ## Reflections across the $y$-axis # # ### Points # # Now that we know how to reflect points, graphs, and functions across the $x$-axis, # we will see how to reflect them across the $y$-axis instead. # Geometrically, the idea is the same as before. # A point and its reflection are on opposite sides of the $y$-axis, # and both are the same distance away from the $y$-axis. # The line between them intersects the $y$-axis at a right angle. # Since the the $y$-axis is vertical, this line intersecting it is horizontal, # so the point's reflection must be directly to the left or right of it. # Therefore the point and its reflection have the same $y$-coordinate. # # Before seeing the "rule" for reflecting a point across the $y$-axis, # try this exercise to test your intuition and understanding. # Click and drag the points $A'$, $B'$, and $C'$ so that they are the reflections across the $y$-axis # of $A$, $B$, and $C$, respectively. 
# + language="html" # # <div id="ggb-exercise2"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-exercise2.ggb" # }, 'ggb-exercise2'); # # ggbApp.inject(); # </script> # - # **Rule:** For an arbitrary point $(x, y)$, its reflection across the $y$-axis is the point $(-x, y)$. # # **Example:** Consider the point $P = (1, 3)$ and its reflection across the $y$-axis, $P'$. # The points $P$ and $P'$ have the same $y$-coordinates, but their $x$-coordinates are negatives of one another. # So $P' = (-1, 3)$. # # **Example:** Suppose we have instead the point $P = (0, 2)$. # This point is on the $y$-axis. # Since $-0 = 0$, its reflection is $P' = (0, 2)$. # If we have a point on the $y$-axis and we reflect it across the $y$-axis, we get the same point back. # # ### Graphs # # To reflect a graph across the $y$-axis, we do just like before. # In theory, we think of the graph as consisting of infinitely many points, # and we draw the reflection across the $y$-axis of each of these points to get the graphs reflection. # In practice, because life is short, we reflect a few points and connect them by a curve. # The more points we reflect, the more accurately we can draw the reflection of the graph. 
# + language="html" # # <div id="ggb-slider2"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-slider2.ggb" # }, 'ggb-slider2'); # # ggbApp.inject(); # </script> # - # ### Functions # # In the same way that we reflected the function $y = f(x)$ across the $x$-axis to get $y = -f(x)$, # we can also reflect the function across the $y$-axis. # An arbitrary point on the graph of $y = f(x)$ has the form $(x, f(x))$. # Using the rule above, the reflection of an arbitrary point on the graph is of the form $(-x, f(x))$. # But $(-x, f(x))$ has the same "form" as $(x, f(-x))$, and points of *this* form make up the graph of $y = f(-x)$. # # **Rule:** The reflection of a function $y = f(x)$ across the $y$-axis is $y = f(-x)$. # # This means we just have to replace $x$ with $-x$ everywhere in our function. # Let's do a couple of examples. # We reflected the following functions across the $x$-axis before. # Let's reflect them across the $y$-axis instead. # # **Example:** Let's reflect $y = x^2$ across the $y$-axis. # According to our rule, we just replace $x$ with $-x$, # but we should be careful and put parentheses around it, like so: $(-x)$. # The reflection is simply # $$ y = (-x)^2 = x^2. $$ # In this case, the reflection across the $y$-axis is the same as the original function. # # **Example:** Let's reflect the function $y = \sin(x) - x$ across the $y$-axis. # We replace every $x$ with $-x$ to get # $$ y = \sin(-x) - (-x) = \sin(-x) + x. $$ # This is a perfectly acceptable answer. # You might have learned that $\sin(-x) = -\sin(x)$, # so we could also rewrite this as # $$ y = -\sin(x) + x $$ # if we prefer. 
# You might also notice that this is the same answer that we got when we reflected it across the $x$-axis. # Whether we reflect $y = \sin(x) - x$ across the $x$-axis or the $y$-axis, we get the same result. # # Use the interactive graph below to plot any function with its reflection across the $y$-axis. # As usual, the function will be in blue and its reflection in red. # + language="html" # # <div id="ggb-interactive2"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": true, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-interactive2.ggb" # }, 'ggb-interactive2'); # # ggbApp.inject(); # </script> # - # ## Reflections across both axes # # If we reflect a point across the $x$-axis and then reflect it again, the twice-reflected point is in the same position as the original point. # The same thing happens when we reflect a point twice across the $y$-axis. # But what happens when we reflect a point across the $x$-axis and the across the $y$-axis? # # Let's work with an example. # Suppose we start with the point $(1, 2)$. # Its reflection across the $x$-axis is $(-1, 2)$. # The reflection of *that* across the $y$-axis is $(-1, -2)$. # # What if we work with an arbitrary point whose coordinates we don't know? # We start with the point $(x, y)$, then reflect it across the $x$-axis to get $(-x, y)$. # The reflection of the new point across the $y$-axis is $(-x, -y)$. # # To reflect a point across the $x$-axis followed by the $y$-axis, we just negate both of the point's coordinates. # Now check for yourself that if we reflected $(x, y)$ across the axes in the other order -- # the $y$-axis and then the $x$-axis -- # we still get the point $(-x, -y)$. # The order we do the reflections in does not matter! 
# # Something interesting happens when we look at what happens graphically. # Try playing with the following applet. # Click and drag the point $P$ in blue. # The red point $P'$ is the result of reflecting $P$ across both axes. # + language="html" # # <div id="ggb-point2"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 400, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-point2.ggb" # }, 'ggb-point2'); # # ggbApp.inject(); # </script> # - # Can you see what is happening? # There are a couple of ways of thinking about the relationship between $P$ and $P'$ in this applet. # One way is that $P'$ is the result of rotating $P$ 180 degrees around the origin. # Another way is that $P'$ is the reflection of $P$ across the origin -- a line from $P$ to $P'$ passes through the origin and both points are equally distant from the origin. # ## Even and odd functions # # Functions that are their own reflections across the $y$-axis have a special name. # These are called **even** functions. # Geometrically, this means the graph of the function is the same after we reflect it across the $y$-axis. # What does this mean algebraically? # An arbitrary point on the graph of the function looks like $(x, f(x))$. # When we reflect it across the $y$-axis, we get the point $(-x, f(x))$. # But because the graph is its own reflection, this has to be the same as the point $(-x, f(-x))$. # If $(-x, f(x))$ and $(-x, f(-x))$ are the same point, that means $f(x) = f(-x)$. # An even function is a function for which $f(x) = f(-x)$. # Some example of even functions are # * $f(x) = c$, where $c$ is any constant; # * $f(x) = |x|$; # * $f(x) = x^2$; # * $f(x) = x^a$ where $a$ is any even power; and # * $f(x) = \cos(x)$. 
# Earlier in this notebook, there was an interactive graph allowing you to enter a function and see its reflection across the $y$-axis. # Try entering these functions and see how the functions and their reflections overlap. # # Functions that are their own 180-degree rotations around the origin also have a special name. # They are called **odd** functions. # Geometrically, this means if we graph the function and the rotate the graph 180 degrees about the origin, # we get the same image. # Like before, let's consider what this means algebraically. # We start with an arbitrary point on the graph of the function, $(x, f(x))$. # We rotate it 180 degrees (or equivalently, we reflect across the $x$-axis and then the $y$-axis) and get the point $(-x, -f(x))$. # But since the graph is its own rotation, this is the same point as $(-x, f(-x))$. # This means that $f(-x) = -f(x)$. # So an odd function is a function for which $f(-x) = -f(x)$. # Some examples of odd functions are # * $f(x) = 0$; # * $f(x) = x$; # * $f(x) = x^a$ where $a$ is any odd power; and # * $f(x) = \sin(x)$. # # We don't have a special name for functions that are their own reflections across the $x$-axis. # Why not? # Because other than the function $f(x) = 0$, there is no such thing as a function that is its own reflection across the $x$-axis! # Such a "function" would not pass the vertical line test. # # The only function that is both even and odd at the same time is $y = 0$. # # We are used to calling integers even and odd. # It is strange to call functions even and odd, # but there is a relationship between the two. # * The product and quotient of two even functions is even, just like the sum and difference of even numbers is even. # * The product and quotient of two odd functions is even, just like the sum and difference of odd numbers is even. # * The product and quotient of an even and odd function is odd, just like the sum and difference of an even and odd number is odd. 
# Be careful though, because the sum of two odd functions is odd, whereas the sum of two odd numbers is even. # ## Reflections across the line $y = x$ # # ### Points # # The "rule" for reflecting across the line $y = x$ is not hard to use, # but understanding why the rule works is harder to explain than for the previous two reflections. # Let's state the rule first: # # **Rule:** The reflection of an arbitrary point $(x,y)$ across the line $y = x$ is the point $(y,x)$. # # We just swap the point's $x$- and $y$-coordinates. # # Try the following exercise. # Move the points $A'$, $B'$, $C'$, and $D'$ so that they are the reflections of $A$, $B$, $C$, and $D$, respectively. # + language="html" # # <div id="ggb-exercise3"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-exercise3.ggb" # }, 'ggb-exercise3'); # # ggbApp.inject(); # </script> # - # Here is an explanation of why this rule works. # # Let's start by drawing a picture of the line $y = x$, a point $P$, and its reflection $P'$. # The points $P$ and $P'$ are on opposite sides of the line $y = x$, # and the line connecting them intersects $y = x$ at a right angle. # Let's call the this point of intersection $Q$. # The points $P$ and $P'$ are the same distance from $Q$. # This is our picture so far: # # <img src="images/ggb1.png" width=300 height=300> # Draw a line segment from $P$ directly left until it intersects the line $y = x$. # Draw another line segment from $P'$ directly down until it too intersects the line $y = x$. # These two line segments and the line $y = x$ all meet at the same point. # Let's call this point $R$. 
# <img src="images/ggb2.png" width=300 height=300> # Since the line segment $PR$ is horizontal and $P'R$ is vertical, # then angle $\angle PRP'$ is a right angle. # The other angles $\angle RPP'$ and $\angle RP'P$ are each 45 degrees, so this makes the triangle $\triangle PRP'$ an isosceles triangle. # # Now suppose $P$ has the coordinates $(a, b)$. # Let's try to find the coordinates of $P'$. # # The triangle $\triangle PRP'$ is an iscoceles triangle, so the lengths $\overline{PR}$ and $\overline{P'R}$ are equal. # What's more, $R$ and $P$ have the same $y$-coordinate (we only moved in the $x$ direction to get to $R$) # and $R$ and $P'$ have the same $x$-coordinate. # We just need to determine how far $R$ is from $P$. # To get from $P$ to $P'$, we subtract that distance from the $x$-coordinate and add it to the $y$-coordinate. # # Since $P = (a, b)$ is to the lower-right of $y = x$, its $x$-coordinate is larger than the $y$-coordinate, so $a > b$. # Since $R$ is on the line $y = x$, its $x$- and $y$-coordinates are equal. # But we know $R$ has the same $y$-coordinate as $P$, so $R = (b, b)$. # The distance from $P$ to $R$ is just the difference between $a$ and $b$, so $\overline{PR} = a - b$. # # Now # \begin{align*} # P' # &= (a - \overline{PR}, b + \overline{PR}) \\ # &= (a - (a - b), b + (a - b)) \\ # &= (a - a + b, b + a - b) \\ # &= (b, a). # \end{align*} # # This argument depended on $P$ being to the lower-right of the line $y = x$. # As an exercise, figure out how this argument has to change if $P$ is to the upper-left instead. # # Now let's reflect the graph of a function across the line $y = x$. # As always, we sample a bunch of points on the curve, and reflect them each across the line $y = x$. # See what happens when we sample more and more points on the graph of $y = x^2$. 
# + language="html" # # <div id="ggb-slider3"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": false, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-slider3.ggb" # }, 'ggb-slider3'); # # ggbApp.inject(); # </script> # - # We started with a parabola opening upwards. # Its reflection is a parabola opening sideways. # This has an important implication: # *the reflection of a function across $y = x$ might not pass the vertical line test!* # # Keep this in mind while we see how to reflect a function across $y = x$. # Points on the graph of an arbitrary function $y = f(x)$ have the form $(x, f(x))$. # Their reflections have the form $(f(x), x)$, but this is the same "form" as $(f(y), y)$. # Points of that form are just the points on the graph of $x = f(y)$. # # **Rule:** The reflection of a function $y = f(x)$ across the line $y = x$ is the function $x = f(y)$. # # This rule tells us that we only need to swap the $x$'s and $y$'s in our function to get its reflection across $y = x$. # # **Example:** Let's reflect the function $y = x^2$ across the line $y = x$. # According to our rule, we just swap the $x$'s and $y$'s. # So the reflection is # $$ x = y^2. $$ # The problem is solved at this point, but suppose we try to solve for $y$. # We get either # $$ y = \sqrt x $$ # or # $$ y = -\sqrt x. $$ # How do the graphs of $x = y^2$, $y = \sqrt x$, and $y = -\sqrt x$ compare to one another? # The graph of $x = y^2$ is a sideways parabola and does not pass the vertical line test. # The graph of $y = \sqrt x$ is just the top half of that parabola and therefore *does* pass the vertical line test. # The graph of $y = -\sqrt x$ is the bottom half of the parabola and also passes the vertical line test. 
# # **Example:** Now let's reflect the straight line $y = \frac 1 2 x + 1$ across the line $y = x$. # Our rule says we just need to swap the $x$'s and $y$'s, so we get # $$ x = \frac 1 2 y + 1. $$ # Once again, the problem is solved at this point, but let's try solving for $y$: # $$ y = 2 x - 2. $$ # The reflection of the line $y = \frac 1 2 x + 1$ across the line $y = x$ is the line $y = 2 x - 2$. # This is a case of a function whose reflection actually does pass the vertical line test # and can be written in the form $y = f(x)$. # # Use the interactive graph below to plot functions and their reflections across $y = x$. # + language="html" # # <div id="ggb-interactive3"></div> # # <script> # var ggbApp = new GGBApplet({ # "height": 600, # "showToolBar": false, # "showMenuBar": false, # "showAlgebraInput": false, # "showResetIcon": true, # "enableLabelDrags": true, # "enableShiftDragZoom": true, # "enableRightClick": false, # "useBrowserForJS": false, # "filename": "geogebra/reflection-interactive3.ggb" # }, 'ggb-interactive3'); # # ggbApp.inject(); # </script> # - # ## Reflections combined with function operations # # We might be asked to reflect a function after performing some function operations. # For example, we might be asked to add two functions and the reflect them across the $x$-axis, # or reflect the composition of two functions across the $y$-axis. # Well, the sum of two functions, for instance, is a function and we know how to reflect functions now, # so we ought to be able to do this. # # **Problem:** # Reflect $f(x) + g(x)$ across the $x$-axis, where # $$ f(x) = x^2 + 1 \text{ and } g(x) = x^3 + 1. $$ # # **Solution:** # To do this, let's add $f(x)$ and $g(x)$ and call their sum $h(x)$. So # \begin{align*} # h(x) # &= f(x) + g(x) \\ # &= (x^2 + 1) + (x^3 + 1) \\ # &= x^3 + x^2 + 2. # \end{align*} # Now we just need to reflect $h(x)$ across the $x$-axis. 
# Its reflection is $y = -h(x)$, so
# \begin{align*}
# y
# &= -h(x) \\
# &= -(x^3 + x^2 + 2) \\
# &= -x^3 - x^2 - 2.
# \end{align*}
#
# **Problem:**
# Reflect $f(g(x))$ across the $y$-axis, where
# $$ f(x) = \sin(x) \text{ and } g(x) = x^2. $$
#
# **Solution:**
# Like before, let's compose $f(x)$ and $g(x)$ and call their composite $h(x)$. So
# \begin{align*}
# h(x)
# &= f(g(x)) \\
# &= f(x^2) \\
# &= \sin(x^2).
# \end{align*}
# Now we reflect $h(x)$ across the $y$-axis.
# The reflection of $h(x)$ is $y = h(-x)$, so
# \begin{align*}
# y
# &= h(-x) \\
# &= \sin((-x)^2) \\
# &= \sin(x^2).
# \end{align*}

# ## Reflections combined with translations
#
# We can apply the same idea above to reflect translations of graphs.
#
# **Problem:**
# Translate the function $y = f(x)$ left by three units, then reflect across the $y$-axis,
# where $y = |2x|$.
#
# **Solution:**
# Let's label by $g(x)$ the translation of $f(x)$ left by three units.
# Then we just have to reflect $g(x)$ across the $y$-axis.
# So
# \begin{align*}
# g(x)
# &= f(x+3) \\
# &= |2(x+3)| \\
# &= |2x + 6|.
# \end{align*}
# Its reflection across the $y$-axis is
# \begin{align*}
# y
# &= g(-x) \\
# &= |2(-x) + 6| \\
# &= |-2x + 6|.
# \end{align*}

# ## Conclusion
#
# In this notebook, we saw how to reflect over the $x$-axis, the $y$-axis, and the line $y = x$, and how to combine reflections with function operations and translations.
# We've also made a distinction between reflecting a point, reflecting the graph of a function, and reflecting the function itself.
# Reflections of points and graphs are geometric ideas, manipulating plots;
# reflections of functions are algebraic ideas, manipulating equations.
#
# The studies of statistics and of the sciences depend heavily on the skills learned in later math courses, especially Calculus courses.
# A lot of time is spent in Calculus courses analyzing functions and to do this, it is usually helpful to have a clear image in one's mind of the function being analyzed.
# Understanding reflections of graphs and functions is one step towards forming such a clear image. # For instance, you might already know what the graph of $y = 10^x$ looks like, # but using the techniques covered in this notebook, # you can also tell what $y = -10^x$, $y = 10^{-x}$, $y = -10^{-x}$, and $x = 10^y$ look like. # # Recognizing when a function is "even" or "odd" also has its uses. # A typical question in a Calculus class is to find the area underneath a curve and above the $x$-axis. # If the curve is given by an even function, then the area to the right of the $y$-axis is the same as the area to the left. # That means we can get away with only calculating half the area and then doubling the result. # Doubling a number is easier than most things in math, so this can be a time saver. # ## Exercises # # * Plot the point $(1, 2)$ as well as its reflections across the $x$-axis, the $y$-axis, both axes, and the line $y = x$. # * Graph the function $y = x^2 - 2x$. Plot its reflection across the $x$-axis and the $y$-axis. What are the equations of these reflections? # * Graph the function $y = e^x$. Plot its reflection across the line $y = x$. Does the reflected function pass the vertical line test? What is the equation of its reflection (in the form $x = f(y)$ and in the form $y = f(x)$)? # <img src="images/Callysto_Notebook-Banners_Bottom_06.06.18.jpg">
Mathematics/Reflections/reflections.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.3 64-bit (''base'': conda)'
#     language: python
#     name: python373jvsc74a57bd0210f9608a45c0278a93c9e0b10db32a427986ab48cfc0d20c139811eb78c4bbc
# ---

import torch,torchvision
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch.nn import *
from torch.optim import *

# Daily BTC price data (with news-derived columns) scraped into a CSV.
data = pd.read_csv('btcNewsToPrice2.csv')

data.head()

# Encode each 'YYYY-MM-DD' date string as the integer YYYYMMDD so the dates
# can be turned into a tensor. (The redundant `X = new_X` list assignment that
# was immediately overwritten below has been removed.)
X = data['date'].tolist()
new_X = []
for x in X:
    x = x.split('-')
    x = int(f'{x[0]}{x[1]}{x[2]}')
    new_X.append(x)
X = torch.from_numpy(np.array(new_X))
y = torch.from_numpy(np.array(data['high'].tolist()))

from sklearn.model_selection import train_test_split

# shuffle=False keeps the chronological order of the time series intact.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.125,shuffle=False)

X_train.shape,X_test.shape,y_train.shape,y_test.shape

plt.figure()
plt.plot(X_train,y_train)
plt.show()


class LSTMPredictor(Module):
    """Two stacked LSTM cells plus a linear head for 1-D sequence prediction.

    `forward` emits one prediction per input time step; if `future > 0` it
    additionally feeds its own output back in autoregressively to extrapolate
    that many extra steps past the end of the input.
    """

    def __init__(self,n_hidden=512):
        super().__init__()
        self.n_hidden = n_hidden
        # lstm1, lstm2, linear
        self.lstm1 = LSTMCell(1,self.n_hidden)
        self.lstm2 = LSTMCell(self.n_hidden,self.n_hidden)
        self.linear = Linear(self.n_hidden,1)

    def forward(self,X,future=0):
        outputs = []
        n_samples = X.size(0)

        # Zero-initialised hidden/cell states for both LSTM layers.
        h_t = torch.zeros(n_samples,self.n_hidden,dtype=torch.float32)
        c_t = torch.zeros(n_samples,self.n_hidden,dtype=torch.float32)
        h_t2 = torch.zeros(n_samples,self.n_hidden,dtype=torch.float32)
        c_t2 = torch.zeros(n_samples,self.n_hidden,dtype=torch.float32)

        # One step per time point: X.split(1, dim=1) yields (n_samples, 1) slices.
        for input_t in X.split(1,dim=1):
            h_t,c_t = self.lstm1(input_t,(h_t,c_t))
            h_t2,c_t2 = self.lstm2(h_t,(h_t2,c_t2))
            output = self.linear(h_t2)
            outputs.append(output)

        # Autoregressive extrapolation: reuse the last prediction as input.
        for i in range(future):
            h_t,c_t = self.lstm1(output,(h_t,c_t))
            h_t2,c_t2 = self.lstm2(h_t,(h_t2,c_t2))
            output = self.linear(h_t2)
            outputs.append(output)

        outputs = torch.cat(outputs,dim=1)
        return outputs


device = 'cpu'
model = LSTMPredictor().to(device)
epochs = 100
batch_size = len(X_train)

from tqdm import tqdm
import wandb

PROJECT_NAME = 'Bitcoin-Price-Prediction'

criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)

import cv2

# The model is trained on the price series itself: shape (1, len) float tensors.
y_train = y_train.view(1,-1).to(device).float()
y_test = y_test.view(1,-1).to(device).float()

wandb.init(project=PROJECT_NAME,name='baseline')
for _ in tqdm(range(epochs)):
    model.to(device)
    # Teacher forcing: predict y[t+1] from y[..t] over the training series.
    preds = model(y_train[:3,:-1].float())
    preds.to(device)
    loss = criterion(preds,y_train[:3,1:])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    wandb.log({'Loss':loss.item()})
    with torch.no_grad():
        # Validation: run on the test series and extrapolate `future` steps.
        future = 100
        pred = model(y_test.view(1,-1)[:3,:-1].float(),future=future)
        loss = criterion(pred[:,:-future],y_test.view(1,-1).float()[:3,1:])
        y = pred.detach().numpy()
        wandb.log({'Val Loss':loss.item()})
        plt.figure(figsize=(12,6))
        plt.title(f'Step')
        n = y_test.view(1,-1)[:3,:-1].float().shape[1]
        def draw(y_i,color):
            # Solid line: ground truth; dotted line: extrapolated continuation.
            plt.plot(np.arange(n),y_test.view(1,-1).float()[:3,1:][:n].view(-1),color)
            plt.plot(np.arange(n,n+future),y_i[n:],color + ':')
        draw(y[0],'r')
        plt.savefig('./preds/img.png')
        plt.close()
        wandb.log({'Val Img':wandb.Image(cv2.imread('./preds/img.png'))})
    with torch.no_grad():
        # Same visualisation, but on the *training* series.
        future = 100
        pred = model(y_train.view(1,-1)[:3,:-1].float(),future=future)
        loss = criterion(pred[:,:-future],y_train.view(1,-1).float()[:3,1:])
        y = pred.detach().numpy()
        # FIX: this loss is computed on training data; logging it as 'Val Loss'
        # (copy-paste from the block above) clobbered the real validation loss.
        wandb.log({'Train Loss':loss.item()})
        plt.figure(figsize=(12,6))
        plt.title(f'Step')
        n = y_train.view(1,-1)[:3,:-1].float().shape[1]
        def draw(y_i,color):
            plt.plot(np.arange(n),y_train.view(1,-1).float()[:3,1:][:n].view(-1),color)
            plt.plot(np.arange(n,n+future),y_i[n:],color + ':')
        draw(y[0],'r')
        plt.savefig('./preds/img.png')
        plt.close()
        wandb.log({'Img':wandb.Image(cv2.imread('./preds/img.png'))})
wandb.finish()

y

y_test.view(1,-1).float()[:3,1:][:n].view(-1)

y_train[:3,:-1]

preds
00.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from __future__ import print_function, division

# If you are running on a server, launch xvfb to record game videos
# Please make sure you have xvfb installed
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
    # !bash ../xvfb start
    # %env DISPLAY=:1

# If you are new to this course and want more instructions on how to set up the environment and all the libs (docker / windows / gpu / blas / etc.), you could read [vital instructions here](https://github.com/yandexdataschool/Practical_RL/issues/1#issue-202648393).
#
# Please make sure that you have bleeding edge versions of Theano, Lasagne and Agentnet.

# # General purpose libs import

# +
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

from timeit import default_timer as timer
from IPython.core import display
# -

# if you have GPU uncomment the line below
# %env THEANO_FLAGS=device=gpu0,floatX=float32

# Universal collection of a gentleman:

# +
import gym
from agentnet.agent import Agent
from agentnet.experiments.openai_gym.wrappers import PreprocessImage
from agentnet.memory import WindowAugmentation, LSTMCell, GRUCell
from agentnet.target_network import TargetNetwork
from agentnet.resolver import EpsilonGreedyResolver, ProbabilisticResolver
from agentnet.experiments.openai_gym.pool import EnvPool
from agentnet.learning import qlearning

import theano
import theano.tensor as T

import lasagne
from lasagne.layers import DenseLayer, Conv2DLayer, InputLayer, NonlinearityLayer
from lasagne.layers import batch_norm, get_all_params, get_output, reshape, concat, dropout
from lasagne.nonlinearities import rectify, leaky_rectify, elu, tanh, softmax
# -

# # Helper function definitions

# Downsample image, and crop it, showing only the most useful part of image.
def make_env():
    """Build a KungFuMaster env with 64x64 grayscale, cropped observations."""
    env = gym.make("KungFuMaster-v0")
    env = PreprocessImage(env,
                          height=64, width=64, grayscale=True,
                          crop=lambda img: img[60:-30, 7:]
                          )
    return env

# Function for tracking performance while training
def eval_and_plot(rewards, epoch_counter, pool, target_score, th_times, loop_times):
    """Evaluate the pool, store the mean score in `rewards`, and plot history."""
    rewards[epoch_counter] = np.mean(pool.evaluate(n_games=N_EVAL_GAMES,record_video=False, verbose=False))
    info_string = "Time (DL/All) {:.1f}/{:.1f} epoch={}, mean_score={:.2f}"
    info_string = info_string.format(np.mean(th_times), np.mean(loop_times),
                                     epoch_counter, np.mean(rewards[epoch_counter]))
    plt.figure(figsize=(8, 5))
    plt.plot([rewards[i] for i in sorted(rewards.keys())])
    plt.grid()
    plt.ylabel("Mean reward over evaluation games")
    plt.title(info_string)
    plt.show()
    display.clear_output(wait=True)

# # Experiment setup
# Here we basically just load the game and check that it works

env = gym.make('KungFuMaster-v0')
print(env.env.get_action_meanings())
plt.imshow(env.reset())

env = make_env()
plt.imshow(np.squeeze(env.reset()), interpolation='none', cmap='gray')

# # Global constants definition
# All hyperparameters (except number of layers and neurons) are declared here as upper case letters along with global variables.

# +
N_ACTIONS = env.action_space.n
OBS_SHAPE = env.observation_space.shape
OBS_CHANNELS, OBS_HEIGHT, OBS_WIDTH = OBS_SHAPE

# These 4 constants were shown to lead to nearly state of the art on kung-fu master game
N_SIMULTANEOUS_GAMES = 10  # this is also known as number of agents in exp_replay_pool
SEQ_LENGTH = 25
EVAL_EVERY_N_ITER = 100
N_EVAL_GAMES = 2
N_FRAMES_IN_BUFFER = 4  # number of consequent frames to feed in CNN
# -

# # A2C with memory

observation_layer = InputLayer((None,) + OBS_SHAPE)

# Rolling buffer of the last N_FRAMES_IN_BUFFER observations, flattened into
# channels so the conv stack sees a short history of frames.
prev_wnd = InputLayer([None, N_FRAMES_IN_BUFFER, OBS_CHANNELS, OBS_HEIGHT, OBS_WIDTH])
new_wnd = WindowAugmentation(observation_layer, prev_wnd)
wnd_reshape = reshape(new_wnd, [-1, N_FRAMES_IN_BUFFER * OBS_CHANNELS, OBS_HEIGHT, OBS_WIDTH])

# TYPE YOUR CODE HERE
# provide the main body of the network : first three convolutional layers and dense one on top
# you may want to change nonlinearity - feel free to do this
# note that we have changed filter size here because of reduced image width and height compared to those in papers
conv1 = Conv2DLayer(wnd_reshape, ...)
...
dense = Dense(...)

# +
# YOUR CODE HERE
# define 256 neuron LSTM cell:
# - define two input layers each of n_lstm_cells (maybe 256 is a good baseline) neurons
# - feed into `LSTMcell` this two layers and
#   input layer (last `Dense` in case of A2C+LSTM) as additional third parameter
# - neck_layer = concat([<dense layer before lstm>, <output of LSTM layer>])  # network neck

# YOUR CODE HERE
# define actors head as
# - logits_layer – dense(neck) with nonlinearity=None
# - policy layer – softmax over logits_layer
........

action_layer = ProbabilisticResolver(policy_layer)

# critic head
V_layer = DenseLayer(neck_layer, 1, nonlinearity=None)

# YOUR CODE HERE
# `observation_layers` is input layer to NN, as usual
# `policy_estimators` should include 1) logits_layer and 2) V_layer
# `agent_states` is a dictionary of {new_value: old_value}. You should bother to update
# a) prev window (input buffer, prev_wnd) b) previous LSTM cell state c) output of LSTM cell
# `action_layers` is action_layer, as usual : )
agent = Agent(....)

# may need to adjust (increasing N_SIMULTANEOUS_GAMES is usually a good idea)
pool = EnvPool(agent, make_env, n_games=N_SIMULTANEOUS_GAMES)

replay = pool.experience_replay

_, _, _, action_seq, (logits_seq, V_seq) = agent.get_sessions(
    replay,
    session_length=SEQ_LENGTH,
    experience_replay=True
)

# +
# compute pi(a|s) and log(pi(a|s)) manually [use logsoftmax]
# we can't guarantee that theano optimizes logsoftmax automatically since it's still in dev
# for more info see (https://github.com/Theano/Theano/issues/2944 of 2015 year)
# logits_seq.shape is (batch_size, SEQ_LENGTH, N_ACTIONS)
logits_flat = logits_seq.reshape([-1, N_ACTIONS])
policy_seq = T.nnet.softmax(logits_flat).reshape(logits_seq.shape)
logpolicy_seq = T.nnet.logsoftmax(logits_flat).reshape(logits_seq.shape)

# +
# get policy gradient
from agentnet.learning import a2c
elwise_actor_loss, elwise_critic_loss = a2c.get_elementwise_objective(
    policy=logpolicy_seq,
    treat_policy_as_logpolicy=True,
    state_values=V_seq[:,:,0],
    actions=replay.actions[0],
    rewards=replay.rewards/10,
    is_alive=replay.is_alive,
    gamma_or_gammas=0.99,
    n_steps=None,
    return_separate=True
)

# add losses with magic numbers
# (you can change them more or less harmlessly, this usually just makes learning faster/slower)
# actor and critic multipliers were selected guided by prior knowledge
# entropy / regularization multipliers were tuned with logscale gridsearch
# NB: regularization affects exploration
reg_logits = T.mean(logits_seq ** 2)
reg_entropy = T.mean(T.sum(policy_seq * logpolicy_seq, axis=-1))

loss = 0.1 * elwise_actor_loss.mean() + 0.25 * elwise_critic_loss.mean() + 1e-3 * reg_entropy + 1e-3 * reg_logits
# -

# Compute weight updates, clip by norm for stability
weights = lasagne.layers.get_all_params([V_layer, policy_layer], trainable=True)

grads = T.grad(loss, weights)
grads = lasagne.updates.total_norm_constraint(grads, 10)
updates = lasagne.updates.adam(grads, weights)

train_step = theano.function([], loss, updates=updates)

# # Train

epoch_counter = 1  # starting epoch
rewards = {}  # full game rewards
target_score = 10000

loss, eval_rewards = 0, []

untrained_reward = np.mean(pool.evaluate(n_games=5, record_video=False, verbose=False))
untrained_reward

# +
# IF you feel disgust about stderr messages due to pool.evaluate() execution
# which pollutes output of jupyter cell, you could do one of the following:
# 1. use warnings.filterwarnings("ignore")
# 2. use cell magic %%capture
# 3. simply redirect stderr to /dev/null with command
# import os, sys
# stder_old = sys.stderr
# sys.stderr = open(os.devnull, 'w')
# -

th_times, loop_times = [], []
for i in range(2000):
    loop_starts = timer()

    pool.update(SEQ_LENGTH)

    train_starts = timer()
    # YOUR CODE HERE : train network (actor and critic)
    raise NotImplementedError
    th_times.append(timer() - train_starts)

    epoch_counter +=1
    loop_times.append(timer() - loop_starts)

    #You may want to set EVAL_EVERY_N_ITER=1 for the time being
    if epoch_counter % EVAL_EVERY_N_ITER==0:
        eval_and_plot(rewards, epoch_counter, pool, target_score, th_times, loop_times)
        if rewards[epoch_counter] >= target_score:
            print("VICTORY!")
            break
        th_times, loop_times = [], []

eval_and_plot(rewards, epoch_counter, pool, target_score, th_times, loop_times)
week7_pomdp/practice_theano.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Signal Subspace Projections (SSP) for removing artefacts
#
# `
# Authors:
# <NAME>
# `
#
# License: BSD (3-clause)

# +
import numpy as np

import mne
from mne.datasets import sample
from mne.utils import set_log_level
# -

# SSP (projection) related functionality
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
from mne.preprocessing import create_ecg_epochs, create_eog_epochs

set_log_level('warning')  # we don't want to see all logging outputs today

# +
# getting some data ready
# FIX(review): use the portable sample.data_path() (downloads/locates the MNE
# sample dataset) instead of a hard-coded machine-specific path.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

raw = mne.io.read_raw_fif(raw_fname, preload=True)

# Let's work only with MEG data with the EOG channel(s)
picks_meg = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                           stim=True, exclude='bads')
raw.pick_channels([raw.ch_names[k] for k in picks_meg])  # select MEG channels only
raw.filter(1, 45, n_jobs=2)  # note the n_jobs=2 to run filtering in parallel !
# -

# # Before fitting models let's look at what we want to get rid of
#
# First ECG
#
# What is the order of magnitude of the average artefact?
#
# Do we see different spatial patterns?

# %matplotlib inline

reject = dict(mag=4e-12, grad=4000e-13)  # don't fit on crazy environmental artefacts
ecg_average = create_ecg_epochs(raw, reject=reject).average()
ecg_average.plot_joint();

# And now EOG

reject = dict(mag=4e-12, grad=4000e-13)  # don't fit on crazy environmental artefacts
eog_average = create_eog_epochs(raw, reject=reject).average()
eog_average.plot_joint();

# # Compute SSP projection vectors
#
# First for ECG

# +
# compute_proj_ecg?
# -

projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, average=True)
print(projs)

# keep only the newly computed projections (last n_grad + n_mag entries)
ecg_projs = projs[-2:]
mne.viz.plot_projs_topomap(ecg_projs);

# Now for EOG

projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, average=True)
print(projs)

eog_projs = projs[-2:]
mne.viz.plot_projs_topomap(eog_projs);

# ### Apply projections

raw.info['projs'] += eog_projs + ecg_projs

events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)  # this can be highly data dependent
event_id = [1]

# Compare evoked responses without (proj=False) and with (proj=True) the
# SSP projections applied.
epochs_no_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=False,
                            baseline=(None, 0), reject=reject)
epochs_no_proj.average().plot(spatial_colors=True);

epochs_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=True,
                         baseline=(None, 0), reject=reject)
epochs_proj.average().plot(spatial_colors=True);
2017_03_Brussels/3a-Preprocessing_SSP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Tutorial on how to couple a power grid and a gas network by a power-to-gas plant and a fuel cell
# -

# In this tutorial, a power network and a gas network are coupled by a power-to-gas unit (P2G) and
# a gas-to-power unit (G2P), e.g. a fuel cell. The P2G and G2P have an input value that is set in one
# network (power or gas consumption, respectively). During the simulation, the output value is calculated
# by applying efficiency factors and is written then to the other network.
#
# There are three basic steps:
# 1. bringing the networks together in a multinet-frame
# 1. adding elements for the P2G and G2P units and coupling controller
# 1. executing the coupled power and pipe flow

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Creating a multi-net
# First, we import some example networks and set the fluid for the gas net and
# P2G conversion.

# + pycharm={"name": "#%%\n"}
from pandapower import networks as e_nw

net_power = e_nw.example_simple()

import pandapipes as ppipes
from pandapipes import networks as g_nw

net_gas = g_nw.gas_meshed_square()

# some adjustments:
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30
net_gas.pipe.diameter_m = 0.4

# set fluid:
ppipes.create_fluid_from_lib(net_gas, 'hydrogen', overwrite=True)

# + [markdown] pycharm={"name": "#%% md\n"}
# Then, we create a 'multinet'. It serves as a container for multiple networks to enable
# coupled simulation. Each net in the multinet has to have a unique name. Any name can be chosen - default
# names are 'power' and 'gas', but 'net1' and 'net2' would work just as fine. The number of
# networks in the multinet is not limited.

# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.create_multinet import create_empty_multinet, add_net_to_multinet

multinet = create_empty_multinet('tutorial_multinet')
add_net_to_multinet(multinet, net_power, 'power')
add_net_to_multinet(multinet, net_gas, 'gas')

# + [markdown] pycharm={"name": "#%% md\n"}
# The individual networks can be called from the multinet or by the variable name - the result is
# identical:

# + pycharm={"name": "#%%\n"}
print(multinet.nets['power'])
print(multinet.nets['gas'])

# + pycharm={"name": "#%%\n"}
print(net_power)
print(net_gas)

# + pycharm={"name": "#%%\n"}
# the multinet stores references, not copies:
print(net_power is multinet.nets['power'])
print(net_gas is multinet.nets['gas'])

# + [markdown] pycharm={"name": "#%% md\n"}
# Thus, changes to the networks will be found at both places.
# -

# ## Add elements that represent the coupling units
# Now, we add elements to represent the input and output of the P2G and G2P units. They are
# assigned to specific buses / junctions. The input values have to be set. Since the output is
# calculated during the simulation, we can simply set it to 0 when calling the `create` function.

# + pycharm={"name": "#%%\n"}
import pandapower as ppower
import pandapipes as ppipes

# P2G: electrical load (input, 2 MW) -> gas source (output, computed later)
p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")

# G2P: gas sink (input, 0.1 kg/s) -> static generator (output, computed later)
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")
# -

# Now, the coupling controllers are imported and initialized. We hand over the IDs of the
# P2G unit in the power grid (i.e., which load element represents the electrolyser)
# and in the gas grid (i.e., which source elements represents the P2G feed-in).
# Analogously, a G2P controller is created.

# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.control.controller.multinet_control import P2GControlMultiEnergy, \
    G2PControlMultiEnergy

p2g_ctrl = P2GControlMultiEnergy(multinet, p2g_id_el, p2g_id_gas, efficiency=0.7,
                                 name_power_net="power", name_gas_net="gas")
g2p_ctrl = G2PControlMultiEnergy(multinet, g2p_id_el, g2p_id_gas, efficiency=0.65,
                                 name_power_net="power", name_gas_net="gas")

# + [markdown] pycharm={"name": "#%% md\n"}
# Internally, both controllers calculate with the higher heating value of the gas. (It is a
# property of the gas ('fluid') in the gas network and provided in the file
# *pandapipes/properties/[fluid_name]/higher_heating_value.txt*)
#
# It is also possible to order the controllers hierarchical,
# (cf. the [Control chapter in the pandapower documentation](https://pandapower.readthedocs.io/en/latest/control/control_loop.html))

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Run simulation
# Now, the simulation can be run. As there are different `run` functions required (power flow or
# pipe flow), we simply execute `run_control` for the multinet. This collects all nets and
# controllers and conducts the corresponding run function.

# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.control.run_control_multinet import run_control

run_control(multinet)

# + [markdown] pycharm={"name": "#%% md\n"}
# Now, the output values have been updated and equal the power input times efficiency (and
# consideration of unit conversion):

# + pycharm={"name": "#%%\n"}
print(net_gas.source.loc[p2g_id_gas, 'mdot_kg_per_s'])
print(net_power.sgen.loc[g2p_id_el, 'p_mw'])

# + [markdown] pycharm={"name": "#%% md\n"}
# In summary:

# + pycharm={"name": "#%%\n"}
# Self-contained recap of all steps above in one cell.
import pandapipes as ppipes
import pandapower as ppower
from pandapipes import networks as g_nw
from pandapower import networks as e_nw
from pandapipes.multinet.create_multinet import create_empty_multinet, add_net_to_multinet
from pandapipes.multinet.control.controller.multinet_control import P2GControlMultiEnergy, G2PControlMultiEnergy
from pandapipes.multinet.control.run_control_multinet import run_control

# get networks:
net_power = e_nw.example_simple()
net_gas = g_nw.gas_meshed_square()

# some adjustments:
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30
net_gas.pipe.diameter_m = 0.4
net_gas.controller.rename(columns={'controller': 'object'}, inplace=True)  # due to new version

# set fluid:
ppipes.create_fluid_from_lib(net_gas, 'hydrogen', overwrite=True)

# create multinet and add networks:
multinet = create_empty_multinet('tutorial_multinet')
add_net_to_multinet(multinet, net_power, 'power')
add_net_to_multinet(multinet, net_gas, 'gas')

# create elements corresponding to conversion units:
p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")

# create coupling controllers:
p2g_ctrl = P2GControlMultiEnergy(multinet, p2g_id_el, p2g_id_gas, efficiency=0.7,
                                 name_power_net="power", name_gas_net="gas")
g2p_ctrl = G2PControlMultiEnergy(multinet, g2p_id_el, g2p_id_gas, efficiency=0.65,
                                 name_power_net="power", name_gas_net="gas")

# run simulation:
run_control(multinet)

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Time series simulation
# Sometimes, the input values (and the corresponding outputs) for conversion units change over time,
# e.g. during a time series simulation. The MultiEnergy controllers themselves cannot handle time
# series inputs. However, they can easily be combined with a ConstController that updates the input
# values according to a time series. After the update of the values, the MultiEnergy controller is
# executed to
# calculate and write the output value to the other net. The convenience functions to create both
# controllers in one step are `coupled_p2g_const_control` and `coupled_g2p_const_control`.

# + [markdown] pycharm={"name": "#%% md\n"}
# Here is an example for a coupled time series simulation.
# First, the nets are prepared like before:

# + pycharm={"name": "#%%\n"}
# prepare just like before (note the different net names 'power_net' / 'gas_net')
net_power = e_nw.example_simple()
net_gas = g_nw.gas_meshed_square()
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30
net_gas.pipe.diameter_m = 0.4
net_gas.controller.rename(columns={'controller': 'object'}, inplace=True)  # due to new version
ppipes.create_fluid_from_lib(net_gas, 'hydrogen', overwrite=True)

multinet = create_empty_multinet('tutorial_multinet')
add_net_to_multinet(multinet, net_power, 'power_net')
add_net_to_multinet(multinet, net_gas, 'gas_net')

p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")

# + [markdown] pycharm={"name": "#%% md\n"}
# For the time series, some example data is created and defined as data source.
# + pycharm={"name": "#%%\n"} from pandas import DataFrame from numpy.random import random from pandapower.timeseries import DFData def create_data_source(n_timesteps=10): profiles = DataFrame() profiles['power to gas consumption'] = random(n_timesteps) * 2 + 1 profiles['gas to power consumption'] = random(n_timesteps) * 0.1 ds = DFData(profiles) return profiles, ds profiles, ds = create_data_source(10) # + [markdown] pycharm={"name": "#%% md\n"} # Then, output writers are create for the time series simulation: # + pycharm={"name": "#%%\n"} from os.path import join, dirname from pandapower.timeseries import OutputWriter def create_output_writers(multinet, time_steps=None): nets = multinet["nets"] ows = dict() for key_net in nets.keys(): ows[key_net] = {} if isinstance(nets[key_net], ppower.pandapowerNet): log_variables = [('res_bus', 'vm_pu'), ('res_line', 'loading_percent'), ('res_line', 'i_ka'), ('res_bus', 'p_mw'), ('res_bus', 'q_mvar'), ('res_load', 'p_mw'), ('res_load', 'q_mvar')] ow = OutputWriter(nets[key_net], time_steps=time_steps, log_variables=log_variables, output_path=join(dirname('__file__'),'timeseries', 'results', 'power'), output_file_type=".csv") ows[key_net] = ow elif isinstance(nets[key_net], ppipes.pandapipesNet): log_variables = [('res_sink', 'mdot_kg_per_s'), ('res_source', 'mdot_kg_per_s'), ('res_ext_grid', 'mdot_kg_per_s'), ('res_pipe', 'v_mean_m_per_s'), ('res_junction', 'p_bar'), ('res_junction', 't_k')] ow = OutputWriter(nets[key_net], time_steps=time_steps, log_variables=log_variables, output_path=join(dirname('__file__'), 'timeseries', 'results', 'gas'), output_file_type=".csv") ows[key_net] = ow else: raise AttributeError("Could not create an output writer for nets of kind " + str(key_net)) return ows ows = create_output_writers(multinet, 10) # + [markdown] pycharm={"name": "#%% md\n"} # Now, we add the aforementioned combined controllers. 
# + pycharm={"name": "#%%\n"} from pandapipes.multinet.control.controller.multinet_control import coupled_p2g_const_control, \ coupled_g2p_const_control coupled_p2g_const_control(multinet, p2g_id_el, p2g_id_gas, name_power_net="power_net", name_gas_net="gas_net", profile_name='power to gas consumption', data_source=ds, p2g_efficiency=0.7) coupled_g2p_const_control(multinet, g2p_id_el, g2p_id_gas, name_power_net="power_net", name_gas_net="gas_net", element_type_power="sgen", profile_name='gas to power consumption', data_source=ds, g2p_efficiency=0.65) # + [markdown] pycharm={"name": "#%% md\n"} # The ConstControllers are stored in the separate nets, while the coupling controllers can be found # in the multinet: # + pycharm={"name": "#%%\n"} print(multinet.controller) print(net_power.controller) print(net_gas.controller) # + [markdown] pycharm={"name": "#%% md\n"} # The time series is calculated with a `run_timeseries` function that has been adapted for multinets: # + pycharm={"name": "#%%\n"} from pandapipes.multinet.timeseries.run_time_series_multinet import run_timeseries run_timeseries(multinet, time_steps=range(10), output_writers=ows)
tutorials/coupled_nets_h2_p2g2p.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Simple gym, roboschool Render Test # # Source: https://stackoverflow.com/a/44426542 # + import gym, roboschool from IPython import display import PIL.Image import time from io import BytesIO def showarray(a, fmt='jpeg'): f = BytesIO() PIL.Image.fromarray(a).save(f, fmt) display.display(display.Image(data=f.getvalue())) env = gym.make('RoboschoolInvertedPendulum-v1') env.reset() fps = [] for _ in range(100): action = env.action_space.sample() env.step(action) t1 = time.time() showarray(env.render(mode='rgb_array')) t2 = time.time() fps.append(1/(t2-t1)) display.clear_output(wait=True)
dockerfile-example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables "data cubes" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to "mock-up" what would be seen in observations.

# +
from yt.config import ytcfg

import yt
import numpy as np

from yt.analysis_modules.ppv_cube.api import PPVCube
import yt.units as u
# -

# To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:

# Density: $\rho(r) \propto r^{\alpha}$

# Velocity: $v_{\theta}(r) \propto \frac{r}{1+(r/r_0)^{\beta}}$

# where for simplicity we won't worry about the normalizations of these profiles.

# First, we'll set up the grid and the parameters of the profiles:

# increasing the resolution will make the images in this notebook more visually appealing
nx, ny, nz = (64, 64, 64)  # domain dimensions
R = 10.  # outer radius of disk, kpc
r_0 = 3.  # scale radius, kpc
beta = 1.4  # for the tangential velocity profile
alpha = -1.  # for the radial density profile
x, y = np.mgrid[-R:R:nx*1j, -R:R:ny*1j]  # cartesian coordinates of x-y plane of disk
r = np.sqrt(x*x+y*y)  # polar coordinates
theta = np.arctan2(y, x)  # polar coordinates

# Second, we'll construct the data arrays for the density, temperature, and velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.

# FIX(review): the original used nz/2 in the slices, which is a float in
# Python 3 and raises "TypeError: slice indices must be integers" — use
# integer floor division nz//2 instead. The disk occupies the 6 midplane
# cells (three in either direction, as described above).
dens = np.zeros((nx, ny, nz))
dens[:, :, nz//2-3:nz//2+3] = (r**alpha).reshape(nx, ny, 1)  # the density profile of the disk
temp = np.zeros((nx, ny, nz))
temp[:, :, nz//2-3:nz//2+3] = 1.0e5  # Isothermal
vel_theta = 100.*r/(1.+(r/r_0)**beta)  # the azimuthal velocity profile of the disk
velx = np.zeros((nx, ny, nz))
vely = np.zeros((nx, ny, nz))
velx[:, :, nz//2-3:nz//2+3] = (-vel_theta*np.sin(theta)).reshape(nx, ny, 1)  # convert polar to cartesian
vely[:, :, nz//2-3:nz//2+3] = (vel_theta*np.cos(theta)).reshape(nx, ny, 1)  # convert polar to cartesian
# zero out everything outside the disk radius (mask applies to the x-y plane)
dens[r > R] = 0.0
temp[r > R] = 0.0
velx[r > R] = 0.0
vely[r > R] = 0.0

# Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1 `code_length`.
data = {}
data["density"] = (dens, "g/cm**3")
data["temperature"] = (temp, "K")
data["velocity_x"] = (velx, "km/s")
data["velocity_y"] = (vely, "km/s")
data["velocity_z"] = (np.zeros((nx, ny, nz)), "km/s")  # zero velocity in the z-direction
bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])  # bbox of width 1 on a side with center (0,0,0)
ds = yt.load_uniform_grid(data, (nx, ny, nz), length_unit=(2*R, "kpc"), nprocs=1, bbox=bbox)

# To get a sense of what the data looks like, we'll take a slice through the middle of the disk:

slc = yt.SlicePlot(ds, "z", ["density", "velocity_x", "velocity_y", "velocity_magnitude"])

# velocities are signed, so disable log scaling for them
slc.set_log("velocity_x", False)
slc.set_log("velocity_y", False)
slc.set_log("velocity_magnitude", False)
slc.set_unit("velocity_magnitude", "km/s")
slc.show()

# Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the x-axis. We'll create a normal vector:

i = 60.*np.pi/180.
L = [np.sin(i), 0.0, np.cos(i)]

# Next, we need to specify a field that will serve as the "intensity" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to choose the bounds in line-of-sight velocity that the data will be binned into, which is a 4-tuple in the shape of `(vmin, vmax, nbins, units)`, which specifies a linear range of `nbins` velocity bins from `vmin` to `vmax` in units of `units`. We may also optionally specify the dimensions of the data cube with the `dims` argument.

cube = PPVCube(ds, L, "density", (-150., 150., 50, "km/s"), dims=200, method="sum")

# Following this, we can now write this cube to a FITS file. The x and y axes of the file can be in length units, which can be optionally specified by `length_unit`:

cube.write_fits("cube.fits", clobber=True, length_unit="kpc")

# Or one can use the `sky_scale` and `sky_center` keywords to set up the coordinates in RA and Dec:

sky_scale = (1.0, "arcsec/kpc")
sky_center = (30., 45.)  # RA, Dec in degrees
cube.write_fits("cube_sky.fits", clobber=True, sky_scale=sky_scale, sky_center=sky_center)

# Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the "z" axis:

ds_cube = yt.load("cube.fits")

# Specifying no center gives us the center slice
slc = yt.SlicePlot(ds_cube, "z", ["density"])
slc.show()

# Picking different velocities for the slices
new_center = ds_cube.domain_center
new_center[2] = ds_cube.spec2pixel(-100.*u.km/u.s)
slc = yt.SlicePlot(ds_cube, "z", ["density"], center=new_center)
slc.show()

new_center[2] = ds_cube.spec2pixel(70.0*u.km/u.s)
slc = yt.SlicePlot(ds_cube, "z", ["density"], center=new_center)
slc.show()

new_center[2] = ds_cube.spec2pixel(-30.0*u.km/u.s)
slc = yt.SlicePlot(ds_cube, "z", ["density"], center=new_center)
slc.show()

# If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:

prj = yt.ProjectionPlot(ds_cube, "z", ["density"], method="sum")
prj.set_log("density", True)
prj.set_zlim("density", 1.0e-3, 0.2)
prj.show()

# The `thermal_broad` keyword allows one to simulate thermal line broadening based on the temperature, and the `atomic_weight` argument is used to specify the atomic weight of the particle that is doing the emitting.

cube2 = PPVCube(ds, L, "density", (-150., 150., 50, "km/s"), dims=200, thermal_broad=True,
                atomic_weight=12.0, method="sum")
cube2.write_fits("cube2.fits", clobber=True, length_unit="kpc")

# Taking a slice of this cube shows:

ds_cube2 = yt.load("cube2.fits")
new_center = ds_cube2.domain_center
new_center[2] = ds_cube2.spec2pixel(70.0*u.km/u.s)
slc = yt.SlicePlot(ds_cube2, "z", ["density"], center=new_center)
slc.show()

new_center[2] = ds_cube2.spec2pixel(-100.*u.km/u.s)
slc = yt.SlicePlot(ds_cube2, "z", ["density"], center=new_center)
slc.show()

# where we can see the emission has been smeared into this velocity slice from neighboring slices due to the thermal broadening.
#
# Finally, the "velocity" or "spectral" axis of the cube can be changed to a different unit, such as wavelength, frequency, or energy:

print (cube2.vbins[0], cube2.vbins[-1])
cube2.transform_spectral_axis(400.0, "nm")
print (cube2.vbins[0], cube2.vbins[-1])

# If a FITS file is now written from the cube, the spectral axis will be in the new units. To reset the spectral axis back to the original velocity units:

cube2.reset_spectral_axis()
print (cube2.vbins[0], cube2.vbins[-1])
doc/source/analyzing/analysis_modules/PPVCube.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spectrum class example # Simple example showing how the spectrum class works. Make sure you (pip) install the nasagamma module from: https://github.com/mauricioAyllon/NASA-gamma # # # %matplotlib inline from nasagamma import spectrum as sp import numpy as np import matplotlib.pyplot as plt import pandas as pd # Load data and extract counts, channels, and energy. # + # dataset 1 file = "data/SSR-mcnp.hdf" df = pd.read_hdf(file, key='data') # delete first (large) bin df = df.iloc[1:,:] cts_np = df.cts.to_numpy() * 1e8 erg = np.array(df.index) chan = np.arange(0,len(cts_np),1) # - # Instantiate a Spectrum object. Note that energy is an optional parameter. If it is not passed, channel numbers are infered. # # instantiate a Spectrum object spect = sp.Spectrum(counts=cts_np, energies=erg) # Plot using the built-in plotting routine spect.plot() # We can also use some of the methods in the Spectrum class, such as rebin. Note that this conserves the area under the curve. # # + # rebin by 2 and by 4 ener2, cts2 = spect.rebin() spect4 = sp.Spectrum(counts=cts2, energies=ener2) ener4, cts4 = spect4.rebin() plt.figure() plt.plot(erg, cts_np, label="Original") plt.plot(ener2, cts2, label="Rebinned by 2") plt.plot(ener4, cts4, label="Rebinned by 4") plt.yscale("log") plt.legend() plt.ylabel("cts/s/MeV") plt.xlabel("Energy [MeV]") # - # We can also apply the smooth function, which performs a moving average smoothing of the data. Note that this method does not necessarily conserve the area under the curve. 
# + # smoothing every 8 points spect_mv = sp.Spectrum(counts=cts_np, energies=erg) n = 8 cts_mv = spect_mv.smooth(num=n) plt.figure() plt.plot(erg, cts_np, label="Original") plt.plot(erg, cts_mv, label=f"Smoothing by {n}") plt.yscale("log") plt.legend() plt.ylabel("cts/s/MeV") plt.xlabel("Energy [MeV]") # -
examples/1.spectrum_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial: Information Imbalance

# +
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

from duly.plot import plot_inf_imb_plane
from duly.metric_comparisons import MetricComparisons

# +
# %load_ext autoreload
# %autoreload 2
# -

# ### 3D Gaussian with small variance along $z$

# +
# define a simple dataset of a 3D gaussian with a small variance along the z axis
N = 1000
cov = np.identity(3)
cov[-1, -1] = 0.01**2  # variance along z is (0.01)^2
mean = np.zeros(3)

X = np.random.multivariate_normal(mean=mean, cov=cov, size=(N))

# +
# define an instance of the MetricComparisons class
d = MetricComparisons(X)
d.compute_distances(maxk=X.shape[0])

# +
labels = ['x', 'y', 'z']
# single coordinates and all two-coordinate combinations
coord_list = [[0, ], [1, ], [2, ], [0, 1], [0, 2], [1, 2]]

# +
# compute the information imbalances
imbalances = d.return_inf_imb_full_selected_coords(coord_list)

# +
# plot information imbalance plane
plot_inf_imb_plane(imbalances, coord_list, labels)
# -

# ### 10D isotropic Gaussian

# +
N = 1000
X = np.random.normal(size=(N, 10))

# +
# define an instance of the MetricComparisons class
d = MetricComparisons(X)
d.compute_distances(maxk=X.shape[0])

# +
labels = ['x{}'.format(i) for i in range(10)]
# nested coordinate subsets: first 1, first 2, ..., all 10 coordinates
coord_list = [np.arange(i) for i in range(1, 11)]

# +
# compute the information imbalances
imbalances = d.return_inf_imb_full_selected_coords(coord_list)

# +
# plot information imbalance plane
plot_inf_imb_plane(imbalances)
# -

# ### Sinusoidal function

# +
N = 1000
# FIX(review): removed a dead `x1 = np.linspace(0, 1, N)` that was
# immediately overwritten by the uniform sample below.
x1 = np.random.uniform(0, 1, N)
x1 = np.sort(x1)
x1 = np.atleast_2d(x1).T
x2 = 5*np.sin(x1*25) + np.random.normal(0, .5, (N, 1))  # noisy sinusoid of x1
X = np.hstack([x1, x2])
# -

plt.figure(figsize=(3, 3))
plt.plot(x1, x2);
plt.xlabel(r'$x1$')
plt.ylabel(r'$x2$')

# +
# define an instance of the MetricComparisons class
d = MetricComparisons(X)
d.compute_distances(maxk=X.shape[0])
# -

imb01, imb10 = d.return_inf_imb_two_selected_coords([0], [1])

imb01

imb10

# +
# plot information imbalance plane
plt.figure(figsize=(4, 4))
#plot_inf_imb_plane(imbalances, coord_list, labels)
plt.scatter(imb01, imb10)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel(r'$\Delta(x_1 \rightarrow x_2) $')
plt.ylabel(r'$\Delta(x_2 \rightarrow x_1) $')
# -
examples/notebook_on_information_imbalance.ipynb