code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import geopandas as gpd
from osgeo import osr, gdal
import matplotlib.pyplot as plt
import numpy as np

# Vertical-land-motion station table; assumed to contain Station, VLM_std,
# Latitude and Longitude columns (used below) — TODO confirm against data.xls.
vlm = pd.read_excel('data.xls')
vlm

elevation = gdal.Open('Elevation.tif', gdal.GA_ReadOnly)
elev_arr = elevation.GetRasterBand(1).ReadAsArray()
elev_arr


def visualize_raster_bands(dataset):
    """Plot every raster band of a GDAL dataset side by side.

    Args:
        dataset: an open gdal.Dataset object.

    Returns:
        None. Displays one matplotlib subplot per band.
    """
    raster_count = dataset.RasterCount
    # GDAL band indices are 1-based.
    arrs = [dataset.GetRasterBand(i).ReadAsArray()
            for i in range(1, raster_count + 1)]
    # squeeze=False guarantees axs is always a 2-D array, so a single-band
    # dataset goes through the same code path as a multi-band one (the
    # original special-cased it, dropped the colormap and left the created
    # figure unused).
    fig, axs = plt.subplots(1, raster_count,
                            figsize=(8 * raster_count, 8), squeeze=False)
    for i in range(raster_count):
        axs[0, i].imshow(arrs[i], cmap='GnBu')
        axs[0, i].set_title(f"Band #{i+1}")
    plt.show()


visualize_raster_bands(elevation)

gt = elevation.GetGeoTransform()
if gt:
    print("Origin = ({}, {})".format(gt[0], gt[3]))
    print("Pixel Size = ({}, {})".format(gt[1], gt[5]))

old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(elevation.GetProjectionRef())
elevation.GetProjectionRef()

wgs84_wkt = """
GEOGCS["WGS 84",
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["degree",0.01745329251994328,
        AUTHORITY["EPSG","9122"]],
    AUTHORITY["EPSG","4326"]]"""
new_cs = osr.SpatialReference()
new_cs.ImportFromWkt(wgs84_wkt)

# Geotransform coefficient layout (GDAL convention):
# GT(0) x-coordinate of the upper-left corner of the upper-left pixel.
# GT(1) w-e pixel resolution / pixel width.
# GT(2) row rotation (typically zero).
# GT(3) y-coordinate of the upper-left corner of the upper-left pixel.
# GT(4) column rotation (typically zero).
# GT(5) n-s pixel resolution / pixel height (negative for a north-up image).

transform = osr.CoordinateTransformation(old_cs, new_cs)
width = elevation.RasterXSize
height = elevation.RasterYSize
# Lower-left corner: y applies column rotation and pixel height.
# NOTE(review): minx ignores the row-rotation term height*gt[2]; that is only
# exact when gt[2] == 0 (typical) — confirm for this raster.
minx = gt[0]
miny = gt[3] + width * gt[4] + height * gt[5]
(width, height), (minx, miny)

latlong = transform.TransformPoint(minx, miny)
latlong

import georasters as gr
file = 'Elevation.tif'
data = gr.from_file(file)
data.plot()
df = data.to_pandas()
df
df.x.min(), df.x.max()
df.y.min(), df.y.max()
vlm.Longitude.min()
new_vlm = vlm.drop(columns=["Station", "VLM_std"], axis=1)

# Overlay the raster pixel coordinates and the VLM station locations.
plt.figure(figsize=(12, 8))
plt.scatter(df.x, df.y, alpha=0.5)
plt.scatter(vlm.Longitude, vlm.Latitude, color='r', alpha=0.5)
# plt.scatter(52.587928, 24.171598, color='g')
# plt.scatter(53.402955, 23.721481, color='g')
plt.show()
Ngoc/Week 5/June 22.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import bnn

root_dir = bnn.bnn.BNN_ROOT_DIR
param_dir = bnn.bnn.BNN_PARAM_DIR
print(root_dir)
print(param_dir)

# +
import os

training_dir = "/home/xilinx"
print(training_dir)
# -

# !cp -r $training_dir/fashion-mnist-lfc $param_dir/

bnn.available_params(bnn.NETWORK_LFCW1A1)

hw_classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1,"mnist",bnn.RUNTIME_HW)
print(hw_classifier.classes)

# +
from array import *
from PIL import Image as PIL_Image
from PIL import ImageOps

im = PIL_Image.open('/home/xilinx/jupyter_notebooks/BNN-PYNQ-master/notebooks/pictures/red.jpg')
img_load = im.convert("L")  # grayscale

# Convert to BNN input format.
# The image is resized to comply with the MNIST standard: 28x28 pixels with
# the colors inverted (white on black).
smallimg = ImageOps.invert(img_load)
smallimg = smallimg.rotate(0)

data_image = array('B')
pixel = smallimg.load()
for x in range(0, 28):
    for y in range(0, 28):
        # NOTE(review): only fully-white pixels stay 255, everything else is
        # collapsed to 1 — the image is effectively binarized.
        if (pixel[y, x] == 255):
            data_image.append(255)
        else:
            data_image.append(1)

# Header of the MNIST IDX file format — required because the hardware is
# designed for the MNIST dataset.
hexval = "{0:#0{1}x}".format(1, 6)
header = array('B')
header.extend([0, 0, 8, 1, 0, 0])
header.append(int('0x' + hexval[2:][:2], 16))
header.append(int('0x' + hexval[2:][2:], 16))
header.extend([0, 0, 0, 28, 0, 0, 0, 28])
header[3] = 3  # Changing MSB for image data (0x00000803)

data_image = header + data_image

# `with` guarantees the file is closed even if tofile() raises.
with open('/home/xilinx/img_webcam_mnist_processed', 'wb') as output_file:
    data_image.tofile(output_file)
im
# -

class_out = hw_classifier.classify_mnist("/home/xilinx/img_webcam_mnist_processed")
print("Class number: {0}".format(class_out))
print("Class name: {0}".format(hw_classifier.class_name(class_out)))

# !wget -nc http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz; gunzip -f t10k-images-idx3-ubyte.gz
# !wget -nc http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz; gunzip -f t10k-labels-idx1-ubyte.gz
# !ls *ubyte

# BUG FIX: the original called `c.inference_multiple(...)` but `c` was never
# defined; the classifier created above is `hw_classifier`.
ret = hw_classifier.inference_multiple("t10k-images-idx3-ubyte")

import numpy as np

with open("t10k-labels-idx1-ubyte", 'rb') as f:
    ret_test_golden = np.frombuffer(f.read(), np.uint8, offset=8)

np.unique(ret_test_golden == ret, return_counts=True)
fashion_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis
# 1. The peak of the temperature trend seems to be around 25 degrees latitude.
# 2. There seems to be a high degree of humidity at a majority of points near the equator.
# 3. Wind speeds seem to be the highest between -25 and 75 degrees latitude.

# +
# Import dependencies
import pandas as pd
import numpy as np
import random
import requests
import json
import csv
import openweathermapy.core as owm
import matplotlib.pyplot as plt
from datetime import datetime
from citipy import citipy

# Importing personal API key from own config file
from config import api_key
# -

# # Generating a list of cities

# Making empty lists to store randomly generated longitude and latitude data
longi = []
latit = []
for i in range(1500):
    longi.append(float(random.uniform(-180.00, 180.00)))
    latit.append(float(random.uniform(-90.00, 90.00)))

# Creating empty lists to store the cities and their names
cities = []
city_names = []

# Finding the nearest city given the random coordinates
for i in range(len(longi)):
    cities.append(citipy.nearest_city(latit[i], longi[i]))

# Finding the city info and appending to empty list
for city in cities:
    city_names.append(city.city_name)

# Inputting the city name, lat, and lon into a dataframe
city_df = pd.DataFrame({"City": city_names, "Latitude": latit, "Longitude": longi})

# Dropping any duplicate cities
unique_city_df = city_df.drop_duplicates(subset=["City"])
unique_city_df.head()

print(len(unique_city_df))

# # Grabbing city weather data

# Grabbing data using API
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
# city_df (not unique_city_df) keeps these lists aligned with latit/longi,
# which are reused as the Lat/Lon columns below.
target_cities = city_df["City"]

country = []
date = []
max_temp = []
humidity = []
cloudiness = []
wind_speed = []

# The query prefix never changes — build it once outside the loop.
# NOTE(review): printing response_url below exposes the API key in the output.
query_url = f"{url}appid={api_key}&units={units}&q="

for target_city in target_cities:
    response_url = query_url + target_city
    print(f"Processing record for {target_city}: {response_url}")
    try:
        # BUG FIX: all appends now happen inside the try block. The original
        # appended after the except handler, so a failed pull re-appended the
        # PREVIOUS city's values (or raised NameError if the first pull
        # failed, since the temp variables were still undefined).
        weather_data = requests.get(response_url).json()
        country.append(weather_data["sys"]["country"])
        date.append(weather_data["dt"])
        max_temp.append(weather_data["main"]["temp_max"])
        humidity.append(weather_data["main"]["humidity"])
        cloudiness.append(weather_data["clouds"]["all"])
        wind_speed.append(weather_data["wind"]["speed"])
    except KeyError:
        print("Pull was unsuccessful")
        # Keep every list aligned with target_cities; the NaN rows are
        # removed by dropna below.
        country.append(np.nan)
        date.append(np.nan)
        max_temp.append(np.nan)
        humidity.append(np.nan)
        cloudiness.append(np.nan)
        wind_speed.append(np.nan)

weather_dict = {"City": target_cities,
                "Cloudiness": cloudiness,
                "Country": country,
                "Date": date,
                "Humidity": humidity,
                "Lat": latit,
                "Lon": longi,
                "Max Temp": max_temp,
                "Wind Speed": wind_speed}

# Putting the weather dictionary into a dataframe
weather_df = pd.DataFrame(weather_dict)
weather_df = weather_df.drop_duplicates(subset="City")
weather_df = weather_df.dropna(how="any", inplace=False)
weather_df.head()

# # Temperature (F) vs. Latitude

plt.scatter(weather_df["Lat"], weather_df["Max Temp"], c="Blue", alpha=0.75)
cur_date = datetime.now()
cur_date = cur_date.strftime("%Y-%m-%d")
plt.xlim(-95, 95)
plt.ylim(0, 100)
plt.title(f"City Latitude vs. Max Temperature {cur_date}")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (F)")
plt.grid(True)
plt.savefig("./Images/temp_vs_lat.jpg")
plt.show()

# # Humidity (%) vs. Latitude

plt.scatter(weather_df["Lat"], weather_df["Humidity"], c="Blue", alpha=0.75)
cur_date = datetime.now()
cur_date = cur_date.strftime("%Y-%m-%d")
plt.xlim(-95, 95)
plt.ylim(0, 110)
plt.title(f"City Latitude vs. Humidity {cur_date}")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.grid(True)
plt.savefig("./Images/humidity_vs_lat.jpg")
plt.show()

# # Cloudiness (%) vs. Latitude

plt.scatter(weather_df["Lat"], weather_df["Cloudiness"], c="Blue", alpha=0.75)
cur_date = datetime.now()
cur_date = cur_date.strftime("%Y-%m-%d")
plt.xlim(-95, 95)
plt.ylim(-10, 120)
plt.title(f"City Latitude vs. Cloudiness {cur_date}")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.grid(True)
plt.savefig("./Images/cloudiness_vs_lat.jpg")
plt.show()

# # Wind Speed (mph) vs. Latitude

plt.scatter(weather_df["Lat"], weather_df["Wind Speed"], c="Blue", alpha=0.75)
cur_date = datetime.now()
cur_date = cur_date.strftime("%Y-%m-%d")
plt.xlim(-95, 95)
plt.ylim(-5, 50)
plt.title(f"City Latitude vs. Wind Speed {cur_date}")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.grid(True)
plt.savefig("./Images/windspeed_vs_lat.jpg")
plt.show()
.ipynb_checkpoints/WeatherPy-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Initial setup

# +
import boto3
import json
import sagemaker
from sagemaker.estimator import Estimator

client = boto3.client(service_name="sagemaker")
runtime = boto3.client(service_name="sagemaker-runtime")
boto_session = boto3.session.Session()
region = boto_session.region_name
print(region)
sagemaker_session = sagemaker.Session()
base_job_prefix = "demo-sagemaker-inference"
# role = sagemaker.get_execution_role()
account_id = sagemaker_session.account_id()
role = f"arn:aws:iam::{account_id}:role/service-role/SagemakerExecutionRole"
print(role)
default_bucket = sagemaker_session.default_bucket()
print(f"default_bucket = {default_bucket}")
# -

# ## Model 1 configuration

# +
from time import gmtime, strftime

image_uri = f"{account_id}.dkr.ecr.{region}.amazonaws.com/{base_job_prefix}:latest"
model1_name = "demo-serverless-model1-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Model name: " + model1_name)
model1_artifacts = f"s3://{default_bucket}/{base_job_prefix}/model1.tar.gz"
model1_env_vars = {"SAGEMAKER_CONTAINER_LOG_LEVEL": "20", "SOME_ENV_VAR": "myEnvVar"}

create_model_response = client.create_model(
    ModelName=model1_name,
    Containers=[
        {
            "Image": image_uri,
            "Mode": "SingleModel",
            "ModelDataUrl": model1_artifacts,
            "Environment": model1_env_vars,
        }
    ],
    ExecutionRoleArn=role,
)
print("Model Arn: " + create_model_response["ModelArn"])

# +
epc1_name = "demo-serverless-epc1-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())

endpoint_config_response = client.create_endpoint_config(
    EndpointConfigName=epc1_name,
    ProductionVariants=[
        {
            "VariantName": "Variant1",
            "ModelName": model1_name,
            "ServerlessConfig": {
                "MemorySizeInMB": 1024,
                "MaxConcurrency": 1,
            },
        },
    ],
)
print("Endpoint Configuration Arn: " + endpoint_config_response["EndpointConfigArn"])
# -

# ## Model 2 configuration (not used at first)

# +
model2_name = "demo-serverless-model2-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# BUG FIX: the original printed model1_name here (copy-paste error).
print("Model name: " + model2_name)
model2_artifacts = f"s3://{default_bucket}/{base_job_prefix}/model2.tar.gz"
model2_env_vars = {"SAGEMAKER_CONTAINER_LOG_LEVEL": "20", "SOME_ENV_VAR": "myEnvVar"}

create_model_response = client.create_model(
    ModelName=model2_name,
    Containers=[
        {
            "Image": image_uri,
            "Mode": "SingleModel",
            "ModelDataUrl": model2_artifacts,
            "Environment": model2_env_vars,
        }
    ],
    ExecutionRoleArn=role,
)
print("Model Arn: " + create_model_response["ModelArn"])

# +
epc2_name = "demo-serverless-epc2-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())

endpoint_config_response = client.create_endpoint_config(
    EndpointConfigName=epc2_name,
    ProductionVariants=[
        {
            "VariantName": "Variant2",
            "ModelName": model2_name,
            "ServerlessConfig": {
                "MemorySizeInMB": 1024,
                "MaxConcurrency": 1,
            },
        },
    ],
)
print("Endpoint Configuration Arn: " + endpoint_config_response["EndpointConfigArn"])
# -

# ## Endpoint configuration

# +
endpoint_name = "demo-serverless-ep" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())

create_endpoint_response = client.create_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=epc1_name,
)
print("Endpoint Arn: " + create_endpoint_response["EndpointArn"])
# -

# ## Wait until the endpoint is InService

# +
# %%time
# wait for endpoint to reach a terminal state (InService) using describe endpoint
import time

describe_endpoint_response = client.describe_endpoint(EndpointName=endpoint_name)
while describe_endpoint_response["EndpointStatus"] != "InService":
    describe_endpoint_response = client.describe_endpoint(EndpointName=endpoint_name)
    print(describe_endpoint_response["EndpointStatus"])
    time.sleep(15)
describe_endpoint_response
# -

# ## Invocation

# +
# %%time
response = runtime.invoke_endpoint(
    EndpointName=endpoint_name,
    Body=b'{"key": "b"}',
    ContentType="application/json",
)
body = response["Body"].read()
data = json.loads(body)
# -

# ## Cleanup

client.delete_endpoint(EndpointName=endpoint_name)
client.delete_endpoint_config(EndpointConfigName=epc1_name)
client.delete_model(ModelName=model1_name)
client.delete_endpoint_config(EndpointConfigName=epc2_name)
client.delete_model(ModelName=model2_name)

# +
# To delete everything completely, the ECR image and S3 files must be removed too.
# -

# ## Switching the endpoint configuration

update_endpoint_response = client.update_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=epc2_name,
)

update_endpoint_response = client.update_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=epc1_name,
)
Demo - SageMaker Serverless inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # Exploratory Visualizations
#
# In this notebook, we seek to perform some simply exploratory analysis for the sake of
# formulating and testing later hypotheses. In particular, we will explore how different
# factors contribute to **success rate** of attacks, and **how many** attacks there are
# based on factors such as region, weapon type, etc.
#
# ## Setup
#
# We've already created some preprocessing functions in ```preprocessing_functions.py```.
# We'll load the data and import some packages.
# NOTE(review): the import below actually reads from `preprocess_functions`, not
# `preprocessing_functions.py` as this cell says — confirm the module name.

import pandas as pd
from preprocess_functions import load_data_relevant_cols
# `raw` holds the full attack-level dataset with the relevant columns selected.
raw = load_data_relevant_cols()

import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
sns.set_style('whitegrid')

# ## Success Rate Factors
#
# The principal goal of our analysis is to understand the factors which lead to successful
# terrorist attacks. Therefore, we will first explore how the success rate of terrorist
# attacks differs with respect to certain *traits of interest* such as region or weapon
# used. This should inform a choice of model later on.

from utils import get_success_tuples, plot_attribute_success

# Success rates grouped by region, target type, weapon type and country.
# min_number_attacks filters out sparse categories; presumably it is the minimum
# attack count for a category to be included — confirm in utils.
plot_attribute_success(get_success_tuples(raw, 'region_txt', min_number_attacks=10), 'Region')

plot_attribute_success(get_success_tuples(raw, 'targtype1_txt', min_number_attacks=10), 'Target Type')

plot_attribute_success(get_success_tuples(raw, 'weaptype1_txt'), 'Weapon Type')

plot_attribute_success(get_success_tuples(raw, 'country_txt')[:30], 'Top 30 Countries')

plot_attribute_success(get_success_tuples(raw, 'country_txt', min_number_attacks=20)[-30:], 'Bottom 30 Countries')

plot_attribute_success(get_success_tuples(raw, 'country_txt'), 'All Countries', use_xticks=False)

# ### Discussion
#
# It seems that, with the possible exception of weapon type, there is not much variance in
# success rates across certain attributes. Success rate across different regions, targets,
# and nations always seems to hover around 80%. This makes analysis challenging because
# predictive algorithms might naively predict a successful attack almost all of the time,
# and still achieve a fairly high success rate. Therefore, we should perform analysis with
# the avoidance false positives in mind.

# ## Number of Attacks
#
# In addition to tactical prevention of attacks, counterterrorism officials must address
# the issue of *resource allocation*. Given the potentially limitless number of targets
# and groups to contend with, it is critical that an overall strategy concentrate on the
# most common types of attacks. To this end, we will next explore the *number of attacks*
# with respect to the same traits of interest as before.

from utils import get_num_attacks_tuples, plot_count_by_attribute

plot_count_by_attribute(get_num_attacks_tuples(raw, 'region_txt'), 'Region')

# Interesting! The Middle East and North Africa, with South Asia as a close second, seem
# to have around half of all recorded terrorist attacks (out of a total of roughly
# 170,000). What if we explored the countries?

plot_count_by_attribute(get_num_attacks_tuples(raw, 'country_txt'), 'Country', use_xticks=False)

# This time, the observed exponential decay is even more pronounced. What are the top
# countries here?

plot_count_by_attribute(get_num_attacks_tuples(raw, 'country_txt')[:30], 'Top 30 Countries')

# Unsurprisingly, the 30 countries with the most recorded terrorist attacks are mostly
# from the Middle East and North Africa. The top 3 countries - Iraq, Pakistan, and
# Afghanistan - alone account for about a quarter of the recorded attacks. The UK and US,
# while making it into the top 30, are numbers 9 and 17 respectively.
#
# Let's explore other attributes.

plot_count_by_attribute(get_num_attacks_tuples(raw, 'targtype1_txt'), 'Target Type')

plot_count_by_attribute(get_num_attacks_tuples(raw, 'weaptype1_txt'), 'Weapon Type')

# ### Most Prominent Terrorist Groups
#
# Finally, we'll explore the terrorist groups in our dataset. Using the number of attacks
# seems like a better proxy for this than the percent of successful attacks, since a
# relatively small but effective terrorist group is less relevant than one which fails
# quite regularly but carries out many attacks.
#
# First, let's look at the number of terrorist groups in the dataset.

# Count of distinct group names (includes the "Unknown" placeholder if present).
len(raw['gname'].unique())

# Next, we get the names of the terrorist groups in our database with over 500 recorded
# attacks, and the number of attacks that each group executed. This should give us a
# sense of which groups are the deadlist.

top_terror_groups = get_num_attacks_tuples(raw, 'gname', min_number_attacks=500)
plot_count_by_attribute(top_terror_groups, 'Group', save_figure=True, fig_name='figures/top_groups_barplot.png')

# Similar to the plots for number of attacks by region or weapon type, we can observe an
# exponential decay in number of attacks by group - meaning the few deadliest groups
# account for the vast majority of attacks.

# ### Discussion
#
# Unlike success rates, the number of attacks seems to follow an exponentially decaying
# distribution for all of the attributes considered. Whether considering region, country,
# weapon type, or attack type, the lion's share of attacks occur only in a few buckets.
#
# Given that success rates don't seem to vary much across these factors, this skewing of
# the data is not that significant for analysis purposes. However, it does matter if one
# has a more specific objective than simply maximizing attack prevention. For example, if
# one is interested in preventing attacks on a particular nation, or a particular kind of
# attack, this would imply a different strategy than the results of a holistic analysis.

# ## Conclusion
#
# We found that across many factors that we expect to have significance for terrorist
# attack outcomes (such as region, weapon type, or target type), there is not much
# variance in the success rate of terrorist attacks. However, this analysis is only
# exploratory and it might be the case that combinations of many features yield greater
# spread in success rates.
#
# Unlike success rates, there is substantial variance in the *number* of terrorist
# attacks with respect to region, weapon type, and target type. From a couter-terrorism
# perspective, this is more interesting - it indicates that operatives should concentrate
# their resources on the the places and groups which are the most troublesome.
#
# ## Intermediate Results Storage
#
# We'll store a few of the results already calculated for later notebooks.

# +
import shelve

# Persist the top-groups list for reuse by the follow-up notebooks.
with shelve.open('intermediate_results/vars1') as db:
    db['top_terror_groups'] = top_terror_groups
counter_terrorism_nb1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np  # linear algebra
import pandas as pd

# +
user_hist_df = pd.read_csv('../data/raw/movies_dataset_10 months.csv')
user_hist_df['ts'] = pd.to_datetime(user_hist_df['ts'])
user_hist_df['month'] = user_hist_df['ts'].dt.month

# Months 6 and 7 are held out for evaluation; everything else is training data.
test_1 = user_hist_df[user_hist_df['month'] == 6]
test_2 = user_hist_df[user_hist_df['month'] == 7]
# FIX: .copy() so the column assignments below do not hit pandas'
# SettingWithCopyWarning / silently fail on a view.
train = user_hist_df[~user_hist_df['month'].isin([6, 7])].copy()
# -

# Ground truth: per user, the list of distinct movies watched in each test month.
correct_1 = test_1.drop_duplicates(['user_id', 'movie_id']).groupby('user_id')['movie_id'].apply(list).to_dict()
correct_2 = test_2.drop_duplicates(['user_id', 'movie_id']).groupby('user_id')['movie_id'].apply(list).to_dict()

train['event'] = 1
train = train.drop_duplicates(['user_id', 'movie_id', 'event'])
train.movie_id = train.movie_id.astype('category')
# Category codes are shifted by +1 so that 0 is free to act as padding.
train['categ_id'] = train.movie_id.cat.codes + 1

# +
# Inverse transform dictionary: category code -> original movie id.
cat_to_element_uid = dict(zip(
    range(1, len(train.movie_id.cat.categories) + 1),
    train.movie_id.cat.categories
))

# Assigning most popular film index to inverse transform of zero padding value
cat_to_element_uid[0] = 18943
# -

# %%time
import tqdm
tqdm.tqdm.pandas()
sequences = train.groupby('user_id')['categ_id'].progress_apply(list)

sequences.head()

# Some statistics
print('Median length: {}\nMean length: {}\nMax length: {}'.format(
    sequences.apply(len).median(),
    sequences.apply(len).mean(),
    sequences.apply(len).max()))

sequences2use = sequences[sequences.apply(len) >= 50]

# +
maxlen = 90  # Length of sequences in X

X = []
y = []

def slice_sequence(seq, num_slices):
    """Append (window, next item) training pairs from the tail of `seq`.

    NOTE: mutates the module-level X and y lists.
    """
    for i in range(1, num_slices):
        X.append(seq[-(i + maxlen): -i])
        y.append(seq[-i])

# Longer histories contribute more training windows.
for seq in tqdm.tqdm(sequences2use):
    if len(seq) <= 70:
        slice_sequence(seq, 10)
    elif len(seq) <= 90:
        slice_sequence(seq, 15)
    elif len(seq) <= 100:
        slice_sequence(seq, 20)
    else:
        slice_sequence(seq, 25)
# -

len(X), len(y)

lens = [len(x) for x in X]
max(lens), min(lens), np.mean(lens), np.median(lens)

# BUG FIX: a bare `pip install tensorflow` is a SyntaxError in a Python
# script; in a jupytext notebook it must be a shell escape:
# !pip install tensorflow

# +
from keras.preprocessing.sequence import pad_sequences

# We should pad our sequences with 0 values, so they all will have the same length
X = pad_sequences(X, maxlen=maxlen)
y = np.array(y)
X.shape, y.shape

# +
from keras.layers import Input, Embedding, SpatialDropout1D, LSTM, Dropout, Dense
from keras.models import Model

# Let's set random seed
import tensorflow as tf
#tf.set_random_seed(42)
np.random.seed(42)
# -

train.categ_id.unique().size + 1

# +
max_features = train.categ_id.unique().size + 1
embed_size = 64

def lstm128():
    """Build a single-layer LSTM next-item model over padded id sequences."""
    inp = Input(shape=(maxlen,))
    x = Embedding(max_features, embed_size)(inp)
    x = SpatialDropout1D(0.05)(x)
    x = LSTM(128, return_sequences=False)(x)
    x = Dropout(0.02)(x)
    outp = Dense(max_features, activation="softmax")(x)
    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['sparse_categorical_accuracy'])
    return model
# -

model = lstm128()
model.fit(X, y, batch_size=2048*4, epochs=25, verbose=True, validation_split=0.01, shuffle=True)

model_json = model.to_json()
with open('../models/lstm128.json', 'w') as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights('../models/lstm128.h5')
print("Saved model to disk")

# Build per-user inference inputs: last `maxlen` items, left-padded with 0.
sequences_test = sequences.apply(lambda x: x[-maxlen:])
sequences_test = sequences_test.apply(lambda x: [0 for i in range(maxlen - len(x))] + x)
sequences_test

test_users_in_sequences = sorted(set(sequences_test.index) & set(correct_1.keys()))
X_test = np.array(sequences_test[test_users_in_sequences].tolist())

# +
# %%time
from itertools import chain

batch_size = 2048*8
n_batches = int(X_test.shape[0]/batch_size) + 1
preds = []
for batch_ind in tqdm.tqdm(range(n_batches)):
    batch = X_test[batch_ind*batch_size: (batch_ind + 1)*batch_size]
    curr_preds = model.predict(batch)
    # Top-10 items per user, mapped back to original movie ids.
    curr_preds = np.argsort(-curr_preds)[:, :10]
    curr_preds = [[cat_to_element_uid[x] for x in row] for row in curr_preds]
    preds.append([' '.join(map(lambda x: str(x), row)) for row in curr_preds])
preds = list(chain(*preds))
# -

preds

# BUG FIX: `sample_submission` was used without ever being defined — it must
# be loaded first. TODO(review): confirm the actual file path in this project.
sample_submission = pd.read_csv('../data/raw/sample_submission.csv')
sample_submission.index = sample_submission.user_id
sample_submission.primary_video_id[test_users_in_sequences] = preds
sample_submission.to_csv('submission_lstm.csv', header=True, index=False)
notebooks/RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Science 2 (modeling) # # ## Computer-exam BFVM19DATASC2 (irregular opp) # # ### Tue. 26 Jan 2021, 08:30-11:30, BB-Collaborate # # **Materials:** # # On your computer desktop you will find all data files and supplementary materials. # # * `BFVM19DATASC2_I_DataScience2_1920_DSLS_HEMI-LADR-WATS.ipynb` # # * `neuron.csv` # # * ... # # All notes, textbooks and other written reference materials are permitted. # # **Instructions:** # # This exam consists of three parts that can in principle be answered separately. All questions have the possible number of points to be scored indicated. Your grade will be calculated as follows: # # $$ # \text{Grade} = 1 + 9 \cdot \frac {\text{Points Scored}} {\text{Maximum Score}} # $$ # # Provide your answers in the code cells corresponding with each of the questions. For those questions that require a textual answer rather than python code, you may either type your answer in the cell using a python comment or insert a new markdown cell with your formatted text. You can receive partial credit on textual answers as well as code if you don't get the whole right answer. Be sure to explain your code through commenting, even if it doesn't work correctly. # # <div class="alert alert-warning"> # <b>After finishing:</b> # <ol><li> # Rename your notebook with your name and student number, like `JohnDoe_123456`, using the menu option `File` > `Rename`. # </li><li> # Evaluate the notebook by means of the menu option `Kernel` > `Restart & Run All` and check that your notebook runs without errors. # </li><li> # Save the evaluated notebook using the menu option `File` > `Save and Checkpoint`. # </li><li> # Submit your saved file on Blackboard using the `Assignment submission` item. 
# </li></ol> # </div> # *** # # ## Part I: Graph theory <small>[30 pts]</small> # # #### Question 1a <small>[5 pts]</small> # # Bla bla bla # ## Part II: Numerical analysis <small>[30 pts]</small> # # Below, you will investigate the behavior of the *FitzHugh-Nagumo* (FHN) model that can be used to crudely model the spiking behaviour of a single neuron in the central nervous system when stimulated with excitatory input. The first-order differential equations for the FHN model read <sup>[ref](http://www.scholarpedia.org/article/FitzHugh-Nagumo_model)</sup> # # $$ # \begin{aligned} # \dot{V} &= V - \frac{V^3}{3} - W + I # \\ # \dot{W} &= 0.08 \left( V + 0.7 - 0.8 W \right) # \end{aligned} # $$ # # Here, the dotted variables $\dot{V}$ and $\dot{W}$ denote the derivatives of $V$ and $W$ with respect to time $t$ (so-called Newton's notation), and # # * $V$ is the neuron's membrane potential, # # * $W$ is a supplementary recovery variable, # # * $I$ is the magnitude of the stimulus current. # # It is an example of a *relaxation oscillator* because, if the external stimulus $I$ exceeds a certain threshold value, the system will exhibit a characteristic excursion called an *action potential* before the variables $V$ and $W$ relax back to their rest values. # # ![neuron.jpg](attachment:neuron.jpg) # # #### Question 2a <small>[9 pts]</small> # # Integrate the FHN model using the *Midpoint* method from the Runge-Kutta family of integration methods. Employ starting values $V=W=0$ and a step size $\Delta t = \frac{1}{2}$, and plot the membrane potential $V(t)$ from $t_0=0$ to $t_1=300$ that you obtain for no ($I=0.0$), weak ($I=0.3$) or strong ($I=0.6$) stimulus currents in a single graph. # # What is the order of the Midpoint method? 
# # <div class="alert alert-info"><b>Hint:</b><br />Modify your implementation of Heun's method to obtain the Midpoint method.</div> import numpy as np import matplotlib.pyplot as plt def FHN(x, y, I = 0): return np.array([ y[0] - (y[0]**3)/3 - y[1] +I , 0.08*(y[0] + 0.7 - 0.8*y[1]) ]) def midpoint(f, y0, x0, x1, steps, I): h = (x1 - x0) / steps xs = np.linspace(x0, x1, steps + 1) y = y0 ys =[y] for x in xs[:-1]: k1 = f(x, y, I) k2 = f(x + (h/2), y + (h/2)*k1, I) y = y + h*(k2) ys.append(y) return xs, ys I = [0, 0.3, 0.6] for i in I: xs, ys = midpoint(FHN, np.array([0.0, 0.0]), 0, 300, 501, i) # print(ys) plt.axhline(-0.0019242265446122067) plt.plot(xs, ys) plt.show() # <div class="alert alert-danger"><b>Note:</b><br />If you did not succeed in calculating neural signals according to the FHN model, import substitute data using `pandas.read_csv('./neuron.csv')`.</div> # # #### Question 2b <small>[7 pts]</small> # # The average value $\bar{V}$ of the continuous signal $V(t)$ over an arbitrary interval $(t_0, t_1)$ can be determined by the expression # # $$ # \bar{V} = \frac{\int_{t_0}^{t_1} V(t) \text{d}t}{t_1-t_0} # $$ # # Given the sampled values $V(t)$ that you determined in **2a.**, determine the average value $\bar{V}$ of the membrane potential $V(t)$ between $t_0=100$ and $t_1=300$ for each of the three stimulus currents $I=0.0,0.3,0.6$ using *Simpson's integration rule* and report the three outcomes using three decimals. # # Would you generally prefer Simpson's rule to the trapezoidal rule? Explain why. def simpson(f, a, b, r, n=100): """df = simpson(f, a, b, n=...). Calculates the definite integral of the function f(x) from a to b using the composite Simpson's rule with n subdivisions (with default n=...). 
""" n += n % 2 # force to be even h = (b -a) / n I = f(a, r) + f(b, r) for i in range(1, n, 2): xi = a + i*h I += 4*f(xi, r) for i in range(2, n, 2): xi = a + i*h I += 2*f(xi, r) I *= h/3 return I def V(b, r): prey = [] x, res = midpoint(FHN, np.array([0.0, 0.0]), 0, b, 501, r) for i in range(len(res)): prey.append(res[i][0]) return prey[::-1][0] for i in I: print('I:',i) print( simpson( V, a = -2, b = 0, r = i)/200) # #### Question 2c <small>[7 pts]</small> # # For sufficiently high values of the stimulus $I$, the system shows oscillatory behavior, whereas below a certain critical threshold it quickly achieves a stable equilibrium close to $V(t) \approx -1$ in which no excursions occur. The fact that $V$ and $W$ are stationary in such an equilibrium implies that $\dot{V}=\dot{W}=0$. The second FHN equation $\dot{W} = 0.08 \left( V + 0.7 - 0.8 W \right) = 0$ then results in $W = (V+0.7) / 0.8$, which can be substituted into the first FHN equation to obtain # # $$ # V - \frac{V^3}{3} - \frac{V+0.7}{0.8} + I = 0 # $$ # # Find the static solution for the above equality for $V$ near -1 for $I=0.0$, $0.3$, and $0.6$ to at least 3 digits accuracy. # # Do your results agree with those from **2b.**? Explain your observations. def func(x, I): return x - (x**3)/3 - (x -0.7)/0.8 + I x = np.linspace(-5, 5, 400) def rootsearch(f, a, b, steps, r): """lo, hi = rootsearch(f, a, b, steps). Searches the interval (a,b) in a number of steps for the bounds (lo,hi) of the roots of f(x). """ h = (b - a) / steps f_lo = f(a, r) for step in range(steps): lo = a + step * h hi = lo + h f_hi = f(hi, r) if f_lo * f_hi <= 0.0: yield lo, hi f_lo = f_hi for i in I: print('I:', i) plt.plot(x, func(x, i)) plt.show() print(list(rootsearch(func, -2, 2, 1000, i))) # #### Question 2d <small>[7 pts]</small> # # Determine the time $t_\text{min} > 0$ when the first minimum in the signal $V(t)$ occurs. Again, find the separate solutions for $I=0.0$, $0.3$, and $0.6$ to at least 3 digits accuracy. 
You may choose any preferred minimization algorithm; briefly motivate your choice. # # <div class="alert alert-info"><b>Hint:</b><br />Write a helper function that integrates the model from $t_0=0$ to a requested end time $t_1$ and that returns the value $V(t_1)$. Next, determine the first minimum of this helper function.</div> # ## Part III: Multivariate Component Analysis <small>[30 pts]</small> # # #### Question 3a <small>[10 pts]</small> # # Bla bla bla # *** # # ## End of this exam # # <div class="alert alert-warning">See the instructions at the top of this document for how to submit your answers.</div> # # *Success!*
Numerical_analysis/Test/Test_2/BFVM19DATASC2_I_DataScience2_1920_DSLS_LADR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sequence classification with Neural Networks # ## Per-sample RNN model # # Now we're going to try RNN model (as GRU) on our time series data. # The difference here is that we can feed the network with the whole sequence at once, so that it can learn the patterns and hopefully demonstrate better performance in presence of outliers. # # That should be relatively easy for our data. Basically the model could learn that: # * speed of 5 can only happen at the begininng # * or after the train segment speed has reached 0. # # If the speed of 5 (km/h) happens abruptly after any other speed value -- that would mean it's still a train segment. That means the network should be able to demonstrate high performance even with the 50% or more outliers in the data. # + # Load the TensorBoard notebook extension # %load_ext tensorboard import altair as alt import numpy as np import pandas as pd import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) from tmdprimer.datagen import generate_sample, Dataset, Sample # - # We're going to create a shallow RNN architecture with just one recurrent layer and one output dense unit. But that should be enough for our case given simplicity of our data. # # The learning rate is adjusted with a schedule for faster convergence. 
# + import tensorflow as tf # converge faster lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( 0.01, decay_steps=100, decay_rate=0.7) def get_rnn_model(): rnn_model = tf.keras.Sequential( [ tf.keras.layers.GRU(8, return_sequences=True), tf.keras.layers.Dense(1, activation="sigmoid") ] ) rnn_model.compile( loss="binary_crossentropy", optimizer=tf.keras.optimizers.RMSprop(learning_rate=lr_schedule), metrics=[tf.keras.metrics.BinaryAccuracy()] ) return rnn_model # + data_rnn = [] for outlier_prob in (0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0): print(outlier_prob) dataset = Dataset.generate(train_outlier_prob=outlier_prob, n_samples=120) # truncate samples since we don't use masking and padding here min_sample_size = min([len(s) for s in dataset.samples]) test_samples = [Sample(s.features[:min_sample_size]) for s in dataset.samples[100:]] dataset.samples = [Sample(s.features[:min_sample_size]) for s in dataset.samples[:100]] model = get_rnn_model() model.fit( x=dataset.to_tfds(), epochs=10, verbose=0 ) dataset.samples = test_samples res = model.evaluate(dataset.to_tfds(), verbose=0) data_rnn.append({'outlier_prob': outlier_prob, 'accuracy': res[1]}) df_rnn = pd.DataFrame(data_rnn) # - alt.Chart(df_rnn).mark_line().encode(x='outlier_prob', y='accuracy') # Right in line with our predictions, the model can easily learn the patterns in the data, and can yield over 90% accuracy even in case of whopping 80% of outliers. # # As expected, at the 100% outlier level, when features become indistinguishable, the network falls to a random 50% accuracy. # Let's see now how the tensorboard graphs look like for RNN. You can use those graphs as a reference when comparing them to the more complex models in production. 
# + # Clear any logs from previous runs from datetime import datetime # !rm -rf ./logs/ log_dir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) dataset = Dataset.generate(train_outlier_prob=0.10, n_samples=200) # truncate samples since we don't use masking and padding here min_sample_size = min([len(s) for s in dataset.samples]) dataset.samples = [Sample(s.features[:min_sample_size]) for s in dataset.samples[:100]] get_rnn_model().fit( x=dataset.to_tfds().batch(20), epochs=10, callbacks=[tensorboard_callback] ) # #%tensorboard --logdir logs/fit # -
tmdprimer/rnn_models/Per-sample RNN model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lucerogr/algorithmic-complexity/blob/main/Send_more_money.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="oDmlDJEYNdUS"
# Exercise 2: Polynomials

# + id="iI7zc10gGtaa"
def p(x):
    """Evaluate the polynomial 10x^5 + 20x^4 + x^2 + 23x + 4 at x.

    Coefficients are listed highest degree first, so the list is reversed
    before pairing each coefficient with its power.  Returns a float.
    """
    a = [10, 20, 0, 1, 23, 4]
    s = 0.0
    for i, ai in enumerate(reversed(a)):
        s += ai * x ** i
    return s


# + colab={"base_uri": "https://localhost:8080/"} id="0gjVth6UHYQ4" outputId="9c142656-4c4b-4b3a-f6ff-e93846dd99c5"
if __name__ == "__main__":
    # Demo calls are guarded so importing this module stays side-effect free;
    # inside a notebook __name__ is "__main__", so the cell still executes.
    p(2)

# + [markdown] id="O7wNh7tNNg2k"
# SEND + MORE = MONEY

# + id="0v6dMAsXQeNZ"
def validate(a, b, c, codex, chars):
    """Print the substituted equation when a + b == c under the digit assignment.

    codex[i] is the digit assigned to the letter chars[i].  A single
    translation table replaces the original chain of str.replace passes
    (one scan per string instead of len(codex) scans; identical output).
    """
    table = str.maketrans({chars[i]: str(codex[i]) for i in range(len(codex))})
    stra = a.translate(table)
    strb = b.translate(table)
    strc = c.translate(table)
    if int(stra) + int(strb) == int(strc):
        print(a, stra, b, strb, c, strc)


# + colab={"base_uri": "https://localhost:8080/"} id="0n3r7DDwZe8i" outputId="bb396270-9d09-414a-a998-474efe641b42"
if __name__ == "__main__":
    validate("SEND", "MORE", "MONEY", [7,6,4,9,0,8,1,5], "SENDMORY")


# + id="htP_zj_MRWBV"
def combinations(digits, n, w, chars, codex, a, b, c):
    """Recursively enumerate every length-n permutation of digits into codex.

    w is the current depth; at depth n the complete assignment is checked
    with validate().
    """
    if w == n:
        validate(a, b, c, codex, chars)
    else:
        for i in range(len(digits)):
            e = digits[i]
            combinations(digits[:i] + digits[i+1:], n, w+1, chars, codex + [e], a, b, c)


# + id="ZP9Kyn3RIv0Y"
def solve(a, b, c):
    """Print every digit assignment solving the cryptarithm a + b == c.

    NOTE: leading zeros are allowed (e.g. MORE -> 0816), matching the
    original behaviour.
    """
    chars = list(set(a + b + c))  # set removes duplicate letters
    digits = [i for i in range(10)]
    n = len(chars)
    combinations(digits, n, 0, chars, [], a, b, c)


# + colab={"base_uri": "https://localhost:8080/"} id="LTqO87D0OF9H" outputId="611c5001-50f2-40af-c750-23a82b1ed565"
if __name__ == "__main__":
    solve("SEND", "MORE", "MONEY")
Send_more_money.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/harinadh12/Freetime_NLP/blob/main/BERT_MLM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="o2uqMKpXpx6n" outputId="3a857d18-4414-4d2c-acc6-2166558ec340"
# !pip install transformers

# + id="Vieh8LmHph_b"
from transformers import BertTokenizer, BertForMaskedLM
import torch
import pandas as pd

# + colab={"base_uri": "https://localhost:8080/"} id="ifdEzNGNpvME" outputId="c2992972-0386-4a7c-a391-77200451e8f9"
# Pretrained BERT tokenizer and masked-language-model head.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')

# + id="BF5aZoQbp8lF"
# TSV with redacted movie-review snippets; '█' runs mark redacted names.
data = pd.read_csv('unredactor.tsv', on_bad_lines='skip',sep='\t', header=None)
data.columns =['author','type','label','text']

# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="BKfQcs6tGUN5" outputId="2a1cb5a7-9c15-4c15-ee7d-b478c4a03673"
data.text[1]

# + colab={"base_uri": "https://localhost:8080/"} id="jw4-x_PErgJM" outputId="06da3a72-d652-4498-9d93-42fca36e6419"
data.shape

# + id="WuTZPWeBqMK-"
train_data = data[data.type=='training']

# + id="bAceTttuqS4a"
# Restore each redacted span by replacing the '█' run with its label.
train_data = train_data.apply(lambda x : x['text'].replace('█'*len(x['label']),x['label']), axis=1).tolist()

# + colab={"base_uri": "https://localhost:8080/"} id="4mWD72Hzq_Sa" outputId="4a27090f-31a3-4fdf-b60b-197a548869c4"
inputs = tokenizer(train_data,return_tensors='pt', truncation=True,max_length=50,padding=True)
inputs.keys()
inputs

# + id="BN_Lyys7rLYl"
# Labels are a copy of the unmasked token ids (MLM target).
inputs['labels'] = inputs.input_ids.detach().clone()

# + colab={"base_uri": "https://localhost:8080/"} id="BdozlNEpslyy" outputId="63a8306f-9103-46f7-a2b1-28c9505fbe95"
inputs.keys()

# + id="uni90XkwsmZl"
# create random array of floats in equal dimension to input_ids
rand = torch.rand(inputs.input_ids.shape)

# + colab={"base_uri": "https://localhost:8080/"} id="LBC_6NWDsrVQ" outputId="7b0438a0-10aa-47c8-c640-93b56b1e8d57"
# Mask ~15% of positions, excluding the special tokens 101 ([CLS]) and 102 ([SEP]).
mask_arr = (rand < 0.15) * (inputs.input_ids != 101) * (inputs.input_ids != 102)
mask_arr

# + colab={"base_uri": "https://localhost:8080/"} id="B8Eb-0H42Gt_" outputId="6a044e47-8ab9-41c1-e172-28c9505fbe95"
inputs.input_ids[2].shape

# + id="5wElYCFFs8p9"
# Per-row list of positions selected for masking.
selection = []

for i in range(inputs.input_ids.shape[0]):
    selection.append(
        torch.flatten(mask_arr[i].nonzero()).tolist()
    )

# + id="DKvnvflNuZXk"
# Overwrite selected positions in-place with 103, the [MASK] token id.
for i in range(inputs.input_ids.shape[0]):
    inputs.input_ids[i, selection[i]] = 103

# + colab={"base_uri": "https://localhost:8080/"} id="K5niErcWueOt" outputId="32413d67-4a8b-48d2-8523-b0d9eb9490fb"
inputs.input_ids

# + id="vCU8fNa1ueux"
class MeditationsDataset(torch.utils.data.Dataset):
    # Thin Dataset wrapper over the tokenized batch encodings.
    def __init__(self, encodings):
        self.encodings = encodings
    def __getitem__(self, idx):
        # NOTE(review): values are already tensors; torch.tensor(val[idx])
        # copies them (and warns on recent torch) — val[idx].clone() would
        # be the quieter equivalent.
        return {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
    def __len__(self):
        return len(self.encodings.input_ids)

# + id="jKU7s3pvurlT"
dataset = MeditationsDataset(inputs)

# + id="rBGsHlzKuucp"
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)

# + colab={"base_uri": "https://localhost:8080/"} id="frKVpjNcwBRF" outputId="7e1cbfef-52b8-4b2e-83ce-4a04b3624e43"
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# and move our model over to the selected device
model.to(device)
# activate training mode
model.train()

# + colab={"base_uri": "https://localhost:8080/"} id="G_rk6gs8wFBk" outputId="a0a9d6c6-81fb-4a5f-ad9b-b9431d108082"
from transformers import AdamW
# initialize optimizer
# NOTE(review): this optimizer (and `loader` above) is never used —
# training is done via the Trainer API below instead.
optim = AdamW(model.parameters(), lr=5e-5)

# + id="5l5d70lUwMf_"


# + id="zUIuF9zQwRQH"


# + [markdown] id="eBArrPMdwpiH"
# Trainer API

# + colab={"base_uri": "https://localhost:8080/"} id="sCha2RVXwvfl" outputId="8a141491-2f9d-4801-f132-c947d96715ab"
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir='out',
    per_device_train_batch_size=16,
    num_train_epochs=5,
)

# + id="kjY7epUYww0T"
from transformers import Trainer

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset
)

# + colab={"base_uri": "https://localhost:8080/", "height": 403} id="v7hUB8LIw4fM" outputId="e4ee226a-cbc0-4df9-80b7-d9d485932bc8"
trainer.train()

# + id="bwEMApkpyJsK"
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForMaskedLM.from_pretrained(model_name)
# unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer)

# + colab={"base_uri": "https://localhost:8080/"} id="ogCt1sJM70Ut" outputId="37ff979f-ca0f-43f8-e891-b5659d5db3d9"
model.save_pretrained('./')

# + colab={"base_uri": "https://localhost:8080/"} id="25gkuNzA8ESU" outputId="af9a3463-c59c-4113-88a7-4eefc8193b92"
trainer.save_model('./')

# + id="X1Fi9rxT9yin"
model.eval()

# + id="GDMCKAsVGBJx"
# NOTE(review): test_token is tokenized here but never used; the
# inference cell below re-tokenizes `text` itself.
test_token = tokenizer("I'll admit that I was reluctant to see it because from what I knew of [MASK] he was only able to do comedy.",return_tensors='pt', truncation=True,max_length=50,padding=True)

# + colab={"base_uri": "https://localhost:8080/"} id="AxaeJY5dGJbx" outputId="1c2f1083-8dff-43b7-bc61-1c2515e6aca3"
trainer.save_model("./out/model2/")

# + colab={"base_uri": "https://localhost:8080/"} id="iVfFf6o9Lblm" outputId="516eef20-fabc-4466-cc8c-06869851425f"
bert_model = BertForMaskedLM.from_pretrained("./out/model2")

# + id="pLDNJSNNOMsx"
text = "I'll admit that I was reluctant to see it because from what I knew of [MASK] he was only able to do comedy."

# + colab={"base_uri": "https://localhost:8080/"} id="IHY5IZfJMH3x" outputId="a7f28d22-98ad-414b-ec61-b478c4a03673"
# Fill-mask inference: softmax over the vocabulary at the [MASK] position,
# then print the sentence completed with each of the top-10 candidates.
from torch.nn import functional as F
input = tokenizer.encode_plus(text, return_tensors = "pt")
mask_index = torch.where(input["input_ids"][0] == tokenizer.mask_token_id)
output = bert_model(**input)
logits = output.logits
softmax = F.softmax(logits, dim = -1)
mask_word = softmax[0, mask_index, :]
top_10 = torch.topk(mask_word, 10, dim = 1)[1][0]
for token in top_10:
    word = tokenizer.decode([token])
    new_sentence = text.replace(tokenizer.mask_token, word)
    print(new_sentence)

# + id="b32NzAwBMU8t"


# + id="UqMarWXbO3_s"


# + id="3bsQaDpMO4wN"


# + id="4_jnNYxdO4sx"


# + id="rAOmT13eO4qO"


# + id="zqn4NVJnO38V"
BERT_MLM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="IG1v46jnGkax" colab_type="code" outputId="1b61dcea-3109-4824-a2d6-a136b1571720" colab={"base_uri": "https://localhost:8080/", "height": 472}
# https://matplotlib.org/gallery/lines_bars_and_markers/barh.html#sphx-glr-gallery-lines-bars-and-markers-barh-py
import matplotlib.pyplot as plt
import numpy as np

# Fixing random state for reproducibility
np.random.seed(19680801)

plt.rcdefaults()
fig, ax = plt.subplots()

# Example data
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))

ax.barh(y_pos, performance, xerr=error, align='center',
        color='green', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')

plt.show()

# + id="DWcnKAt4H9PT" colab_type="code" outputId="d14b5d40-bacf-485e-c7e3-927b0919c3ed" colab={"base_uri": "https://localhost:8080/", "height": 432}
# Adapted to piechart
# https://matplotlib.org/gallery/pie_and_polar_charts/pie_features.html#sphx-glr-gallery-pie-and-polar-charts-pie-features-py
import matplotlib.pyplot as plt
import numpy as np

# Fixing random state for reproducibility
np.random.seed(19680801)

plt.rcdefaults()
fig, ax = plt.subplots()

# Example data
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))

ax.pie(performance, labels=people)
ax.set_title('How fast do you want to go today?')

plt.show()

# + id="Y26IktTfIZmO" colab_type="code" outputId="2344c79d-474a-49bf-f5be-7b55601f14ac" colab={"base_uri": "https://localhost:8080/", "height": 487}
# https://matplotlib.org/gallery/lines_bars_and_markers/scatter_demo2.html#sphx-glr-gallery-lines-bars-and-markers-scatter-demo2-py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook

# Load a numpy record array from yahoo csv data with fields date, open, close,
# volume, adj_close from the mpl-data/example directory. The record array
# stores the date as an np.datetime64 with a day unit ('D') in the date column.
with cbook.get_sample_data('goog.npz') as datafile:
    price_data = np.load(datafile)['price_data'].view(np.recarray)
price_data = price_data[-250:]  # get the most recent 250 trading days

delta1 = np.diff(price_data.adj_close) / price_data.adj_close[:-1]

# Marker size in units of points^2
volume = (15 * price_data.volume[:-2] / price_data.volume[0])**2
# NOTE(review): left-to-right evaluation makes the two 0.003 factors cancel,
# so this is just close/open scaled — kept as in the upstream gallery example.
close = 0.003 * price_data.close[:-2] / 0.003 * price_data.open[:-2]

fig, ax = plt.subplots()
ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5)

ax.set_xlabel(r'$\Delta_i$', fontsize=15)
ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=15)
ax.set_title('Volume and percent change')

ax.grid(True)
fig.tight_layout()

plt.show()

# + id="DaEiVQD2K0T1" colab_type="code" outputId="094a3011-9db7-4135-ab17-446fd4505cc6" colab={"base_uri": "https://localhost:8080/", "height": 413}
# https://matplotlib.org/gallery/mplot3d/scatter3d.html#sphx-glr-gallery-mplot3d-scatter3d-py
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import

import matplotlib.pyplot as plt
import numpy as np

# Fixing random state for reproducibility
np.random.seed(19680801)


def randrange(n, vmin, vmax):
    '''
    Helper function to make an array of random numbers having shape (n, )
    with each number distributed Uniform(vmin, vmax).
    '''
    return (vmax - vmin)*np.random.rand(n) + vmin

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

n = 100

# For each set of style and range settings, plot n random points in the box
# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].
for c, m, zlow, zhigh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:
    xs = randrange(n, 23, 32)
    ys = randrange(n, 0, 100)
    zs = randrange(n, zlow, zhigh)
    ax.scatter(xs, ys, zs, c=c, marker=m)

ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')

plt.show()

# + id="02mt_9taK6FT" colab_type="code" colab={}


# + [markdown] id="hU78rr5sw0s_" colab_type="text"
# # MY STUFF
#
# ---
#
# Stand back everyone, I'm about to do code!

# + id="75JIWb8u3bXy" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# + id="WG_CqwASxDAF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="b25a4fb8-ce53-45e4-9b76-4370304e8516"
# UCI glass identification dataset, fetched straight from the archive.
glass_df_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
glass_names = ['ID', 'RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type']
Glass_data = pd.read_csv(glass_df_url, header = None, names = glass_names)
Glass_data.head()

# + id="WInDKh-E3lj8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 376} outputId="880877f9-c6ad-4d18-bd88-64a998187238"
#Distribution of types of glass in data set
fig, ax = plt.subplots()
x = []
# NOTE(review): the loop variable shadows the builtin `type`;
# x = list(Glass_data['Type']) would avoid that.
for type in Glass_data['Type']:
    x.append(type)
#x = np.asarray(Glass_data['Type'])
n, bins = ax.hist(x, sorted(Glass_data['Type'].unique()))[:2]
plt.xlabel('Type')
plt.ylabel('Observations')
plt.title('Commonness of glass types at crime scenes')
plt.show()

# + id="KkjTAPMf_Ho5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 792} outputId="579621bc-6be0-4582-fca3-195214cc5119"
# Refractive index by glass type: scatter, then per-type boxplots.
x = Glass_data['RI']
y = Glass_data['Type']
grouped = x.groupby(y)
plt.scatter(x, y)
plt.xlabel('Refractive Quality')
plt.ylabel('Type')
plt.title('Refractive quality breakdown by type')
plt.show()

boxplot_dat = [group for name, group in grouped]
bp1 = plt.boxplot(boxplot_dat, sym ='k.', showfliers = True)
plt.xlabel('Type')
plt.ylabel('Refractive Quality')
plt.title('Refractive Quality breakdown by type')
plt.show()

# + id="YoXvDQUU_H_X" colab_type="code" colab={}
# UCI 1984 congressional voting records dataset.
policy_name = ['party', 'handi infants', 'water pcs', 'budget adopt', 'phys fee frz', 'el salv aid',
               'rel. grp in schools', 'anti sat test ban', 'aid to contras', 'mx missle', 'immig',
               'synfuels cb', 'edu spend', 'sf right to sue', 'crime', 'duty free export',
               'ex admin act SAfrica']
politics_df_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
pol_df = pd.read_csv(politics_df_url, header = None, names = policy_name)

# + id="XqV_Gp9KkEel" colab_type="code" colab={}
# Encode votes and party labels numerically for plotting.
map_vars = {'y':1, 'n':2, '?':3, 'republican':12, 'democrat':13}
pol_df_clean = pol_df.replace({'party':map_vars, 'handi infants':map_vars, 'water pcs':map_vars,
                               'budget adopt':map_vars, 'phys fee frz':map_vars, 'el salv aid':map_vars,
                               'rel. grp in schools':map_vars, 'anti sat test ban':map_vars,
                               'aid to contras':map_vars, 'mx missle':map_vars, 'immig':map_vars,
                               'synfuels cb':map_vars, 'edu spend':map_vars, 'sf right to sue':map_vars,
                               'crime':map_vars, 'duty free export':map_vars, 'ex admin act SAfrica':map_vars})

# + id="AZPgt7i9SVH2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 433} outputId="0e065686-59f8-4c0a-944b-8f43e51ffbcf"
#democrats are more likely to vote to help disabled infants, but also more likely to abstain from voting.
#good to see some things never change.
fig, ax = plt.subplots()
x = pol_df_clean['party']
y = pol_df_clean['handi infants']
# Party codes grouped by vote value (yes/no/abstain).
grouped = x.groupby(y)
fig.canvas.draw()
boxplot_dat = [group for name, group in grouped]
bp1 = plt.boxplot(boxplot_dat)
plt.ylabel('party')
plt.xlabel('vote on aid for handicapped infants')
plt.title('Youth Aid Vote Breakdown by Party')
plt.setp(ax, xticklabels=('yes', 'no', 'abstain'))
plt.show()
module3-basicdatavisualizations/LS_DS_113_Plotting_Playground.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Information retrieval project

# - user is reading about sachin blog
# - which other post you can recommend to that person
# - query A - Document A
# - which docs are similar to Docs A and pass on that as well

import pandas as pd

people = pd.read_csv('people_data.csv')

people.head()

# Replace all null values with "No Data"
people.fillna("No Data", inplace = True )

# # using tfidf

# +
# converting text columns into tfidf values
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf_vect = TfidfVectorizer()
X_train_tfidf = tfidf_vect.fit_transform(people.text)
# -

X_train_tfidf

people.shape

from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors(n_neighbors=10)
nn_fitted = nn.fit(X_train_tfidf)

# # Find Neighbors

# Row index of the first person whose name contains "amitabh" (any case).
index = people[people.name.str.contains("amitabh", case = False)].index[0]
index

# +
# now find which all people are similar to amitabh in people text
# -

X_train_tfidf[index,:] #tfidf vector for amitabh

score, neighbours = nn_fitted.kneighbors(X_train_tfidf[index,:])

score[0]

neighbours[0]

people.iloc[neighbours[0]]

# # Unknown Actor

some_actor = "he is the best of his time"

unkown_actor = tfidf_vect.transform([some_actor])# do not use fit transform

score, neighbours = nn_fitted.kneighbors(unkown_actor)

people.iloc[neighbours[0]]

# # Count Vect

from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
X_train_count = count_vect.fit_transform(people.text)

# NOTE(review): fit() refits the SAME estimator object, so nn_fitted above
# now also points at the count-vector model; earlier tf-idf queries must not
# be re-run after this cell.
nn_fitted_cv = nn.fit(X_train_count)

# BUG FIX: the original queried X_train_count[index:] (every row from
# `index` onward); a single-row query matching the tf-idf cells above is
# X_train_count[index, :].
score, neighbours = nn_fitted_cv.kneighbors(X_train_count[index, :])
info_retrival.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import math
import re
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): `evaluate`, Dataset.split and SVD.train below belong to the
# pre-1.1 surprise API (replaced by model_selection.cross_validate / fit);
# verify the pinned surprise version before running.
from surprise import Reader, Dataset, SVD, evaluate
sns.set_style("darkgrid")

# Netflix Prize data: ratings file plus movie-title lookup.
filepath = '/Users/navi/Downloads/'
file = filepath + 'combined_data_1.txt'
movies = filepath + 'movie_titles.csv'

df1 = pd.read_csv(file, header = None, names = ['Cust_Id', 'Rating'], usecols = [0,1])
df1['Rating'] = df1['Rating'].astype(float)

df = df1
df.index = np.arange(0,len(df))

# +
# Rating distribution summary; rows with NaN rating are the "MovieID:"
# separator lines of the raw file, so their count equals the movie count.
p = df.groupby('Rating')['Rating'].agg(['count'])

# get movie count
movie_count = df.isnull().sum()[1]

# get customer count
cust_count = df['Cust_Id'].nunique() - movie_count

# get rating count
rating_count = df['Cust_Id'].count() - movie_count

ax = p.plot(kind = 'barh', legend = False, figsize = (15,10))
plt.title('Total pool: {:,} Movies, {:,} customers, {:,} ratings given'.format(movie_count, cust_count, rating_count), fontsize=20)
plt.axis('off')

for i in range(1,6):
    ax.text(p.iloc[i-1][0]/4, i-1, 'Rating {}: {:.0f}%'.format(i, p.iloc[i-1][0]*100 / p.sum()[0]), color = 'white', weight = 'bold')

# +
# Derive a Movie_Id column: consecutive NaN-rating rows delimit the rating
# rows belonging to one movie.
df_nan = pd.DataFrame(pd.isnull(df.Rating))
df_nan = df_nan[df_nan['Rating'] == True]
df_nan = df_nan.reset_index()

movie_np = []
movie_id = 1

for i,j in zip(df_nan['index'][1:],df_nan['index'][:-1]):
    # numpy approach
    temp = np.full((1,i-j-1), movie_id)
    movie_np = np.append(movie_np, temp)
    movie_id += 1

# Account for last record and corresponding length
# numpy approach
last_record = np.full((1,len(df) - df_nan.iloc[-1, 0] - 1),movie_id)
movie_np = np.append(movie_np, last_record)

# +
# Drop the separator rows and attach the derived movie ids.
df = df[pd.notnull(df['Rating'])]

df['Movie_Id'] = movie_np.astype(int)
df['Cust_Id'] = df['Cust_Id'].astype(int)

# +
# Trim sparse movies/customers: keep only those above the 80th percentile
# of rating counts.
f = ['count','mean']

df_movie_summary = df.groupby('Movie_Id')['Rating'].agg(f)
df_movie_summary.index = df_movie_summary.index.map(int)
movie_benchmark = round(df_movie_summary['count'].quantile(0.8),0)
drop_movie_list = df_movie_summary[df_movie_summary['count'] < movie_benchmark].index

df_cust_summary = df.groupby('Cust_Id')['Rating'].agg(f)
df_cust_summary.index = df_cust_summary.index.map(int)
cust_benchmark = round(df_cust_summary['count'].quantile(0.8),0)
drop_cust_list = df_cust_summary[df_cust_summary['count'] < cust_benchmark].index
# -

df = df[~df['Movie_Id'].isin(drop_movie_list)]
df = df[~df['Cust_Id'].isin(drop_cust_list)]

df_title = pd.read_csv(movies, encoding = "ISO-8859-1", header = None, names = ['Movie_Id', 'Year', 'Name'])
df_title.set_index('Movie_Id', inplace = True)

# +
reader = Reader()

# get just top 100K rows for faster run time
data = Dataset.load_from_df(df[['Cust_Id', 'Movie_Id', 'Rating']][:100000], reader)
data.split(n_folds=3)
# -

svd = SVD()
evaluate(svd, data, measures=['RMSE', 'MAE'])

# Movies customer 785314 rated 5 stars, joined with their titles.
df_785314 = df[(df['Cust_Id'] == 785314) & (df['Rating'] == 5)]
df_785314 = df_785314.set_index('Movie_Id')
df_785314 = df_785314.join(df_title)['Name']
print(df_785314)

user_785314 = df_title.copy()
user_785314 = user_785314.reset_index()
user_785314 = user_785314[~user_785314['Movie_Id'].isin(drop_movie_list)]

# +
# Refit on a smaller sample, then score every remaining movie for this user.
data = Dataset.load_from_df(df[['Cust_Id', 'Movie_Id', 'Rating']][:10000], reader)

trainset = data.build_full_trainset()
svd.train(trainset)

# +
user_785314['Estimate_Score'] = user_785314['Movie_Id'].apply(lambda x: svd.predict(785314, x).est)

user_785314 = user_785314.drop('Movie_Id', axis = 1)

user_785314 = user_785314.sort_values('Estimate_Score', ascending=False)
print(user_785314.head(10))
# -
cloud_recommender.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dagstuhl ChoirSet: Parsing and Visualization Demo # ### Table of Contents # 1. [DCS Content Parser](#one) # 2. [Multitrack Audio Playback](#two) # 3. [Beat Annotations](#three) # 4. [Time-Aligned Score Representation](#four) # 5. [Fundamental Frequency Trajectories](#five) # + import numpy as np import matplotlib.pyplot as plt from pywebaudioplayer import pywebaudioplayer as pwa import IPython.display as ipd import pandas as pd import librosa import pretty_midi import os import glob import DCStoolbox as dcst # - # ### 1. DCS Content Parser <a class="anchor" id="one"></a> # + # Settings DCS_path = './DagstuhlChoirSet/' song_id = 'DCS_LI' setting = 'QuartetA' take = 'Take04' part = '*' mic = '*' # generate table of available audio and annotation files DCS_table = dcst.DCS_content_parser(DCS_path, song_id, setting, take, part, mic) DCS_table # - # ### 2. Multitrack Audio Playback<a class="anchor" id="two"></a> # **---This player is only supported in Google Chrome---** # + # using pywebaudioplayer by <NAME> tracks = [] for i in range(DCS_table.shape[0]): tracks.append({'title': '%s (%s)' %(DCS_table.iloc[i]['Section'], DCS_table.iloc[i]['Microphone']), 'path': DCS_table.iloc[i]['Audio'][2:], 'mimetype': 'audio/wav'}) # Display trackswitch.js ts1 = pwa.trackswitch(tracks, repeat=False, globalsolo=False, onlyradiosolo=True) ipd.HTML(ts1) # - # ### 3. Beat Annotations<a class="anchor" id="three"></a> fn_beat = DCS_table.iloc[1, :]['Beat'] beat_anno = dcst.read_csv(fn_beat, header=None, names=['Time (sec)', 'Measure.RelativeBeatPosition']) beat_anno # The last row of the beat annotation includes the end of the audio file in seconds and the end of the last measure encoded as .999. 
# sonify beat annotation using librosa fn_mix = DCS_table.iloc[1, :]['Audio'] x_mix, Fs = librosa.load(fn_mix) beat_soni = librosa.clicks(beat_anno.iloc[:, 0].values, sr=Fs, click_freq=1000, length=len(x_mix)) ipd.Audio(x_mix+beat_soni, rate=Fs) # ### 4. Time-Aligned Score Representation<a class="anchor" id="four"></a> # load tuple of file names fns_score = DCS_table.iloc[1, :]['ScoreRepr'] fns_score # score representation for soprano dcst.read_csv(fns_score[0], header=None, names=['Onset (sec)', 'Offset (sec)', 'MIDIPitch']) # sonify aligned score representation x_soni_score = dcst.sonify_score_representation_with_sinusoidals(fns_score, Fs, len(x_mix)) ipd.Audio(np.vstack((x_mix.reshape(1, -1), x_soni_score.reshape(1, -1))), rate=Fs) # ### 5. Fundamental Frequency Trajectories<a class="anchor" id="five"></a> fn_f0 = DCS_table.iloc[4, :]['F0CREPE'] f0 = dcst.read_csv(fn_f0, header=None, names=['Time (sec)', 'F0 (Hz)', 'Confidence']) f0 # stereo sonification x_lrx, Fs = librosa.load(DCS_table.iloc[4, :]['Audio']) x_f0 = dcst.sonify_trajectory_with_sinusoid(f0.iloc[:, 0:2].values, len(x_lrx), Fs=Fs, amplitude=0.3, smooth_len=11) x_soni_stereo = np.vstack((x_lrx.reshape(1,-1), x_f0.reshape(1,-1))) ipd.Audio(x_soni_stereo, rate=Fs)
demo1_dataParser.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Scikit-Learn IRIS Model
#
# * Wrap a scikit-learn python model for use as a prediction microservice in seldon-core
# * Run locally on Docker to test
# * Deploy on seldon-core running on a kubernetes cluster
#
# ## Dependencies
#
# * [S2I](https://github.com/openshift/source-to-image)
#
# ```bash
# pip install sklearn
# pip install seldon-core
# ```
#
#
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html) to setup Seldon Core with an ingress - either Ambassador or Istio.
#
# Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
#
# * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`
# * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80`

# !kubectl create namespace seldon

# !kubectl config set-context $(kubectl config current-context) --namespace=seldon

# Create Seldon Core config file

# NOTE(review): the un-commented YAML below is the body of the %%writefile
# cell magic above — it is valid only when executed as a notebook, not as a
# plain .py script.
# %%writefile sklearn_iris_deployment.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
  name: seldon-deployment-example
spec:
  name: sklearn-iris-deployment
  predictors:
  - componentSpecs:
    - spec:
        containers:
        - image: seldonio/sklearn-iris:0.2
          imagePullPolicy: IfNotPresent
          name: sklearn-iris-classifier
      graph:
        children: []
        endpoint:
          type: REST
        name: sklearn-iris-classifier
        type: MODEL
      name: sklearn-iris-predictor
      replicas: 1

# !kubectl create -f sklearn_iris_deployment.yaml

# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-deployment-example \
#                                  -o jsonpath='{.items[0].metadata.name}')

# Poll the SeldonDeployment status until it becomes Available (max 60s).
# NOTE(review): `state` is produced by the commented-out shell magic inside
# the loop, and `time` is not imported in this view — this cell only works
# inside the original notebook; verify before running as a script.
for i in range(60):
    # state=!kubectl get sdep seldon-deployment-example -o jsonpath='{.status.state}'
    state=state[0]
    print(state)
    if state=="Available":
        break
    time.sleep(1)
assert(state=="Available")

# Smoke-test the REST endpoint with a single iris feature vector.
# res=!curl -s http://localhost:8003/seldon/seldon/seldon-deployment-example/api/v0.1/predictions -H "Content-Type: application/json" -d '{"data":{"ndarray":[[5.964,4.006,2.081,1.031]]}}'
res
print(res)
import json
j=json.loads(res[0])
assert(j["data"]["ndarray"][0][0]>0.0)

# REST request with raw_data
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="seldon-deployment-example",namespace="seldon")
res = sc.predict(gateway="istio",gateway_endpoint="localhost:8003",
                 transport="rest",raw_data = {"data":{"ndarray":[[5.964,4.006,2.081,1.031]]}})
print(res.response)
assert(res.success==True)

# gRCP request with proto raw_data
from seldon_core.utils import json_to_seldon_message
proto_raw_data = json_to_seldon_message({"data":{"ndarray":[[5.964,4.006,2.081,1.031]]}})
res = sc.predict(gateway="istio",gateway_endpoint="localhost:8003",
                 transport="grpc",raw_data = proto_raw_data)
print(res)
assert(res.success==True)

# !kubectl delete -f sklearn_iris_deployment.yaml
examples/models/sklearn_iris/sklearn_iris.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.2 # language: julia # name: julia-1.5 # --- # ## Julia programming language cheatsheet # #### Help # + # Jupyter notebook: # CTRL+Enter -> evaluate current cell # SHIFT+Enter -> evaluate current cell and advance to next cell # In[*] at the left of a cell means code is running. Please wait # a - insert cell above # b - insert cell below # dd - delete cell # + # type ? followed by function name to get help in an empty cell or in Julia REPL(read-eval-print-loop/console) # - ?sin pwd() # show current working directory cd(raw"C:\Julia1.5.2") # change current directory (notice the string doesn't end with \) @info pwd() # + # Useful links: # Official Julia release (download and install) # https://julialang.org/ # Official Julia language documentation: # https://docs.julialang.org/en/v1/ # Packages, doc search, function search: # https://juliahub.com/ui/Home # Official Julia language forum: # https://discourse.julialang.org/ # - # #### Variables # + # variable binding a = 1 a,b = 2,3 # empty vector of "Any" type b = [] # user defined matrix a = [1 2 3; 4 5 6] # two lines, three columns. separate lines with ; # preallocated vector of Float64 with 5 elements c = zeros(Float64, 5) # or c = Array{Float64, 1}(undef, 5) # preallocate an array of Float64 with "1" dimension: "5" elements(Vector). The array contains garbage Float64 elements. # preallocated Array of FLoat64 (matrix or multidimensional array) c = zeros(Float64, 5,5) # or c = Array{Float64, 2}(undef, 5, 5) # preallocate an array of Float64 with "2" dimensions: "5" rows and 5 columns. The array contains garbage Float64 elements. # add an element to the end of an array push!(b, 4) # There is no clear/clear all/ clear vars command. 
If you need to release the memory used by some variable just assign the value "nothing" to it c = nothing # the garbage collector will take care of freeing memory. if you want to explicitly invoke it: GC.gc() # Ranges; ranges are lazy collections that store only the start/end of arrays and their length, in order to avoid memory allocation d = 1:10 # one can materialize a range into an array by using the "collect" function e = collect(d) # Strings vs chars x = "this is a string" y = 'c' # String concatenation "string1" * "string2" # String interpolation; $variable_name lets you concatenate a value inside a string x = 20 age = "my age is $x" # - # #### Functions # + # Short definitions f(x) = 2x # notice the omission of "*"; Julia lets you do that f(2) # Typical definition for multi-line function. The result of the last expression of the function will be returned. # Otherwise you can explicitly write "return your_variable" function my_func(x) a = 2*x b = sqrt(a) return a, b end # Multimethods g(x::Int64) = 2x g(x::Float64) = x + 3.0 @info g(2) @info g(2.0) # - # #### Composite types # + mutable struct MyStruct x y end data = MyStruct(1,2) # - # #### Operator broadcasting # + # Julia lets you apply any function over a collection using the "." notation # Let's apply some functions over a vector a = [1, 2, 3, 4] res_sin = sin.(a) res_exp = exp.(a) # Now let's sum the the two results, element to element res = res_sin .+ res_exp # - # #### Loops and conditionals # + # LOOPS INTRODUCE LOCAL VARIABLE SCOPES a=0 for i=1:10 a=i println(a) end a = 1:10 for i in a println(i) end b=true while b println("gotcha, I'm an infinite loop") sleep(0.1) # pause execution for 100ms end c = true if c println("c is true") else println("c is false") end d = 3 e = 5 if d < e println("d is smaller than $e") elseif d == e println("d is equal to $e") else println("d is greater than $e") end # - # #### Macros # decorators starting with "@" are called macros. 
Examples: @info x = 1 # pretty prints the value of x in the console @time sum([1,2,4,4]) # displays the elapsed time and memory allocation of an expression; useful in benchmarking # #### Plots # + # DOCUMENTATION: http://docs.juliaplots.org/latest/generated/attributes_series/ using Plots # make the Plots library available gr() # gr backend # plotly() # plotly backend offers more tools such as zoom/pan x = 0.0:2e-3:1 y = exp.(-x/0.2) .* sin.(2*pi*10*x) h=plot(x, y; markershape=:circle, label="my decaying sine wave", background_color = :gray); # :gray is called "symbol"(special string) title!("very informative title"); xlabel!("this is the x label [unit]"); ylabel!("this is the y label [unit]"); display(h) # for gr backend # gui() # for plotly backend # - # multiple lines on the same axis; notice the plot! function x = 0.0:2e-3:1 y1 = exp.(-x/0.2) .* sin.(2*pi*10*x) y2 = exp.(-x/0.2) .* sin.(2*pi*20*x) h=plot(x, y1; markershape=:circle, label="my decaying sine wave #1", background_color = :gray); # :gray is called "symbol"(special string) h=plot!(x, y2; markershape=:circle, label="my decaying sine wave #2", background_color = :gray); # :gray is called "symbol"(special string) title!("very informative title"); xlabel!("this is the x label [unit]"); ylabel!("this is the y label [unit]"); display(h) # #### Saving data in .csv files # + using CSV using DataFrames diode_volt = 0:0.1:1 diode_crt = exp.(diode_volt) df = DataFrame("Voltage [V]" => diode_volt, "Current [A]" => diode_crt) CSV.write("C:\\Julia1.5.2\\dioda.csv", df; append=false) # NOTICE THE DOUBLE SLASH!
test/0000_Julia language cheatsheet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.1 (gimkl-2018b) # language: python # name: python3 # --- # <small><i>This notebook was put together by [<NAME>](http://www.vanderplas.com) and modified by [NeSI](https://www.nesi.org.nz). Source and license info is on [GitHub](https://github.com/nesi/sklearn_tutorial) ([original version](https://github.com/jakevdp/sklearn_tutorial/)).</i></small> # # Basic Principles of Machine Learning # Here we'll dive into the basic principles of machine learning, and how to # utilize them via the Scikit-Learn API. # # After briefly introducing scikit-learn's *Estimator* object, we'll cover **supervised learning**, including *classification* and *regression* problems, and **unsupervised learning**, including *dimensionality reduction* and *clustering* problems. # %matplotlib inline import numpy as np import matplotlib.pyplot as plt plt.style.use('seaborn') # ## The Scikit-learn Estimator Object # # Every algorithm is exposed in scikit-learn via an ''Estimator'' object. For instance a linear regression is implemented as so: from sklearn.linear_model import LinearRegression # **Estimator parameters**: All the parameters of an estimator can be set when it is instantiated, and have suitable default values: model = LinearRegression(normalize=True) print(model.normalize) # ### Quick Question: # # Can you name other parameters of LinearRegression? Use the [official documentation](https://scikit-learn.org) to find them. print(model) # **Estimated Model parameters**: When data is *fit* with an estimator, parameters are estimated from the data at hand. 
All the estimated parameters are attributes of the estimator object ending by an underscore: x = np.arange(10) y = 2 * x + 1 print(x) print(y) plt.plot(x, y, 'o'); # The input data for sklearn needs to be 2D: (samples == 10 x features == 1) X = x[:, np.newaxis] print(X) print(y) # fit the model on our data model.fit(X, y) # underscore at the end indicates a fit parameter print(model.coef_) print(model.intercept_) # The model found a line with a slope 2 and intercept 1, as we'd expect. # ## Supervised Learning: Classification and Regression # # In **Supervised Learning**, we have a dataset consisting of both features and labels. # The task is to construct an estimator which is able to predict the label of an object # given the set of features. A relatively simple example is predicting the species of # iris given a set of measurements of its flower. This is a relatively simple task. # Some more complicated examples are: # # - given a multicolor image of an object through a telescope, determine # whether that object is a star, a quasar, or a galaxy. # - given a photograph of a person, identify the person in the photo. # - given a list of movies a person has watched and their personal rating # of the movie, recommend a list of movies they would like # (So-called *recommender systems*: a famous example is the [Netflix Prize](http://en.wikipedia.org/wiki/Netflix_prize)). # # What these tasks have in common is that there is one or more unknown # quantities associated with the object which needs to be determined from other # observed quantities. # # Supervised learning is further broken down into two categories, **classification** and **regression**. # In classification, the label is discrete, while in regression, the label is continuous. For example, # in astronomy, the task of determining whether an object is a star, a galaxy, or a quasar is a # classification problem: the label is from three distinct categories. 
On the other hand, we might # wish to estimate the age of an object based on such observations: this would be a regression problem, # because the label (age) is a continuous quantity. # ### Classification Example # K nearest neighbors (kNN, see [Scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)) is one of the simplest learning strategies: given a new, unknown observation, look up in your reference database which ones have the closest features and assign the predominant class. # # Let's try it out on our iris classification problem: # + from sklearn import neighbors, datasets iris = datasets.load_iris() X, y = iris.data, iris.target # create the model knn = neighbors.KNeighborsClassifier(n_neighbors=5) # fit the model knn.fit(X, y) # What kind of iris has 3cm x 5cm sepal and 4cm x 2cm petal? # call the "predict" method: result = knn.predict([[3, 5, 4, 2]]) print(iris.target_names[result]) # - # You can also do probabilistic predictions: knn.predict_proba([[3, 5, 4, 2]]) # If we train a model only on sepal width and length, we can visualize for each possible input what is the most likely class. from fig_code import plot_iris_knn plot_iris_knn() # --- # # #### Exercise # # Use a different estimator on the same problem: ``sklearn.svm.SVC`` (see [Scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)). # # *Note that you don't have to know what it is do use it. We're simply trying out the interface here* from sklearn.svm import SVC # + # create the model # fit the model # predict on new data, e.g. [[3, 5, 4, 2]] # display the result # - # --- # ### Regression Example # # One of the simplest regression problems is fitting a line to data, which we saw above. 
# Scikit-learn also contains more sophisticated regression algorithms # + # Create some simple data import numpy as np np.random.seed(0) X = np.random.random(size=(20, 1)) y = 3 * X.squeeze() + 2 + np.random.randn(20) plt.plot(X, y, 'o'); # - # As above, we can plot a line of best fit: # + model = LinearRegression() model.fit(X, y) # Plot the data and the model prediction X_fit = np.linspace(0, 1, 100)[:, np.newaxis] y_fit = model.predict(X_fit) plt.plot(X, y, 'o') plt.plot(X_fit, y_fit); # - # Scikit-learn also has some more sophisticated models, which can respond to finer features in the data: # + # Fit a Random Forest from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() model.fit(X, y) # Plot the data and the model prediction X_fit = np.linspace(0, 1, 100)[:, np.newaxis] y_fit = model.predict(X_fit) plt.plot(X, y, 'o') plt.plot(X_fit, y_fit); # - # Whether either of these is a "good" fit or not depends on a number of things; we'll discuss details of how to choose a model later in the tutorial. # --- # # #### Exercise # # Explore the ``RandomForestRegressor`` object using Jupyter's help features (i.e. put a question mark after the object or use the *Contextual Help*). # What arguments are available to ``RandomForestRegressor``? # How does the above plot change if you change these arguments? # # These class-level arguments are known as *hyperparameters*, and we will discuss later how you to select hyperparameters in the model validation section. # # --- # ## Unsupervised Learning: Dimensionality Reduction and Clustering # # **Unsupervised Learning** addresses a different sort of problem. Here the data has no labels, # and we are interested in finding similarities between the objects in question. In a sense, # you can think of unsupervised learning as a means of discovering labels from the data itself. # Unsupervised learning comprises tasks such as *dimensionality reduction*, *clustering*, and # *density estimation*. 
For example, in the iris data discussed above, we can used unsupervised # methods to determine combinations of the measurements which best display the structure of the # data. As we'll see below, such a projection of the data can be used to visualize the # four-dimensional dataset in two dimensions. Some more involved unsupervised learning problems are: # # - given detailed observations of distant galaxies, determine which features or combinations of # features best summarize the information. # - given a mixture of two sound sources (for example, a person talking over some music), # separate the two (this is called the [blind source separation](http://en.wikipedia.org/wiki/Blind_signal_separation) problem). # - given a video, isolate a moving object and categorize in relation to other moving objects which have been seen. # # Sometimes the two may even be combined: e.g. Unsupervised learning can be used to find useful # features in heterogeneous data, and then these features can be used within a supervised # framework. # ### Dimensionality Reduction: PCA # # Principle Component Analysis (PCA) is a dimension reduction technique that can find the combinations of variables that explain the most variance. # # Consider the iris dataset. It cannot be visualized in a single 2D plot, as it has 4 features. 
We are going to extract 2 combinations of sepal and petal dimensions to visualize it: # + X, y = iris.data, iris.target from sklearn.decomposition import PCA pca = PCA(n_components=0.95) pca.fit(X) X_reduced = pca.transform(X) print("Reduced dataset shape:", X_reduced.shape) # + import pylab as plt plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap='RdYlBu') print("Meaning of the 2 components:") for component in pca.components_: print(" + ".join("%.3f x %s" % (value, name) for value, name in zip(component, iris.feature_names))) # - # #### Clustering: K-means # # Clustering groups together observations that are homogeneous with respect to a given criterion, finding ''clusters'' in the data. # # Note that these clusters will uncover relevent hidden structure of the data only if the criterion used highlights it. # + from sklearn.cluster import KMeans k_means = KMeans(n_clusters=3, random_state=0) # Fixing the RNG in kmeans k_means.fit(X) y_pred = k_means.predict(X) plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_pred, cmap='RdYlBu'); # - # ### Recap: Scikit-learn's estimator interface # # Scikit-learn strives to have a uniform interface across all methods, # and we'll see examples of these below. Given a scikit-learn *estimator* # object named `model`, the following methods are available: # # - Available in **all Estimators** # + `model.fit()` : fit training data. For supervised learning applications, # this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`). # For unsupervised learning applications, this accepts only a single argument, # the data `X` (e.g. `model.fit(X)`). # - Available in **supervised estimators** # + `model.predict()` : given a trained model, predict the label of a new set of data. # This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`), # and returns the learned label for each object in the array. 
# + `model.predict_proba()` : For classification problems, some estimators also provide # this method, which returns the probability that a new observation has each categorical label. # In this case, the label with the highest probability is returned by `model.predict()`. # + `model.score()` : for classification or regression problems, most (all?) estimators implement # a score method. Scores are between 0 and 1, with a larger score indicating a better fit. # - Available in **unsupervised estimators** # + `model.predict()` : predict labels in clustering algorithms. # + `model.transform()` : given an unsupervised model, transform new data into the new basis. # This also accepts one argument `X_new`, and returns the new representation of the data based # on the unsupervised model. # + `model.fit_transform()` : some estimators implement this method, # which more efficiently performs a fit and a transform on the same input data. # ## Model Validation # # An important piece of machine learning is **model validation**: that is, determining how well your model will generalize from the training data to future unlabeled data. Let's look at an example using the *nearest neighbor classifier*. This is a very simple classifier: it simply stores all training data, and for any unknown quantity, simply returns the label of the closest training point. # # With the iris data, it very easily returns the correct prediction for each of the input points: from sklearn.neighbors import KNeighborsClassifier X, y = iris.data, iris.target clf = KNeighborsClassifier(n_neighbors=1) clf.fit(X, y) y_pred = clf.predict(X) print(f"{np.sum(y == y_pred)} / {len(y)} correct") # A more useful way to look at the results is to view the **confusion matrix**, or the matrix showing the frequency of inputs and outputs: from sklearn.metrics import confusion_matrix print(confusion_matrix(y, y_pred)) # For each class, all 50 training samples are correctly identified. 
But this **does not mean that our model is perfect!** In particular, such a model generalizes extremely poorly to new data. We can simulate this by splitting our data into a *training set* and a *testing set*. Scikit-learn contains some convenient routines to do this: from sklearn.model_selection import train_test_split Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=56) clf.fit(Xtrain, ytrain) ypred = clf.predict(Xtest) print(confusion_matrix(ytest, ypred)) # This paints a better picture of the true performance of our classifier: apparently there is some confusion between the second and third species, which we might anticipate given what we've seen of the data above. # # This is why it's **extremely important** to use a train/test split when evaluating your models. We'll go into more depth on model evaluation later in this tutorial. # ## Flow Chart: How to Choose your Estimator # # This is a flow chart created by scikit-learn super-contributor [<NAME>](https://github.com/amueller) which gives a nice summary of which algorithms to choose in various situations. Keep it around as a handy reference! from IPython.display import Image Image("http://scikit-learn.org/dev/_static/ml_map.png") # Original source on the [scikit-learn website](http://scikit-learn.org/stable/tutorial/machine_learning_map/) # ### Quick Question: # # If you want to predict a the daily consumption of beer in Wellington using the average daily rainfall, temperature and solar radiation over the past 10 years, which estimators would you try first? # ## Quick Application: Optical Character Recognition # To demonstrate the above principles on a more interesting problem, let's consider OCR (Optical Character Recognition) – that is, recognizing hand-written digits. # In the wild, this problem involves both locating and identifying characters in an image. Here we'll take a shortcut and use scikit-learn's set of pre-formatted digits, which is built-in to the library. 
# ### Loading and visualizing the digits data # # We'll use scikit-learn's data access interface and take a look at this data: from sklearn import datasets digits = datasets.load_digits() digits.images.shape # Let's plot a few of these: # + fig, axes = plt.subplots(10, 10, figsize=(8, 8)) fig.subplots_adjust(hspace=0.1, wspace=0.1) for i, ax in enumerate(axes.flat): ax.imshow(digits.images[i], cmap='binary', interpolation='nearest') ax.text(0.05, 0.05, str(digits.target[i]), transform=ax.transAxes, color='green') ax.set_xticks([]) ax.set_yticks([]) # - # Here the data is simply each pixel value within an 8x8 grid: # The images themselves print(digits.images.shape) print(digits.images[0]) # The data for use in our algorithms print(digits.data.shape) print(digits.data[0]) # The target label print(digits.target) # So our data have 1797 samples in 64 dimensions. # ### Unsupervised Learning: Dimensionality Reduction # # We'd like to visualize our points within the 64-dimensional parameter space, but it's difficult to plot points in 64 dimensions! # Instead we'll reduce the dimensions to 2, using an unsupervised method. # Here, we'll make use of a manifold learning algorithm called *Isomap*, and transform the data to two dimensions. from sklearn.manifold import Isomap iso = Isomap(n_components=2) data_projected = iso.fit_transform(digits.data) data_projected.shape plt.scatter(data_projected[:, 0], data_projected[:, 1], c=digits.target, edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('nipy_spectral', 10)); plt.colorbar(label='digit label', ticks=range(10)) plt.clim(-0.5, 9.5) # We see here that the digits are fairly well-separated in the parameter space; this tells us that a supervised classification algorithm should perform fairly well. Let's give it a try. # ### Classification on Digits # # Let's try a classification task on the digits. 
The first thing we'll want to do is split the digits into a training and testing sample: from sklearn.model_selection import train_test_split Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target, random_state=42) print(Xtrain.shape, Xtest.shape) # Let's use a simple logistic regression which (despite its confusing name) is a classification algorithm: from sklearn.linear_model import LogisticRegression clf = LogisticRegression(penalty='l2', max_iter=5000) # **Exercise: fit the `clf` model to the training dataset.** # fit the model ypred = clf.predict(Xtest) print(f"{np.sum(ytest == ypred)} / {len(ytest)} correct") # We can check our classification accuracy by comparing the true values of the test set to the predictions: from sklearn.metrics import accuracy_score accuracy_score(ytest, ypred) # This single number doesn't tell us **where** we've gone wrong: one nice way to do this is to use the *confusion matrix* from sklearn.metrics import confusion_matrix print(confusion_matrix(ytest, ypred)) from sklearn.metrics import plot_confusion_matrix plot_confusion_matrix(clf, Xtest, ytest, cmap=plt.cm.Blues) plt.grid(False) # We might also take a look at some of the outputs along with their predicted labels. We'll make the bad labels red: # + fig, axes = plt.subplots(10, 10, figsize=(8, 8)) fig.subplots_adjust(hspace=0.1, wspace=0.1) for i, ax in enumerate(axes.flat): ax.imshow(Xtest[i].reshape(8, 8), cmap='binary') ax.text(0.05, 0.05, str(ypred[i]), transform=ax.transAxes, color='green' if (ytest[i] == ypred[i]) else 'red') ax.set_xticks([]) ax.set_yticks([]) # - # The interesting thing is that even with this simple logistic regression algorithm, many of the mislabeled cases are ones that we ourselves might get wrong! # # There are many ways to improve this classifier, but we're out of time here. To go further, we could use a more sophisticated model, use cross validation, or apply other techniques. 
# We'll cover some of these topics later in the tutorial.
notebooks/02.2-Basic-Principles.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# Non-stationary fluctuation analysis of Ca-imaging peak responses:
# compares mean vs. variance of odor-evoked responses across the three
# treatment groups (Control / Mint / Hexanal), first per odor, then per cell.

drive_path = 'c:/'

import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
from scipy.stats import anderson_ksamp
from scipy.stats import kruskal
from scipy.stats import variation
from scipy.stats import spearmanr
from scipy.stats import zscore
from scipy.stats import gaussian_kde
import seaborn as sns

# %matplotlib

# Import data
# FIX: raw string — the original non-raw literal contains `\U`, which is a
# SyntaxError on Python 3 (`\U` starts a unicode escape).
comp = pd.read_csv(r'C:\Users\Annie\Documents\Data\Ca_Imaging\GoodFiles\fullpeak.csv')

# del comp['Mouse']

# Order columns by ascending mean response.
# FIX: `reindex_axis` was removed in pandas 1.0; `reindex(columns=...)` is the
# drop-in replacement.
comp_sorted = comp.reindex(columns=comp.mean().sort_values().index)
comp_labels = pd.DataFrame(comp.Group)
tmp = [comp_labels, comp_sorted]
composite_full = pd.concat(tmp, axis=1)

composite_full

# +
# Calculate means and variance for each odor
Cctrl = composite_full[composite_full['Group'] == 'Control']
Cmean = pd.DataFrame(Cctrl.mean())
Cmean.columns = ['Control Mean']
Cvar = pd.DataFrame(Cctrl.var())
Cvar.columns = ['Control Variance']

M = composite_full[composite_full['Group'] == 'Mint']
Mmean = pd.DataFrame(M.mean())
Mmean.columns = ['Mint Mean']
Mvar = pd.DataFrame(M.var())
Mvar.columns = ['Mint Variance']

H = composite_full[composite_full['Group'] == 'Hexanal']
Hmean = pd.DataFrame(H.mean())
Hmean.columns = ['Hexanal Mean']
Hvar = pd.DataFrame(H.var())
Hvar.columns = ['Hexanal Variance']

# Concat
Ctmp = [Cmean, Cvar]
Mtmp = [Mmean, Mvar]
Htmp = [Hmean, Hvar]
CtrlDF = pd.concat(Ctmp, axis=1)
MDF = pd.concat(Mtmp, axis=1)
HDF = pd.concat(Htmp, axis=1)
final = [CtrlDF, MDF, HDF]
finaldf = pd.concat(final, axis=1)
# -

finaldf = finaldf.reset_index(drop=True)
finaldf.head()

# Mean vs. variance per odor, one regression line per treatment group.
# FIX: seaborn >= 0.12 requires keyword arguments for regplot (positional
# x/y/data were removed).
sns.set(style="white", palette="muted", color_codes=True);
sns.set_context("talk", font_scale=1.8);
plt.figure(figsize=(35, 20));
sns.regplot(x=finaldf['Control Mean'], y=finaldf['Control Variance'], scatter_kws={"s": 175}, color='r')
sns.regplot(x=finaldf['Mint Mean'], y=finaldf['Mint Variance'], scatter_kws={"s": 175}, color='g')
sns.regplot(x=finaldf['Hexanal Mean'], y=finaldf['Hexanal Variance'], scatter_kws={"s": 175}, color='b')
sns.despine()
plt.ylabel('Variance', fontsize=48);
plt.title('Mean vs. Variance', fontsize=55);
plt.xlabel('Mean', fontsize=48);
plt.legend(loc=2, prop={'size': 48});

finaldf['Control Mean']

# # Cell by cell, averaged odors

# Per-cell statistics: mean/variance across odors (columns) within each row.
Ccellmean = Cctrl.mean(axis=1)
Ccellvar = Cctrl.var(axis=1)
Mcellmean = M.mean(axis=1)
Mcellvar = M.var(axis=1)
Hcellmean = H.mean(axis=1)
Hcellvar = H.var(axis=1)

# +
# Concat
Ctemp = [Cctrl['Group'], Ccellmean, Ccellvar]
Mtemp = [M['Group'], Mcellmean, Mcellvar]
Htemp = [H['Group'], Hcellmean, Hcellvar]
CtrlcellDF = pd.concat(Ctemp, axis=1)
CtrlcellDF.columns = ('Group', 'Mean', 'Variance')
McellDF = pd.concat(Mtemp, axis=1)
McellDF.columns = ('Group', 'Mean', 'Variance')
HcellDF = pd.concat(Htemp, axis=1)
HcellDF.columns = ('Group', 'Mean', 'Variance')
finalcell = [CtrlcellDF, McellDF, HcellDF]
finalcelldf = pd.concat(finalcell, axis=0)
# -

sns.regplot(x='Mean', y='Variance', data=CtrlcellDF)

# Same mean-vs-variance view, but per cell (averaged over odors).
sns.set(style="white", palette="muted", color_codes=True);
sns.set_context("talk", font_scale=1.8);
plt.figure(figsize=(30, 15));
sns.regplot(x='Mean', y='Variance', data=CtrlcellDF, scatter_kws={"s": 80}, color='r', label='Control')
sns.regplot(x='Mean', y='Variance', data=McellDF, scatter_kws={"s": 80}, color='g', label='Mint')
sns.regplot(x='Mean', y='Variance', data=HcellDF, scatter_kws={"s": 80}, color='b', label='Hexanal')
sns.despine()
plt.ylabel('Variance', fontsize=48);
plt.title('Mean vs. Variance', fontsize=55);
plt.xlabel('Mean', fontsize=48);
plt.legend(loc=2, prop={'size': 48});
Non Stationary Fluctuation Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import dependencies # + import numpy as np import sys import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sn from zipfile import ZipFile from fair import * from fair.scripts.stats import * from fair.scripts.data_retrieval import * # %matplotlib inline # - # ### Aerosol parameters following Smith et al. (2020) # # Here we base aerosol forcing relations on [Smith et al., 2020](http://www.essoar.org/doi/10.1002/essoar.10503977.1). ERFari is modelled as a linear combination of SO$_2$, BC and OC emissions. For ERFaci we use a combination of a logarithmic SO$_2$ dependence and a linear BC / OC dependence. We reduce the potential number of parameters to avoid overfitting. That is: # # $$ # ERF_{ari} = \alpha_{SO_2} \cdot E_{SO_2} + \alpha_{BC} \cdot E_{BC} + \alpha_{OC} \cdot E_{OC} \,; \text{and}\\\\ # ERF_{aci} = \beta \cdot ln\left(1 + \frac{E_{SO_2}}{s}\right) + \alpha \cdot ( E_{OC} + E_{BC} ) # $$ # # In the ERFaci form, $s$ is treated as a shape parameter, and measures the extent to which aerosol-cloud interaction is linear or logarithmic with SO$_2$ emissions. 
# ## Data retrieval # Import data from RFMIP CMIP6_aer_forc_zip = ZipFile('../../aux/input-data/_hidden/aer_forc_CMIP6.zip') CMIP6_aer_forc_fnames = [x.filename for x in CMIP6_aer_forc_zip.infolist()] CMIP6_aer_forc = pd.concat([pd.read_csv(CMIP6_aer_forc_zip.open(x),index_col=0) for x in CMIP6_aer_forc_fnames],axis=1,keys=[x.split('.')[0] for x in CMIP6_aer_forc_fnames]) CMIP6_aer_forc.index = CMIP6_aer_forc.index.astype(int) ## get the RCMIP emms for the species we use to parameterise ERFaer aer_emms = RCMIP_to_FaIR_input_emms('ssp245').loc[:2100,['so2','bc','oc']].interpolate() ## relative to 1850 aer_emms -= aer_emms.loc[1850] # ## Parameterisation tuning # ### ERFari # + # tune ERFari ERFari_coefs = pd.DataFrame(index=CMIP6_aer_forc.columns.levels[0],columns=aer_emms.columns) for model in CMIP6_aer_forc.columns.levels[0]: model_forc = CMIP6_aer_forc.loc[:,model] model_erfari = (model_forc['aprp_ERFariSW']+model_forc['aprp_ERFariLW']).dropna() RCMIP_emms = aer_emms.loc[model_erfari.index] coefs = OLSE_NORM(RCMIP_emms.values,model_erfari.values[:,None],add_intercept=False)['coefs'] ERFari_coefs.loc[model] = coefs[:,0] # + # validate tuning fig,ax = plt.subplots(2,5,figsize=(12,4)) for i,model in enumerate(CMIP6_aer_forc.columns.levels[0]): model_forc = CMIP6_aer_forc.loc[:,model] model_erfari = (model_forc['aprp_ERFariSW']+model_forc['aprp_ERFariLW']).dropna() RCMIP_emms = aer_emms.copy() FaIRv2_erfari = RCMIP_emms * ERFari_coefs.loc[model,:] model_erfari.plot(ax=ax.flatten()[i],color='orange',marker='o',lw=0,ms=1) FaIRv2_erfari.sum(axis=1).plot(ax=ax.flatten()[i],color='k',ls='--') ax.flatten()[i].set_title(model) [a.set_ylim(-0.4,0.2) for a in ax.flatten()] [a.set_xlim(1750,2100) for a in ax.flatten()] plt.tight_layout() '' # - # ### ERFari # + def fit_aci(x,model): model_forc = CMIP6_aer_forc.loc[:,model] model_erfaci = (model_forc['aprp_ERFaciSW']+model_forc['aprp_ERFaciLW']).dropna() emms = aer_emms.loc[model_erfaci.index] fit = x[0] * np.log(1 + 
emms['so2'].values / np.exp(x[1]) ) + x[2]*(emms['bc'].values+emms['oc'].values) return np.sum((fit-model_erfaci)**2) ERFaci_coefs = pd.DataFrame(index = CMIP6_aer_forc.columns.levels[0],columns =['beta','s','alpha']) for model in CMIP6_aer_forc.columns.levels[0]: fit_so2 = sp.optimize.minimize(fit_aci,x0=[-0.4,np.log(60),0],args=model,method='nelder-mead').x fit_so2[1] = np.exp(fit_so2[1]) ERFaci_coefs.loc[model] = fit_so2 # + # validate tuning fig,ax = plt.subplots(2,5,figsize=(12,4)) for i,model in enumerate(CMIP6_aer_forc.columns.levels[0]): model_forc = CMIP6_aer_forc.loc[:,model] model_erfaci = (model_forc['aprp_ERFaciSW']+model_forc['aprp_ERFaciLW']).dropna() emms = aer_emms.copy() FaIRv2_erfaci = ERFaci_coefs.loc[model,'beta'] * np.log(1 + emms['so2'] / ERFaci_coefs.loc[model,'s'] ) + ERFaci_coefs.loc[model,'alpha']*(emms['bc']+emms['oc']) model_erfaci.plot(ax=ax.flatten()[i],color='orange',marker='o',lw=0,ms=1) FaIRv2_erfaci.plot(ax=ax.flatten()[i],color='k',ls='--') ax.flatten()[i].set_title(model) [a.set_ylim(-1.8,0.2) for a in ax.flatten()] [a.set_xlim(1750,2100) for a in ax.flatten()] plt.tight_layout() '' # - # ## Save coefficients to repo ERFaci_coefs.to_csv('../../aux/parameter-sets/CMIP6_ERFaci.csv') ERFari_coefs.to_csv('../../aux/parameter-sets/CMIP6_ERFari.csv') #definition to round to set sfs: def round_to_sf(x,sf): if x==0: return 0 if np.isnan(x): return '-' else: num= round(x, sf - int(np.floor(np.log10(abs(x))))) if abs(num)>10**sf: return str(int(num)) else: return str(num) # + ERFaer_table = pd.concat([ ERFari_coefs.rename(dict(so2='$f^{\text{SO}_2}_2$',bc='$f^{\text{BC}}_2$',oc='$f^{\text{OC}}_2$'),axis=1), ERFaci_coefs.rename(dict(beta='$f^{\text{aci}}_1$',s='$C_0^{\text{SO}_2}$',alpha='$f^{\text{aci}}_2$'),axis=1), ],axis=1,keys=['ERFari','ERFaci']).applymap(lambda x:round_to_sf(x,2)) ERFaer_table.index.name='model' ERFaer_table.columns.names=['source','parameter'] 
ERFaer_table#.to_latex('../../docs/manuscript/tables/Tab4',escape=False,bold_rows=True)
# -

# ## Create plot for the paper

# set some matplotlib rcParams

# +
matplotlib.rcParams['font.family']='Helvetica'
matplotlib.rcParams['font.size']=11
matplotlib.rcParams['axes.formatter.limits']=-3,3
matplotlib.rcParams['legend.frameon']=False
# Type 42 (TrueType) fonts in pdf output, so text remains editable
plt.rcParams['pdf.fonttype'] = 42

# +
## aggregate data into good format: one tidy frame with columns
## year / source / model / ERF / value, combining the CMIP6 diagnostics
## and the FaIRv2.0.0 fits for ERFari, ERFaci and their total
CMIP6_plot_data = []
FaIRfit_plot_data = []
for model in CMIP6_aer_forc.columns.levels[0]:
    CMIP6_ERFari = (CMIP6_aer_forc.loc[:,(model,'aprp_ERFariSW')]+CMIP6_aer_forc.loc[:,(model,'aprp_ERFariLW')]).dropna()
    CMIP6_ERFaci = (CMIP6_aer_forc.loc[:,(model,'aprp_ERFaciSW')]+CMIP6_aer_forc.loc[:,(model,'aprp_ERFaciLW')]).dropna()
    CMIP6_tot = CMIP6_ERFari+CMIP6_ERFaci
    CMIP6_plot_data+=[pd.concat([CMIP6_ERFari,CMIP6_ERFaci,CMIP6_tot],axis=1,keys=['ERFari','ERFaci','Total'])]
    # reconstruct the FaIR fits from the tuned coefficients
    FaIR_ERFaci = ERFaci_coefs.loc[model,'beta'] * np.log(1 + aer_emms['so2'] / ERFaci_coefs.loc[model,'s'] ) + ERFaci_coefs.loc[model,'alpha']*(aer_emms['bc']+aer_emms['oc'])
    FaIR_ERFari = (aer_emms * ERFari_coefs.loc[model,:]).sum(axis=1)
    FaIR_tot = FaIR_ERFaci+FaIR_ERFari
    FaIRfit_plot_data+=[pd.concat([FaIR_ERFari,FaIR_ERFaci,FaIR_tot],axis=1,keys=['ERFari','ERFaci','Total'])]
CMIP6_plot_data = pd.concat(CMIP6_plot_data,axis=1,keys=CMIP6_aer_forc.columns.levels[0])
FaIRfit_plot_data = pd.concat(FaIRfit_plot_data,axis=1,keys=CMIP6_aer_forc.columns.levels[0])

plot_data = pd.concat([CMIP6_plot_data,FaIRfit_plot_data],axis=1,keys=['CMIP6','FaIRv2.0.0 fit']).stack(level=[0,1,2]).sort_index().reset_index()
plot_data.columns=['year','source','model','ERF','value']

# +
def custom_facet(x,y,**kwargs):
    """Hue-aware FacetGrid callback: draws the FaIR fit as a line on top
    (zorder=10) and the CMIP6 diagnostics as small unconnected dots."""
    if kwargs['label']=='FaIRv2.0.0 fit':
        plt.plot(x,y,**kwargs,zorder=10)
    elif kwargs['label']=='CMIP6':
        plt.plot(x,y,**kwargs,marker='.',lw=0,ms=2)

# one row per model, one column per ERF component; y-axis shared along rows
g= sn.FacetGrid(data=plot_data, col='ERF', row='model', sharey='row', hue='source', palette=['k','orange'], margin_titles=True, height=2, aspect=1.5 )
g.map(custom_facet,'year','value')
g.map(plt.axhline,y=0,ls=':',alpha=0.3,color='k')
g.set(xlim=[1750,2100]).set_titles(col_template="{col_name}",fontweight='bold', row_template="{row_name}").set_axis_labels("year", "Forcing / W m$^{-2}$")
# de-duplicate the legend: keep only the first two handles/labels
g.axes[0,0].legend(*[x[:2] for x in g.axes[0,0].get_legend_handles_labels()])
g.fig.subplots_adjust(hspace=0.1)
[a.ticklabel_format(style='plain') for a in g.axes[-1]]
[g.fig.savefig('../../docs/manuscript/figures/Fig5.'+x,dpi=600,bbox_inches='tight') for x in ['png','pdf']]
''
notebooks/parameter-tuning/CMIP6-aerosol.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Software Engineering Practices, Part 2 # > A Summary of lecture "AWS ML Foundations Course", via Udacity # # - toc: true # - badges: true # - comments: true # - author: <NAME> # - categories: [Python, Udacity, Software Engineering] # - image: images/logo.png # ## Testing # Testing your code is essential before deployment. It helps you catch errors and faulty conclusions before they make any major impact. Today, employers are looking for data scientists with the skills to properly prepare their code for an industry setting, wich includes testing their code. # ### Testing and Data Science # - Problems that could occur in data science aren't always easily detectable; you might have values being encoded incorrectly, features being used inappropriately, unexpected data breaking assumptions. # # - To catch these errors, you have to check for the quality and accuracy of your analysis in addition to the quality of your code. Proper testing is necessary to avoid unexpected surprises and have confidence in your results. # # - **TEST DRIVEN DEVELOPMENT**: a development process where you write tests for tasks before you even write the code to implement those tasks. # # - **UNIT TEST**: a test that covers a "unit" of code, usually a single function, independently from the rest of the program. 
# # ##### Resources: # - Four ways Data Science Goes Wrong and How Test Driven Data Analysis Can Help: [Link](https://www.predictiveanalyticsworld.com/patimes/four-ways-data-science-goes-wrong-and-how-test-driven-data-analysis-can-help/6947/) # - <NAME>: Getting Started Testing: [Slide Deck](https://speakerdeck.com/pycon2014/getting-started-testing-by-ned-batchelder) and [Presentation Video](https://www.youtube.com/watch?v=FxSsnHeWQBY) # ### Unit Tests # # We want to test our functions in a way that is repeatable and automated. Ideally, we'd run a test program that runs all our unit tests and cleanly lets us know which ones failed and which ones succeeded. Fortunately, there are great tools available in Python that we can use to create effective unit tests! # # **Unit Test Advantages and Disadvantages** # # The advantage of unit tests is that they are isolated from the rest of your program, and thus, no dependencies are involved. They don't require access to databases, APIs, or other external sources of information. However, passing unit tests isn’t always enough to prove that our program is working successfully. To show that all the parts of our program work with each other properly, communicating and transferring data between them correctly, we use integration tests. In this lesson, we'll focus on unit tests; however, when you start building larger programs, you will want to use integration tests as well. # # You can read about integration testing and how integration tests relate to unit tests [here](https://www.fullstackpython.com/integration-testing.html). That article contains other very useful links as well. # # ### Unit Testing Tools # To install ```pytest```, run ```pip install -U pytest``` in your terminal. You can see more information on getting started [here](https://docs.pytest.org/en/latest/getting-started.html). 
# # - Create a test file starting with ```test_``` # - Define unit test functions that start with ```test_``` inside the test file # - Enter ```pytest``` into your terminal in the directory of your test file and it will detect these tests for you! # # ```test_``` is the default - if you wish to change this, you can learn how to in this ```pytest``` [configuration](https://docs.pytest.org/en/latest/customize.html) # # In the test output, periods represent successful unit tests and F's represent failed unit tests. Since all you see is what test functions failed, it's wise to have only one ```assert``` statement per test. Otherwise, you wouldn't know exactly how many tests failed, and which tests failed. # # Your tests won't be stopped by failed ```assert``` statements, but it will stop if you have syntax errors. # #### Example # ```shell # root@1c2d457fb26f:/home/workspace# pytest # ========================================================================== test session starts =========================================================================== # platform linux -- Python 3.6.3, pytest-5.4.2, py-1.8.1, pluggy-0.13.1 # rootdir: /home/workspace # plugins: requests-mock-1.5.2 # collected 4 items # # test_compute_launch.py ..F. 
[100%] # # ================================================================================ FAILURES ================================================================================ # ___________________________________________________________________ test_days_until_launch_0_negative ____________________________________________________________________ # # def test_days_until_launch_0_negative(): # > assert(days_until_launch(83, 64) == 0) # E assert -19 == 0 # E + where -19 = days_until_launch(83, 64) # # test_compute_launch.py:10: AssertionError # ======================================================================== short test summary info ========================================================================= # FAILED test_compute_launch.py::test_days_until_launch_0_negative - assert -19 == 0 # ====================================================================== 1 failed, 3 passed in 0.32s ======================================================================= # ``` # After fixing code, # ```shell # root@1c2d457fb26f:/home/workspace# pytest # ========================================================================== test session starts =========================================================================== # platform linux -- Python 3.6.3, pytest-5.4.2, py-1.8.1, pluggy-0.13.1 # rootdir: /home/workspace # plugins: requests-mock-1.5.2 # collected 4 items # # test_compute_launch.py .... [100%] # # =========================================================================== 4 passed in 0.05s ============================================================================ # ``` # ### Test Driven Development and Data Science # # - **TEST DRIVEN DEVELOPMENT**: writing tests before you write the code that’s being tested. Your test would fail at first, and you’ll know you’ve finished implementing a task when this test passes. # - Tests can check for all the different scenarios and edge cases you can think of, before even starting to write your function. 
This way, when you do start implementing your function, you can run this test to get immediate feedback on whether it works or not in all the ways you can think of, as you tweak your function. # - When refactoring or adding to your code, tests help you rest assured that the rest of your code didn't break while you were making those changes. Tests also helps ensure that your function behavior is repeatable, regardless of external parameters, such as hardware and time. # # Test driven development for data science is relatively new and has a lot of experimentation and breakthroughs appearing, which you can learn more about in the resources below. # # - [Data Science TDD](https://www.linkedin.com/pulse/data-science-test-driven-development-sam-savage/) # - [TDD for Data Science](http://engineering.pivotal.io/post/test-driven-development-for-data-science/) # - [TDD is Essential for Good Data Science Here's Why](https://medium.com/@karijdempsey/test-driven-development-is-essential-for-good-data-science-heres-why-db7975a03a44) # - [Testing Your Code](http://docs.python-guide.org/en/latest/writing/tests/) (general python TDD) # # # ## Logging # # Logging is valuable for understanding the events that occur while running your program. For example, if you run your model over night and see that it's producing ridiculous results the next day, log messages can really help you understand more about the context in which this occurred. Lets learn about the qualities that make a log message effective. # # ### Log Messages # # Logging is the process of recording messages to describe events that have occurred while running your software. Let's take a look at a few examples, and learn tips for writing good log messages. # # **Tip: Be professional and clear** # ``` # Bad: Hmmm... this isn't working??? # Bad: idk.... :( # Good: Couldn't parse file. 
# ``` # # **Tip: Be concise and use normal capitalization** # ``` # Bad: Start Product Recommendation Process # Bad: We have completed the steps necessary and will now proceed with the recommendation process for the records in our product database. # Good: Generating product recommendations. # ``` # # **Tip: Choose the appropriate level for logging** # - DEBUG - level you would use for anything that happens in the program. # - ERROR - level to record any error that occurs # - INFO - level to record all actions that are user-driven or system specific, such as regularly scheduled operations # # **Tip: Provide any useful information** # ``` # Bad: Failed to read location data # Good: Failed to read location data: store_id 8324971 # ``` # ## Code Reviews # # Code reviews benefit everyone in a team to promote best programming practices and prepare code for production. Let's go over what to look for in a code review and some tips on how to conduct one. # # - [Code Review](https://github.com/lyst/MakingLyst/tree/master/code-reviews) # - [Code Review Best Practices](https://www.kevinlondon.com/2015/05/05/code-review-best-practices.html) # # # ### Questions to Ask Yourself When Conducting a Code Review # # First, let's look over some of the questions we may ask ourselves while reviewing code. These are simply from the concepts we've covered in these last two lessons! # # **Is the code clean and modular?** # # - Can I understand the code easily? # - Does it use meaningful names and whitespace? # - Is there duplicated code? # - Can you provide another layer of abstraction? # - Is each function and module necessary? # - Is each function or module too long? # # **Is the code efficient?** # # - Are there loops or other steps we can vectorize? # - Can we use better data structures to optimize any steps? # - Can we shorten the number of calculations needed for any steps? # - Can we use generators or multiprocessing to optimize any steps? 
# # **Is documentation effective?** # # - Are in-line comments concise and meaningful? # - Is there complex code that's missing documentation? # - Do function use effective docstrings? # - Is the necessary project documentation provided? # # **Is the code well tested?** # # - Does the code high test coverage? # - Do tests check for interesting cases? # - Are the tests readable? # - Can the tests be made more efficient? # # **Is the logging effective?** # # - Are log messages clear, concise, and professional? # - Do they include all relevant and useful information? # - Do they use the appropriate logging level? # ### Tips for Conducting a Code Review # # Now that we know what we are looking for, let's go over some tips on how to actually write your code review. When your coworker finishes up some code that they want to merge to the team's code base, they might send it to you for review. You provide feedback and suggestions, and then they may make changes and send it back to you. When you are happy with the code, you approve and it gets merged to the team's code base. # # As you may have noticed, with code reviews you are now dealing with people, not just computers. So it's important to be thoughtful of their ideas and efforts. You are in a team and there will be differences in preferences. The goal of code review isn't to make all code follow your personal preferences, but a standard of quality for the whole team. # # **Tip: Use a code linter** # # This isn't really a tip for code review, but can save you lots of time from code review! Using a Python code linter like [pylint](https://www.pylint.org/) can automatically check for coding standards and PEP 8 guidelines for you! It's also a good idea to agree on a style guide as a team to handle disagreements on code style, whether that's an existing style guide or one you create together incrementally as a team. 
# # **Tip: Explain issues and make suggestions** # # Rather than commanding people to change their code a specific way because it's better, it will go a long way to explain to them the consequences of the current code and suggest changes to improve it. They will be much more receptive to your feedback if they understand your thought process and are accepting recommendations, rather than following commands. They also may have done it a certain way intentionally, and framing it as a suggestion promotes a constructive discussion, rather than opposition. # # ``` # BAD: Make model evaluation code its own module - too repetitive. # # BETTER: Make the model evaluation code its own module. This will simplify models.py to be less repetitive and focus primarily on building models. # # GOOD: How about we consider making the model evaluation code its own module? This would simplify models.py to only include code for building models. Organizing these evaluations methods into separate functions would also allow us to reuse them with different models without repeating code. # ``` # # **Tip: Keep your comments objective** # # Try to avoid using the words "I" and "you" in your comments. You want to avoid comments that sound personal to bring the attention of the review to the code and not to themselves. # # ``` # BAD: I wouldn't groupby genre twice like you did here... Just compute it once and use that for your aggregations. # # BAD: You create this groupby dataframe twice here. Just compute it once, save it as groupby_genre and then use that to get your average prices and views. # # GOOD: Can we group by genre at the beginning of the function and then save that as a groupby object? We could then reference that object to get the average prices and views without computing groupby twice. # ``` # # **Tip: Provide code examples** # # When providing a code review, you can save the author time and make it easy for them to act on your feedback by writing out your code suggestions. 
This shows you are willing to spend some extra time to review their code and help them out. It can also just be much quicker for you to demonstrate concepts through code rather than explanations. # # Let's say you were reviewing code that included the following lines: # ```python # first_names = [] # last_names = [] # # for name in enumerate(df.name): # first, last = name.split(' ') # first_names.append(first) # last_names.append(last) # # df['first_name'] = first_names # df['last_names'] = last_names # ``` # # ``` # BAD: You can do this all in one step by using the pandas str.split method. # # GOOD: We can actually simplify this step to the line below using the pandas str.split method. Found this on this stack overflow post: https://stackoverflow.com/questions/14745022/how-to-split-a-column-into-two-columns # ``` # ```python # df['first_name'], df['last_name'] = df['name'].str.split(' ', 1).str # ``` #
_notebooks/2020-05-25-04-Software-Engineering-Practices-Pt-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Homework 3 - Tryout pandas and numpy # # Copy this notebook. Rename it as: YOURNAME-Homework-panda-numpy-Sept-2017 # # with your name replacing yourname and replaced with the date you submit or to the last part. # # Do the homeworks problems below. # # Upload your completed jupyter notebook to your github site and send me the url via the elearning site as your homework submission. Please note that you need to do your own 3.4, and 3.5 if you share data and analysis from others then you cannot get more than a 3. # ## Homework 3.1 # # ### 3.1.a # Load the data from: http://opendata.dc.gov/datasets that I have include in this github # into a dataframe. ( The file has been is available in directory ./data/ccp_current_csv.csv ) import pandas as pd import numpy as np data = pd.read_csv("data/ccp_current_csv.csv") data # ### 3.1.a what is its shape and what does that mean? data.shape #it tells us that how many rows and columns in the data # ## Homework 3.2 # What are the number of rows in each 'QUADRANT' ? 
#465 q_id = data['QUADRANT'] data_newind = data.copy() data_newind.index = q_id data_newind.index.is_unique pd.Series(data_newind.index).value_counts() # ## Homework 3.3 - Array math demonstration # For two arrarys # # a= [1,2,3,4] type=float # # b= [5,6,7,8] type=float # # Peform the following array operations using numpy # ( show both operational use of numpy and functional (example addition operation => + vs addition function => numbpy.add() ) # # ### 3.3.1 addition a+b from numpy import * a = array([1,2,3,4]) b= array([5,6,7,8]) a=np.array([1,2,3,4],dtype = np.float) b=np.array([5,6,7,8],dtype = np.float) print a+b print np.add(a,b) np.add(a,b) # ### 3.3.2 subtraction a-b a-b np.subtract(a,b) # ### 3.3.3 multiplication a*b a*b np.multiply(a,b) # ### 3.3.4 divsion a/b a/b np.divide(a,b) # ### 3.3.5 modulo a%b a%b np.remainder(a,b) # ### 3.3.6 power a^b a**b np.power(a,b) # ## Homework 3.4 # Find your own data and load it into a dataframe crimedata=pd.read_csv('data/Chicago_2017.csv') print "The crime data of Chicago in 2017" crimedata # # Homework 3.5 # Provide an interesting analysis of the data columns ( frequency or averages ) #calcualte the number of crime in Chicago 2017,average crime number, frequence every 1,000 people print "There are %d crimes in year 2017"%(len(crimedata)) print "The average crime number in 2017 is %f"%(len(crimedata)/365.0) print "The population in Chicago is about 2.7 million, so the crime frequency every thousand people is:%f"%(len(crimedata)/2700.0) xs=crimedata['Primary Type'].value_counts() print("Top 3 types of crime:") print(xs[:3]) print("\nLeast 3 types of crime:") print(xs[-3:]) data1=crimedata.loc[:,['Primary Type','Description','Location Description','Arrest']] xs3=xs.sort_index() #sort all the crimes by name xs1=data1[data1.Arrest==True] #get sub series where arrest is true xs2=xs1['Primary Type'].value_counts() xs2=xs2.sort_index() #sort by name xs2/=xs3 xs2=xs2.sort_values(ascending=False) #sort by value,desending order 
print "Top ten arrest rate of crimes:" print xs2[:10] import matplotlib.pyplot as plt crimedata=pd.read_csv('data/Chicago_2017.csv',usecols=['Date','Domestic'],parse_dates=['Date']) month=crimedata.Date.dt.month #get month his=crimedata['Domestic'].groupby(month).sum() #get sum for 'Domestic' true and false his2=hour.groupby(month).size() #sum as a whole his2=his/his2 his2=his2.reset_index() his2.columns=['Month','Domestic Propotion'] print "This is a plot where the x-axis is Month and the y-axis is the proportion of crimes occuring that are domestic" # %matplotlib inline his2.plot(x='Month',y='Domestic Propotion',kind='line') print "The number of crime in each month:" crimedata.Date.dt.month.hist(bins=12) print "The number of crime in each hour:" crimedata.Date.dt.hour.hist(bins=24)
Homework3/XimingLI-HW3-panda-numpy-Sept-2018.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/> # # # MAT281 # ### Aplicaciones de la Matemática en la Ingeniería # + [markdown] slideshow={"slide_type": "slide"} # ## Módulo 02 # ## Clase 05: Análisis Exploratorio de Datos # + [markdown] slideshow={"slide_type": "slide"} # ## Objetivos # # * Conocer los tipos de datos más comunes # * Aprender a tratar con datos _sucios_ # + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false # ## Contenidos # * [Motivación](#motivation) # * [CSV](#csv) # * [JSON](#json) # * [Pickle](#pickle) # * [API](#api) # * [Análiss Exploratorio de Datos](#eda) # + [markdown] slideshow={"slide_type": "slide"} # <a id='motivation'></a> # ## Motivación # # Como ya hemos comentado en clases pasadas, hoy los datos son el centro de atención en el mundo tecnológico. Por lo mismo existe una amplia gama de tipos de datos lo cual conlleva a distintas formas de obtención y manipulación de estos datos. # # En la clase de hoy comenzaremos conociendo los tipos más comunes de datos para luego centrarnos en la obtención de datos a través de una __API__. Los ejemplos serán utilizando la API de DataChile. # - import os import numpy as np import pandas as pd # + [markdown] slideshow={"slide_type": "slide"} # <a id='csv'></a> # ## CSV # # Del inglés _Comma-Separated Values_, los archivos CSV utilizan comas (",") para separar valores y cada registro consiste de una fila. # # - Pros: # * Livianos. # * De fácil entendimiento. # * Editables usando un editor de texto. # - Contras: # * No está totalmente estandarizado (e.g. ¿Qué pasa si un valor tiene comas?) 
# * Son sensible al _encoding_ (es la forma en que se codifica un carácter). # # Pandas posee su propia función para leer csv: `pd.read_csv()`. # + # Documentación # # pd.read_csv? # - # ### Ejemplo de encoding incorrecto pd.read_csv(os.path.join('data', 'encoding_example.csv'), sep=',', encoding='GBK') # ### Ejemplo de encoding correcto pd.read_csv(os.path.join('data', 'encoding_example.csv'), sep=',', encoding='utf8') # + [markdown] slideshow={"slide_type": "slide"} # <a id='json'></a> # ## JSON # # Acrónimo de _JavaScript Object Notation_, utilizado principalmente para intercambiar datos entre una aplicación web y un servidor. # # - Pros: # * Livianos. # * De fácil entendimiento. # * Editables usando un editor de texto. # * Formato estandarizado. # - Contras: # * La lectura con pandas puede ser un poco complicada. # # Pandas posee su propia función para leer JSON: `pd.read_json()`. # + # # pd.read_json? # - # !head -n 10 data/json_example.json pd.read_json( os.path.join('data', 'json_example.json'), orient="columns" ).head() # + [markdown] slideshow={"slide_type": "slide"} # <a id='pickle'></a> # ## Pickle # # Es un módulo que implementa protocolos binarios de serialización y des-serialización de objetos de Python. # # * Pros # - Puede representar una inmensa cantidad de tipos de objetos de python. # - En un contexto de seguridad, como no es legible por el ser humano (representación binaria) puede ser útil para almacenar datos sensibles. # * Contras: # - Solo Python. # - Si viene de un tercero podría tener contenido malicioso. # # Pandas posee su propia función para leer pickles: `pd.read_pickle()`. # + # # pd.read_pickle? # - pd.read_pickle(os.path.join('data', 'nba.pkl')).head() # <a id='sql'></a> # ## SQL # # Conocimos las bases de datos relacionales SQL en elclases anteriores y como recordarás existe la función `pd.read_sql()`. # + # # pd.read_sql? 
# - import sqlite3 connector = sqlite3.connect(os.path.join('data', 'chinook.db')) pd.read_sql_query("select * from albums", con=connector).head() # + [markdown] slideshow={"slide_type": "slide"} # <a id='api'></a> # ## API # # ¿Has escuchado el término __API__? Fuera de todo tecnicismo, las APIs (_Application Programming Interface_) permiten hacer uso de funciones ya existentes en otro software (o de la infraestructura ya existente en otras plataformas) para no estar reinventando la rueda constantemente, reutilizando así código que se sabe que está probado y que funciona correctamente. Por ejemplo, cuando haces una compra online y utilizas WebPay. ¡Hay APIs en todos lados! # # ### Un buen ejemplo: Data Chile # # Desde la web de [Data Chile](https://es.datachile.io): # # - Ofrece una radiografía de las temáticas más importantes para el desarrollo del país. # - Ayuda a conocer tendencias y necesidades para el diseño e implementación de políticas públicas, programas de la sociedad civil, oportunidades de negocios y estrategias de marketing del sector privado. # - Colabora en la toma de decisiones tanto del sector público como privado. # - Integra información de más de 15 fuentes distintas pertenecientes a más de 10 organismos de Gobierno. # - Crea y combina visualizaciones interactivas. # - Piensa los datos como historias y no como archivos. # # - # #### Denme todos los datos! # DataChile posee una API de datos que entrega la información para construir los gráficos y generar todos los textos del sitio. Todos los detalles en este [link](https://es.datachile.io/about/api). # + from datachile import ChileCube client = ChileCube() cubes = client.get_cubes() # Por ejemplo el primer cube Mortalidad cubes[0] # - # Me encantaría que pudiésemos utilizar la API en el curso, pero actualmente (2019-10-15) está con problemas. 
# + import sys try: query = client.get( "exports", { "drilldowns": [ ["Date", "Year"], ["Destination Country", "Country", "Country"] ], "measures": ["FOB US"], "cuts": [ { "drilldown": ["Date", "Year"], "values": [2012, 2013, 2014] } ], "parents": True } ) except Exception as e: print(f"{type(e)}: {e}") # - # <a id='eda'></a> # ## Análisis Exploratorio de Datos # El análisis exploratorio de datos es una forma de analizar datos definido por <NAME> (E.D.A.: Exploratory data analysis) es el tratamiento estadístico al que se someten las muestras recogidas durante un proceso de investigación en cualquier campo científico. Para mayor rapidez y precisión, todo el proceso suele realizarse por medios informáticos, con aplicaciones específicas para el tratamiento estadístico. # ### Checklist en el análisis exploratorio de datos # # El análisis exploratorio de datos debería dar respuestas a las siguientes preguntas: # # 1. ¿Qué pregunta(s) estás tratando de resolver (o probar que estás equivocado)? # 2. ¿Qué tipo de datos tiene y cómo trata los diferentes tipos? # 3. ¿Qué falta en los datos y cómo los maneja? # 4. ¿Qué hacer con los datos faltantes, outliers o información mal inputada? # 5. ¿Se puede sacar más provecho a los datos ? # # + [markdown] toc-hr-collapsed=false # ### Ejemplo: Datos de terremotos # # El dataset `earthquakes.csv` contiene la información de los terremotos de los países durante el año 2000 al 2011. Debido a que la información de este dataset es relativamente fácil de trabajar, hemos creado un dataset denominado `earthquakes_contaminated.csv` que posee información contaminada en cada una de sus columnas. De esta forma se podrá ilustrar los distintos inconvenientes al realizar análisis exploratorio de datos. # - pd.read_csv(os.path.join("data", "earthquakes.csv")).head() earthquakes = pd.read_csv(os.path.join("data", "earthquakes_contaminated.csv")) earthquakes.head() # #### Variables # # * __Pais__: # - Descripción: País del devento sísmico. 
# - Tipo: _string_ # - Observaciones: No deberían encontrarse nombres de ciudades, comunas, pueblos, estados, etc. # * Año: # - Descripción: Año del devento sísmico. # - Tipo: _integer_ # - Observaciones: Los años deben estar entre 2000 y 2011. # * Magnitud: # - Descripción: Magnitud del devento sísmico medida en [Magnitud de Momento Sísmico](https://en.wikipedia.org/wiki/Moment_magnitude_scale). # - Tipo: _float_ # - Observaciones: Magnitudes menores a 9.6. # * Informacion: # - Descripción: Columna contaminante. # - Tipo: _string_ # - Observaciones: A priori pareciera que no entrega información a los datos. # A pesar que la magnitud es un _float_, el conocimiento de los datos nos da información relevante, pues el terremoto con mayor magnitud registrado a la fecha fue el de Valdivia, Chile el 22 de mayo de 1960 con una magnitud entre 9.4 - 9.6. # # __Los datos son solo bytes en el disco duro si no se le entregan valor y conocimiento.__ # ### 1. ¿Qué pregunta(s) estás tratando de resolver (o probar que estás equivocado)? # # A modo de ejemplo, consideremos que que queremos conocer la mayor magnitud de terremoto en cada país a lo largo de los años. # ### 2. ¿Qué tipo de datos tiene y cómo trata los diferentes tipos? # Por el conocimiento de los datos sabemos que `Pais` e `Información` son variables categóricas, mientras que `Año` y `Magnitud` son variables numéricas. # Utilizemos las herramientas que nos entrega `pandas`. earthquakes.info() earthquakes.describe(include="all").T earthquakes.dtypes # Todas las columnas son de tipo `object`, sospechoso. Además, algunas no tienen datos. # __Tip__: Típicamente se utilizan nombres de columnas en minúsculas y sin espacios. Un truco es hacer lo siguiente: earthquakes = earthquakes.rename(columns=lambda x: x.lower().strip()) earthquakes.head() # Se le aplicó una función `lambda` a cada nombre de columna! Puum! # ### 3. ¿Qué falta en los datos y cómo los maneja? # # No es necesario agregar más variables, pero si procesarla. 
# ### 4. ¿Qué hacer con los datos faltantes, outliers o información mal inputada? # # A continuación iremos explorando cada una de las columnas. for col in earthquakes: print(f"La columna {col} posee los siguientes valores únicos:\n {earthquakes[col].sort_values().unique()}\n\n") # * En la columna `año` se presentan las siguientes anomalías: # * Datos vacíos. # * Años sin importancia: Se ha establecido que los años de estudios son desde el año 2000 al 2011. # * Nombres mal escritos: en este caso sabemos que 'dos mil uno' corresponde a '2001'. # * En la columna `pais` se presentan las siguientes anomalías: # * Datos vacíos. # * Ciudades, e.g. _arica_. # * Países mal escritos e.g. _shile_. # * Países repetidos pero mal formateados, e.g. _Turkey_. # * Cruce de información, e.g. _Iran, 2005 Qeshm earthquake_. # * En la columna `magnitud` se presentan las siguientes anomalías: # * Datos vacíos. # * Cruce de información, e.g. _2002-Tanzania-5.8_. # * Valores imposibles, e.g. _9.7_. # * La columna `informacion` realmente no está entregando ninguna información valiosa al problema. # Partamos por eliminar la columna `informacion`. eqk = earthquakes.drop(columns="informacion") # A veces es importante no sobrescribir el dataframe original para realizar análisis posteriores. eqk.head() # Respecto a la columna `año`, corregir estos errores no es difícil, pero suele ser tedioso. Aparte que si no se realiza un correcto análisis es posible no detectar estos errores a tiempo. Empecemos con los registros nulos. eqk.loc[lambda x: x["año"].isnull()] # Veamos el archivo # ! sed -n "226,228p" data/earthquakes_contaminated.csv # Toda la información está contenida en una columna! # Para editar la información usaremos dos herramientas: # * Los métodos de `str` en `pandas`, en particular para dividir una columna. # * `loc` para asignar los nuevos valores. 
# The rows whose year is null actually carry all their information packed
# into the "magnitud" column as "year-country-magnitude"; split it apart.
eqk.loc[lambda x: x["año"].isnull(), "magnitud"].str.split("-", expand=True).values

# Assign the split values back into the three columns of those rows.
eqk.loc[lambda x: x["año"].isnull(), :] = eqk.loc[lambda x: x["año"].isnull(), "magnitud"].str.split("-", expand=True).values

eqk.loc[[225, 226]]

# Now the records that cannot be converted to numeric. Demonstrate that the
# conversion is not yet possible.
# FIX: `np.int` was a deprecated alias of the builtin `int` and was removed in
# NumPy 1.24 (it raised AttributeError before `astype` even ran, hiding the
# ValueError this cell is meant to show). The builtin keeps the intended demo.
try:
    eqk["año"].astype(int)
except Exception as e:
    print(e)

eqk["año"].str.isnumeric().fillna(False)

eqk.loc[lambda x: ~ x["año"].str.isnumeric()]

# Inspect the value that needs replacing.
eqk.loc[lambda x: ~ x["año"].str.isnumeric(), "año"].iloc[0]

# Replacing is very easy!
eqk["año"].str.replace("dos mil uno", "2001")

# To assign it in the dataframe it is enough to do:
eqk["año"] = eqk["año"].str.replace("dos mil uno", "2001").astype(int)

# The chained form would be:

# +
# eqk["año"] = eqk.assign(año=lambda x: x["año"].str.replace("dos mil uno", "2001").astype("int"))
# -

eqk.dtypes

# Finally, keep only the years of interest:
eqk = eqk.query("2000 <= año <= 2011")

# Proceeding analogously with the `magnitud` column.
eqk.loc[lambda x: x["magnitud"].isnull()]

# There is not much to do with these values; for now we do not impute
# anything and simply discard them.
eqk = eqk.loc[lambda x: x["magnitud"].notnull()]

# FIX: `np.float` removed in NumPy 1.24 — use the builtin `float` (identical
# behavior; they were aliases). Also narrowed the bare `except:` so that
# e.g. KeyboardInterrupt is not swallowed.
try:
    eqk["magnitud"].astype(float)
    print("Ya es posible transformar la columna a float.")
except Exception:
    print("Aún no es posible transformar la columna a float.")

eqk = eqk.astype({"magnitud": float})

eqk.dtypes

eqk.magnitud.unique()

eqk.query("magnitud < 0 or 9.6 < magnitud")

eqk = eqk.query("0 <= magnitud <= 9.6")

eqk.query("magnitud < 0 or 9.6 < magnitud")

# Finally, the `pais` column. We start with the wrong names; these can be
# mapped directly.
map_paises = {"arica": "Chile", "shile": "Chile", "Iran, 2005 Qeshm earthquake": "Iran"}
eqk["pais"].map(map_paises).fillna(eqk["pais"])

# To edit it in the dataframe it is enough to do an `assign`.
eqk = eqk.assign(pais=lambda x: x["pais"].map(map_paises).fillna(x["pais"])) # Ahora formatearemos los nombres, pasándolos a minúsculas y quitando los espacios al principio y final de cada _string_. Y ahabíamos hablado del ejemplo de _Turkey_. eqk.loc[lambda x: x["pais"].apply(lambda s: "Turkey" in s), "pais"].unique() # Chaining method eqk = eqk.assign(pais=lambda x: x["pais"].str.lower().str.strip()) eqk.loc[lambda x: x["pais"].apply(lambda s: "turkey" in s), "pais"].unique() # Nota que no hay países con valores nulos porque ya fueron reparados. eqk.query("pais.isnull()") earthquakes.query("pais.isnull()") # ### 5. ¿Se puede sacar más provecho a los datos ? # No es posible crear variables nuevas o algo por el estilo, ya se hizo todo el procesamiento necesario para cumplir las reglas de negocio. earthquakes.shape eqk.shape # ### Finalmente, responder la pregunta. # # Como es un método de agregación podríamos simplemente hacer un `groupby`. eqk.groupby(["pais", "año"])["magnitud"].max() # Sin embargo, en ocasiones, una tabla __pivoteada__ es mucho más explicativa. eqk.pivot_table( index="pais", columns="año", values="magnitud", aggfunc="max", fill_value="" ) # ¿Notas las similitudes con `groupby`? Ambos son métodos de agregación, pero retornan formas de la matriz distintas. # Sin embargo, esto se vería mucho mejor con una visualización, que es lo que veremos en el próximo módulo. # + import altair as alt alt.themes.enable('opaque') alt.Chart( eqk.groupby(["pais", "año"])["magnitud"].max().reset_index() ).mark_rect().encode( x='año:O', y='pais:N', color='magnitud:Q' ) # - # ### Conclusión del caso # # * El análisis exploratorio de datos (EDA) es una metodología que sirve para asegurarse de la calidad de los datos. # * A medida que se tiene más expertice en el tema, mejor es el análisis de datos y por tanto, mejor son los resultados obtenidos. # * No existe un procedimiento estándar para realizar el EDA, pero siempre se debe tener claro el problema a resolver. 
# # Es importante tener en mente lo que se quiere resolver, el cómo, el motivo y las posibles limitancias que puede tener la muestra obtenida (datos perdidos, ouliers, etc.). #
Clases/m02_data_analysis/m02_c05_eda/m02_c05_eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Sustainable Software Development, block course, March 2021** # *Scientific Software Center, Institute for Scientific Computing, Dr. <NAME>* # # # Analysis of the data # # Imagine you perform a "measurement" of some type and obtain "scientific data". You know what your data represents, but you have only a vague idea how different features in the data are connected, and what information you can extract from the data. # # You would start first with going through the data, making sure your data set is complete and that the result is reasonable. Imagine this already happened. # # In the next step, you would inspect your data more closely and try to identify structures. That is the step that we are focusing on in this unit. # # In the `data` folder, you will find several data files (`*.t` and `*.dat`). These are data files generated through some "new approach" that hasn't been used in your lab before. No previous analysis software exists, and you are going to establish a protocol for this "new approach" and "publish your results". # # The data can be grouped into two categories: # 1. data to be analyzed using statistical methods; # 2. data to be analyzed using numerical methods. # # In your hypothetical lab, you are an "expert" in one particular "method", and your co-worker is an "expert" in the other. Combined these two methods will lead to much more impactful results than if only one of you analyzed the data. Now, the task in this course is to be solved collaboratively with your team member working on one of the analysis approaches, and you working on the other. You will both implement functionality into the same piece of "software", but do so collaboratively through git. 
# # As you do not know yet which analysis is most meaningful for your data, and how to implement it, you will start with a jupyter notebook. You and your team member will work on the same notebook that will be part of a github repository for your project. This is the task for today. Discuss with your team members who will work on the statistical and who on the numerical analysis. # ## Step 1 # # Generate a github repository with the relevant files. # ## Step 2 # # Clone the repository to your local machine. # ## Step 3 # # Start working on task 1 for your analysis approach. # ## Step 4 # # Create your own branch of the repository and commit your changes to your branch; push to the remote repository. # ## Step 5 # # Open a `pull request` so your team member can review your implementation. Likewise, your team member will ask you to review theirs. # ## Step 6 # # Merge the changes in your branch into `main`. Resolve conflicts. # ## Step 7 # # Repeat working on task; committing and pushing to your previously generated branch or a new branch; open a pull request; merge with main; until you have finished all the tasks in your analysis approach. Delete obsolete branches. # # Start of the analysis notebook # # **Author : Your Name** # *Date : The date you started working on this* # *Affiliation : The entity under whose name you are working on this* # # Place the required modules in the top, followed by required constants and global functions. # required modules import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sb # constants and global functions threshv = 1.0e-5 filenames = 'efield.t', 'expec.t', 'npop.t', 'nstate_i.t', 'table.dat' filedir = '../../data/' # + # reading of the data files # - # # Statistical analysis # # Find correlations in the data sets. Analyse the data statistically and plot your results. # # Here we would want to do everything with pandas and leave the data in a dataframe. 
The files that are relevant to you are `expect.t`, `npop.t` and `table.dat`. # ### Task 1: Read in expec.t and plot relevant data # + # read and plot expec.t # I/O module def readfile(filepath, mode, separator='\t', datatype=float, skiprows=1): """Reads a given (csv) file with the indicated separator. Args: filepath (str): path to the file including the filename mode (str): indicates if data should be read into a Pandas dataframe ('df') or into a NumPy array ('npa') separator (str): optional, separator used in the file (e.g. '\t'), can also be a regular expression like ' | | ' datatype (type): optional, skiprows (int): optional Returns: dataframe, array: A structured data object, either Pandas dataframe or NumPy array """ if mode == 'df': data = pd.read_table(filepath, separator, engine='python') if mode == 'npa': data = np.loadtxt(filepath, dtype=datatype, skiprows=skiprows) return data df_expect = readfile(filedir+filenames[1], 'df', ' ') fig, ax = plt.subplots(2,figsize=(6,9)) fig.suptitle('Linear scale above, logarithmic scale below') for i in range(len(df_expect.keys())): ax[0].plot(df_expect[df_expect.keys()[i]], label=df_expect.keys()[i]) ax[0].legend() ax[1].plot(df_expect) ax[1].set_yscale('symlog') # - # We can discard the entries norm, \<x>, and \<y> as these are mostly constant. # + # eliminate columns based on the variance: if the variance of the values # in a column is below a given threshold, that column is discarded #print(df_expect.var()) def filterdata(data, treshhold=0, keeplist=[]): """Filters data based on variance. 
If variance is below a given threshold, column is discarded Arguments: data: data to be filtered, as dataframe treshhold: xx keeplist: optional, a list with column names to keep """ for i in data.keys(): if i in keeplist: continue if data[i].var() <= treshhold: print('Removing column: {}'.format(i)) data = data.drop(i, axis=1) return data print(filterdata(df_expect, threshv).var()) d = {'col1': [1, 2], 'col2': [3, 4]} test_df = pd.DataFrame(data=d) #print(filterdata(test_df, 0.4).var()) test1 = filterdata(test_df,0.9) print(len(test1.columns)) # - # ### Task 2: Create plots of the relevant data and save as .pdf. # + # create plots fig.savefig('../pdfPlots/expect.pdf',format='pdf') # - # ### Task 3: Read in file `npop.t` and analyze correlations in the data # + # read in npop.t df_npop = readfile(filedir+filenames[2], 'df',' | | ') # + # discard all columns with variance below a set threshold - we can consider them as constant df_npopfiltered = filterdata(df_npop, threshv, ['time']) print(df_npopfiltered.var()) # - # Plot the remaining columns. Seaborn prefers "long format" (one column for all measurement values, one column to indicate the type) as input, whereas the cvs is in "wide format" (one column per measurement type). # + # plot ideally with seaborn npop_longFormat = pd.melt(df_npopfiltered, id_vars=['time']) sb.lineplot(x='time', y='value', hue='variable', data = npop_longFormat) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.show() # - # ## Quantify the pairwise correlation in the data # # - negative correlation: y values decrease for increasing x - large values of one feature correspond to small values of the other feature # - weak or no correlation: no trend observable, association between two features is hardly observable # - positive correlation: y values increase for decreasing x - small values of one feature correspond to small values of the other feature # # Remember that correlation does not indicate causation - the reason that two features are associated can lie in their dependence on same factors. # # Correlate the value pairs using Pearson's $r$. Pearson's $r$ is a measure of the linear relationship between features: # # $r = \frac{\sum_i(x_i − \bar{x})(y_i − \bar{y})}{\sqrt{\sum_i(x_i − \bar{x})^2 \sum_i(y_i − \bar{y})^2}}$ # # Here, $\bar{x}$ and $\bar{y}$ indicate mean values. $i$ runs over the whole data set. For a positive correlation, $r$ is positive, and negative for a negative correlation, with minimum and maximum values of -1 and 1, indicating a perfectly linear relationship. Weakly or not correlated features are characterized by $r$-values close to 0. # # Other measures of correlation that can be used are Spearman's rank (value pairs follow monotonic function) or Kendall's $\tau$ (measures ordinal association), but they do not apply here. You can also define measures yourself. # + # print the correlation matrix def correlatedata(data, corrmethod='pearson'): """Compute pairwise correlation of data with pandas.DataFrame.corr :param data: Data to be correlated. :type data: pandas dataframe (dict) :param corrmethod: Method to be used for correlation (pandas: pearson, kendall, spearman). :type corrmethod: string, optional """ return data.corr(method=corrmethod) df_npop_corr = correlatedata(df_npopfiltered, 'pearson') print(df_npop_corr) # - # The diagonal values tell us that each value is perfectly correlated with itself. 
We are not interested in the diagonal values and also not in the correlation with time. We also need to get rid of redundant entries. Finally, we need to find the value pairs that exhibit the highest linear correlation. We still want to know if it is positive or negative correlation, so we cannot get rid of the sign. # + # get rid of time column, lower triangular and diagonal entries of the correlation matrix # sort the remaing values according to their absolute value, but keep the sign # drop time column df_npop_corr = df_npop_corr.drop(['time'], axis=1) #print(df_npop_corr) # copied from solution for unit1 def get_correlation_measure(df): drop_values = set() # an unordered collection of items cols = df.columns # get the column labels print(cols) for i in range(0, df.shape[1]): for j in range(0, i+1): # get rid of all diagonal entries and the lower triangle drop_values.add((cols[i], cols[j])) print(drop_values) return drop_values drop_vals = get_correlation_measure(df_npop_corr) # get rid of lower triangular and diagonal entries corr2 = df_npop_corr.corr().unstack() # pivot the correlation matrix corr2 = corr2.drop(labels=drop_vals).sort_values(ascending=False, key=lambda col: col.abs()) # sort by absolute values but keep sign display(corr2) # - # Note that the entries in the left column are not repeated if they do not change from the row above (so the fourth feature pair is MO3 and MO6). # ### Task 4: Print the resulting data to a file # + # write to file def writefile(data, filepath, encoding='utf-8', header=False): """Writes a given dataframe to a csv file. 
Arguments: xxx """ data.to_csv(filepath, encoding=encoding, header=header) writefile(corr2, filedir+'npop_processed.csv') #corr2.to_csv(filedir+'npop_processed.csv', encoding='utf-8', header=False) # - # ### Task 5: Calculate the Euclidean distance (L2 norm) for the vectors in `table.dat` # # # The Euclidean distance measures the distance between to objects that are not points: # # $d(p,q) = \sqrt{\left(p-q\right)^2}$ # # In this case, consider each of the columns in table.dat as a vector in Euclidean space, where column $r(x)$ and column $v(x)$ denote a pair of vectors that should be compared, as well as $r(y)$ and $v(y)$, and r(z) and v(z). # # (Background: These are dipole moment components in different gauges, the length and velocity gauge.) # + # read in table.dat - I suggest reading it as a numpy array #arr = np.loadtxt(filedir+filenames[4],dtype=float,skiprows=1) arr = readfile(filedir+filenames[4],'npa') # replace the NaNs by zero #arr[np.where(np.isnan(arr))] = 0. table_vals = np.nan_to_num(arr) # - # Now calculate how different the vectors in column 2 are from column 3, column 4 from column 5, and column 6 from column 7. 
# +
# calculate the Euclidean distance
def euclidean_distance(list_ref, list_comp, vectors):
    """Euclidean (L2) distance between pairs of *columns* of `vectors`.

    Args:
        list_ref: column indices of the reference vectors
        list_comp: column indices of the vectors to compare against
        vectors: 2D array whose columns are the vectors to be compared

    Returns:
        1D numpy array with one distance per (ref, comp) column pair.
    """
    distances = np.zeros(len(list_ref))
    for i in range(len(list_ref)):
        # BUG FIX: the vectors live in the *columns* of table.dat
        # (column r(x) vs column v(x), etc. — see the task description),
        # so index with [:, col]; the previous `vectors[row]` compared
        # two arbitrary rows of the table instead.
        distances[i] = np.linalg.norm(vectors[:, list_comp[i]] - vectors[:, list_ref[i]])
    return distances

#dx = np.sqrt((arr[:,2]-arr[:,3])**2)
#dy = np.sqrt((arr[:,4]-arr[:,5])**2)
#dz = np.sqrt((arr[:,6]-arr[:,7])**2)

# +
# plot the result and save to a .pdf
out_dist = euclidean_distance([2,4,6],[3,5,7],table_vals)
print(out_dist)
x = range(0,len(out_dist))
plt.bar(x,out_dist)
plt.xticks(x, ('x', 'y', 'z'))
# BUG FIX: save *before* plt.show() — show() finalizes and clears the
# current figure, so saving afterwards wrote an empty page.
plt.savefig("euclideandistance.pdf")
plt.show()

#plt.plot(dx,label="column 2,3")
#plt.plot(dy,label="column 4,5")
#plt.plot(dz,label="column 6,7")
#plt.xlabel("table index")
#plt.ylabel("euclidean distance")
#plt.legend()

# +
# print the result to a file
# (the figure is already saved above, before plt.show() cleared it)
# -

# # Numerical analysis
#
# Analyze the data using autocorrelation functions and discrete Fourier transforms. Plot your results.

# +
# define some global functions
# -

# ### Task 1: Read in `efield.t` and Fourier-transform relevant columns

# +
# read and plot efield.t
efield = np.loadtxt(filedir+filenames[0],dtype=float,skiprows=1)
plt.plot(efield[:,0],efield[:,1],label="efield x")
plt.plot(efield[:,0],efield[:,2],label="efield y")
plt.plot(efield[:,0],efield[:,3],label="efield z")
plt.xlabel("time")
plt.ylabel("field value")
plt.legend()
# -

# Here we are interested in column 2 since the others are constant.

# +
# discard the columns with variance below threshold - these are considered constant
print(np.var(efield[:,1]))
print(np.var(efield[:,2]))
print(np.var(efield[:,3]))
efield = np.delete(efield,[1,3],1)

# +
# discrete Fourier transform of the remaining column: You only need the real frequencies
efield_fft = np.fft.fft(efield[:,1])
freq = np.fft.fftfreq(efield.shape[0],d=(efield[1,0]-efield[0,0]))
# -

# ### Task 2: Generate a plot of your results to be saved as pdf.
# + # plot your results plt.plot(freq,efield_fft.real,label="fft real part") plt.xlabel("frequency f") plt.ylabel("P(f)") plt.legend() plt.savefig('efield_fft.pdf') # - # ### Task 3: Calculate the autocorrelation function from nstate_i.t # The autocorrelation function measures how correlated subsequent vectors are with an initial vector; ie. # # $\Psi_{corr} = \langle \Psi(t=0) | \Psi(t) \rangle = \int_0^{tfin} \Psi(0)^* \Psi(t) dt$ # # Since we are in a numerical representation, the integral can be replaced with a sum; and the given vectors are already normalized. # read in as numpy array nstate_i = np.loadtxt(filedir+filenames[3],dtype=float,skiprows=1) # + # store the time column (column 0) in a vector and drop from array def get_time_and_update_data(data): time = data[:, 0] new_data = np.delete(data, 0, 1) return time, new_data time, update_nstate_i = get_time_and_update_data(nstate_i) # + # correct the data representation: this is in fact a complex matrix # the real part of each matrix column is contained in numpy array column 0, 2, 4, 6, ... # the imaginary part of each matrix column is contained in numpy array column 1, 3, 5, 7, ... # convert the array that was read as dtype=float into a dtype=complex array def get_complex_array(array): complex_array = array[:, 0::2] + 1j * array[:, 1::2] return complex_array update_nstate_i = get_complex_array(update_nstate_i) # + # for the autocorrelation function, we want the overlap between the first vector at time 0 and all # subsequent vectors at later times - the sum of the product of initial and subsequent vectors for each time step def get_autocorrelation(array): auto_corr_matrix = [] for i in range(1, len(array)): corr = np.multiply(array[0, :], array[i, :]) auto_corr_matrix.append(np.array(corr)) auto_correlation = np.sum(auto_corr_matrix, axis=1) return auto_correlation auto_correlation = get_autocorrelation(update_nstate_i) # - # ### Task 4: Generate a plot of your results to be saved as pdf. 
# + # plot the autocorrelation function - real, imaginary and absolute part plt.subplot(1, 3, 1) plt.gca().set_title('Real part') plt.plot(time[1:], auto_correlation.real) plt.subplot(1, 3, 2) plt.gca().set_title('Imag part') plt.plot(time[1:], auto_correlation.imag) plt.subplot(1, 3, 3) plt.gca().set_title('Abs part') plt.plot(time[1:], np.abs(auto_correlation)) plt.show() # - # ### Task 5: Discrete Fourier transform of the autocorrelation function # + # discrete Fourier-transform the autocorrelation function - now we need all frequency components, # also the negative ones fourier = np.fft.fft(auto_correlation) # - # ### Task 6: Generate a plot of your results to be saved as pdf. # + # plot the power spectrum (abs**2) def get_power_spectrum(fourier): return abs(fourier)**2 plt.plot(get_power_spectrum(fourier)) plt.title('Power spectrum') plt.show() # -
src/team14-software/notebook_unit2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# model implementation
input_size = 3   # number of input features
layers = [4, 3]  # number of neurons in the 1st and 2nd hidden layers
output_size = 2  # number of output classes

import numpy as np

def softmax(a):
    """Row-wise softmax: exponentiate and normalize each row to sum to 1.

    Args:
        a: 2D array of shape (n_samples, n_classes)

    Returns:
        Array of the same shape whose rows are probability distributions.
    """
    e_pa = np.exp(a)  # element-wise exponential (vector/matrix)
    ans = e_pa / np.sum(e_pa, axis=1, keepdims=True)
    return ans

# +
a = np.array([[10, 10],
              [20, 20]])
a_ = softmax(a)
print(a_)

# +
import numpy as np

class NeuralNetwork:
    """Parameter container for a 3-layer fully connected network.

    Holds weight matrices W1..W3 and bias rows b1..b3 in `self.model`;
    the forward pass is implemented by the module-level `forward` below.
    """

    def __init__(self, input_size, layers, output_size):
        np.random.seed(0)  # fixed seed for reproducible initialization
        model = {}
        model['W1'] = np.random.randn(input_size, layers[0])  # weight matrix
        model['b1'] = np.zeros((1, layers[0]))
        model['W2'] = np.random.randn(layers[0], layers[1])  # weight matrix
        model['b2'] = np.zeros((1, layers[1]))
        model['W3'] = np.random.randn(layers[1], output_size)  # weight matrix
        model['b3'] = np.zeros((1, output_size))
        self.model = model

# +
import numpy as np

W1 = np.random.randn(input_size, layers[0])  # weight matrix
print(W1)  # matrix of size input_size x layers[0] (here 3x4)

# +
def forward(self, x):
    """Forward pass: two tanh hidden layers followed by a softmax output.

    Args:
        self: an object exposing a `model` dict with W1..W3 and b1..b3
        x: input array of shape (n_samples, input_size)

    Returns:
        Softmax class probabilities of shape (n_samples, output_size).
    """
    W1, W2, W3 = self.model['W1'], self.model['W2'], self.model['W3']
    b1, b2, b3 = self.model['b1'], self.model['b2'], self.model['b3']

    z1 = np.dot(x, W1) + b1
    a1 = np.tanh(z1)

    z2 = np.dot(a1, W2) + b2
    a2 = np.tanh(z2)  # BUG FIX: was `mp.tanh` — NameError, `mp` is undefined

    z3 = np.dot(a2, W3) + b3
    y_ = softmax(z3)
    return y_  # BUG FIX: the computed probabilities were never returned
ml_cb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # How to set up your system for deep learning

# ### First set up a Python distribution and package manager on your system

# *There are probably other ways to set up your system, but I prefer this way.*
#
# **Install conda**
#
# First install [conda](https://www.anaconda.com/)
# or [miniconda](https://docs.conda.io/en/latest/miniconda.html)
#
# Conda comes with Python. Choose Python 3.x for installation. If you are a troubled man who likes a difficult life, install Python 2.x
#
# Then I would suggest setting up a conda environment with all the necessary libraries installed. This is not a must-have, but good practice. The environment separates all package installations from other environments. In some cases specific library versions are needed which would not work in other cases. So as not to break your complete system, an environment shields you in these specific cases.
#
# One example is Python. For TensorFlow and Keras only Python 3.6 is supported (to date), while the latest Python is 3.7 (at the moment).
#
# **Create the environment:**
#
# `conda create --name yourenvname python=3.6`
#
# or
#
# `conda create -n yourenvname python=3.6`
#
# **Activate the environment:**
#
# `WINDOWS: activate yourenvname`
#
# `LINUX, macOS: source activate yourenvname`
#
# **Some other useful commands in conda**
#
# Make an exact copy of an environment: `conda create --clone py35 --name py35-2`
#
# List all packages and versions installed in the active environment: `conda list`
#
# conda [cheat sheet](https://docs.conda.io/projects/conda/en/4.6.0/_downloads/52a95608c49671267e40c689e0bc00ca/conda-cheatsheet.pdf)
#
# ***
# ### Install libraries for Python data handling and deep learning

# To install packages for Anaconda, open the Anaconda prompt.
# Activate the environment you want to install to.
#
# The most common and needed libraries are:

conda install -c conda-forge tensorflow # deep learning platform
#conda install -c conda-forge keras # API for tensorflow and other deep learning libraries
conda install -c anaconda pandas # data handling library
conda install -c anaconda scikit-learn # machine learning library
conda install -c anaconda matplotlib # plotting library
conda install -c conda-forge python-utils # collection of useful python functions
conda install -c anaconda jupyter # The notebook environment you are looking at right now

# If you want to be able to run your code on a GPU instead of the CPU, further steps have to be done
#
# ****
# # How to set up your device for GPU usage
# <div class="alert alert-block alert-info">
# You will have to have an Nvidia card
# </div>
# **The easiest way** is to install tensorflow-gpu via `conda install tensorflow-gpu`
#
# then install the CUDA drivers using `conda install -c anaconda cudatoolkit`
#
# I would suggest installing that again in a conda environment so as not to mix things. If I am not mistaken, tensorflow and tensorflow-gpu cannot be installed at the same time. This is the optimal example for using different environments.
#
# Create a new one or clone the one from before
#
# `conda create -n tensorflow_gpuenv tensorflow-gpu`
#
# `conda activate tensorflow_gpuenv`
#
# To check if you are actually running on the GPU use
#
# `sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))`
#
# In Keras, check if Keras sees the GPUs:
#
# ` from keras import backend as K
# K.tensorflow_backend._get_available_gpus()`
#
# ***
# **Otherwise** if you like pain, do it via separate installs. You will have to install CUDA.
The Nvidia-specific driver/product.
#
# Download version 9.0, as it is compatible with TensorFlow and CNTK (the Microsoft version) and is compatible with pip.
#
# [CUDA 9.0 download link](https://developer.nvidia.com/cuda-90-download-archive)
#
# You have to install the base version, and can then also install one of the patches.
#
# Then also install cuDNN from [here](https://developer.nvidia.com/rdp/cudnn-archive). Use the version matching the CUDA version you installed before (9.0).
#
# Extract the download and copy it into the CUDA 9.0 folder.
#
# A recent video to follow can be watched [here](https://www.youtube.com/watch?v=QU6CaGyda7Q)
#
#
#
#
#
#
#
Getting started.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ahmedhisham73/basics_dnn/blob/master/training_cifar10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="dQK2h8XgW9Jj" colab_type="code" outputId="839f2c97-a6d7-42c3-ce03-c026533294e0" colab={"base_uri": "https://localhost:8080/", "height": 1000} from keras import Sequential, optimizers from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout from keras.utils import to_categorical from keras.datasets import cifar10 import numpy as np # Let's use the CIFAR-10 dataset (training_images, training_labels), (test_images, test_labels) = cifar10.load_data() import matplotlib.pyplot as plt plt.imshow(training_images[0]) print(training_labels[0]) print(training_images[0]) # + id="YcstuZPWXyZM" colab_type="code" colab={} training_images = training_images / 255.0 test_images = test_images / 255.0 # + [markdown] id="N7qp6LIYX_I_" colab_type="text" # lets define our model, 3 layers , input layer,1 hidden, 1 softmax # + id="A1j04krcYxp3" colab_type="code" colab={} import tensorflow as tf model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),tf.keras.layers.Dense(256,activation=tf.nn.relu),tf.keras.layers.Dense(10,activation=tf.nn.softmax)]) # + id="bCS6lQApY7fk" colab_type="code" outputId="de0dd01e-7257-497d-e5b8-0052efed32f1" colab={"base_uri": "https://localhost:8080/", "height": 729} model.compile(optimizer = tf.train.AdamOptimizer(), loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=20) # + id="zVrDioQDaKgt" colab_type="code" outputId="5bf142e9-6960-46ed-c87f-5559baf68cc3" colab={"base_uri": 
"https://localhost:8080/", "height": 86} #now lets evaluate our model model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) # + id="a8VGcP5faO2Y" colab_type="code" colab={} predict ='/content/test2.jpg' # + [markdown] id="MuiPaOXva1aj" colab_type="text" # let us use cifar-10 for image prediction # + id="sIbVHk1Ka6iY" colab_type="code" outputId="cdb47897-12c7-49b5-c4b8-842ff3599108" colab={"base_uri": "https://localhost:8080/", "height": 304} # example of loading the cifar10 dataset from matplotlib import pyplot from keras.datasets import cifar10 # load dataset (trainX, trainy), (testX, testy) = cifar10.load_data() # summarize loaded dataset print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape)) print('Test: X=%s, y=%s' % (testX.shape, testy.shape)) # plot first few images for i in range(9): # define subplot pyplot.subplot(330 + 1 + i) # plot raw pixel data pyplot.imshow(trainX[i]) # show the figure pyplot.show() # + [markdown] id="czpkiPe4bluE" colab_type="text" # before dealing with cifar-10 dataset # we know that the images are all pre-segmented (e.g. each image contains a single object), that the images all have the same square size of 32×32 pixels, and that the images are color. Therefore, we can load the images and use them for modeling almost immediately. 
# + id="xiR3HhPdbvGA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 368} outputId="69e3859c-9380-4615-9a1d-e2aa13261007" from keras.datasets import cifar10 from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Conv2D from keras.layers import MaxPooling2D from keras.layers import Dense from keras.layers import Flatten from keras.optimizers import SGD # load train and test dataset def load_dataset(): # load dataset (trainX, trainY), (testX, testY) = cifar10.load_data() # one hot encode target values trainY = to_categorical(trainY) testY = to_categorical(testY) return trainX, trainY, testX, testY # scale pixels def prep_pixels(train, test): # convert from integers to floats train_norm = train.astype('float32') test_norm = test.astype('float32') # normalize to range 0-1 train_norm = train_norm / 255.0 test_norm = test_norm / 255.0 # return normalized images return train_norm, test_norm # define cnn model def define_model(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3))) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Flatten()) model.add(Dense(128, activation='relu', kernel_initializer='he_uniform')) model.add(Dense(10, activation='softmax')) # compile model opt = SGD(lr=0.001, momentum=0.9) model.compile(optimizer=opt, loss='categorical_crossentropy', 
metrics=['accuracy']) return model # run the test harness for evaluating a model def run_test_harness(): # load dataset trainX, trainY, testX, testY = load_dataset() # prepare pixel data trainX, testX = prep_pixels(trainX, testX) # define model model = define_model() # fit model model.fit(trainX, trainY, epochs=100, batch_size=64, verbose=0) # save model model.save('final_model.h5') # entry point, run the test harness run_test_harness() # + id="E8_QNxuzrosP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 885} outputId="85b4963a-cc0c-416e-accd-b23f3dad1c1d" # make a prediction for a new image. from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.models import load_model from IPython.display import Image Image('/content/cat.jpeg') # load and prepare the image def load_image(filename): # load the image img = load_img(filename, target_size=(32, 32)) # convert to array img = img_to_array(img) # reshape into a single sample with 3 channels img = img.reshape(1, 32, 32, 3) # prepare pixel data img = img.astype('float32') img = img / 255.0 return img # load an image and predict the class def run_example(): # load the image img = load_image('/content/cat.jpeg') # load model model = load_model('final_model.h5') # predict the class result = model.predict_classes(img) print(result[0]), print(img),Image('/content/cat.jpeg') # entry point, run the example run_example() # + [markdown] id="8FTrhli3sYXw" colab_type="text" # # 0: airplane # 1: automobile # 2: bird # 3: cat # 4: deer # 5: dog # 6: frog # 7: horse # 8: ship # 9: truck #
training_cifar10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="4alGnUPdJSsx"
import pandas as pd
import numpy as np
import os

# + id="8pZhe5k1JUWU"
from google.colab import drive
drive.mount('/content/gdrive/')

# + id="sVowaHfAJXAu"
os.chdir('/content/gdrive/My Drive/주분')

# + id="O-XoWwb3JaDM"
train_nonanswer = pd.read_csv("train_nonanswer_jo_sum_final.csv")
test_nonanswer = pd.read_csv("test_nonanswer_jo_sum.csv")

# + id="AHb6v6ImJA72"
# Drop the target ('Party') and the identifier ('USER_ID') from the features.
nonanswer_x = train_nonanswer.drop(['Party', 'USER_ID'], axis=1)
nonanswer_y = train_nonanswer['Party']

# + id="UYbH5crHJHBA"
# One-hot (dummy) encode the categorical feature columns.
nonanswer_x_d = pd.get_dummies(nonanswer_x)

# + id="-AM6jWUAJKQY"
# Dummy-encode the test set as well, and drop its identifier column.
test_nonanswer_d = pd.get_dummies(test_nonanswer)
test_nonanswer_d = test_nonanswer_d.drop(['USER_ID'], axis=1)

# + [markdown] id="yXpgIrWsJuXA"
#

# + id="OilbZIdkJdsV"
from lightgbm import LGBMClassifier as lgb
from bayes_opt import BayesianOptimization
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
# BUG FIX: this import was spelled "sklearn.model_seleciont", which raises
# ModuleNotFoundError before anything below can run.
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import cross_val_score

# + id="L8Y4kdqtI-1y"
# Hyper-parameters for the LGBM model. The commented-out entries are earlier
# tuning candidates kept for reference.
best_param = {
    'eval_metric': 'accuracy',
    'early_stopping_rounds': 20,
    'verbose': -1,
    'bagging_fraction': 0.0002,
    # 'colsample_bytree': 0,
    # 'feature_fraction': 0.7,
    # 'learning_rate': 0.003,
    # 'max_depth': -1,
    # 'min_child_weight': 40,
    # 'n_estimators': 900,
    # 'num_leaves': 30,
    # 'reg_alph': 0,
    # 'reg_lambda': 40,
    # 'subsample': 1,
    # 'max_bin': 800
}

# + id="lEBwIakFJir5"
# Hold out 15% of the training data as a validation set for early stopping.
train_X, val_X, train_y, val_y = train_test_split(nonanswer_x_d, nonanswer_y,
                                                  test_size=0.15,
                                                  train_size=0.85,
                                                  random_state=123)

model_grid = lgb(**best_param, random_seed=123)

# Train the model, monitoring the validation set.
model_grid.fit(train_X, train_y, eval_set=(val_X, val_y))

# + id="V6LAmRdZJ6yi"
pred_y = model_grid.predict(test_nonanswer_d)

# + id="XL_0Neg4KEr5"
# Build the submission frame: one prediction per test-set user.
user_id = list(test_nonanswer['USER_ID'])
submission_grid = pd.DataFrame({'USER_ID': user_id, 'Predictions': pred_y})

# + id="EWzDqjS5KMcQ"
submission_grid.to_csv('submission_grid.csv', index=False)

# + id="TDOHLJa8KRCT"
# !pip install shap

# + id="<KEY>"
# Shapley-value feature importances for the fitted model.
import shap

explainer = shap.TreeExplainer(model_grid)
shap_values = explainer.shap_values(test_nonanswer_d)
shap.initjs()
shap.summary_plot(shap_values[1], test_nonanswer_d, plot_type='bar')
2020-2 Theme Analysis/Modeling/LGBM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tflearn] # language: python # name: conda-env-tflearn-py # --- # # Plotting Cross-Validated Predictions # # 展示如何使用 cross_val_predict 将预测 errors 可视化展示. # # %load ../common_import.py import numpy as np import pandas as pd # %matplotlib inline import matplotlib.pyplot as plt from sklearn import datasets from sklearn.model_selection import cross_val_predict from sklearn import linear_model lr = linear_model.LinearRegression() boston = datasets.load_boston() # 将数据转成 DataFrame 方便查看 target = pd.DataFrame(boston.target) data = pd.DataFrame(boston.data) predicted = cross_val_predict(lr, data, target, cv=10) fig, ax = plt.subplots() ax.scatter(target, predicted, edgecolors=(0, 0, 0)) ax.plot([target.min(), target.max()], [target.min(), target.max()], 'k--', lw=4) ax.set_xlabel('Measured') ax.set_ylabel('Predicted') plt.show() target['predicted'] = predicted target = target.rename(columns={0:'target'}) target['error'] = target['target'] - target['predicted'] target.head() fig, ax = plt.subplots() ax.hist(target['error']) # + # TODO 数据 normalization
data_visualization/general/Plotting Cross-Validated Predictions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dreamegg/posting/blob/master/_notebooks/mnist_pt.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="iURhXZiPXo-7" colab_type="text" # Pytorch test on Colab # Exsample from https://github.com/pytorch/examples/blob/master/mnist/main.py # + id="hu3FdEM5Xm4w" colab_type="code" colab={} from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.optim.lr_scheduler import StepLR # + [markdown] id="4FNj2oGCY1Yl" colab_type="text" # 네트워크 정의 # + id="oOMM4TeYYB_8" colab_type="code" colab={} class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.dropout2 = nn.Dropout2d(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output # + id="4a_OwAHcYE-Z" colab_type="code" colab={} def train(args, model, device, train_loader, optimizer, epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: 
{:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) if args.dry_run: break # + id="HdPJ2HxPYI_Z" colab_type="code" colab={} def test(model, device, test_loader): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) # + id="WTH3R24vYNvZ" colab_type="code" colab={}
_notebooks/mnist_pt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# ## The pandas module - the basic tool for data processing

# + [markdown] slideshow={"slide_type": "slide"}
# ### Modules - Python libraries
#
# The language core is small; we extend it by **importing** (a large number of) existing **modules.**

# + slideshow={"slide_type": "slide"}
# Example: the name `sin` is not known yet, so this raises a NameError
sin(0.5)

# + slideshow={"slide_type": "fragment"}
import math  # type "import ma" and press Tab to see every module starting like that
print(type(math))  # a new type - class `module`; math is an object of that class
math.sin(0.5)  # this works - a qualified name: module plus a function inside it
# the dot is used to access functions and data of objects

# + slideshow={"slide_type": "fragment"}
# A bare sin(0.5) would still fail; we can import names to the "global" level
from math import sin, cos, pi
sin(pi/2)

# + [markdown] slideshow={"slide_type": "slide"}
# __The most important modules (for us) will be__
# - **`pandas`** - representation of (statistical) data as a table with named columns (sometimes rows too) and their processing
# - **`numpy`** - efficient work with numeric arrays (vectors, matrices, linear systems, ...); `pandas` itself would not work without `numpy`
# - **`scipy`** - mainly the `scipy.stats` submodule for statistical functions, probability distributions, hypothesis testing, ...
# - **`matplotlib`** - module for graphics and plotting (used behind the scenes by `pandas` as well).

# + [markdown] slideshow={"slide_type": "slide"}
#
# **Also useful are**
# - **`qgrid`** - nice tabular display of data with filtering and sorting (by columns)
# - **`folium`** - module for displaying maps
#
# **For building interactive applications** (not meaning Jupyter notebooks) we will use
# - **`dash`** - building and running web applications
# - **`plotly`** - highly interactive graphics in the web browser

# + [markdown] slideshow={"slide_type": "fragment"}
#
# For this notebook we use the following imports (`as` introduces an alias, so
# below we do not have to write `numpy`, just `np` ...)

# + slideshow={"slide_type": "slide"}
import numpy as np
import pandas as pd
import qgrid

# + [markdown] slideshow={"slide_type": "slide"}
# **The basic type for data representation in `pandas` is the `DataFrame`.** Think of it as a table with named
# columns. **Columns are the individual (measured) variables and rows are the measurements** (you need many of them to say anything about the data).
#
# ### Example.
# We have data on students' weight ("Hmot") and BMI (Body Mass Index) in the file `BMI.csv` (have a look, but do not edit it).
#
# The `pandas` module has many functions for reading data in various formats: Excel, CSV (Comma Separated Values),
# json, html, etc. We read our data into the variable `BMIdata`, check that it is a `pandas DataFrame`,
# and print it the "classic" way.

# + slideshow={"slide_type": "slide"}
BMIdata = pd.read_csv("BMI.csv")  # Tab-complete works for the function and the CSV file name
print("Premenna BMIdata ma typ", type(BMIdata))
BMIdata[:6]  # just the first few rows; use plain BMIdata for everything

# + [markdown] slideshow={"slide_type": "slide"}
# Behold - a table, the display of our `DataFrame BMIdata`.
#
# Every `Dataframe` has these three basic components:
# - **`columns`** - names (indexes) of the columns, usually strings
# - **`index`** - similar labels for the rows, often just the measurement numbers starting at zero
# - **`values`** - the measured data itself as a `numpy` array (type `ndarray`)

# + slideshow={"slide_type": "fragment"}
print("columns: ", BMIdata.columns)  # as always, use Tab completion
print("\nindex: ", BMIdata.index)
print("\nvalues:\n", BMIdata.values[:6])  # do not print everything...

# + [markdown] slideshow={"slide_type": "slide"}
# ### From here on we abbreviate `DataFrame` as `DF`.
#
# A very convenient tool for manipulating a `DF` (filtering and sorting by columns) is the `qgrid` module
# (backed by the [JavaScript library `SlickGrid`](https://github.com/mleibman/SlickGrid)).
#
# It hardly needs commentary: click the column names to sort and the "funnels" to filter.
#
# With this tool you can easily find, for example:
# - the minimum and maximum weight, and the min. and max. BMI
# - the rows of students with weight from 60 to 80 kg and (at the same time) BMI from 22 to 24.

# + slideshow={"slide_type": "slide"}
qgrid_tabulka = qgrid.show_grid(BMIdata)
qgrid_tabulka  # Jupyter knows how to display it

# + [markdown] slideshow={"slide_type": "slide"}
# ### Best of all, the edited (filtered) `DF` can be retrieved back via the `get_changed_df` method:

# + slideshow={"slide_type": "fragment"}
normalni = qgrid_tabulka.get_changed_df()
normalni

# + [markdown] slideshow={"slide_type": "slide"}
# ### Very important - how to filter a `DF` programmatically (understand this and you are well on the way to mastering `pandas`)
#
# What we just did with the mouse in the `qgrid` table is impractical when the data is large or the
# filtering more complex. We can always filter with boolean conditions on the column values of the `DataFrame`.
#
# We want only the **rows with weight from 60 to 80 kg and at the same time BMI from 22 to 24** (as done above with the mouse).

# + slideshow={"slide_type": "fragment"}
vyberHmot = (BMIdata['Hmot'] >= 60) & (BMIdata['Hmot'] <= 80)
vyberHmot[:8]  # print only the first 8 rows

# + [markdown] slideshow={"slide_type": "fragment"}
# **This selection builds a boolean `DF` with the value `True` exactly at the indexes where the weight
# condition holds (`&` is the element-wise boolean AND) and `False` elsewhere (the number of rows is the
# same as in the original `DF BMIdata`).**
#
# We build the BMI filter the same way.

# + slideshow={"slide_type": "slide"}
vyberBMI = (BMIdata['BMI'] >= 22) & (BMIdata['BMI'] <= 24)
# both selections must hold at once
print("Booleovska DF pre oba vybery, aby platili sucasne:\n")
vyberHmot & vyberBMI

# + [markdown] slideshow={"slide_type": "fragment"}
# Finally, **boolean indexing of the original DF selects only the rows where both criteria are
# satisfied** (by eye these are the rows with indexes 2, 3, 4, 6, 11, etc.):

# + slideshow={"slide_type": "slide"}
normalni = BMIdata[vyberBMI & vyberHmot]
normalni

# + [markdown] slideshow={"slide_type": "slide"}
# When working further with the data, the **groupby** function, which groups the data by a given key, can be useful.

# + slideshow={"slide_type": "fragment"}
zoskupeni = BMIdata.groupby('Hmot')  # group the data by weight
velkostne = zoskupeni.size()         # number of students per weight value (could be another criterion)
velkostne

# + slideshow={"slide_type": "slide"}
velkostne[velkostne.values > 1]  # only the weights shared by more than 1 student

# + slideshow={"slide_type": "slide"}
velkostne.sort_values(ascending=False, inplace=True)  # sort by student count
velkostne
# ascending=False (largest first); inplace=True modifies `velkostne` itself

# + slideshow={"slide_type": "slide"}
# BUG FIX: the original `len(velkostne == 2)` returned the length of the whole
# boolean series (i.e. the number of distinct weights), not the number of
# weights shared by exactly 2 students; summing the True values is the intended
# count.
(velkostne == 2).sum()  # number of weights that exactly 2 students have
Uvod/Pandas_spracovanie_dat.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Fit a Random Forest
# In this exercise, you'll train a Random Forest classifier to predict whether or not a text message is "spam". In order to train the classifier, you'll use [a dataset of SMS messages labeled as "spam" and "ham" (not spam)](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection). The predictions will be based on the counts of each word in the text message. Before using a Random Forest, see how well a simple Decision Tree model performs.

# +
# Import our libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Read in our dataset (tab-separated, no header row)
df = pd.read_table('SMSSpamCollection.dms',
                   sep='\t',
                   header=None,
                   names=['label', 'sms_message'])

# Fix our response value: encode "ham" as 0 and "spam" as 1
df['label'] = df.label.map({'ham': 0, 'spam': 1})

# Split our dataset into training and testing data
X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
                                                    df['label'],
                                                    random_state=1)

# Instantiate the CountVectorizer method
count_vector = CountVectorizer()

# Fit the training data and then return the (sparse) word-count matrix
training_data = count_vector.fit_transform(X_train)

# Transform testing data and return the matrix. Note we are not fitting the
# testing data into the CountVectorizer() - the vocabulary comes from the
# training data only.
testing_data = count_vector.transform(X_test)

# Instantiate our model
decision_tree = DecisionTreeClassifier()

# Fit our model to the training data
decision_tree.fit(training_data, y_train)

# Predict on the test data
predictions = decision_tree.predict(testing_data)

# Score our model
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
# -

# The simple Decision Tree appears to have worked reasonably well, but there is room for improvement. Notice that in order to train and test the model, we took the following steps:
#
# 1. **Import** the model
# 2. **Instantiate** the model
# 3. **Fit** the model on training data
# 4. **Test** the model on testing data
# 5. **Score** the model by comparing the predictions to the true values
#
# We'll do the same steps for the Random Forest model—but this time, you fill in the appropriate code!

# > **Step 1:** First `import` the [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) module.

# Import the Random Forest Classifier
# (the unused `make_classification` import was removed - this notebook never
# generates synthetic data)
from sklearn.ensemble import RandomForestClassifier

# > **Step 2:** Then, `instantiate` the classifier.

# Instantiate a RandomForestClassifier with
# 200 weak learners (n_estimators) and everything else as default values
rf = RandomForestClassifier(n_estimators=200)

# > **Step 3:** Now, `fit` (train) the model with `training_data` and `y_train`. This may take a little time.

# Fit the RandomForestClassifier model
rf.fit(training_data, y_train)

# > **Step 4:** Use `predict` to test the model on previously unseen data.

# Call model.predict() to test the model on the test data
rf_predictions = rf.predict(testing_data)

# > **Step 5:** Score the predictions.

# Calculate the accuracy, precision, recall, and F1 scores
print('Random Forest scores:')
print('Accuracy score: ', format(accuracy_score(y_test, rf_predictions)))
print('Precision score: ', format(precision_score(y_test, rf_predictions)))
print('Recall score: ', format(recall_score(y_test, rf_predictions)))
print('F1 score: ', format(f1_score(y_test, rf_predictions)))

# Let's re-print the Decision Tree scores again so we can look at them side-by-side.

print('Decision Tree scores:')
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))

# Interesting! It looks like the Random Forest outperformed the simple Decision Tree in all metrics except recall.

# If you need a little help with this exercise, check out the solution notebook [here](spam_rf_solution.ipynb).
extras/spam_rf.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# Minimal scikit-learn example code

# + pycharm={"name": "#%%\n"}
import warnings
warnings.filterwarnings('ignore')

import slingpy as sp
from typing import AnyStr, Dict, List, Optional
from sklearn.linear_model import LogisticRegression


class MyApplication(sp.AbstractBaseApplication):
    """Minimal slingpy application: nested cross-validation of a scikit-learn
    logistic regression on the Iris dataset.

    The outer split selects the held-out test fold; the inner split divides
    the remaining data into training and validation folds.
    """

    def __init__(self,
                 output_directory: AnyStr = "",
                 schedule_on_slurm: bool = False,
                 split_index_outer: int = 0,
                 split_index_inner: int = 0,
                 num_splits_outer: int = 5,
                 num_splits_inner: int = 5):
        # Forward all settings unchanged to the slingpy base application.
        super(MyApplication, self).__init__(
            output_directory=output_directory,
            schedule_on_slurm=schedule_on_slurm,
            split_index_outer=split_index_outer,
            split_index_inner=split_index_inner,
            num_splits_outer=num_splits_outer,
            num_splits_inner=num_splits_inner
        )

    def get_metrics(self, set_name: AnyStr) -> List[sp.AbstractMetric]:
        """Return the metrics evaluated on every dataset split (AUC only)."""
        return [
            sp.metrics.AreaUnderTheCurve()
        ]

    def load_data(self) -> Dict[AnyStr, sp.AbstractDataSource]:
        """Load Iris and build stratified nested train/validation/test splits.

        Returns a dict with the six "*_set_x"/"*_set_y" data sources that the
        slingpy framework expects.
        """
        data_source_x, data_source_y = sp.datasets.Iris.load_data(self.output_directory)

        stratifier = sp.StratifiedSplit()
        # Outer split: separate the held-out test fold from the rest.
        rest_indices, test_indices = stratifier.split(data_source_y,
                                                      split_index=self.split_index_outer,
                                                      num_splits=self.num_splits_outer)
        # Inner split of the rest: validation fold vs. training fold.
        validation_indices, training_indices = stratifier.split(data_source_y.subset(rest_indices),
                                                                split_index=self.split_index_inner,
                                                                num_splits=self.num_splits_inner)

        return {
            "training_set_x": data_source_x.subset(training_indices),
            "training_set_y": data_source_y.subset(training_indices),
            "validation_set_x": data_source_x.subset(validation_indices),
            "validation_set_y": data_source_y.subset(validation_indices),
            "test_set_x": data_source_x.subset(test_indices),
            "test_set_y": data_source_y.subset(test_indices)
        }

    def get_model(self) -> sp.AbstractBaseModel:
        """Wrap a scikit-learn LogisticRegression as a slingpy model."""
        model = sp.SklearnModel(LogisticRegression())
        return model

    def train_model(self, model: sp.AbstractBaseModel) -> Optional[sp.AbstractBaseModel]:
        """Fit the model on the training split and return it."""
        model.fit(self.datasets.training_set_x, self.datasets.training_set_y)
        return model
# -

# Run the application (nested cross validation on the specified dataset and model)

# + pycharm={"name": "#%%\n"}
MyApplication().run()
examples/sklearn_estimator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: data # language: python # name: data # --- # # Students Do: PCA in Action # # In this activity, you will use PCA to reduce the dimensions of the consumers shopping dataset from `4` to `2` features. After applying PCA, you will use the principal components data, to fit a K-Means model with `k=6` and make some conclusions. # Initial imports import pandas as pd from pathlib import Path from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.cluster import KMeans # Load the data file_path = Path("../Resources/shopping_data_cleaned.csv") # If necessary, standardize the data # + # Initialize PCA model # Get two principal components for the data. # - # Transform PCA data to a DataFrame # Fetch the explained variance # **Sample Analysis** # # According to the explained variance, the first principal component contains `X%` of the variance and the second principal component contains `Y%` of the variance. Since we have `Z%` of the information in the original dataset, we will see whether increasing the number of principal components to 3 will increase the explained variance. # Initialize PCA model for 3 principal components # Transform PCA data to a DataFrame # Fetch the explained variance # **Sample Analysis** # # With three principal components, we have `83.1%` of the information in the original dataset. We therefore conclude that three principal components preserves. 
# + # Initialize the K-Means model # Fit the model # Predict clusters # Add the predicted class columns # + # BONUS: plot the 3 principal components # import plotly.express as px # fig = px.scatter_3d( # df_shopping_pca, # x="principal component 3", # y="principal component 2", # z="principal component 1", # color="class", # symbol="class", # width=800, # ) # fig.update_layout(legend=dict(x=0, y=1)) # fig.show() # -
01-Lesson-Plans/20-Unsupervised-Machine-Learning/1/Activities/07-Stu_PCA/Unsolved/Stu_PCA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="o8voE0CfuvZp"
# rm -r /content/face_and_emotion_detection

# + [markdown] id="I4cjlU2F_yyl"
# Clone the github **repository**

# + colab={"base_uri": "https://localhost:8080/"} id="5UnuxLEwuR5t" outputId="1e7821b8-a6c5-4999-df5a-62d13e095bcd"
# !git clone https://github.com/KhurramShahzadODM/face_and_emotion_detection.git

# + colab={"base_uri": "https://localhost:8080/"} id="7k0f-VxaALy8" outputId="5abdbcd6-4d5c-4bf7-cdfb-668f89d1ce67"
# !pip install face_recognition

# + id="FoSUHIFL4iJX" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="bc94a9eb-c915-43aa-95b8-bcfd328878ce"
import shutil
import os
# BUG FIX: `files` was used below but never imported before this cell ran.
from google.colab import files

# clean and rebuild the image folders
input_folder = '/content/face_and_emotion_detection/inputs'
if os.path.exists(input_folder):
    shutil.rmtree(input_folder)
os.makedirs(input_folder)

output_folder = '/content/face_and_emotion_detection/outputs'
if os.path.exists(output_folder):
    shutil.rmtree(output_folder)
os.makedirs(output_folder)

# upload images (PNG or JPG)
image_names = list(files.upload().keys())
for image_name in image_names:
    shutil.move(image_name, os.path.join(input_folder, image_name))

# + colab={"base_uri": "https://localhost:8080/"} id="hUBCVUwEk4I3" outputId="d67e9721-5742-44d5-cfbb-7769c4e55004"
from imutils import paths
import os
import cv2
import numpy as np
import face_recognition
from keras.models import load_model
from google.colab.patches import cv2_imshow
import imutils
import keras

# Enroll the target person: build the gallery of known face encodings from the
# reference images.
imagePaths = list(paths.list_images('/content/face_and_emotion_detection/trump'))

# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []

for (i, imagePath) in enumerate(imagePaths):
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    # the parent directory name labels the person in the image
    name = imagePath.split(os.path.sep)[-2]

    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    rgb = imutils.resize(rgb, width=750)

    boxes = face_recognition.face_locations(rgb, model="cnn")
    encodings = face_recognition.face_encodings(rgb, boxes)
    for encoding in encodings:
        # add each encoding + name to our set of known names and encodings
        knownEncodings.append(encoding)
        knownNames.append(name)

# + colab={"base_uri": "https://localhost:8080/"} id="8W4LFkeMbHDq" outputId="2d3d1e7f-c0d7-4ee9-e555-a1fed62e8fb6"
# One confidence bucket per emotion class index.
results = [[] for i in range(7)]
print(results)

# Create a VideoCapture object and read from input file
video_path = "/content/face_and_emotion_detection/inputs/trump_1min_sec.mp4"
File_name = os.path.basename(video_path)

# Emotion label -> class index used by model_v6_23.hdf5.
emotion_dict = {'Angry': 0, 'Sad': 5, 'Neutral': 4, 'Disgust': 1,
                'Surprise': 6, 'Fear': 2, 'Happy': 3}

total_frame_wd_face = 0
cap = cv2.VideoCapture(video_path)
# Check if camera opened successfully
if (cap.isOpened() == False):
    print("Error opening video stream or file")

# PERF FIX: load the emotion model once, instead of on every processed frame.
model = load_model("/content/face_and_emotion_detection/model/model_v6_23.hdf5")

# Read frames until the first frame containing the enrolled face is scored.
while total_frame_wd_face < 1:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret != True:
        break

    frame = imutils.resize(frame, width=750)
    faces = face_recognition.face_locations(frame, model="cnn")

    # Find the first face in this frame that matches a known encoding.
    trump_face = None
    for face in faces:
        face_encodings = face_recognition.face_encodings(frame, [face])
        # BUG FIX: the original compared against `encoding`, a stale variable
        # left over from the enrollment loop in the previous cell, instead of
        # this face's own encoding.
        if face_encodings and sum(face_recognition.compare_faces(knownEncodings, face_encodings[0])) > 0:
            trump_face = face
            break

    if trump_face is not None:
        total_frame_wd_face += 1
        top, right, bottom, left = trump_face
        # Crop, resize to the model's 48x48 grayscale input, and add the
        # batch and channel dimensions.
        face_image = frame[top:bottom, left:right]
        face_image = cv2.resize(face_image, (48, 48))
        face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        face_image = np.reshape(face_image, [1, face_image.shape[0], face_image.shape[1], 1])

        result = model.predict(face_image)
        predicted_class = np.argmax(result)
        confidence = format(result[0][predicted_class], 'f')
        results[predicted_class].append(confidence)

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()

# Invert the emotion mapping: class index -> label.
label_map = dict((v, k) for k, v in emotion_dict.items())

# Append per-emotion confidence percentages for this video to the results file.
with open("/content/face_and_emotion_detection/results.txt", "a") as f:
    f.write(File_name + "\n")
    f.write("label, Percent \n")
    print("label, Percent")
    for idx, result in enumerate(results):
        # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
        result = np.array(result).astype(float)
        print(label_map[idx], " ", int(np.sum(result)) / total_frame_wd_face * 100)
        f.write(str(label_map[idx] + " " + str(int(np.sum(result)) / total_frame_wd_face * 100)) + "\n")
    f.write("\n")

# + colab={"base_uri": "https://localhost:8080/"} id="ELNGJkte9UKc" outputId="2ec39d22-e99f-4316-8fc7-64eee660da66"
# Print the accumulated results file. `with` closes the handle (the original
# opened the file and never closed it).
with open("/content/face_and_emotion_detection/results.txt") as a_file:
    lines = a_file.readlines()
for line in lines:
    print(line)

# + colab={"base_uri": "https://localhost:8080/"} id="0Jv8gzGD3Acb" outputId="097acd95-7573-45da-d5aa-7d59f0bfe87e"
# !zip /content/face.zip /content/face_and_emotion_detection/*

# + id="o5XhAnUAbxeR"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Dropout, Flatten
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam

# class index -> [label, box colour (BGR), text colour (BGR)]
# NOTE(review): this index map (4=Sad, 5=Surprise, 6=Neutral) disagrees with
# emotion_dict above (Sad=5, Surprise=6, Neutral=4); it belongs to the vggnet
# model below, but confirm the two models really use different orderings.
emotions = {
    0: ['Angry', (0, 0, 255), (255, 255, 255)],
    1: ['Disgust', (0, 102, 0), (255, 255, 255)],
    2: ['Fear', (255, 255, 153), (0, 51, 51)],
    3: ['Happy', (153, 0, 153), (255, 255, 255)],
    4: ['Sad', (255, 0, 0), (255, 255, 255)],
    5: ['Surprise', (0, 255, 0), (255, 255, 255)],
    6: ['Neutral', (160, 160, 160), (255, 255, 255)]
}

num_classes = len(emotions)
input_shape = (48, 48, 1)
weights_1 = '/content/face_and_emotion_detection/model/vggnet.h5'


# + id="0RDXnG5ab8Qq"
class VGGNet(Sequential):
    """VGG-style CNN for 48x48 grayscale emotion classification.

    The network builds and compiles itself in __init__; weights are loaded
    separately from `checkpoint_path`.
    """

    def __init__(self, input_shape, num_classes, checkpoint_path, lr=1e-3):
        super().__init__()
        # Inputs are raw 0-255 pixels; rescale to [0, 1] inside the model.
        self.add(Rescaling(1./255, input_shape=input_shape))
        self.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal'))
        self.add(BatchNormalization())
        self.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(MaxPool2D())
        self.add(Dropout(0.5))

        self.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(MaxPool2D())
        self.add(Dropout(0.4))

        self.add(Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(MaxPool2D())
        self.add(Dropout(0.5))

        self.add(Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same'))
        self.add(BatchNormalization())
        self.add(MaxPool2D())
        self.add(Dropout(0.4))

        self.add(Flatten())
        self.add(Dense(1024, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(256, activation='relu'))
        self.add(Dense(num_classes, activation='softmax'))

        self.compile(optimizer=Adam(learning_rate=lr),
                     loss=categorical_crossentropy,
                     metrics=['accuracy'])

        self.checkpoint_path = checkpoint_path


model_1 = VGGNet(input_shape, num_classes, weights_1)
model_1.load_weights(model_1.checkpoint_path)
face_em_latest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.9 64-bit (''research_track-0UGwfk25'': venv)'
#     language: python
#     name: python3
# ---

# ## Cache Plots

# +
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
import seaborn as sns

# Physical core count of the benchmark machine; thread counts above this
# value run on SMT (hardware threads), which the plots mark explicitly.
PHYSICAL_CORES = 64


def plot(p_data, p_yId, p_xId, p_hueId, p_styleId, p_logScale=False,
         p_smt_marker=False, p_export_filename=None, p_xLabel=None,
         p_yLabel=None):
    """Draw a seaborn line plot of one benchmark metric.

    Parameters
    ----------
    p_data : pandas.DataFrame
        Source data, one row per measurement.
    p_yId, p_xId : str
        Column names for the y and x axes.
    p_hueId, p_styleId : str or None
        Column names used for line hue/style grouping (passed to seaborn).
    p_logScale : bool
        If True, use log scale on both axes.
    p_smt_marker : bool
        If True, draw a vertical line at PHYSICAL_CORES labelled 'using SMT'.
    p_export_filename : str or None
        If given, the figure is additionally saved to this path.
    p_xLabel, p_yLabel : str or None
        Axis label overrides; the column names are used when None.
    """
    rcParams['figure.figsize'] = 12, 8
    rcParams['font.size'] = 12
    rcParams['svg.fonttype'] = 'none'

    # Named `ax` (not `plot`) so the function name is not shadowed locally.
    ax = sns.lineplot(x=p_xId, y=p_yId, hue=p_hueId, style=p_styleId,
                      data=p_data)

    if p_logScale:
        ax.set_yscale('log')
        ax.set_xscale('log')

    ax.set(xlabel=p_xLabel if p_xLabel is not None else p_xId)
    ax.set(ylabel=p_yLabel if p_yLabel is not None else p_yId)

    plt.grid(color='gainsboro')
    plt.grid(True, which='minor', linestyle='--', linewidth=0.5,
             color='gainsboro')

    if p_smt_marker:
        plt.axvline(PHYSICAL_CORES, linestyle='--', color='red',
                    label='using SMT')

    plt.legend()

    if p_export_filename is not None:
        plt.savefig(p_export_filename)

    plt.show()
# -

# ### Gauss3

# #### Strong Scaling

# +
import pandas as pd

data_frame = pd.read_csv('./runtime.csv')
data_frame = data_frame[data_frame.region_id == 'apply']

# NOTE: calc absolute efficiency.
# FIX: the original used chained indexing, df[mask1][mask2], where the second
# boolean mask was built against the *unfiltered* frame and then applied to
# the already-filtered one — a pandas alignment hazard. Combine both
# conditions into a single mask with `&` instead.
ref_runtime = data_frame[(data_frame.bench_id == '\Verb{nobind}')
                         & (data_frame.threads == 1)]['runtime'].values[0]

# Absolute efficiency: reference runtime divided by (runtime * threads).
data_frame = data_frame.assign(
    efficiency_abs=lambda p_entry: ref_runtime / (p_entry.runtime * p_entry.threads))

plot(p_data=data_frame, p_yId='runtime', p_xId='threads',
     p_hueId='bench_id', p_styleId=None, p_logScale=True,
     p_smt_marker=True, p_export_filename='runtime.svg',
     p_xLabel="Threads", p_yLabel="Runtime [s]")

plot(p_data=data_frame, p_yId='efficiency_abs', p_xId='threads',
     p_hueId='bench_id', p_styleId=None, p_logScale=True,
     p_smt_marker=True, p_export_filename='efficiency.svg',
     p_xLabel="Threads", p_yLabel="Absolute Efficiency")
src_optimization/05_openmp_stencil_01/e_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Import necessary 3d plotting modules import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # %matplotlib qt # ## A 3d parametric curve # + import numpy as np t = np.linspace(0,6*np.pi,400) z = t**2 x = np.cos(t) y = np.sin(t) fig = plt.figure() ax = fig.add_subplot(111,projection='3d') ax.plot(x,y,z, label='helix', linewidth=2, color='black') ax.legend() plt.show() # - # ## A 3d graph # + from matplotlib import cm x = np.linspace(-2,2,100) y = np.linspace(-2,2,100) x,y = np.meshgrid(x,y) z = np.sin(x)*np.cos(y) fig = plt.figure() ax = fig.add_subplot(111,projection='3d') ax.plot_surface(x,y,z, rstride=2, cstride=2, #cmap=cm.jet, alpha = 0.5, linewidth=0) plt.show() # - # ## Partial derivative demo # + fig = plt.figure() ax = fig.add_subplot(111,projection='3d') from matplotlib import cm # # xy plane # x = np.linspace(0,2,100) # y = np.linspace(0,2,100) # x,y = np.meshgrid(x,y) # z = 0 # ax.plot_surface(x,y,z, rstride=10, cstride=10, # #cmap=cm.jet, # alpha = 0.1, # linewidth=0) # point at which derivatives taken a = 1 b = 0.5 t = np.linspace(-1,1,100) # x direction x = a + t y = b + 0*t z = np.sin(a)*np.cos(b) ax.plot(x,y,z, '--', color='black') # x direction on graph x = a + t y = b + 0*t z = np.sin(x)*np.cos(y) ax.plot(x,y,z, linewidth=1, color='black') # function x = np.linspace(0,2,50) y = np.linspace(0,2,50) x,y = np.meshgrid(x,y) z = np.sin(x)*np.cos(y) ax.plot_surface(x,y,z, rstride=2, cstride=2, cmap=cm.Accent_r, alpha = 1, linewidth=0) plt.show() # -
sympy/3dgraphs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # %load_ext watermark # %watermark -d -u -a '<NAME>, <NAME>, <NAME>' -v -p numpy,scipy,matplotlib,scikit-learn # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # # SciPy 2016 Scikit-learn Tutorial # # Linear models # Linear models are useful when little data is available or for very large feature spaces as in text classification. In addition, they form a good case study for regularization. # # Linear models for regression # All linear models for regression learn a coefficient parameter ``coef_`` and an offset ``intercept_`` to make predictions using a linear combination of features: # # ``` # y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_ # ``` # # The difference between the linear models for regression is what kind of restrictions or penalties are put on ``coef_`` as regularization , in addition to fitting the training data well. # The most standard linear model is the 'ordinary least squares regression', often simply called 'linear regression'. It doesn't put any additional restrictions on ``coef_``, so when the number of features is large, it becomes ill-posed and the model overfits. # # Let us generate a simple simulation, to see the behavior of these models. 
# + from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split X, y, true_coefficient = make_regression(n_samples=200, n_features=30, n_informative=10, noise=100, coef=True, random_state=5) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5, train_size=60) print(X_train.shape) print(y_train.shape) # - # ## Linear Regression # # $$ \text{min}_{w, b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 $$ from sklearn.linear_model import LinearRegression linear_regression = LinearRegression().fit(X_train, y_train) print("R^2 on training set: %f" % linear_regression.score(X_train, y_train)) print("R^2 on test set: %f" % linear_regression.score(X_test, y_test)) from sklearn.metrics import r2_score print(r2_score(np.dot(X, true_coefficient), y)) # + plt.figure(figsize=(10, 5)) coefficient_sorting = np.argsort(true_coefficient)[::-1] plt.plot(true_coefficient[coefficient_sorting], "o", label="true") plt.plot(linear_regression.coef_[coefficient_sorting], "o", label="linear regression") plt.legend() # + from sklearn.model_selection import learning_curve def plot_learning_curve(est, X, y): training_set_size, train_scores, test_scores = learning_curve(est, X, y, train_sizes=np.linspace(.1, 1, 20)) estimator_name = est.__class__.__name__ line = plt.plot(training_set_size, train_scores.mean(axis=1), '--', label="training scores " + estimator_name) plt.plot(training_set_size, test_scores.mean(axis=1), '-', label="test scores " + estimator_name, c=line[0].get_color()) plt.xlabel('Training set size') plt.legend(loc='best') plt.ylim(-0.1, 1.1) plt.figure() plot_learning_curve(LinearRegression(), X, y) # - # ## Ridge Regression (L2 penalty) # # **The Ridge estimator** is a simple regularization (called l2 penalty) of the ordinary LinearRegression. In particular, it has the benefit of being not computationally more expensive than the ordinary least square estimate. 
# # $$ \text{min}_{w,b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 + \alpha ||w||_2^2$$ # The amount of regularization is set via the `alpha` parameter of the Ridge. # + from sklearn.linear_model import Ridge ridge_models = {} training_scores = [] test_scores = [] for alpha in [100, 10, 1, .01]: ridge = Ridge(alpha=alpha).fit(X_train, y_train) training_scores.append(ridge.score(X_train, y_train)) test_scores.append(ridge.score(X_test, y_test)) ridge_models[alpha] = ridge plt.figure() plt.plot(training_scores, label="training scores") plt.plot(test_scores, label="test scores") plt.xticks(range(4), [100, 10, 1, .01]) plt.legend(loc="best") # + plt.figure(figsize=(10, 5)) plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b') for i, alpha in enumerate([100, 10, 1, .01]): plt.plot(ridge_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.)) plt.legend(loc="best") # - # Tuning alpha is critical for performance. plt.figure() plot_learning_curve(LinearRegression(), X, y) plot_learning_curve(Ridge(alpha=10), X, y) # ## Lasso (L1 penalty) # **The Lasso estimator** is useful to impose sparsity on the coefficient. In other words, it is to be prefered if we believe that many of the features are not relevant. This is done via the so-called l1 penalty. 
# # $$ \text{min}_{w, b} \sum_i || w^\mathsf{T}x_i + b - y_i||^2 + \alpha ||w||_1$$ # + from sklearn.linear_model import Lasso lasso_models = {} training_scores = [] test_scores = [] for alpha in [30, 10, 1, .01]: lasso = Lasso(alpha=alpha).fit(X_train, y_train) training_scores.append(lasso.score(X_train, y_train)) test_scores.append(lasso.score(X_test, y_test)) lasso_models[alpha] = lasso plt.figure() plt.plot(training_scores, label="training scores") plt.plot(test_scores, label="test scores") plt.xticks(range(4), [30, 10, 1, .01]) plt.legend(loc="best") # + plt.figure(figsize=(10, 5)) plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b') for i, alpha in enumerate([30, 10, 1, .01]): plt.plot(lasso_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.)) plt.legend(loc="best") # - plt.figure(figsize=(10, 5)) plot_learning_curve(LinearRegression(), X, y) plot_learning_curve(Ridge(alpha=10), X, y) plot_learning_curve(Lasso(alpha=10), X, y) # Instead of picking Ridge *or* Lasso, you can also use [ElasticNet](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html), which uses both forms of regularization and provides a parameter to assign a weighting between them. ElasticNet typically performs the best amongst these models. # ## Linear models for classification # All linear models for classification learn a coefficient parameter ``coef_`` and an offset ``intercept_`` to make predictions using a linear combination of features: # ``` # y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_ > 0 # ``` # # As you can see, this is very similar to regression, only that a threshold at zero is applied. 
# # Again, the difference between the linear models for classification what kind of regularization is put on ``coef_`` and ``intercept_``, but there are also minor differences in how the fit to the training set is measured (the so-called loss function). # # The two most common models for linear classification are the linear SVM as implemented in LinearSVC and LogisticRegression. # # A good intuition for regularization of linear classifiers is that with high regularization, it is enough if most of the points are classified correctly. But with less regularization, more importance is given to each individual data point. # This is illustrated using an linear SVM with different values of ``C`` below. # # ### The influence of C in LinearSVC # In LinearSVC, the `C` parameter controls the regularization within the model. # # Lower `C` entails more regularization and simpler models, whereas higher `C` entails less regularization and more influence from individual data points. from figures import plot_linear_svc_regularization plot_linear_svc_regularization() # Similar to the Ridge/Lasso separation, you can set the `penalty` parameter to 'l1' to enforce sparsity of the coefficients (similar to Lasso) or 'l2' to encourage smaller coefficients (similar to Ridge). 
# ## Multi-class linear classification from sklearn.datasets import make_blobs plt.figure() X, y = make_blobs(random_state=42) plt.scatter(X[:, 0], X[:, 1], c=y); from sklearn.svm import LinearSVC linear_svm = LinearSVC().fit(X, y) print(linear_svm.coef_.shape) print(linear_svm.intercept_.shape) plt.scatter(X[:, 0], X[:, 1], c=y) line = np.linspace(-15, 15) for coef, intercept in zip(linear_svm.coef_, linear_svm.intercept_): plt.plot(line, -(line * coef[0] + intercept) / coef[1]) plt.ylim(-10, 15) plt.xlim(-10, 8); # Points are classified in a one-vs-rest fashion (aka one-vs-all), where we assign a test point to the class whose model has the highest confidence (in the SVM case, highest distance to the separating hyperplane) for the test point. # # Exercises # Use LogisticRegression to classify the digits data set, and grid-search the C parameter. # + from sklearn.datasets import load_digits from sklearn.linear_model import LogisticRegression digits = load_digits() X_digits, y_digits = digits.data, digits.target # split the dataset, apply grid-search # + # #%load solutions/17A_logreg_grid.py # - # How do you think the learning curves above change when you increase or decrease alpha? # Try changing the alpha parameter in ridge and lasso, and see if your intuition was correct. # + # #%load solutions/17B_learning_curve_alpha.py
notebooks/17 In Depth - Linear Models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.2 # language: julia # name: julia-1.6 # --- # + using BenchmarkTools n = 2^20 a = sinpi.(rand(n)) btmp = BitVector(undef, n) vtmp = Vector{Bool}(undef, n) f!(tmp, a) = @. tmp = a > 0.5 @time f!(btmp, a) @time f!(btmp, a) @time f!(btmp, a) @time f!(vtmp, a) @time f!(vtmp, a) @time f!(vtmp, a); # - sizeof(btmp)/1024 + 4.2 @time @. a > 0.5 @time @. a > 0.5 @time @. a > 0.5; @time a[btmp] @time a[btmp] @time a[btmp]; @time a[vtmp] @time a[vtmp] @time a[vtmp]; @time findall(btmp) @time findall(btmp) @time findall(btmp); @time findall(vtmp) @time findall(vtmp) @time findall(vtmp);
0020/BitVector.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pyvsc import numpy as np import os import pandas as pd from matplotlib import pyplot as plt # %matplotlib inline #plt.style.use('ieee.mplstyle') color_cycle =['#fc4f30', '#5cb85c', '#30a2da', '#e5ae38', '#8b8b8b'] k2deg = 273.15 T_a = 40+k2deg # + k2deg = 273.15 man_electric = { 'i_1':1400 ,'m_1':0.8 , 'cosphi_1':0.8,'p_igbt_1':3570 ,'p_diode_1':1046 , 'i_2':1000 ,'m_2':0.8 , 'cosphi_2':0.2,'p_igbt_2':1952 ,'p_diode_2': 931 , 'i_3':1300 ,'m_3':0.8 , 'cosphi_3':0.2,'p_igbt_3':2772 ,'p_diode_3':1252 , 'i_4':1000 ,'m_4':0.1 , 'cosphi_4':0.2,'p_igbt_4':1882 ,'p_diode_4': 987 , 'i_5':1500 ,'m_5':0.85, 'cosphi_5':0.5,'p_igbt_5':3749 ,'p_diode_5':1293 , } man_thermal = { 'p_igbt':3570 ,'p_diode':1046 , 'T_igbt':125+k2deg , 'T_diode':97+k2deg , 'T_sink':57.3+k2deg, 'T_a':40.0+k2deg} params = pyvsc.man2param(man_electric,man_thermal) I = np.arange(100,1600,100) sim_e,sim_th = pyvsc.vscthmodel(I, 0.8, 1.0, 25.0+273.15, params) # - # ### Steps # # 1. Obtain 5 points from manufacturer simulations or experiments considering at least to power factor values # 2. Use formulation to obtain $P_{igbt}$ and $P_{diode}$ polynomios # 3. 
With nominal current obtain $R_{th}^{igbt}$, $R_{th}^{diode}$ and $R_{th}^{sink}$ # # $$R_{th}^{igbt} = \frac{T_{igbt}-T_{sink}}{P_{igbt}}$$ # # $$R_{th}^{diode} = \frac{T_{diode}-T_{sink}}{P_{diode}}$$ # # $$R_{th}^{sink} = \frac{T_{sink}-T_{a}}{P_{igbt}+P_{diode}}$$ # # ### Validation # # + fig, (ax0,ax1) = plt.subplots(nrows=2) # creates a figure with one axe fig.set_size_inches(10,6) ax0.plot(I, sim_e['p_igbt']) ax0.plot(I, sim_e['p_diode']) ax1.plot(I, sim_th['T_igbt_deg']) ax1.plot(I, sim_th['T_diode_deg']) ax1.plot(I, sim_th['T_sink_deg']) plt.show() sim # - # ## Fuji # + df_fp000 = pd.read_csv('./data/fuji_fp0.csv',skiprows=35) df_fp080 = pd.read_csv('./data/fuji_fp08.csv',skiprows=35) df_fp100 = pd.read_csv('./data/fuji_fp1.csv',skiprows=35) man_electric = { 'i_1':df_fp100.iloc[5][0] ,'m_1':0.8,'cosphi_1':1.0,'p_igbt_1':df_fp100.iloc[5][1] ,'p_diode_1':df_fp100.iloc[5][2], 'i_2':df_fp100.iloc[15][0],'m_2':0.8,'cosphi_2':1.0,'p_igbt_2':df_fp100.iloc[15][1],'p_diode_2':df_fp100.iloc[15][2], 'i_3':df_fp100.iloc[30][0],'m_3':0.8,'cosphi_3':1.0,'p_igbt_3':df_fp100.iloc[30][1],'p_diode_3':df_fp100.iloc[30][2], 'i_4':df_fp000.iloc[15][0],'m_4':0.8,'cosphi_4':0.0,'p_igbt_4':df_fp000.iloc[15][1],'p_diode_4':df_fp000.iloc[15][2], 'i_5':df_fp000.iloc[30][0],'m_5':0.8,'cosphi_5':0.0,'p_igbt_5':df_fp000.iloc[30][1],'p_diode_5':df_fp000.iloc[30][2], } man_thermal = { 'p_igbt':df_fp100.iloc[25][1] ,'p_diode':df_fp100.iloc[25][2] , 'T_igbt':df_fp100.iloc[25][3]+k2deg , 'T_diode':df_fp100.iloc[25][5]+k2deg , 'T_sink':df_fp100.iloc[25][9]+k2deg, 'T_a':40.0+k2deg} params = pyvsc.man2param(man_electric,man_thermal) # - # ## Validation # + I = np.arange(0,460,10) sim_e,sim_th = pyvsc.vscthmodel(I, 0.8, 0.8, 40.0+273.15, params) fig, (ax0,ax1) = plt.subplots(nrows=2) # creates a figure with one axe fig.set_size_inches(10,6) ax0.plot(I, sim_e['p_igbt']) ax0.plot(I, sim_e['p_diode']) ax0.plot(df_fp080[' Output Current[Arms]'], df_fp080[' T1'],'.') ax0.plot(df_fp080[' Output 
Current[Arms]'], df_fp080[' D1'],'.') ax1.plot(I, sim_th['T_igbt_deg']) ax1.plot(I, sim_th['T_diode_deg']) ax1.plot(I, sim_th['T_sink_deg']) ax1.plot(df_fp080[' Output Current[Arms]'], df_fp080[' Tj-Max(T1)'],'.') ax1.plot(df_fp080[' Output Current[Arms]'], df_fp080[' Tj-Max(D1)'],'.') ax1.plot(df_fp080[' Output Current[Arms]'], df_fp080[' Tf-Max'],'.') plt.show() # - df_fp100.columns # + i_rms_fp000 = np.array(i_rms_list) p_igbt_fp000 = np.array(p_igbt_list) p_diode_fp000 = np.array(p_diode_list) T_igbt_fp000 = np.array(T_igbt_list) T_diode_fp000 = np.array(T_diode_list) T_sink_fp000 = np.array(T_sink_list) pf_fp000 = np.array(fp_list) v_out_fp000 = np.array(v_out_list) print('i_rms_list ',i_rms_list) print('p_igbt_list ',p_igbt_list) print('p_diode_list ',p_diode_list) print('T_igbt_list ', T_igbt_list) print('T_diode_list ',T_diode_list) print('T_s_igbt_list ',T_sink_list) print('fp_list ',fp_list) fig, (ax0,ax1) = plt.subplots(nrows=2) # creates a figure with one axe fig.set_size_inches(10,6) ax0.plot(i_rms_fp000, p_igbt_fp000,'.') ax0.plot(i_rms_fp000, p_diode_fp000,'.') ax1.plot(i_rms_fp000, T_igbt_fp000,'.') ax1.plot(i_rms_fp000, T_diode_fp000,'.') ax1.plot(i_rms_fp000, T_sink_fp000,'.') plt.show() # + k2deg = 273.16 low_idx_fp100 = 1 mid_idx_fp100 = 3 high_idx_fp100 = -1 low_idx_fp000 = 0 high_idx_fp000 = 1 #i_1 = I_rms_abb_fp_100[low_idx] #i_2 = I_rms_abb_fp_100[mid_idx] #i_3 = I_rms_abb_fp_100[high_idx] #i_4 = I_rms_abb_fp_000[7] #i_5 = I_rms_abb_fp_000[14] # # #p_igbt_1 = PtotIGBT_abb_fp_100[low_idx] #p_igbt_2 = PtotIGBT_abb_fp_100[mid_idx] #p_igbt_3 = PtotIGBT_abb_fp_100[high_idx] #p_igbt_4 = PtotIGBT_abb_fp_000[7] #p_igbt_5 = PtotIGBT_abb_fp_000[14] # #p_diode_1 = P_diode_abb_fp_100[low_idx] #p_diode_2 = P_diode_abb_fp_100[mid_idx] #p_diode_3 = P_diode_abb_fp_100[high_idx] #p_diode_4 = P_diode_abb_fp_000[7] #p_diode_5 = P_diode_abb_fp_000[14] # #m_1 = 0.95 #m_2 = 0.95 # #cosphi_1 = 1.0 #cosphi_2 = 0.0 # Iout += sh.col_values(17,7+(it-1)*13,8+(it-1)*13) 
# VDC += sh.col_values(18,7+(it-1)*13,8+(it-1)*13) # fsw += sh.col_values(20,7+(it-1)*13,8+(it-1)*13) # m += sh.col_values(21,7+(it-1)*13,8+(it-1)*13) # cos_phi += sh.col_values(22,7+(it-1)*13,8+(it-1)*13) # PtotRCCDiode += sh.col_values(10,7+(it-1)*13,8+(it-1)*13) # PtotRCCIGBT += sh.col_values(9,7+(it-1)*13,8+(it-1)*13) i_1 = i_rms_fp100[low_idx_fp100] i_2 = i_rms_fp100[mid_idx_fp100] i_3 = i_rms_fp100[high_idx_fp100] i_4 = i_rms_fp000[low_idx_fp000] i_5 = i_rms_fp000[high_idx_fp000] p_igbt_1 = p_igbt_fp100[low_idx_fp100] p_igbt_2 = p_igbt_fp100[mid_idx_fp100] p_igbt_3 = p_igbt_fp100[high_idx_fp100] p_igbt_4 = p_igbt_fp000[low_idx_fp000] p_igbt_5 = p_igbt_fp000[high_idx_fp000] p_diode_1 = p_diode_fp100[low_idx_fp100] p_diode_2 = p_diode_fp100[mid_idx_fp100] p_diode_3 = p_diode_fp100[high_idx_fp100] p_diode_4 = p_diode_fp000[low_idx_fp000] p_diode_5 = p_diode_fp000[high_idx_fp000] m_1 = np.sqrt(3)*v_out_fp100[low_idx_fp100]/800 m_2 = np.sqrt(3)*v_out_fp100[mid_idx_fp100]/800 m_3 = np.sqrt(3)*v_out_fp100[high_idx_fp100]/800 m_4 = np.sqrt(3)*v_out_fp000[low_idx_fp000]/800 m_5 = np.sqrt(3)*v_out_fp000[high_idx_fp000]/800 cosphi_1 = pf_fp100[low_idx_fp100] cosphi_2 = pf_fp100[mid_idx_fp100] cosphi_3 = pf_fp100[high_idx_fp100] cosphi_4 = pf_fp000[low_idx_fp000] cosphi_5 = pf_fp000[high_idx_fp000] man_result = [ [i_1,m_1,cosphi_1,p_igbt_1,p_diode_1], [i_2,m_2,cosphi_2,p_igbt_2,p_diode_2], [i_3,m_3,cosphi_3,p_igbt_3,p_diode_3], [i_4,m_4,cosphi_4,p_igbt_4,p_diode_4], [i_5,m_5,cosphi_5,p_igbt_5,p_diode_5] ] man_thermal = [ [p_igbt_fp000[high_idx_fp000], p_diode_fp000[high_idx_fp000], T_diode_fp000[high_idx_fp000]+k2deg, T_diode_fp000[high_idx_fp000]+k2deg, T_sink_fp000[high_idx_fp000]+k2deg, T_a], ] abc_params = man2param(man_result, man_thermal) R_th_igbt_sink = (T_igbt_fp100[high_idx_fp100]-T_sink_fp100[high_idx_fp100])/p_igbt_fp100[high_idx_fp100] R_th_diode_sink = (T_diode_fp100[high_idx_fp100]-T_sink_fp100[high_idx_fp100])/p_diode_fp100[high_idx_fp100] p_total = 
(p_igbt_fp100[high_idx_fp100]+p_diode_fp100[high_idx_fp100]) R_th_sink_a = (T_sink_fp100[high_idx_fp100]-(T_a-k2deg))/(p_total) R_th_igbt_sink = (T_igbt_fp100[high_idx_fp100]-T_sink_fp100[high_idx_fp100])/p_igbt_fp100[high_idx_fp100] R_th_diode_sink = (T_diode_fp000[high_idx_fp000]-T_sink_fp000[high_idx_fp000])/p_diode_fp000[high_idx_fp000] p_total = (p_igbt_fp100[high_idx_fp100]+p_diode_fp100[high_idx_fp100]) R_th_sink_a = (T_sink_fp100[high_idx_fp100]-(T_a-k2deg))/(p_total) #R_th_igbt_sink = 0.2+0.038 #R_th_diode_sink = 0.32 #R_th_sink_a = 0.031*6 therm_params = dict(R_th_igbt_sink = R_th_igbt_sink, R_th_diode_sink = R_th_diode_sink, R_th_sink_a = R_th_sink_a, ) c_therm_params = dict(C_th_igbt = 18, C_th_diode = 10, C_th_igbt_case = 5, C_th_diode_case = 2, C_th_sink = 6000.0) abc_params.update(c_therm_params) abc_params.update(therm_params) params = abc_params print('R_th_igbt_sink',R_th_igbt_sink) print('R_th_diode_sink',R_th_diode_sink) print('R_th_sink_a',R_th_sink_a) # - T_igbt_fp100[high_idx_fp100] p_diode_fp000 man_result pows, temps = vscthmodel(137, (420*1.73)/800, 0.05, 40+k2deg, params) print(pows) print(temps) # + fobj = open('../letter/src/table_validation_semisel.tex', 'w') table_validation = '' results = [] for it in range(5,10): sh_output = wb.sheet_by_index(it) col_1 = sh_output.col_values(1,0) i_rms = float(col_1[2].split(' ')[0]) p_igbt = float(col_1[15].split(' ')[0]) p_diode = float(col_1[18].split(' ')[0]) t_igbt = float(col_1[23].split(' ')[0]) t_diode = float(col_1[24].split(' ')[0]) t_sink = float(col_1[21].split(' ')[0]) fp = float(col_1[5]) v_out = float(col_1[1].split(' ')[0]) m = np.sqrt(3.0)*v_out/800 pows, temps = vscthmodel(i_rms, m, fp, T_a, params) result1 = [ i_rms, m, fp, p_igbt, p_diode, t_igbt, t_diode , pows['p_igbt'], pows['p_diode'], temps['T_diode']-273.16, temps['T_diode']-273.16] result1 = [ '','', '' , 'Manufacturer', p_igbt, p_diode, t_igbt, t_diode] result2 = [ i_rms, m, fp , 'Model', float(pows['p_igbt']), 
float(pows['p_diode']), float(temps['T_diode']-273.16), float(temps['T_diode']-273.16)] result3 = [ '', '', '' , 'Error ', 100*(p_igbt-pows['p_igbt'])/p_igbt, p_diode, t_igbt, t_diode , pows['p_igbt'], pows['p_diode'], temps['T_diode']-273.16, temps['T_diode']-273.16] results += [[result1],[result2],[result3]] p_igbt_m = float(pows['p_igbt']) p_diode_m = float(pows['p_diode']) t_igbt_m = float(temps['T_igbt']-273.16) t_diode_m = float(temps['T_diode']-273.16) t_sink_m = float(temps['T_sink']-273.16) if it > 0: row_1 = ' & & & Man. & {:2.0f} & {:2.0f} & {:2.0f} & {:2.0f} & {:2.0f} \\\\'.format( p_igbt, p_diode, t_igbt, t_diode, t_sink) row_2 = ' {:2.0f} & {:2.2f} & {:2.2f} & Model & {:2.0f} & {:2.0f} & {:2.0f} & {:2.0f} & {:2.0f} \\\\'.format( i_rms, m, fp, p_igbt_m, p_diode_m, t_igbt_m, t_diode_m, t_sink_m) row_3 = ' & & & Error \\% & {:2.2f} & {:2.2f} & {:2.2f} & {:2.2f}& {:2.2f} \\\\'.format(100.*(p_igbt-p_igbt_m)/p_igbt, 100.0*(p_diode-p_diode_m)/p_diode, 100.0*(t_igbt-t_igbt_m)/t_igbt, 100.*(t_diode- t_diode_m)/t_diode, 100.*(t_sink- t_sink_m)/t_sink) table_validation += row_1 + '\n' table_validation += row_2 + '\n' table_validation += row_3 + '\n' table_validation += r'\midrule' + '\n' it += 1 #print(tabulate(results,tablefmt='latex')) fobj.write(table_validation) fobj.close() print(table_validation) # -
pyvsc/notebooks/parameter_tool.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="images/usm.jpg" width="480" height="240" align="left"/>

# # MAT281 - Laboratorio N°02

# ## Objetivos de la clase
#
# * Reforzar los conceptos básicos de numpy.

# ## Contenidos
#
# * [Problema 01](#p1)
# * [Problema 02](#p2)
# * [Problema 03](#p3)

import numpy as np

# <a id='p1'></a>
#
# ## Problema 01
#
# Una **media móvil simple** (SMA) es el promedio de los últimos $k$ datos anteriores, es decir, sea $a_1$,$a_2$,...,$a_n$ un arreglo $n$-dimensional, entonces la SMA se define por:
#
# $$sma(k) =\dfrac{1}{k}(a_{n}+a_{n-1}+...+a_{n-(k-1)}) = \dfrac{1}{k}\sum_{i=0}^{k-1}a_{n-i} $$
#
#
# Por otro lado podemos definir el SMA con una ventana móvil de $n$ si el resultado nos retorna el promedio ponderado avanzando de la siguiente forma:
#
# * $a = [1,2,3,4,5]$, la SMA con una ventana de $n=2$ sería:
#
#
# * sma(2): [mean(1,2),mean(2,3),mean(3,4),mean(4,5)] = [1.5, 2.5, 3.5, 4.5]
# * sma(3): [mean(1,2,3),mean(2,3,4),mean(3,4,5)] = [2.,3.,4.]
#
#
# Implemente una función llamada `sma` cuyo input sea un arreglo unidimensional $a$ y un entero $n$, y cuyo ouput retorne el valor de la media móvil simple sobre el arreglo de la siguiente forma:
#
# * **Ejemplo**: *sma([5,3,8,10,2,1,5,1,0,2], 2)* = $[4. , 5.5, 9. , 6. , 1.5, 3. , 3. , 0.5, 1. ]$
#
# En este caso, se esta calculando el SMA para un arreglo con una ventana de $n=2$.
#
# **Hint**: utilice la función `numpy.cumsum`

def sma(a: list, n: int) -> np.ndarray:
    """Simple moving average of ``a`` with window size ``n``.

    Parameters
    ----------
    a : array_like
        One-dimensional sequence of numbers.
    n : int
        Window size; must satisfy ``1 <= n <= len(a)``.

    Returns
    -------
    numpy.ndarray
        Array of length ``len(a) - n + 1`` with the mean of each window.

    Raises
    ------
    ValueError
        If ``n`` is not positive or exceeds ``len(a)``.

    Examples
    --------
    >>> sma([5,3,8,10,2,1,5,1,0,2], 2)
    array([4. , 5.5, 9. , 6. , 1.5, 3. , 3. , 0.5, 1. ])
    """
    arr = np.asarray(a, dtype=float)
    if n <= 0:
        raise ValueError("n debe ser positivo")
    if arr.size < n:
        raise ValueError("n no puede ser mayor que el largo de a")
    # BUG FIX: the original looped `for i in range(0, k-1)` regardless of n,
    # which only produced the right number of windows for n == 2 and raised
    # IndexError for any n > 2. Use the cumsum trick (the suggested hint):
    # window sum = cumsum[i+n] - cumsum[i], computed for all windows at once.
    acumulada = np.concatenate(([0.0], np.cumsum(arr)))
    return (acumulada[n:] - acumulada[:-n]) / n

sma([5,3,8,10,2,1,5,1,0,2], 2)

# <a id='p2'></a>
#
# ## Problema 02
#
# La función **strides($a,n,p$)**, corresponde a transformar un arreglo unidimensional $a$ en una matriz de $n$ columnas, en el cual las filas se van construyendo desfasando la posición del arreglo en $p$ pasos hacia adelante.
#
# * Para el arreglo unidimensional $a$ = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], la función strides($a,4,2$), corresponde a crear una matriz de $4$ columnas, cuyos desfaces hacia adelante se hacen de dos en dos.
#
# El resultado tendría que ser algo así:$$\begin{pmatrix}
#  1& 2 &3 &4 \\
#  3& 4&5&6 \\
#  5& 6 &7 &8 \\
#  7& 8 &9 &10 \\
# \end{pmatrix}$$
#
#
# Implemente una función llamada `strides(a,4,2)` cuyo input sea un arreglo unidimensional y retorne la matriz de $4$ columnas, cuyos desfaces hacia adelante se hacen de dos en dos.
#
# * **Ejemplo**: *strides($a$,4,2)* =$\begin{pmatrix}
#  1& 2 &3 &4 \\
#  3& 4&5&6 \\
#  5& 6 &7 &8 \\
#  7& 8 &9 &10 \\
# \end{pmatrix}$
#

def strides(a: list, n: int, m: int) -> np.ndarray:
    """Reshape ``a`` into overlapping rows of ``n`` columns.

    Consecutive rows share ``m`` elements, i.e. each row starts ``n - m``
    positions after the previous one (the behaviour of the original
    implementation). Any trailing elements that do not fill a complete
    row are discarded.

    Parameters
    ----------
    a : array_like
        One-dimensional sequence.
    n : int
        Number of columns per row.
    m : int
        Overlap between consecutive rows; must satisfy ``0 <= m < n``.

    Returns
    -------
    numpy.ndarray
        Matrix whose rows are the overlapping windows.

    Raises
    ------
    ValueError
        If ``m >= n`` (the original code looped forever in that case).

    Examples
    --------
    >>> strides([1,2,3,4,5,6,7,8,9,10], 4, 2)
    array([[ 1,  2,  3,  4],
           [ 3,  4,  5,  6],
           [ 5,  6,  7,  8],
           [ 7,  8,  9, 10]])
    """
    arr = np.asarray(a)
    paso = n - m  # advance between row starts
    # BUG FIX: with m >= n the original while-loop never terminated.
    if paso <= 0:
        raise ValueError("m debe ser menor que n")
    filas = [arr[s:s + n] for s in range(0, arr.size - n + 1, paso)]
    return np.array(filas)

strides([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],4,2)

# <a id='p3'></a>
#
# ## Problema 03
#
#
# Un **cuadrado mágico** es una matriz de tamaño $n \times n$ de números enteros positivos tal que
# la suma de los números por columnas, filas y diagonales principales sea la misma. Usualmente, los números empleados para rellenar las casillas son consecutivos, de 1 a $n^2$, siendo $n$ el número de columnas y filas del cuadrado mágico.
#
# Si los números son consecutivos de 1 a $n^2$, la suma de los números por columnas, filas y diagonales principales
# es igual a : $$M_{n} = \dfrac{n(n^2+1)}{2}$$
# Por ejemplo,
#
# * $A= \begin{pmatrix}
#  4& 9 &2 \\
#  3& 5&7 \\
#  8& 1 &6
# \end{pmatrix}$,
# es un cuadrado mágico.
#
# * $B= \begin{pmatrix}
#  4& 2 &9 \\
#  3& 5&7 \\
#  8& 1 &6
# \end{pmatrix}$, no es un cuadrado mágico.
# # Implemente una función llamada `es_cudrado_magico` cuyo input sea una matriz cuadrada de tamaño $n$ con números consecutivos de $1$ a $n^2$ y cuyo ouput retorne *True* si es un cuadrado mágico o 'False', en caso contrario # # * **Ejemplo**: *es_cudrado_magico($A$)* = True, *es_cudrado_magico($B$)* = False # # **Hint**: Cree una función que valide la mariz es cuadrada y que sus números son consecutivos del 1 a $n^2$. def es_cuadrado_magico(a:list)->bool: """ es_cuadrado_magico(n) Verifica que a en un cuadrado magico Parameters ---------- a : list Matriz a evaluar. Returns ------- output : bool Veracidad de que a cumple con que es un cuadrado magico. Examples -------- >>> es_cuadrado_magico([[4,9,2], [3,5,7], [8,1,6]])=True >>> es_cuadrado_magico([[4,2,9], [3,5,7], [8,1,6]])=False """ import numpy as np #Importo numpy a=np.array(a) #Convierto a "a" en un arreglo n=a.shape[0] #Guardo el numero de filas en n m=(n/2)*((n*n)+1) #Calculo M_n y lo guardo en m aux1=0 #Defino un auxiliar1 aux2=0 #Defino un auxiliar2 aux3=0 #Defino un auxiliar3 aux4=0 #Defino un auxiliar4 for i in range(0,n): #Comienzo un for para recorrer una fila aux3+=a.item(i,i) #En auxiliar3 guardo los elementos de la diagonal principal aux4+=a.item(i,n-1-i) #En auxiliar4 guardo los elementos de la diagonal secundaria for j in range(0,n): #Comienzo un for para recorrer una columna aux1+=a.item(i,j) #En auxiliar1 guardo los elementos de la j-esima fila aux2+=a.item(j,i) #En auxiliar2 guardo los elementos de la j-esima columna if (aux1!=m)or(aux2!=m): #Si la suma de los elementos de la j-esima fila o columna no es igual a m return False #Entonces a no es un aux1=0 # aux2=0 # if (aux3!=m)or(aux4!=m): # return False # return True # a=[[4,9,2], [3,5,7], [8,1,6]] es_cuadrado_magico(a) a=[[4,2,9], [3,5,7], [8,1,6]] es_cuadrado_magico(a)
labs/02_numpy/laboratorio_02_resuelto.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # %%time import warnings warnings.filterwarnings("ignore") import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import missingno as msno import pickle # %matplotlib inline # - # ## Gaussian transformation sklearn # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # - # %time # let's explore the distribution of the numerical variables df[cols].hist(figsize=(20,20)) plt.show() # + import scipy.stats as stats # plot the histograms to have a quick look at the variable distribution # histogram and Q-Q plots def diagnostic_plots(df, variable): # function to plot a histogram and a Q-Q plot # side by side, for a certain variable plt.figure(figsize=(15,6)) plt.subplot(1, 2, 1) df[variable].hist(bins=30) plt.subplot(1, 2, 2) stats.probplot(df[variable], dist="norm", plot=plt) plt.show() # - # ### Logarithmic transformation # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + from sklearn.preprocessing import FunctionTransformer, PowerTransformer # create a log transformer transformer = FunctionTransformer(np.log, validate=True) # transform all the numerical and positive variables df_t = transformer.transform(df[cols].fillna(1)) # Scikit-learn returns NumPy arrays, so capture in dataframe, note that Scikit-learn will return an array with df_t = pd.DataFrame(df_t, columns=cols) # - # original distribution diagnostic_plots(df, 'GrLivArea') # transformed distribution diagnostic_plots(df_t, 'GrLivArea') # ### Reciprocal transformation # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + # create the transformer transformer = FunctionTransformer(lambda x: 1/x, validate=True) # transformer = FunctionTransformer(np.reciprocal, validate=True) # transform the positive variables df_t = transformer.transform(df[cols].fillna(1)) # re-capture in a dataframe df_t = pd.DataFrame(df_t, columns=cols) # - # transformed variable diagnostic_plots(df_t, 'GrLivArea') # ### Square root transformation # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + transformer = FunctionTransformer(lambda x: x**(1/2), validate=True) # transformer = FunctionTransformer(np.sqrt, validate=True) df_t = transformer.transform(df[cols].fillna(1)) df_t = pd.DataFrame(df_t, columns=cols) # - # transformed variable diagnostic_plots(df_t, 'GrLivArea') # ### Exponential transformation # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + transformer = FunctionTransformer(lambda x: x**(1/1.2), validate=True) df_t = transformer.transform(df[cols].fillna(1)) df_t = pd.DataFrame(df_t, columns=cols) # - # transformed variable diagnostic_plots(df_t, 'GrLivArea') # ### Box-Cox transformation # * Box-Cox and Yeo-Johnson transformations need to learn their parameters from the data. Let's divide the dataset into train and test set. # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + X = df.drop(columns=["SalePrice"]) y = df["SalePrice"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True, # stratify=y # Not a classification problem ) X_cols = X.columns # + # create the transformer transformer = PowerTransformer(method='box-cox', standardize=False) # fit on X_train then transform on X_test X_train_t = transformer.fit_transform(X_train[X_cols].fillna(1)) X_test_t = transformer.transform(X_test[X_cols].fillna(1)) # capture data in a dataframe X_train_t = pd.DataFrame(X_train_t, columns=X_cols) X_test_t = pd.DataFrame(X_test_t, columns=X_cols) # - # original distribution diagnostic_plots(X_train, 'GrLivArea') # transformed variable diagnostic_plots(X_train_t, 'GrLivArea') # original distribution diagnostic_plots(X_test, 'GrLivArea') # transformed variable diagnostic_plots(X_test_t, 'GrLivArea') # ### <NAME> # * Yeo-Johnson is an adaptation of Box-Cox that can also be used in negative value variables. # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + X = df.drop(columns=["SalePrice"]) y = df["SalePrice"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True, # stratify=y # Not a classification problem ) X_cols = X.columns # + transformer = PowerTransformer(method='yeo-johnson', standardize=False) # fit on X_train then transform on X_test X_train_t = transformer.fit_transform(X_train[X_cols].fillna(1)) X_test_t = transformer.transform(X_test[X_cols].fillna(1)) # capture data in a dataframe X_train_t = pd.DataFrame(X_train_t, columns=X_cols) X_test_t = pd.DataFrame(X_test_t, columns=X_cols) # - # original distribution diagnostic_plots(X_train, 'GrLivArea') # transformed variable diagnostic_plots(X_train_t, 'GrLivArea') # original distribution diagnostic_plots(X_test, 'GrLivArea') # transformed variable diagnostic_plots(X_test_t, 'GrLivArea') # ### Gaussian transformation (Feature Engine) df = pd.read_csv('houseprice.csv') df.head() # ### LogTransformer (Feature Engine) # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + from feature_engine.transformation import LogTransformer transformer = LogTransformer() # feature engine returns dataframe df_t = transformer.fit_transform(df[cols].fillna(1)) # - # original distribution diagnostic_plots(df, 'GrLivArea') # transformed distribution diagnostic_plots(df_t, 'GrLivArea') transformer.variables_ # ### ReciprocalTransformer (Feature Engine) # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + from feature_engine.transformation import ReciprocalTransformer transformer = ReciprocalTransformer() # feature engine returns dataframe df_t = transformer.fit_transform(df[cols].fillna(1)) # - # original distribution diagnostic_plots(df, 'GrLivArea') # transformed distribution diagnostic_plots(df_t, 'GrLivArea') transformer.variables_ # ### ExponentialTransformer (Feature Engine) # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + from feature_engine.transformation import PowerTransformer transformer = PowerTransformer() # feature engine returns dataframe df_t = transformer.fit_transform(df[cols].fillna(1)) # - # original distribution diagnostic_plots(df, 'GrLivArea') # transformed distribution diagnostic_plots(df_t, 'GrLivArea') transformer.variables_ # ### BoxCoxTransformer (Feature Engine) # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + X = df.drop(columns=["SalePrice"]) y = df["SalePrice"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True, # stratify=y # Not a classification problem ) # + from feature_engine.transformation import BoxCoxTransformer transformer = BoxCoxTransformer() # fit on X_train then transform on X_test X_train_t = transformer.fit_transform(X_train[X_cols].fillna(1)) X_test_t = transformer.transform(X_test[X_cols].fillna(1)) # - # original distribution diagnostic_plots(X_train, 'GrLivArea') # transformed distribution diagnostic_plots(X_train_t, 'GrLivArea') transformer.variables_ transformer.lambda_dict_ # ### Yeo-Johnson Transformer (Feature Engine) # + df = pd.read_csv('houseprice.csv') # Let's select the numerical and positive variables in the dataset for this demonstration. 
cols = [] for col in df.columns: if df[col].dtypes != 'O' and col != 'Id': # if the variable is numerical if np.sum(np.where(df[col] <= 0, 1, 0)) == 0: # if the variable is positive cols.append(col) # append variable to the list df = df[cols] df.head() # + X = df.drop(columns=["SalePrice"]) y = df["SalePrice"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, shuffle=True, # stratify=y # Not a classification problem ) # + from feature_engine.transformation import YeoJohnsonTransformer transformer = YeoJohnsonTransformer() # fit on X_train then transform on X_test X_train_t = transformer.fit_transform(X_train[X_cols].fillna(1)) X_test_t = transformer.transform(X_test[X_cols].fillna(1)) # - # original distribution diagnostic_plots(X_train, 'GrLivArea') # transformed distribution diagnostic_plots(X_train_t, 'GrLivArea') transformer.variables_ transformer.lambda_dict_
Variable Transformation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

# +
import os
import sys

# Resolve the repository root (two levels up) so the `src` package is
# importable from the notebook.
# NOTE(review): os.path.abspath("__file__") resolves the *literal string*
# "__file__" against the current working directory; this only works when
# the notebook is launched from inside the repository -- confirm if moved.
repo_path = os.path.dirname(os.path.dirname(os.path.abspath("__file__")))
repo_path
sys.path.append(repo_path)

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# %matplotlib inline
# -

from src.ac_agent import AgentDDPG, GaussianProcess, OUNoise
from src.utils import get_noise_schedulling

from src.ac_agent import AgentDDPG, GaussianProcess

# +
# One Gaussian and one Ornstein-Uhlenbeck noise process, seeded so the
# decay schedules below are reproducible.
num_agents =1
RND_SEED = 123
g_noise = GaussianProcess(num_agents, RND_SEED, sigma=0.1)
ou_noise = OUNoise(num_agents, RND_SEED)

# +
# Gaussian noise decayed over 5000 steps with three different decay rates.
g_noise_df1 = get_noise_schedulling(5000, 0.999, g_noise)
g_noise_df2 = get_noise_schedulling(5000, 0.9995, g_noise)
g_noise_df3 = get_noise_schedulling(5000, 0.9998, g_noise)

g_noise_df = pd.concat([g_noise_df1, g_noise_df2, g_noise_df3], axis=0)

# +
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
g_noise_df1.plot(ax=axs[0])
g_noise_df2.plot(ax=axs[1])
g_noise_df3.plot(ax=axs[2])
plt.show()

# +
# Same comparison on a shorter 500-step horizon.
g_noise_df1 = get_noise_schedulling(500, 0.99, g_noise)
g_noise_df2 = get_noise_schedulling(500, 0.995, g_noise)
g_noise_df3 = get_noise_schedulling(500, 0.998, g_noise)

g_noise_df = pd.concat([g_noise_df1, g_noise_df2, g_noise_df3], axis=0)

# +
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
g_noise_df1.plot(ax=axs[0])
g_noise_df2.plot(ax=axs[1])
g_noise_df3.plot(ax=axs[2])
plt.show()

# +
# Ornstein-Uhlenbeck noise decayed over 5000 steps with three decay rates.
ou_noise_df1 = get_noise_schedulling(5000, 0.999, ou_noise)
ou_noise_df2 = get_noise_schedulling(5000, 0.9995, ou_noise)
ou_noise_df3 = get_noise_schedulling(5000, 0.9998, ou_noise)

# BUG FIX: this previously concatenated the *Gaussian* frames
# (g_noise_df1..3) via copy-paste; concatenate the OU frames instead.
ou_noise_df = pd.concat([ou_noise_df1, ou_noise_df2, ou_noise_df3], axis=0)

# +
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
ou_noise_df1.plot(ax=axs[0])
ou_noise_df2.plot(ax=axs[1])
ou_noise_df3.plot(ax=axs[2])
plt.show()
# -

# Compare the rolling dispersion of both noise types.
ou_noise_df1['noise_dec'].rolling(100).std().plot(label='ou')
g_noise_df1['noise_dec'].rolling(100).std().plot(label='gaussian')
plt.legend()
plt.show()

# Shape of tanh vs. tanh scaled by 2 (action-squashing comparison).
x = np.arange(-5,5, 0.01)
tanh1 = np.tanh(x)
tanh2 = np.tanh(x)*2

plt.plot(x, tanh1, label='tanh1')
plt.plot(x, tanh2, label='tanh2')
plt.legend()
plt.show()
ddpg-pendulum/Schedulling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import OpenCV and get its Version import cv2 print(cv2.__version__) # + # Get Images and Videos from OpenCV Repository # Access: https://github.com/opencv/opencv # Go to "samples" folder -> "data" folder to get samples. # Can also download all the repository. # + # Read and Display Image img = cv2.imread('lena.jpg',-1) # Colored Image # print(img) # Print image matrix at notebook cv2.imshow('image',img) # Show image in a window cv2.waitKey(5000) # Wait 5000 ms before closing cv2.destroyAllWindows() # close windows of images # + # Write Image to File if someone press a key img = cv2.imread('lena.jpg',0) # 0 = P&B Image / -1 = Colored Image cv2.imshow('image',img) # Show image in a window key1 = cv2.waitKey(0) & 0xFF # Hold pressed key (0xFF is just a mask. Important for some systems) if key1 == 27: # "esc" key cv2.destroyAllWindows() # close windows of images elif key1 == ord('s'): # "s" key cv2.imwrite('lena_copy.jpg',img) cv2.destroyAllWindows() # close windows of images else: cv2.destroyAllWindows() # close windows of images # + # Read, Display and Save Videos # Catcher: object that have the functions to catch the video frame # catcher = cv2.VideoCapture('myVideo.avi') # get images from a file catcher = cv2.VideoCapture(0) # Get video from camera (if don't work, try '-1') # If you have more than one camera, try '1', '2'... 
# www.fourcc.org/codecs.php => video codecs #fourcc = cv2.VideoWriter_fourcc('X','V','I','D') fourcc = cv2.VideoWriter_fourcc(*'XVID') # Writer: object that have the functions to write video to a file writer = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480)) # Video Property: if the video is open or not print(catcher.isOpened()) # Show video in a window while(catcher.isOpened()): # Get Frame ret, frame = catcher.read() # ret = 1 if frame available. # Frame is captured in 'frame' variable. if ret == True: # Video Properties # print(catcher.get(cv2.CAP_PROP_FRAME_WIDTH)) # print(catcher.get(cv2.CAP_PROP_FRAME_HEIGHT)) # Save frame writer.write(frame) # Show Frame gray_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) # frame in gray scale #cv2.imshow('frame',frame) # Show last frame cv2.imshow('frame',gray_frame) # Show last frame in gray scale # Finish video if 'q' key was pressed if cv2.waitKey(1) & 0xFF == ord('q'): break else: break # Finish Capture and Saving catcher.release() writer.release() # Close all windows cv2.destroyAllWindows() # -
scripts_openCV/opencv_01_intro.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Tag 2. Kapitel 7. Fortgeschrittene Programmierung # # ## Lektion 39. Datum und Uhrzeit in R # # R bietet uns eine Vielzahl an Möglichkeitem, um mit Datum und Uhrzeit (eng. Date and Time), die auch als Zeitstempel bekannt (en. timestamp) sind, zu arbeiten. Schauen wir uns als erstes das `Date` (Datum) Objekt an: # # ## Date # # Du kannst die `as.Date()` Funktion nutzen, um Zeichen-Strings in ein Datums-Objekt zu konvertieren. Dieses Format erlaub es, mehr Informationen zu speichern. Der String muss dabei in einem Standard-Zeitformat vorliegen. # # Um das heutige Datum zu erhalten könne wir das System (`Sys.`) danach fragen: Sys.Date() # Als Variable festlegen heute <- Sys.Date() heute # Wir können außerdem Zeichen-Strings in ein Datum konvertieren, indem wir `as.Date()` verwenden. Dabei können wir einerseits gleich das richtige Format angeben oder andererseits mit % Zeichen das Format selbst definieren: # # # <table border="1"> # <tr><td>Code</td><td>Wert</td></tr> # <tr><td><tt>%d</tt></td><td>Tag des Monats (Dezimalzahl)</td></tr> # <tr><td><tt>%m</tt></td><td>Monat (Dezimalzahl)</td></tr> # <tr><td><tt>%b</tt></td><td>Monat (abgekürzt)</td></tr> # <tr><td><tt>%B</tt></td><td>Monat (vollständiger Name)</td></tr> # <tr><td><tt>%y</tt></td><td>Jahr (2 Stellen)</td></tr> # <tr><td><tt>%Y</tt></td><td>Jahr (4 Stellen)</td></tr></table> # # # *Hinweis: Achtet dabei darauf, dass bspw. 
der abgekürzte Monat sich auf die englischen Namen bezieht!* # # Schauen wir uns nun einige Beispiele an, wie wir `as.Date` und das passende Format einsetzen können: # YYYY-MM-DD as.Date('2018-11-03') # MM.DD.YYYY d <- as.Date("16.12.2019",format="%d.%m.%Y") d class(d) # MM.DD.YY as.Date("17.12.19",format="%d.%m.%y") # Formatierung nutzen as.Date("Nov-03-10",format="%b-%d-%y") # Formatierung nutzen as.Date("November-03-1990",format="%B-%d-%Y") toString(Sys.Date()) # Durch die Kombination von `as.Date()` und dem `format` Parameter können wir im Grunde genommen jeden String konvertieren, der Datumsinformationen enthält. Das ist extrem nützlich, um Zeitreihen zu analysieren. # # ## Zeit # # Genau wie mit Daten können wir auch Zeitinformationen aus Strings erhalten. R nutzt den `POSIXct` Objekttyp, um Zeitinformationen zu speichern. # # Mann kann z.B. `as.POSIXct()` nutzen, um einen String in ein POSIXct Objekt zu konvertieren. Die Formatregelung lesen wir am besten in der Dokumentation der `strptime()` Funktion nach: help(strptime) as.POSIXct("11:02:03",format="%H:%M:%S") # Achtet dabei darauf, wie das heutige Datum automatisch hinzugefügt wird, da wir es nicht selbst spezifiziert haben. Hätten wir die Information bereits, dann würden wir schreiben: as.POSIXct("November-03-1990 11:02:03",format="%B-%d-%Y %H:%M:%S") # Meistens benutzt man tatsächlich die `strptime()` Funktion anstatt POSIXct. Hier ist eine kurze Beschreibung der Unterschiede zwischen den Funktionen: # # Es gibt zwei interne Implementierungen von Datum/Zeit: POSIXct, welches die Sekunden seit Beginn der UNIX Epoche speichert, und POSIXlt, welches eine Liste an Tagen, Monaten, Jahren, Stunden, Minuten und Sekunden speichert. # # `strptime` ist eine Funktion, um Zeichenvektoren direkt ins POSIXlt Format zu konvertieren. # # `as.POSIXlt` konvertiert eine Vielzahl an Datentypen zu POSIXlt. Es versucht dabei intelligent zu erkennen, welche Formatierung angebracht ist. 
# # `as.POSIXct` konvertiert eine Vielzahl an Datentypen ins POSICct Format. Es versucht ebenso intelligent zu erknnnen, welche Umwandlung angebracht ist. Im Fall von Zeichen führt es zuerst strptime aus und konvertiert anschließend von POSIXlt zu POSIXct. # # Es ergibt daher Sinn, dass strptime schneller ist, da es nur Zeichen als Input verarbeitet, während die anderen Methoden zuerst zu erkennen versuchen, welche Umwandlung angebracht ist. Außerdem bietet es insofern etwas mehr Sicherheit, dass kein ungewollter Input übergeben werden kann. # # Schauen wir uns daher ein einfaches Beispiel für strptime an: strptime("09:01:03",format="%H:%M:%S") # + da <- Sys.Date() da da_4_yrs_ago <- as.Date("17.12.2015",format="%d.%m.%Y") da_4_yrs_ago diff <- da - da_4_yrs_ago diff d2 <- strtoi(diff) d2 class(d2) # - # Meistens wird man also direkt strptime() verwenden. Mann sollte am Ende folgendes merken: # # * Wie man strptime() nutzt und formatiert # * Wie du help(strptime) verwenden kannst, um mehr über die Formatierung zu erfahren # # Herzlichen Glückwunsch! Sie sind mit Lektion 39. fertig!
2.7 R Advanced Programming/de-DE/2.7.39 R - Date and Time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/stefanvanberkum/CD-ABSC/blob/main/getBERT/getBERT_restaurant.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="jETwsjY7ItU1" ''' For use on local runtime. How to run: - Download the appropriate BERT model from: https://github.com/google-research/bert (here BERT-Base). - Download the code to your system path from the same repository, by running the following command in the command window: git clone https://github.com/google-research/bert - Create a virtual environment (e.g. using anaconda), using Python 3.5. - Install tensorflow in your virtual environment (pip install tensorflow==1.15). - Start a local runtime in your virtual environment using https://research.google.com/colaboratory/local-runtimes.html - Make sure the BERT model is in your system path (here named 'uncased_L-12_H-768_A-12'). - Make sure all data is available and update the paths at the end of this code. Adapted from Trusca, Wassenberg, Frasincar and Dekker (2020) for use on a local runtime <NAME>., <NAME>., <NAME>., <NAME>. (2020) A Hybrid Approach for Aspect-Based Sentiment Analysis Using Deep Contextual Word Embeddings and Hierarchical Attention. In: <NAME>., <NAME>., <NAME>. (eds) Web Engineering. ICWE 2020. Lecture Notes in Computer Science, vol 12128. Springer, Cham. 
https://doi.org/10.1007/978-3-030-50578-3_25 https://github.com/mtrusca/HAABSA_PLUS_PLUS ''' # + id="IzNbvlqAI2ZH" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys sys.path.append('bert/') import codecs import collections import json import re import os import pprint import numpy as np import tensorflow as tf import modeling import tokenization # + id="f_eg16ZiJDZ0" BERT_PRETRAINED_DIR = 'uncased_L-12_H-768_A-12' LAYERS = [-1, -2, -3, -4] NUM_TPU_CORES = 8 MAX_SEQ_LENGTH = 87 BERT_CONFIG = BERT_PRETRAINED_DIR + '/bert_config.json' CHKPT_DIR = BERT_PRETRAINED_DIR + '/bert_model.ckpt' VOCAB_FILE = BERT_PRETRAINED_DIR + '/vocab.txt' INIT_CHECKPOINT = BERT_PRETRAINED_DIR + '/bert_model.ckpt' BATCH_SIZE = 128 # + id="PQ1wxUc1JGan" class InputExample(object): def __init__(self, unique_id, text_a, text_b=None): self.unique_id = unique_id self.text_a = text_a self.text_b = text_b # + id="ROxBm-HsJJtX" class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids # + id="OuwSb7lTJMbJ" def input_fn_builder(features, seq_length): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_unique_ids = [] all_input_ids = [] all_input_mask = [] all_input_type_ids = [] for feature in features: all_unique_ids.append(feature.unique_id) all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_input_type_ids.append(feature.input_type_ids) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. 
The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "unique_ids": tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "input_type_ids": tf.constant( all_input_type_ids, shape=[num_examples, seq_length], dtype=tf.int32), }) d = d.batch(batch_size=batch_size, drop_remainder=False) return d return input_fn # + id="PRS3S_HdJcof" def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" unique_ids = features["unique_ids"] input_ids = features["input_ids"] input_mask = features["input_mask"] input_type_ids = features["input_type_ids"] model = modeling.BertModel( config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=input_type_ids, use_one_hot_embeddings=use_one_hot_embeddings) if mode != tf.estimator.ModeKeys.PREDICT: raise ValueError("Only PREDICT modes are supported: %s" % (mode)) tvars = tf.trainable_variables() scaffold_fn = None (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) all_layers = model.get_all_encoder_layers() predictions = { "unique_id": unique_ids, } 
for (i, layer_index) in enumerate(layer_indexes): predictions["layer_output_%d" % i] = all_layers[layer_index] output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) return output_spec return model_fn # + id="5D-kg-6QJnrz" def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (example.unique_id)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features # + id="2IrwUPKyJs0w" def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() # + id="RZm_icBJJwOR" def read_sequence(input_sentences): examples = [] unique_id = 0 for sentence in input_sentences: line = tokenization.convert_to_unicode(sentence) examples.append(InputExample(unique_id=unique_id, text_a=line)) unique_id += 1 return examples # + id="DxhiOP79JwqN" def get_features(input_text, dim=768): tf.logging.set_verbosity(tf.logging.ERROR) layer_indexes = LAYERS bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG) tokenizer = tokenization.FullTokenizer( vocab_file=VOCAB_FILE, do_lower_case=True) examples = read_sequence(input_text) features = convert_examples_to_features( examples=examples, seq_length=MAX_SEQ_LENGTH, tokenizer=tokenizer) unique_id_to_feature = {} for feature in features: unique_id_to_feature[feature.unique_id] = feature model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=INIT_CHECKPOINT, layer_indexes=layer_indexes, use_tpu=False, use_one_hot_embeddings=True) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=False, model_fn=model_fn, config=tf.contrib.tpu.RunConfig(), predict_batch_size=BATCH_SIZE, train_batch_size=BATCH_SIZE) input_fn = input_fn_builder( features=features, seq_length=MAX_SEQ_LENGTH) # Get features for result in estimator.predict(input_fn, yield_single_examples=True): unique_id = int(result["unique_id"]) feature = unique_id_to_feature[unique_id] output = collections.OrderedDict() for (i, token) in enumerate(feature.tokens): layers = [] for (j, layer_index) in enumerate(layer_indexes): layer_output = result["layer_output_%d" % j] layer_output_flat = np.array([x for x in layer_output[i:(i + 1)].flat]) layers.append(layer_output_flat) output[token] = sum(layers)[:dim] return output # + id="-1wy2oqJKudd" # When it takes too long, data can be split in multiple subfiles such as in # lines 5-30 lines = open('dataBERT/raw_data_restaurant_2014.txt', errors='replace').readlines() ''' for j in range(0, len(lines), 150): with open("dataBERT/BERT_base_restaurant_2014_" + str(round(j/150)) + ".txt", 'w') as f: for i in range(j, j + 150, 3): # Was 0*3, 2530*3, 3 print("sentence: " + str(i / 3) + " out of " + str(len(lines) / 3) + " in " + "raw_data;") target = lines[i + 1].lower().split() words = lines[i].lower().split() words_l, words_r = [], [] flag = True for word in words: if word == '$t$': flag = False continue if flag: words_l.append(word) else: words_r.append(word) sentence = " ".join(words_l + target + words_r) print(sentence) embeddings = get_features([sentence]) for key, value in embeddings.items(): f.write('\n%s ' % key) for v in value: f.write('%s ' % v) ''' with open("dataBERT/BERT_base_restaurant_2014.txt", 'w') as f: for i in range(0, len(lines), 3): # Was 0*3, 2530*3, 3 print("sentence: " + str(i / 3) + " out of " + str(len(lines) / 3) + " in " + "raw_data;") target = lines[i + 1].lower().split() words = lines[i].lower().split() words_l, words_r = [], [] flag = True for word in words: if word == '$t$': 
flag = False continue if flag: words_l.append(word) else: words_r.append(word) sentence = " ".join(words_l + target + words_r) print(sentence) embeddings = get_features([sentence]) for key, value in embeddings.items(): f.write('\n%s ' % key) for v in value: f.write('%s ' % v)
getBERT/getBERT_restaurant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <center> <h1>Numerical Methods -- Assignment 6</h1> </center> # ## Problem 1 -- Minimisation # ### (a) Brent's method # In numerical analysis, Brent's method is a root-finding algorithm combining the bisection method, the secant method and inverse quadratic interpolation. It has the reliability of bisection but it can be as quick as some of the less-reliable methods. Now given the following function: # \begin{equation} # f(x) = \frac{(x+3)^2}{2}+\frac{x-5}{3}-\frac{10}{4} # \end{equation} # + # %config InlineBackend.figure_format = 'retina' from scipy.optimize import brent import numpy as np import matplotlib.pyplot as plt f = lambda x: 0.5*np.power(x+3,2)+(x-5)/3-10./4 minimum = brent(f,brack=(-100,100)) x = np.linspace(-100,100,100) y = f(x) print "The minimum of the function is found to be:",minimum #Plot to check the shape of the function plt.plot(x,y,'-') plt.legend('f(x)') plt.show() # - # ### (b) Conjugate gradient method # $$f(x,y) = 0.26((x+0.5)^2+(y-1)^2)-0.48xy$$ # + from scipy.optimize import fmin_cg,minimize def func(x): return 0.26*((x[0]+0.5)**2+(x[1]-1)**2)-0.48*x[0]*x[1] def fprime(x): return np.array((0.26*2*(x[0]+0.5)-0.48*x[1],0.26*2*(x[1]-1)-0.48*x[0])) fc = lambda x: 0.26*(np.power(x[0]+0.5,2)+np.power(x[1]-1,2))-0.48*x[0]*x[1] fp = lambda x: np.array((0.26*2*(x[0]+0.5)-0.48*x[1],0.26*2*(x[1]-1)-0.48*x[0])) x0 = [2,5] fmin = minimize(fc,x0,jac=fp,method='CG') print "The minimum of the function is found at (x,y)=:",fmin.x # + from mpl_toolkits.mplot3d import Axes3D from matplotlib.pyplot import figure, show import numpy as np x = np.linspace(-100,100,100) y = x xx,yy = np.meshgrid(x,y) X = [xx,yy] zz = func(X) #plot fig = plt.figure(figsize=(10,6)) frame = fig.add_subplot(1,1,1,projection ='3d') surf = 
frame.plot_surface(xx,yy,zz,cmap='Dark2') frame.set_xlabel('x') frame.set_ylabel('y') frame.set_zlabel('z') frame.set_title('f(x,y)') fig.colorbar(surf,ax=frame) show() # - # ## Problem 2 -- x,y data # The $\textit{Maximum Likelihood}$ is done by minimizing the log-likelihood function. # $$ln\,L = -\frac{1}{2}\sum_{i=1}^{N}\left(\frac{(data-model)^2}{2\sigma^2}+ln\,(2\pi\sigma^2)\right)$$, # where the model is described by $y = kx+m$, and $\sigma^2 = yerr^2+f^2(kx+m)^2$. # First use $\chi^2$ fitting and reduced $\chi^2$ to get a reference of the values of parameters. # + from kapteyn import kmpfit data = np.genfromtxt('Downloads/table_1.dat') x = data[:,0] y = data[:,1] ey = data[:,2] #define a model function, since assuming linear correlation def model(params, x): a, b = params return a + b*x # A residuals function def residuals(p, data): # Residuals function needed by kmpfit x, y, err= data return (y - model(p,x))/err p0 = (1, 1) fitobj = kmpfit.Fitter(residuals=residuals, data=(x,y,ey)) status = fitobj.fit(params0=p0) print("Was fit successful?", bool(status)) a, b = fitobj.params da, db = fitobj.stderr s = u"Model parameters a={} \u00B1 {} and b={} \u00B1 {}".format(a, da, b, db) print(s) print("Chi squared, reduced Chi squared:", fitobj.chi2_min, fitobj.rchi2_min) print("The errors derived from the covariance matrix:", fitobj.xerror) # + from matplotlib.pyplot import figure, show fig = figure() frame = fig.add_subplot(1,1,1) frame.errorbar(x, y, yerr=ey,marker='o', ls='') xmin = x.min(); xmax = x.max() p = (a,b) ymin = model(p,xmin); ymax = model(p,xmax) frame.plot([xmin, xmax], [ymin, ymax]) frame.set_xlabel('Distance (Mpc)') frame.set_ylabel('Velocity (km/s)') show() # - # ### (a) -- Optimal parameters # + from math import * import time def log_like(params,x,y,ey): k,m,f = params sig = np.sqrt(ey**2+(f*(k*x+m))**2) L = ((k*x+m)-y)**2/(sig**2)+np.log(2*pi*sig**2) return 0.5*np.sum(L) #minimize via three methods t1 = time.time() l_BFGS = 
minimize(log_like,x0=[0,0,0],args=(x,y,ey),method='BFGS') t2 = time.time() t3 = time.time() l_NM = minimize(log_like,x0=[0,0,0],args=(x,y,ey),method='Nelder-Mead') t4 = time.time() t5 = time.time() l_CG = minimize(log_like,x0=[0,0,0],args=(x,y,ey),method='CG') t6 = time.time() print '%6s %6s %6s %6s %12s %12s' % ("Methods","k","m","f","time","Success") print '%6s %.7f %.7f %.7f %.6f %8s' % ("BFGS",l_BFGS.x[0],l_BFGS.x[1],l_BFGS.x[2],t2-t1,l_BFGS.success) print '%6s %.7f %.7f %.7f %.6f %8s' % ("NM",l_NM.x[0],l_NM.x[1],np.abs(l_NM.x[2]),t4-t3,l_NM.success) print '%6s %.7f %.7f %.7f %.6f %8s' % ("CG",l_CG.x[0],l_CG.x[1],l_CG.x[2],t6-t5,l_CG.success) # - # The table printed above listed the optimization results via the three methods. In this case CG method performs the fastest. All three methods uses x,y and error in y to minimize the log-likelihood function. # ### (b) -- Plot fitting fig = figure() frame = fig.add_subplot(1,1,1) frame.errorbar(x, y, yerr=ey,marker='*', fillstyle='none',ls='',color='indigo') xmin = x.min(); xmax = x.max() p1 = [l_BFGS.x[1],l_BFGS.x[0]] p2 = [l_NM.x[1],l_NM.x[0]] p3 = [l_CG.x[1],l_CG.x[0]] ymin1 = model(p1,xmin); ymax1 = model(p1,xmax) ymin2 = model(p2,xmin); ymax2 = model(p2,xmax) ymin3 = model(p3,xmin); ymax3 = model(p3,xmax) frame.plot([xmin, xmax], [ymin1, ymax1]) frame.plot([xmin, xmax], [ymin1, ymax1]) frame.plot([xmin, xmax], [ymin1, ymax1]) frame.set_xlabel('x') frame.set_ylabel('y') frame.legend(['BFGS','NM','CG','data']) show() # Notice that because of small deviations the three fits overlay on each other and is not distinguashiable on the graph above. 
# ## Problem 3 -- Dispersion relation data = np.genfromtxt('./Downloads/table_2.dat') x = data[:,0] y = data[:,1] v = data[:,2] ve = data[:,3] # ### (a) -- Minimization def L(params, x,y,v,ve): e0, vsys = params v_model = vsys #model without A sigma = np.sqrt(e0**2 + ve**2) L = ((v - v_model)**2/ (sigma**2) + np.log(2*pi*sigma**2)) return 0.5 * np.sum(L) log_BFGS = minimize(L,x0=[1,1],args=(x,y,v,ve)) print "Was minimization successful?",log_BFGS.success print '%6s %6s' % ('sig0(rad)','v_sys(km/s)') print '%.3f %.3f' % (log_BFGS.x[0],log_BFGS.x[1]) # ### (b) -- Minimization (with $A$ and $\theta_0$) # + def logL(params,x,y,v,ve): e0, vsys, A, theta0 = params theta = np.arctan2(y,x) v_model = vsys + A*np.sin(theta-theta0) #model with A sigma = np.sqrt(e0**2 + ve**2) L = ((v - v_model)**2/ (sigma**2) + np.log(2*pi*sigma**2)) return 0.5 * np.sum(L) ln_BFGS = minimize(logL,x0=[1,1,1,1],args=(x,y,v,ve)) print "Was minimization successful?",ln_BFGS.success print '%6s %6s %4s %8s' % ('sig0(rad)','v_sys(km/s)','A(km/s)','theta0(rad)') print '%.6f %.6f %.6f %.6f' % (ln_BFGS.x[0],ln_BFGS.x[1],ln_BFGS.x[2],ln_BFGS.x[3]) # - # ### (c) -- MCMC # + import emcee def LMC(params,x,y,v,ve): e0, vsys, A, theta0 = params theta = np.arctan2(y,x) v_model = vsys + A*np.sin(theta-theta0) sigma = np.sqrt(e0**2 + ve**2) L = ((v - v_model)**2/ (sigma**2) + np.log(2*pi*sigma**2)) if A > 0: #A stays positive return -0.5 * np.sum(L) else: return -np.inf nwalkers, ndim = 500, 4 p0 = np.zeros(shape=(500,4)) for i in range(500): p0[i,0] = ln_BFGS.x[0]+1e-4*np.random.uniform(-ln_BFGS.x[0]/10, ln_BFGS.x[0]/10) p0[i,1] = ln_BFGS.x[1]+1e-4*np.random.uniform(-ln_BFGS.x[1]/10, ln_BFGS.x[1]/10) p0[i,2] = ln_BFGS.x[2]+1e-4*np.random.uniform(-ln_BFGS.x[2]/10, ln_BFGS.x[2]/10) p0[i,3] = ln_BFGS.x[3]+1e-6*np.random.uniform(-ln_BFGS.x[3]/10, ln_BFGS.x[3]/10) l = emcee.EnsembleSampler(nwalkers, ndim, LMC, args=[x,y,v,ve]) MCMC = l.run_mcmc(p0, 500) e0 = [i[0] for i in MCMC[0]] vsys = [i[1] for i in MCMC[0]] A 
= [i[2] for i in MCMC[0]] theta0 = [i[3] for i in MCMC[0]] #print the result print '%6s %6s %4s %8s' % ('sig0(rad)','v_sys(km/s)','A(km/s)','theta0(rad)') print '%.6f %.6f %.6f %.6f' % (np.mean(e0),np.mean(vsys),np.mean(A),np.mean(theta0)) # - # ### (d) -- Histogram plt.hist(e0, bins=100) plt.xlabel(r"$\sigma_0(rad)$") plt.ylabel('occurance') plt.axvline(x=ln_BFGS.x[0],color='#9370db',ls='--') plt.axvline(x=np.mean(e0),color='#b22222',ls='--') plt.legend(['BFGS','MCMC']) plt.show() plt.hist(vsys, bins=100) plt.xlabel(r"$v_{sys}(km/s)$") plt.ylabel('occurance') plt.axvline(x=ln_BFGS.x[1],color='#9370db',ls='--') plt.axvline(x=np.mean(vsys),color='#b22222',ls='--') plt.legend(['BFGS','MCMC']) plt.show() plt.hist(A, bins=100) plt.xlabel(r"$A(km/s)$") plt.ylabel('occurance') plt.axvline(x=ln_BFGS.x[2],color='#9370db',ls='--') plt.axvline(x=np.mean(A),color='#b22222',ls='--') plt.legend(['BFGS','MCMC']) plt.show() plt.hist(theta0, bins=100) plt.xlabel(r"$\theta_0(rad)$") plt.ylabel('occurance') plt.axvline(x=ln_BFGS.x[3],color='#9370db',ls='--') plt.axvline(x=np.mean(theta0),color='#b22222',ls='--') plt.legend(['BFGS','MCMC']) plt.show() # ### (e) -- $v_{rot} \ vs \theta$ # + from matplotlib.pyplot import figure,show params1 = [ln_BFGS.x[1],ln_BFGS.x[2],ln_BFGS.x[3]] params2 = [np.mean(vsys),np.mean(A),np.mean(theta0)] def v_model(params,x,y): vsys, A, theta0 = params theta = np.arctan2(y,x) v_rot = vsys + A*np.sin(theta-theta0) return theta, v_rot Theta1, V1 = v_model(params1,x,y) Theta2, V2 = v_model(params2,x,y) #plot data and fits fig = figure(figsize=(12,9)) frame = fig.add_subplot(1,1,1) frame.errorbar(np.arctan2(y,x),v,ve,marker = 'd',ls='',color='indigo',fillstyle='none') frame.plot(Theta1,V1,'.',color = 'darkorange') frame.plot(Theta2,V2,'.',color='#b22222') frame.set_xlabel(r'$\theta(radian)$') frame.set_ylabel(r'$v_{rot}(km/s)$') frame.legend(['data','BFGS','MCMC']) show() # - # ### (f) -- Sampling method # emcee.EnsembleSampler uses Monte Carlo Markov Chain 
# (MCMC) methods with $affine\ invariance$. Unlike most single-variable MCMC methods which force the perturbations of order $\sqrt{\epsilon}$ to slowly come to equilibration, EnsembleSampler would use perturbations of order $\sqrt{\epsilon}$ in the $(1,-1)$ direction and perturbations of order one in the $(1,1)$ direction. # # A general MCMC sampler is of the form # $$X(t+1) = R(X(t),\xi(t),\pi),$$ # where $X(t)$ is the sample after $t$ iterations, $\xi(t)$ is a sequence of iid (independent, identically distributed) random variables, and $\pi$ is a probability density. The algorithm is $affine \ invariant$ if for any affine transformation $Ax+b$, # $$R(Ax+b,\xi(t),\pi_{A,b})=AR(x(t),\xi(t),\pi)+b,$$ # for every $x$ and almost all $\xi(t)$.
numerical6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf
#     language: python
#     name: tf
# ---

# ## Fully connected Neural Network
#
# In this exercise we build a classification model for the MNIST dataset.
# To do so, we design a simple neural network using the Keras framework:
#
# - Flatten the 28x28 input matrix to [1, 784]
# - A single dense layer of 512 units with ReLU activation
# - A Dropout of 0.2
# - A simple output layer with the 10 possible digits
#
# The data are 28x28-pixel images and there are 10 classes to predict,
# the digits 0 through 9. Since pixel values are greyscale in [0, 255],
# we normalise them to [0, 1].

# +
import tensorflow as tf
mnist = tf.keras.datasets.mnist

import matplotlib.pyplot as plt

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# -

# Normalise pixel intensities from [0, 255] to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0

x_train.shape

plt.imshow(x_train[0])

x_train[0].shape

x_train[0]

# +
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# -

history = model.fit(x_train, y_train, epochs=5)

# BUG FIX: the accuracy history key is 'acc' under TF1.x keras but 'accuracy'
# under TF2.x / tf.keras when compiled with metrics=['accuracy']. Look up
# whichever key exists so the plot works on either version instead of
# raising KeyError.
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'

# Plot training accuracy values (only the training curve is available here,
# since fit() was called without validation data).
plt.plot(history.history[acc_key])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper left')
plt.show()

# Plot training loss values
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper right')
plt.show()

# Final evaluation on the held-out test set.
model.evaluate(x_test, y_test)
keras_tf_pytorch/Keras/.ipynb_checkpoints/MNIST Fully Connected NN-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline tips = sns.load_dataset('tips') tips.head() sns.set_style('darkgrid') sns.countplot(x='sex',data=tips) sns.set_style('ticks') sns.countplot(x='sex',data=tips) sns.set_style('ticks') sns.countplot(x='sex',data=tips) sns.despine() sns.set_style('ticks') sns.countplot(x='sex',data=tips) sns.despine(left=True,bottom=False) plt.figure(figsize=(12,3)) sns.countplot(x='sex',data=tips) sns.set_context('poster') sns.countplot(x='sex',data=tips) sns.set_context('poster',font_scale=2) sns.countplot(x='sex',data=tips) sns.set_context('paper',font_scale=1) sns.lmplot(x='total_bill',y='tip',data=tips, hue='sex',palette='winter')
06-Data-Visualization-with-Seaborn/Style and Color Follow Along Exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import torch.nn as nn import torch.nn.functional as F import torch.autograd.variable as Variable import torch.utils.data as data import torchvision from torchvision import transforms # + import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import sparse import lightfm # %matplotlib inline # - filepath = 'D:/Data_Science/Recommender systems/the-movies-dataset/' filename = 'movies.csv' data_movie_names = pd.read_csv(filepath + filename) data_movie_names = data_movie_names[['movieId','title']] data_movie_names.head() movie_names_dict = data_movie_names.set_index('movieId').to_dict()['title'] movie_names_dict filepath = 'D:/Data_Science/Recommender systems/the-movies-dataset/' filename = 'ratings_small.csv' data = pd.read_csv(filepath + filename) data.head() data.shape # + #make interaction dictionary interaction_dict = {} cid_to_idx = {} idx_to_cid = {} uid_to_idx ={} idx_to_uid = {} cidx = 0 uidx = 0 input_file = filepath + filename with open(input_file) as fp: next(fp) for line in fp: row = line.split(',') uid = int(row[0]) cid = int(row[1]) rating = float(row[2]) if uid_to_idx.get(uid) == None : uid_to_idx[uid] = uidx idx_to_uid[uidx] = uid interaction_dict[uid] = {} uidx+=1 if cid_to_idx.get(cid) == None : cid_to_idx[cid] = cidx idx_to_cid[cidx] = cid cidx+=1 interaction_dict[uid][cid] = rating fp.close() # - print("unique users : {}".format(data.userId.nunique())) print("unique movies : {}".format(data.movieId.nunique())) # + #interaction_dict # + row = [] column = [] data_1 = [] for uid in interaction_dict.keys(): for cid in interaction_dict[uid].keys(): row.append(cid_to_idx[cid]) column.append(uid_to_idx[uid]) data_1.append(interaction_dict[uid][cid]) # - item_user_data = 
sparse.csr_matrix((data_1,(column,row))) item_user_data item_user_data.shape torch.tensor(item_user_data[0].todense())[0] input_dim = len(cid_to_idx) h_layer_2 = int(round(len(cid_to_idx) / 4)) h_layer_3 = int(round(h_layer_2 / 4)) h_layer_3 class AutoEncoder(nn.Module): def __init__(self): #Class contructor super(AutoEncoder,self).__init__() #Caal parent constructor self.fc1 = nn.Linear(in_features = input_dim , out_features = h_layer_2) #out_features = size of output tensor. This is rank1 tensor self.fc2 = nn.Linear(in_features = h_layer_2 , out_features = h_layer_3) self.fc3 = nn.Linear(in_features = h_layer_3 , out_features = h_layer_2) self.out = nn.Linear(in_features = h_layer_2 , out_features = input_dim) def forward(self,t): #implement forward pass #1. Input layer t = self.fc1(t) t = F.relu(t) #2. Hidden Linear Layer t = self.fc2(t) t = F.relu(t) #3. Hidden Linear Layer t = self.fc3(t) t = F.relu(t) #3. Output layer t = self.out(t) t = F.relu(t) return t self_ae = AutoEncoder() #Runs the class contructor self_ae.double().cuda() # + #torchvision.datasets.DatasetFolder('') #train_data_loader = data.DataLoader(item_user_data, 256) # + #next(iter(train_data_loader)) # + #item_user_data[batch] # + learning_rate = 0.001 optimizer = torch.optim.Adam(self_ae.parameters(), lr=learning_rate) criterion = F.mse_loss epochs = 10 for epoch in range(1,epochs): for batch in range(0,item_user_data.shape[0]): if batch % 100 == 0: print('processing epoch :{} , batch : {}'.format(epoch , batch+1)) inputs = torch.tensor(np.array(item_user_data[batch].todense())[0]) inputs = inputs.cuda() target = inputs # zero the parameter gradients optimizer.zero_grad() y_pred = self_ae(inputs.double()) loss = criterion(y_pred, target) loss.backward() optimizer.step() print("epoch : {}\t batch : {}\t loss : {}".format(epoch,batch+1,loss.item())) torch.save(self_ae.state_dict(), ('model'+str(epoch))) torch.save(self_ae.state_dict(), 'model.final') # - self_ae.eval().cpu() # + idx = 
uid_to_idx[24] inputs = np.array(item_user_data[idx].todense())[0] watched_movie_idx = np.argsort(inputs)[-10:][::-1] inputs = torch.tensor(inputs) print('WATCHED MOVIES :') for i in watched_movie_idx: movie_id = idx_to_cid[i] try : name = movie_names_dict[movie_id] except : name = 'unknown' print('index : {}\t id : {}\t name : {}'.format(i,movie_id,name)) y_pred = self_ae(inputs) y_pred = y_pred.detach().numpy() pred_idx = np.argsort(y_pred)[-10:][::-1] print('PREDICTED MOVIES') for i in pred_idx: #reverse list movid_id = idx_to_cid[i] try : name = movie_names_dict[movid_id] except : name = 'unknown' print('index : {}\t id : {}\t name : {}'.format(i,movid_id,name)) # -
Collaborative Filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from uberpy import Uber with open('uber.txt','r') as f: client_id = f.readline().strip() server_token = f.readline().strip() secret= f.readline().strip() uber = Uber(client_id, server_token, secret) from pprint import pprint import pandas as pd import random from pyDOE import * import math from yelpapi import YelpAPI import pandas as pd with open('yelp.txt','r') as f: consumer_key = f.readline().strip() consumer_secret = f.readline().strip() token = f.readline().strip() token_secret = f.readline().strip() yelp_api = YelpAPI(consumer_key, consumer_secret, token, token_secret) # + def meters_to_degs(x, y): #takes meters in the x- and y-directions #returns a tuple changes in degree #this method is refered to as 'quick and dirty' and not suggested for life-dependent applications or long distances return ((y/111111.0), x/(111111 * math.cos(y))) def get_max_distances(land_area): #assuming counties are square (smaller area than circle - less points near or outside boundary) side = math.sqrt(land_area) r = side/2 return r def get_max_distances_circle(land_area): #assuming counties are circles (which they are not, but shapes are hard) r_2 = land_area/math.pi r = math.sqrt(r_2) return r def get_degree_ranges(land_area): d = get_max_distances_circle(land_area) return (meters_to_degs(d, d)) # - def sampler(geo, radius, val): #row corresponds to one of the dataframe rows #val is the row of the Latin Square that I will use for this sample a = lhs(2, samples=40, criterion='center') b = (a-0.5)*2 latin_square_coefficient = b[val] multiplier = get_degree_ranges(radius) center = [geo[0], geo[1]] ret = latin_square_coefficient*multiplier + center return ret a = lhs(2, samples=40, criterion='center') b = (a-0.5)*2 def generate_samples(num): ret = [] for x in 
xrange(num): #print sampler((43.7182412,-79.378058), 630.2, x) ret = ret + [sampler((43.626609, -79.394537), 63020000, x)] return ret def generate_a_sample(): ret = sampler((43.626609, -79.394537), 63020000, 1) return ret[0], ret[1] def uberize(latitude, longitude): return uber.get_time_estimate(latitude, longitude) def yelpize(): ret = {} search_results = yelp_api.search_query(term='', ll="43.626609, -79.394537", sort=2, radius_filter=8000, offset=0) for idx,y in enumerate(search_results['businesses']): ret[y['name']] = [y['location']['coordinate'], y['rating']] search_results = yelp_api.search_query(term='', ll="43.626609, -79.394537", sort=2, radius_filter=8000, offset=20) for idx,y in enumerate(search_results['businesses']): ret[y['name']] = [y['location']['coordinate'], y['rating']] return ret x = yelpize() search_results['businesses'][0]['rating'] x = yelp_api.search_query(term='', ll="43.626609, -79.394537", sort=2, radius_filter=8000, offset=40) for idx,y in enumerate(x['businesses']): print y['name'] print idx,y['location']['coordinate'] for idx,y in enumerate(x['businesses']): print y['name'] print idx,y['location']['coordinate'] #def yelpize(latitude, longitude): print uber.get_time_estimate(43.626609, -79.394537) print uberize(43.626609, -79.394537) s = [] while True: lat_1, lng_1 = generate_a_sample() s = s + [uberize(lat_1, lng_1)['times']] if len(s) > 4: break print s for x in s: x for x in s: try: print x[0][0], x[0][1] except: print x
src/yelp/yelp_and_uber.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python3 (wirms) # language: python # name: wirms # --- # # SSJ Observation Example # + from LBH_to_eflux.observations.ssj import SSJDay import datetime import os import numpy as np from geospacepy.special_datetime import (doyarr2datetime, datetimearr2jd, datetime2jd, jd2datetime, datetime2doy, jdarr2datetime) import matplotlib.pyplot as pp from geospacepy.satplottools import draw_dialplot from matplotlib.gridspec import GridSpec from ssj_auroral_boundary.dmsp_spectrogram import dmsp_spectrogram # %matplotlib inline # + dmsp = 16 hemisphere = 'S' startdt = datetime.datetime(2014,2,18) enddt = datetime.datetime(2014,2,18,2) ssj_file = '/home/matsuo/Documents/tomokostuff/SSJcdfs_v2/20140218/dmsp-f16_ssj_precipitating-electrons-ions_20140218_v1.1.3_GLOWcond_v2.cdf' # - # Import a day of SSJ data from DMSP F16 ssj = SSJDay(16, 'N', ssj_file, read_spec = True ) # Get the observations for the time fram efro the northern hemisphere ssj_first_pass = ssj.get_ingest_data(startdt,enddt,'S') ssj_first_pass.keys() # ## Plot the spectrogram f1 = pp.figure(figsize=(10,10*0.5),dpi=200) gs = GridSpec(1,13) a2 = f1.add_subplot(gs[0:11]) a2_cb = f1.add_subplot(gs[12]) dmsp_spectrogram(jdarr2datetime(ssj_first_pass['jds']), ssj_first_pass['ele_diff_energy_flux'],\ ax = a2, ax_cb = a2_cb, \ lat = ssj_first_pass['lats'], lt = ssj_first_pass['lons']/15)
notebooks/observation_tests/plot_ssj_file.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import tensorflow as tf import matplotlib.pylab as plt % matplotlib inline % load_ext autoreload % autoreload 2 tf.__version__ from modules.spectral_pool_test import max_pool from modules.spectral_pool import spectral_pool from modules.frequency_dropout import test_frequency_dropout from modules.create_images import open_image, downscale_image image = open_image('aj.jpg') grayscale_image = np.asarray(downscale_image(image, 256, 256).convert('F')) / 255. image = downscale_image(image, 256, 256).convert('RGB') image = np.asarray(image) / 255. # original image plt.imshow(image, cmap='gray') def get_fft_plot(fft, shift_channel=True, eps=1e-12, pad_to_width=256): """ Convert a fourier transform returned from tensorflow in a format that can be plotted. Args: fft: numpy array with image and channels shift_channel: if True, the channels are assumed as first dimension and will be moved to the end. 
eps: to be added before taking log """ if shift_channel: fft = np.squeeze(np.moveaxis(np.absolute(fft), 0, -1)) fft = np.log(fft + eps) mn = np.min(fft, axis=(0, 1)) mx = np.max(fft, axis=(0, 1)) fft = (fft - mn) / (mx - mn) fft_shifted = np.fft.fftshift(fft) padding = int((pad_to_width - fft_shifted.shape[1]) / 2) if padding < 0: padding = 0 return np.pad( fft_shifted, pad_width=padding, mode='constant', constant_values=1., ) # ### Create Grid grayscale: # + fig, axes = plt.subplots(3, 6, figsize=(20, 9), sharex=True, sharey=True) pool_size = [64, 32, 16, 8, 4, 1] grayscale_images = np.expand_dims( np.expand_dims(grayscale_image, 0), 0 ) for i in range(6): ax = axes[0, i] im_pool = max_pool(image, pool_size=pool_size[i]) im_pool = np.squeeze(im_pool) ax.imshow(im_pool, cmap='gray') if not i: ax.set_ylabel('Max Pooling', fontsize=16) for i in range(6): ax = axes[1, i] ax2 = axes[2, i] cutoff_freq = int(256 / (pool_size[i] * 2)) tf_cutoff_freq = tf.cast(tf.constant(cutoff_freq), tf.float32) im_pool = test_frequency_dropout(grayscale_images, tf_cutoff_freq)[0] im_pool = np.clip(np.squeeze(im_pool), 0, 1) im_fft, _ = spectral_pool( grayscale_images, filter_size=(1 + 2 * cutoff_freq), return_transformed=True ) ax.imshow(im_pool, cmap='gray') ax2.imshow(get_fft_plot(im_fft[0]), cmap='gray') ax2.set_xlabel(pool_size[i]**2, fontsize=16) if not i: ax.set_ylabel('Spectral Pooling', fontsize=16) ax2.set_ylabel('Fourier Transform', fontsize=16) fig.savefig('../Images/Figure2_Grayscale_Grid_Pooling.png') # - # ### Create Grid RGB: # + fig, axes = plt.subplots(3, 6, figsize=(20, 9), sharex=True, sharey=True) pool_size = [64, 32, 16, 8, 4, 1] for i in range(6): ax = axes[0, i] im_pool = max_pool(image, pool_size=pool_size[i], convert_grayscale=False) im_pool = np.squeeze(im_pool) ax.imshow(im_pool, cmap='gray') if not i: ax.set_ylabel('Max Pooling', fontsize=16) for i in range(6): ax = axes[1, i] ax2 = axes[2, i] cutoff_freq = int(256 / (pool_size[i] * 2)) tf_cutoff_freq = 
tf.cast(tf.constant(cutoff_freq), tf.float32) im_pool = test_frequency_dropout( np.expand_dims(np.moveaxis(image, -1, 0), 0), tf_cutoff_freq )[0] im_pool = np.clip(np.squeeze(im_pool), 0, 1) im_fft, _ = spectral_pool( grayscale_images, filter_size=(1 + 2 * cutoff_freq), return_transformed=True ) ax.imshow(np.moveaxis(im_pool, 0, -1), cmap='gray') ax2.imshow(get_fft_plot(im_fft[0]), cmap='gray') ax2.set_xlabel(pool_size[i]**2, fontsize=16) if not i: ax.set_ylabel('Spectral Pooling', fontsize=16) ax2.set_ylabel('Fourier Transform', fontsize=16) fig.savefig('../Images/Figure2_RGB_Grid_Pooling.png') # -
src/figure2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/oumaima61/my-machine-learning-projects/blob/master/heart_failure_clinical_records.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="IRfPG-NKlXHl" # importing needed packages # + id="L4zHl-BtIIEs" import pandas as pd import numpy as np import matplotlib.pyplot as plt import pylab as pl import scipy.optimize as opt from sklearn import preprocessing from sklearn.model_selection import train_test_split # + [markdown] id="AwLnBCcgldWa" # load a dataset from csv file # + colab={"base_uri": "https://localhost:8080/", "height": 379} id="dZSx1TGeV7fL" outputId="97723f97-df47-4e5a-a222-d3178bcdea5d" df = pd.read_csv("heart_failure_clinical_records_dataset.csv") df.head(10) # + [markdown] id="Z-nt6Ch7nrDO" # # + colab={"base_uri": "https://localhost:8080/"} id="RuVxJ4yrXDNG" outputId="a37df81a-536b-4f22-ed46-fea950c74418" df.dtypes # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="3Z1cHd_6xUaB" outputId="0cb54690-af5d-40f0-db5b-44166ab7607b" ax = df[df['age'] == 2][0:50].plot(kind='scatter', x='sex', y='diabetes', color='Blue'); df[df['age'] == 2][0:50].plot(kind='scatter', x='sex', y='diabetes', color='Yellow', ax=ax); plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="gWeXdk5GXSXQ" outputId="6694b4b2-ae1e-4e4c-d938-f3482c97ef74" features_df = df[['age','anaemia','diabetes','high_blood_pressure']] X = np.asarray (features_df) X[0:5] # + [markdown] id="nRgtWQwqZ-qJ" # # + colab={"base_uri": "https://localhost:8080/"} id="UH-1nVTear_Z" outputId="917b140a-dc28-40ec-b201-db9abe4f128c" df['creatinine_phosphokinase'] = 
df['creatinine_phosphokinase'].astype('int') y = np.asarray(df['creatinine_phosphokinase']) y [0:5] # + [markdown] id="LYQ8XSsZY6A7" # train / test dataset # + colab={"base_uri": "https://localhost:8080/"} id="6W4pN4GuY4uG" outputId="ba52e413-30b5-4563-a197-fcc867759490" X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=6) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) # + [markdown] id="lmRuVCajg7Au" # # + [markdown] id="yqnOlQQkg7Di" # Modeling # + colab={"base_uri": "https://localhost:8080/"} id="GDPYhclvb0i6" outputId="2cd6a57a-c610-4b96-8c02-f687fa5525bd" from sklearn import svm clf = svm.SVC(kernel='rbf') clf.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="AALhJDAYb01G" outputId="72fc627f-84d8-4d8d-b1e9-dfc5492fe524" yhat = clf.predict(X_test) yhat[0:5] # + [markdown] id="fJBb-jxjg_F1" # evaluation # + id="vecBmua5gI3C" from sklearn.metrics import classification_report , confusion_matrix import itertools # + id="VFYLIIFNiOUg" def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # + id="dIb_44DEvqGO" colab={"base_uri": "https://localhost:8080/"} outputId="ed1eb019-722d-4c90-ff51-8dcb87ce889a" from sklearn.metrics import f1_score f1_score(y_test, yhat, average='weighted')
New folder/heart_failure_clinical_records.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import pandas as pd
import random as r
from sklearn.metrics import balanced_accuracy_score
from catboost import CatBoostClassifier
from imblearn.over_sampling import SMOTE

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
preprocessed = pd.read_csv("../PreprocessedDataset.csv")
habitableRows = list(preprocessed.rowid[preprocessed.habitable == True])
# -

def prepareData():
    """Build SMOTE-balanced training and validation sets.

    Half of the habitable planets are hidden in a 200-row validation set
    (padded with randomly chosen non-habitable planets); the rest of the
    table becomes the training set. Both splits are then oversampled with
    SMOTE so the minority (habitable) class is balanced.

    Returns
    -------
    X_sm, y_sm : oversampled training features / labels
    validateX, validateY : oversampled validation features / labels

    NOTE(review): oversampling the *validation* split with SMOTE inflates
    the reported scores with synthetic samples — consider scoring on the
    raw validation rows instead; confirm before changing, since all of the
    experiments below were run with this behavior.
    """
    # Features used for training: everything except the target and the id.
    trainCols = [c for c in preprocessed.columns if c not in ['habitable', 'rowid']]

    validate = []
    Hcopy = habitableRows.copy()
    # numHidden sets the number of habitable planets reserved for validation.
    numHidden = round(len(Hcopy) / 2)

    # Randomly move habitable planet row ids into the validation set.
    print("Hiding ", numHidden, " habitable(", end="")
    for _ in range(numHidden):
        randNum = r.randint(0, len(Hcopy) - 1)
        validate.append(Hcopy[randNum])
        print(Hcopy[randNum], end=",")
        del Hcopy[randNum]
    print("\b )")  # backspace erases the trailing comma

    # Pad the validation set with non-habitable planets until it has 200 rows.
    while len(validate) < 200:
        temp = r.randint(0, len(preprocessed) - 1)
        if temp not in habitableRows and temp not in validate:
            validate.append(temp)

    # Materialise the validation rows; everything else is the training set.
    validate = preprocessed[preprocessed.rowid.isin(validate)]
    trainingSet = preprocessed[~preprocessed.rowid.isin(validate.rowid)]

    # Training features in X and the target (habitable or not) in y.
    X = trainingSet[trainCols]
    y = trainingSet.habitable

    # SMOTE synthesises new minority-class samples to balance the classes.
    # FIX: `ratio=` and `fit_sample` were removed from imbalanced-learn
    # (0.6 / 0.8 respectively); the supported API is `sampling_strategy=`
    # and `fit_resample`.
    smote = SMOTE(sampling_strategy='minority')
    X_sm, y_sm = smote.fit_resample(X, y)
    validateX, validateY = smote.fit_resample(validate[trainCols], validate.habitable)
    return X_sm, y_sm, validateX, validateY

# # Effect of Random Seed on model
# Does changing the random seed during training but keeping the same dataset
# substantially change the outcome for this model?

# +
RandomSeed = 42
Bestscores = [0, 0]
X_sm, y_sm, validateX, validateY = prepareData()

# Hyper-parameter grids. `n_iterations` and `max_features` are kept for
# reference but are not consumed by the CatBoost grid search below.
n_iterations = [10, 100, 500, 1000, 2000]
LR = [0.1, 0.01, 0.001, 0.00001]
n_estimators = [10, 50, 100, 200]
max_features = [1, 0.5, 0.1, 0.01]
depth = [2, 3, 5, 7]
loss_function = ['Logloss', 'CrossEntropy', 'MultiClass', 'MultiClassOneVsAll']

for testNumber in range(5):
    print("*" * 10, end="\n\n")
    print("TEST NUMBER", testNumber + 1, "Random Seed =", RandomSeed)
    Bestscores = [0, 0]
    r.seed(RandomSeed)
    RandomSeed = RandomSeed + 10
    # Exhaustive grid search; the best balanced accuracy wins.
    for n in n_estimators:
        for d in depth:
            for lr in LR:
                for ls in loss_function:
                    model = CatBoostClassifier(iterations=n, learning_rate=lr,
                                               depth=d, loss_function=ls,
                                               logging_level='Silent',
                                               thread_count=-1)
                    model.fit(X_sm, y_sm)
                    y_preds = model.predict(validateX)
                    currScore = balanced_accuracy_score(validateY, y_preds)
                    if currScore > Bestscores[0]:
                        Bestscores[0] = currScore
                        Bestscores[1] = model.get_params()
    print("BEST SCORE:", str(Bestscores[0]) + "\n" + "PARAMS:", Bestscores[1])
# -

# # Effect of changing random seed when preparing data on model

# +
RandomSeed = 42
Bestscores = [0, 0]

n_iterations = [10, 100, 500, 1000, 2000]
LR = [0.1, 0.01, 0.001, 0.00001]
n_estimators = [10, 50, 100, 200]
max_features = [1, 0.5, 0.1, 0.01]
depth = [2, 3, 5, 7]
loss_function = ['Logloss', 'CrossEntropy', 'MultiClass', 'MultiClassOneVsAll']

for testNumber in range(5):
    print("*" * 10, end="\n\n")
    print("TEST NUMBER", testNumber + 1, "Random Seed =", RandomSeed)
    Bestscores = [0, 0]
    r.seed(RandomSeed)
    RandomSeed = RandomSeed + 10
    # Unlike the previous experiment, the data split is re-drawn each test,
    # so the seed affects the train/validation composition as well.
    X_sm, y_sm, validateX, validateY = prepareData()
    for n in n_estimators:
        for d in depth:
            for lr in LR:
                for ls in loss_function:
                    model = CatBoostClassifier(iterations=n, learning_rate=lr,
                                               depth=d, loss_function=ls,
                                               logging_level='Silent',
                                               thread_count=-1)
                    model.fit(X_sm, y_sm)
                    y_preds = model.predict(validateX)
                    currScore = balanced_accuracy_score(validateY, y_preds)
                    if currScore > Bestscores[0]:
                        Bestscores[0] = currScore
                        Bestscores[1] = model.get_params()
    print("BEST SCORE:", str(Bestscores[0]) + "\n" + "PARAMS:", Bestscores[1])
TrainingModels/fittingModels/catboostmodel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="bfefc03e60a3399ee6aaec29ba71ec39bfbe83d3" # # Why this kernel? # Why should you read through this kernel? The goal is to implement the full chain (skipping over EDA) from data access to preparation of submission and to provide functional example of LightGBM advanced usage: # # - the data will be read and **memory footprint will be reduced**; # - **missing data** will be checked; # - _feature engineering is not implemented yet_; # - a baseline model will be trained: # - Gradient boosting model as implemented in **LightGBM** is used; # - **Mean absolute error (MAE) is used as the loss function** in the training (consistently with the final evaluation metric). **FAIR loss** is also tried and was found to lead similar results # - Training is performed with **early stopping based on MAE metric**. # - **Learning rate in the training is reduced (decays) from iteration to iteration** to improve convergence (one starts with high and finishes with low learning rate) # - The training is implemented in a cross validation (CV) loop and **out-of-fold (OOF) predictions are stored** for future use in stacking. # - **Test predictions** are obtained as an **average over predictions from models trained on k-1 fold subsets**. # - Predictions are **clipped to `[0,1]` range** # # See another my kernel showing how to significantly improve the score by using relative ranking of teams within games: # https://www.kaggle.com/mlisovyi/relativerank-of-predictions # + [markdown] _uuid="73fa2d9e1fe335a7bddc673d056b0eb092c92eca" # # Side note: score of 0.0635 can be achieved with only 50k entries from the train set # + _uuid="de54aea187ca94ee3e3a27ebcd020cdffe855cb9" # The number of entries to read in. Use it to have fast turn-around. 
# The values are separate for train and test sets
max_events_trn = None
max_events_tst = None
# Number of CV folds
n_cv = 3

# + _kg_hide-input=true
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# %matplotlib inline

import warnings
warnings.simplefilter(action='ignore', category=Warning)

from sklearn.metrics import mean_squared_error, mean_absolute_error

import os
print(os.listdir("../input"))

# + [markdown]
# Define a function to reduce memory footprint

# + _kg_hide-input=true
def reduce_mem_usage(df):
    """Downcast each numeric column of `df` to the smallest dtype that holds it.

    Integer columns are narrowed to int8/int16/int32/int64 and float columns
    to float16/float32/float64 based on their min/max; object columns are
    converted to `category`. Datetime columns are left untouched.

    NOTE(review): the float16 step can silently lose precision for values
    that need more than ~3 significant decimal digits — confirm that is
    acceptable for the features fed to the model.

    Parameters
    ----------
    df : pd.DataFrame
        Modified in place and also returned.

    Returns
    -------
    pd.DataFrame
        The same frame with downcast dtypes.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))

    for col in df.columns:
        col_type = df[col].dtype

        if col_type != object and col_type.name != 'category' and 'datetime' not in col_type.name:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # FIX: use inclusive bounds — a column whose extremum equals
                # the dtype limit (e.g. max == 127) is representable in that
                # dtype, but the original strict comparisons skipped it.
                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min >= np.iinfo(np.int64).min and c_max <= np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min >= np.finfo(np.float16).min and c_max <= np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        elif 'datetime' not in col_type.name:
            df[col] = df[col].astype('category')

    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))

    return df

# + [markdown]
# Read in the data

# +
df_trn = pd.read_csv('../input/train.csv', nrows=max_events_trn)
df_trn = reduce_mem_usage(df_trn)
df_tst = pd.read_csv('../input/test.csv', nrows=max_events_tst)
df_tst = reduce_mem_usage(df_tst)

# + [markdown]
# ## How do the data look like?

# +
df_trn.head()

# +
df_trn.info(memory_usage='deep', verbose=False)

# +
df_tst.info(memory_usage='deep', verbose=False)

# + [markdown]
# - The training dataset has 4.3M entries, which is not small and allows for advanced models like GBM and NN to dominate.
# - The test dataset is only 1.9M entries
# - There are 25 features (+ the target in the train dataset)

# + [markdown]
# ## Are there missing data?

# +
df_trn.isnull().sum().sum()

# + [markdown]
# Good news: **There are no entries with `np.nan`**, so at the first glance we do not need to do anything fancy about missing data.
#
# There might be some default values pre-filled into missing entries - this would have to be discovered.

# + [markdown]
# # Feature engineering to come here...
# [tba]

# + [markdown]
# # Prepare the data

# +
y = df_trn['winPlacePerc']
df_trn.drop('winPlacePerc', axis=1, inplace=True)

# + [markdown]
# We will **NOT** use `Id, groupId, matchId`. The first one is a unique
# identifier and can be useful only in the case of data leakage. The other two
# would be useful in feature engineering with grouped stats per match and per team.

# +
# we will NOT use
features_not2use = ['Id', 'groupId', 'matchId']

# +
for df in [df_trn, df_tst]:
    df.drop(features_not2use, axis=1, inplace=True)

# + [markdown]
# # Train and evaluate a model
# Start by defining handy helper functions...

# + _kg_hide-input=true
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.base import clone, ClassifierMixin, RegressorMixin
import lightgbm as lgb


def learning_rate_decay_power(current_iter):
    '''Learning-rate decay schedule for LGBM: 0.10 * 0.996**iter, floored at 5e-2.'''
    base_learning_rate = 0.10
    min_lr = 5e-2
    lr = base_learning_rate * np.power(.996, current_iter)
    return lr if lr > min_lr else min_lr


def train_single_model(clf_, X_, y_, random_state_=314, opt_parameters_={}, fit_params_={}):
    '''Clone `clf_`, apply `opt_parameters_` and the random state, and fit on (X_, y_).'''
    c = clone(clf_)
    c.set_params(**opt_parameters_)
    c.set_params(random_state=random_state_)
    return c.fit(X_, y_, **fit_params_)


def train_model_in_CV(model, X, y, metric, metric_args={},
                      model_name='xmodel', seed=31416, n=5,
                      opt_parameters_={}, fit_params_={},
                      verbose=True):
    '''Train `model` in an n-fold CV loop with early stopping.

    Parameters
    ----------
    model : sklearn-compatible estimator to be cloned per fold
    X, y : training features / target
    metric : callable(y_true, y_pred, **metric_args) used for scoring
    metric_args : optional kwargs for `metric`; special keys: 'sample_weight'
        (a mapping applied via `y.map`) and 'sqrt' (take sqrt of all scores)
    model_name, seed, n : fold naming, KFold seed, number of folds
    opt_parameters_, fit_params_ : forwarded to `train_single_model` / `fit`

    Returns
    -------
    clfs : list of (name, fitted_model) tuples, one per fold
    perf_eval : dict with per-fold scores, their mean/std and the OOF score
    y_oof : out-of-fold predictions for the full training sample
    '''
    # Performance bookkeeping.
    perf_eval = {'score_i_oof': 0,
                 'score_i_ave': 0,
                 'score_i_std': 0,
                 'score_i': []}

    if 'sample_weight' in metric_args:
        sample_weight = metric_args['sample_weight']

    doSqrt = False
    if 'sqrt' in metric_args:
        doSqrt = True
        del metric_args['sqrt']

    cv = KFold(n, shuffle=True, random_state=seed)  # Stratified
    # The out-of-fold (oof) prediction for the k-1 sample in the outer CV loop.
    y_oof = pd.Series(np.zeros(shape=(X.shape[0],)), index=X.index)
    scores = []
    clfs = []  # the list of classifiers for a possible voting ensemble

    for n_fold, (trn_idx, val_idx) in enumerate(cv.split(X, (y != 0).astype(np.int8))):
        X_trn, y_trn = X.iloc[trn_idx], y.iloc[trn_idx]
        X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]

        if fit_params_:
            # Use the fold hold-out for early stopping.
            fit_params_["eval_set"] = [(X_trn, y_trn), (X_val, y_val)]
            fit_params_['verbose'] = verbose

        clf = train_single_model(model, X_trn, y_trn, 314 + n_fold,
                                 opt_parameters_, fit_params_)
        clfs.append(('{}{}'.format(model_name, n_fold), clf))

        # Evaluate performance on the hold-out fold.
        if isinstance(clf, RegressorMixin):
            y_oof.iloc[val_idx] = clf.predict(X_val)
        elif isinstance(clf, ClassifierMixin):
            y_oof.iloc[val_idx] = clf.predict_proba(X_val)[:, 1]
        else:
            raise TypeError('Provided model does not inherit neither from a regressor nor from classifier')

        if 'sample_weight' in metric_args:
            metric_args['sample_weight'] = y_val.map(sample_weight)
        scores.append(metric(y_val, y_oof.iloc[val_idx], **metric_args))

        # cleanup
        del X_trn, y_trn, X_val, y_val

    # Store performance info for this CV.
    if 'sample_weight' in metric_args:
        metric_args['sample_weight'] = y_oof.map(sample_weight)
    perf_eval['score_i_oof'] = metric(y, y_oof, **metric_args)
    perf_eval['score_i'] = scores

    if doSqrt:
        for k in perf_eval.keys():
            if 'score' in k:
                perf_eval[k] = np.sqrt(perf_eval[k])
        scores = np.sqrt(scores)

    perf_eval['score_i_ave'] = np.mean(scores)
    perf_eval['score_i_std'] = np.std(scores)

    return clfs, perf_eval, y_oof


def print_perf_clf(name, perf_eval):
    '''Pretty-print the performance summary produced by train_model_in_CV.'''
    print('Performance of the model:')
    print('Mean(Val) score inner {} Classifier: {:.4f}+-{:.4f}'.format(name,
                                                                       perf_eval['score_i_ave'],
                                                                       perf_eval['score_i_std']))
    print('Min/max scores on folds: {:.4f} / {:.4f}'.format(np.min(perf_eval['score_i']),
                                                            np.max(perf_eval['score_i'])))
    print('OOF score inner {} Classifier: {:.4f}'.format(name, perf_eval['score_i_oof']))
    print('Scores in individual folds: {}'.format(perf_eval['score_i']))

# + [markdown]
# Now let's define the parameter and model in a scalable fashion (we can add
# later on further models to the list and it will work out-of-the-box).
#
# The format is a dictionary with keys that are user model names and items
# being an array (or tuple) of:
#
# - model to be fitted;
# - additional model parameters to be set;
# - model fit parameters (they are passed to `model.fit()` call);
# - target variable.

# + _kg_hide-input=true
mdl_inputs = {
    # MAE-loss baseline. (Variants with num_leaves in {45, 60, 90} and a
    # FAIR-loss version were also tried and gave similar results; add
    # entries here to explore further configurations.)
    'lgbm1_reg': (lgb.LGBMRegressor(max_depth=-1, min_child_samples=400,
                                    random_state=314, silent=True, metric='None',
                                    n_jobs=4, n_estimators=5000, learning_rate=0.05),
                  {'objective': 'mae', 'colsample_bytree': 0.75, 'min_child_weight': 10.0,
                   'num_leaves': 30, 'reg_alpha': 1, 'subsample': 0.75},
                  {"early_stopping_rounds": 100,
                   "eval_metric": 'mae',
                   'eval_names': ['train', 'early_stop'],
                   'verbose': False,
                   'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_decay_power)],
                   'categorical_feature': 'auto'},
                  y
                  ),
}

# + [markdown]
# Do the actual model training

# +
# %%time
mdls = {}
results = {}
y_oofs = {}
for name, (mdl, mdl_pars, fit_pars, y_) in mdl_inputs.items():
    print('--------------- {} -----------'.format(name))
    mdl_, perf_eval_, y_oof_ = train_model_in_CV(mdl, df_trn, y_, mean_absolute_error,
                                                 metric_args={},
                                                 model_name=name,
                                                 opt_parameters_=mdl_pars,
                                                 fit_params_=fit_pars,
                                                 n=n_cv,
                                                 verbose=False)
    results[name] = perf_eval_
    mdls[name] = mdl_
    y_oofs[name] = y_oof_
    print_perf_clf(name, perf_eval_)

# + [markdown]
# Let's plot how predictions look like

# +
k = list(y_oofs.keys())[0]
# FIX: pass the plot kind by keyword — modern pandas does not accept it as
# the first positional argument of Series.plot.
_ = y_oofs[k].plot(kind='hist', bins=100, figsize=(15, 6))
plt.xlabel('Predicted winPlacePerc OOF')

# + [markdown]
# Note, that predictions are spilled outside of the `[0,1]` range, which is not
# meaningful for percentage value. **We will clip test predictions to be within
# the meaningful range.** This will improve the score slightly

# + [markdown]
# ## Visualise importance of features

# + _kg_hide-input=true
import matplotlib.pyplot as plt
import seaborn as sns


def display_importances(feature_importance_df_, n_feat=30, silent=False, dump_strs=[],
                        fout_name=None, title='Features (avg over folds)'):
    '''Plot the most important features from a tree-based model.

    Parameters
    ----------
    feature_importance_df_ : pd.DataFrame
        Must contain columns `'feature'` and `'importance'`. Rows are grouped
        by `'feature'` and the mean `'importance'` is taken, which averages
        importances over CV folds when each feature appears once per fold.
    n_feat : int [default: 30]
        The maximum number of top features to plot.
    silent : bool [default: False]
        If False, dump additional information: the mean importances for
        features matching `dump_strs` and the features with zero (<1e-3)
        importance.
    dump_strs : list of str [default: []]
        Features containing any of these strings are printed to the screen.
    fout_name : str or None [default: None]
        File name to save the figure to. If `None`, no file is created
        (to be used in notebooks).
    '''
    # Top-n features by mean importance over folds.
    cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(
        by="importance", ascending=False)[:n_feat].index
    mean_imp = feature_importance_df_[["feature", "importance"]].groupby("feature").mean()
    df_2_neglect = mean_imp[mean_imp['importance'] < 1e-3]
    if not silent:
        print('The list of features with 0 importance: ')
        print(df_2_neglect.index.values.tolist())
        pd.set_option('display.max_rows', 500)
        pd.set_option('display.max_columns', 500)
        for feat_prefix in dump_strs:
            feat_names = [x for x in mean_imp.index if feat_prefix in x]
            print(mean_imp.loc[feat_names].sort_values(by='importance', ascending=False))
    del mean_imp, df_2_neglect

    best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature",
                data=best_features.sort_values(by="importance", ascending=False))
    plt.title(title)
    plt.tight_layout()
    if fout_name is not None:
        plt.savefig(fout_name)

# +
display_importances(pd.DataFrame({'feature': df_trn.columns,
                                  'importance': mdls['lgbm1_reg'][0][1].booster_.feature_importance('gain')}),
                    n_feat=20,
                    title='GAIN feature importance',
                    fout_name='feature_importance_gain.png'
                    )

# + [markdown]
# ## Prepare submission

# +
# %%time
# Average the clipped per-fold predictions for each model.
y_subs = {}
for c in mdl_inputs:
    mdls_ = mdls[c]
    y_sub = np.zeros(df_tst.shape[0])
    for mdl_ in mdls_:
        y_sub += np.clip(mdl_[1].predict(df_tst), 0, 1)
    y_sub /= n_cv
    y_subs[c] = y_sub

# +
df_sub = pd.read_csv('../input/sample_submission.csv', nrows=max_events_tst)

# +
for c in mdl_inputs:
    df_sub['winPlacePerc'] = y_subs[c]
    df_sub.to_csv('sub_{}.csv'.format(c), index=False)
    oof = pd.DataFrame(y_oofs[c].values)
    oof.columns = ['winPlacePerc']
    oof.clip(0, 1, inplace=True)
    oof.to_csv('oof_{}.csv'.format(c), index=False)

# +
# !ls

# +
PUBG/pubg-survivor-kit.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // # Exam Fri 02, Jul 2021 // // **Alta Formazione ITT Marconi, Rovereto- UF06 Java** // // <!-- // ## [Download exercises](../../../_static/generated/jupman-2021-07-02-exam.zip) // --> // ## 1. contest // // ✪ An `ArrayList` of `partecipants` has won a contest, and now we want to show their position on a billboard. Write some code which MODIFIES the list by writing the participant number to the side of the name. // // + static void contest(ArrayList<String> participants){ // write here } ArrayList<String> participants = new ArrayList(); participants.add("Marta"); participants.add("Peppo"); participants.add("Elisa"); participants.add("Gioele"); participants.add("Rosa"); contest(participants); System.out.println(participants); // - // ## 2. manymany // // ✪✪ Given a string `word` and a string `repetitions` containing only digits, write a method which RETURN a string containing all the characters of `word` repeated by the number of times reported in the corresponding digit of `repetitions`. // + static String manymany(String word, String repetitions){ // write here } System.out.println(manymany("rospo", "14323")); // "roooosssppooo" System.out.println(manymany("artificio", "144232312")); // "arrrrttttiifffiicccioo") // - // ## 3. Store // // A company produces lamps and has a main store with several franchises on the territory, which may apply different prices. In particular, we focus on the main store and the Trento store. // // - Each store has a certain amount of lamps in stock. // - The price of each lamp is 10€ // - Main store applies no discount // - Trento store applies a 20% discount on orders of at least 500 lamps. If less than 500 lamps are ordered, no discount is applied. 
// // Write a couple of classes with these two methods: // // 1. `double price(int quantity)`: given a quantity of lamps, return the price they would cost. If the quantity exceeds stock, return -1. // - **NOTE 1**: this method **does not** change the stock. // - **NOTE 2**: I didn't put examples about -1 case. // 2. `void sell(int quantity)` : given a quantity of lamps, sell them and change the stock accordingly. If the quantity exceeds stock, we assume all available stock is sold. // // **Try avoiding repeated code by using proper inheritance.** // + // write classes here // TEST System.out.println("*** mainStore:"); Store mainStore = new Store(5000); System.out.println(" price(400): " + mainStore.price(400) + " €"); System.out.println(" price(1000): " + mainStore.price(1000) + " €"); System.out.println(); System.out.println(" sell(400)"); mainStore.sell(400); System.out.println(" sell(1000)"); mainStore.sell(1000); System.out.println(" sell(8000)"); mainStore.sell(8000); System.out.println(); System.out.println("*** trentoStore:"); TrentoStore trentoStore = new TrentoStore(1000); System.out.println(" price(400): " + trentoStore.price(400) + " €"); System.out.println(" price(1000): " + trentoStore.price(1000) + " €"); // discounted System.out.println(); System.out.println(" sell(400)"); trentoStore.sell(400); System.out.println(" sell(1000)"); trentoStore.sell(1000); // - // ## 4. 
isMatinc // // Write a method `isMatinc` which given an integer matrix RETURN `true` if all rows are strictly increasing from left to right, otherwise return `false` // + public static boolean isMatinc(int[][] matrice){ // write here } // TEST int[][] mat1 = {{1,4,6,7,9}, {0,1,2,4,8}, {2,6,8,9,10}, {0,1,2,4,8}, {0,1,5,7,9}, {1,2,3,8,9}}; System.out.println(isMatinc(mat1)); // true int[][] mat2 = {{0,1,3,4}, {0,1,3,4}, {0,1,2,4}, {1,4,6,7}, {1,4,6,7}, {2,6,8,9}, {0,1,5,7}, {1,2,3,8}}; System.out.println(isMatinc(mat2)); // true int[][] mat3 = {{1,4,5,7,9}, {0,1,2,4,8}, {2,6,3,9,10}, {1,5,3,8,7}}; System.out.println(isMatinc(mat3)); // false int[][] mat4 = {{1,4,7,7,9}, {0,1,2,4,8}, {3,2,6,8,9}}; System.out.println(isMatinc(mat4)); // false
exams/2021-07-02/solutions/exam-2021-07-02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The goal of this notebook is to demonstrate how to obtain a superpixel
# segmentation of a raster and then store these as a vector format for
# visualization and later analyses.
#
# A nice example of superpixel segmentation using the module we are using can
# be found [here](https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_segmentations.html).

import rasterio
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import felzenszwalb
from pathlib import Path
from skimage.color import label2rgb

import sys
sys.path.append('../../rscube')
from rio_tools import polygonize_array_to_shapefile

# # Data Paths

data_path_name = 'data/tif_datacube/'
data_path = Path(data_path_name)
data_path.exists()

output_path_name = 'out'
out_path = Path(output_path_name)
out_path.mkdir(exist_ok=True, parents=True)

tif_paths = sorted(list(data_path.glob('*.tif')))
tif_paths

# # Visualizing the Landsat raster
#
# We will use the landsat raster for visualization.

with rasterio.open(tif_paths[2]) as ds:
    landsat_2015_mosaic_gdal = ds.read()
    # Move the band axis to the back: gdal reads (band, row, col) while
    # image routines expect (row, col, band).
    landsat_2015_mosaic = landsat_2015_mosaic_gdal.transpose([1, 2, 0])
    profile = ds.profile

plt.imshow(landsat_2015_mosaic[..., :3])

# So you may ask what the `transpose` was for. Well, gdal and numpy require
# arrays of slightly different format. The former reads arrays as Band
# Interleaved by Pixel (BIP) and the latter, Band Sequential (BSQ)
# (see [here](http://idlcoyote.com/ip_tips/where3.html)). The landsat has 4
# bands (or channels). Rasterio (a gdal wrapper) reads the array with the
# channels at the front, but numpy requires these channels to be at the back.

landsat_2015_mosaic_gdal.shape, landsat_2015_mosaic.shape

# The superpixel segmentation takes several parameters that can be read about
# in the [docs](https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.felzenszwalb).
# To summarize, the `min_size` specifies the minimum size of all segments in
# the image, `scale` controls the approximate irregularity of the segments
# (precisely, a larger `scale` parameter determines if smaller sized segments
# are merged more easily during the formation of the final segmentation), and
# `sigma` is set to 0 so there is no smoothing done before segmentation.

# FIX: `multichannel=True` was deprecated in scikit-image 0.19 and removed in
# 1.0; `channel_axis=-1` is the supported spelling (requires skimage >= 0.19).
superpixel_labels = felzenszwalb(landsat_2015_mosaic,
                                 min_size=20,
                                 scale=.5,
                                 sigma=0,
                                 # The landsat mosaic has 4 channels (last axis)
                                 channel_axis=-1,
                                 )

# We can visualize the segments assigning random colors to each label.

superpixel_labels_rgb = label2rgb(superpixel_labels)
plt.imshow(superpixel_labels_rgb)

# Now, we save the labels as polygons with each segment corresponding to a geometry.

polygonize_array_to_shapefile(superpixel_labels, profile, out_path/'superpixels')
notebooks/supervised_classification/2_Superpixel_Generation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Real-Time Stream Viewer (HTTP)
# The following function responds to HTTP requests with the list of the last 10
# processed records (from the enriched stocks stream) in reverse order (newest
# on top): it reads records from the stream, takes the recent 10, and returns
# them as a table. The function uses the nuclio context to store the last
# results and stream pointers for max efficiency.<br>
#
# The code is automatically converted into a nuclio (serverless) function and
# responds to HTTP requests<br>
#
# the example demonstrates the use of `%nuclio` magic commands to specify
# environment variables, package dependencies,<br>configurations, and to deploy
# functions automatically onto a cluster.
#
#
# ## Initialize nuclio emulation, environment variables and configuration
# use `# nuclio: ignore` for sections that don't need to be copied to the function

# nuclio: ignore
# if the nuclio-jupyter package is not installed run !pip install nuclio-jupyter
import nuclio

# ## Nuclio function implementation
# this function can run in Jupyter or in nuclio (real-time serverless)

# +
import v3io.dataplane
import json
import os


def init_context(context):
    """Attach the V3IO client, stream location and cached state to the context.

    Called once per worker by nuclio; `handler` then reuses the cached
    attributes on every request.
    """
    access_key = os.getenv('V3IO_ACCESS_KEY', None)
    setattr(context, 'container', os.getenv('V3IO_CONTAINER', 'users'))
    setattr(context, 'stream_path',
            os.getenv('STOCKS_STREAM', os.getenv('V3IO_USERNAME') + '/stocks/stocks_stream'))
    v3io_client = v3io.dataplane.Client(endpoint=os.getenv('V3IO_API', None),
                                        access_key=access_key)
    setattr(context, 'data', [])
    setattr(context, 'v3io_client', v3io_client)
    # FIX: os.getenv returns a *string* when the variable is set in the
    # environment; the limit is used both for slicing (`data[-limit:]`) and
    # for the records request, so it must be coerced to int.
    setattr(context, 'limit', int(os.getenv('LIMIT', '10')))


def handler(context, event):
    """Return the most recent stream records as a Grafana-style table.

    Seeks shard 0 to the earliest available record, reads up to
    `context.limit` records, appends them to the cached list and keeps only
    the last `context.limit` entries.
    """
    resp = context.v3io_client.seek_shard(container=context.container,
                                          path=f'{context.stream_path}/0',
                                          seek_type='EARLIEST')
    setattr(context, 'next_location', resp.output.location)

    resp = context.v3io_client.get_records(container=context.container,
                                           path=f'{context.stream_path}/0',
                                           location=context.next_location,
                                           limit=context.limit)
    context.logger.info('location: %s', context.next_location)

    for rec in resp.output.records:
        rec_data = rec.data.decode('utf-8')
        rec_json = json.loads(rec_data)
        context.data.append({'Time': rec_json['time'],
                             'Symbol': rec_json['symbol'],
                             'Sentiment': rec_json['sentiment'],
                             'Link': rec_json['link'],
                             'Content': rec_json['content']})

    # Keep only the most recent records.
    context.data = context.data[-context.limit:]

    columns = [{'text': key, 'type': 'object'}
               for key in ['Time', 'Symbol', 'Sentiment', 'Link', 'Content']]
    data = [list(item.values()) for item in context.data]
    response = [{'columns': columns,
                 'rows': data,
                 'type': 'table'}]
    return response

# +
# nuclio: end-code
# -

# ## Function invocation
# the following section simulates nuclio function invocation and will emit the function results

# create a test event and invoke the function locally
init_context(context)
event = nuclio.Event(body='')
resp = handler(context, event)

# ## Deploy a function onto a cluster
# the `%nuclio deploy` command deploy functions into a cluster, make sure the notebook is saved prior to running it !<br>check the help (`%nuclio help deploy`) for more information

# +
import mlrun
import os

# Export the bare function
fn = mlrun.code_to_function('stream-viewer', handler='handler', kind="nuclio",
                            image='mlrun/mlrun:0.6.5')

# Set parameters for current deployment
fn.set_envs({'V3IO_CONTAINER': 'users',
             'STOCKS_STREAM': os.getenv('V3IO_USERNAME') + '/stocks/stocks_stream'})
fn.spec.max_replicas = 2

fn.apply(mlrun.platforms.v3io_cred())
fn.apply(mlrun.mount_v3io())
fn.spec.build.commands = ['pip install v3io']
fn.export('03-stream-viewer.yaml')
# -

addr = fn.deploy(project="stocks-" + os.getenv('V3IO_USERNAME'))

# !curl {addr}
stock-analysis/code/04-stream-viewer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## CE9010: Introduction to Data Analysis
# ## Semester 2 2018/19
# ## <NAME>
# <hr>
#
# ## Tutorial 3: Linear supervised regression

# ## Objectives
# ### $\bullet$ Code a linear regression model
# ### $\bullet$ Implement gradient descent
# ### $\bullet$ Explore results
# <hr>

# +
# Import libraries

# math library
import numpy as np

# visualization library
# %matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png2x', 'pdf')
import matplotlib.pyplot as plt

# machine learning library
from sklearn.linear_model import LinearRegression

# 3d visualization
from mpl_toolkits.mplot3d import axes3d

# computational time
import time
# -

# ## 1. Load dataset
# <hr>
# The data feature, $x$, is unique and represents the population size of various cities. <br>
# The data label/target, $y$, to predict is the profit.
#
# What is the number $n$ of training data?<br>
# Hint: You may use numpy function `shape`.

# +
# import data with numpy
data = np.loadtxt('data/profit_population.txt', delimiter=',')

# number of training data: one row per training example
n = data.shape[0]
print('Number of training data=', n)

# print
print(data[:10, :])
print(data.shape)
print(data.dtype)
# -

# ## 2. Explore the dataset distribution
# <hr>
#
# Plot the training data points.<br>
# Hint: You may use matplotlib function `scatter(x,y)`.

# +
x_train = data[:, 0]
y_train = data[:, 1]

plt.figure(1)
plt.scatter(x_train, y_train)
plt.title('Training data')
plt.xlabel('Population size (x 10$)')
plt.ylabel('Profit (x 10k$)')
plt.show()
# -

# ## 3. Define the linear prediction function
# <hr>
# $$
# f_w(x) = w_0 + w_1 x
# $$
#
# There are 2 possible implementations:
#
# 1. Unvectorized implementation (with *for* loops):
# $$
# f_w(x_i) = w_0 + w_1 x_i
# $$
# <br>
#
# 1. Vectorized implementation:
# $$
# f_w(x) = X w
# $$
# with
# <br>
# $$
# X =
# \left[
# \begin{array}{cc}
# 1 & x_1 \\
# 1 & x_2 \\
# \vdots \\
# 1 & x_n
# \end{array}
# \right]
# \quad
# \textrm{ and }
# \quad
# w =
# \left[
# \begin{array}{c}
# w_0 \\
# w_1
# \end{array}
# \right]
# \quad
# \Rightarrow
# \quad
# f_w(x) = X w =
# \left[
# \begin{array}{c}
# w_0 + w_1 x_1 \\
# w_0 + w_1 x_2 \\
# \vdots \\
# w_0 + w_1 x_n
# \end{array}
# \right]
# $$
#
# Implement the vectorized version of the linear predictive function. <br>
#
# Check your code correctness: The first 5 values of $f_w(x)$ are
# [-8.35,-7.53,-11.72,-9.60, -8.00] with $w_0=0.2, w_1=-1.4$. <br>
#
# Hint: Respect the sizes of $X$ and $w$ when carrying out linear algebra
# multiplications. You may use numpy function `dot` for matrix multiplication.

# +
# construct data matrix
X = np.ones([n, 2])
X[:, 1] = x_train
print(X.shape)
print(X[:5, :])

# parameters vector
w = np.array([0.2, -1.4])[:, None]  # [:,None] adds a singleton dimension
print(w.shape)
#print(w)


# predictive function definition
def f_pred(X, w):
    # Vectorized prediction f_w(x) = X w; X is (n,2), w is (2,1),
    # so the result is an (n,1) column of predictions.
    f = X.dot(w)
    return f


# Test predictive function
y_pred = f_pred(X, w)
print(y_pred[:5])
# -

# ## 4. Define the linear regression loss
# <hr>
# $$
# L(w)=\frac{1}{n} \sum_{i=1}^n \ \Big( f_w(x_i) - y_i \Big)^2
# $$
#
# There are again 2 possible implementations:
# 1. Unvectorized implementation.
#
# 1. Vectorized implementation:
# $$
# L(w)=\frac{1}{n} (Xw-y)^T(Xw-y)
# $$
# with
# <br>
# $$
# Xw=
# \left[
# \begin{array}{c}
# w_0 + w_1 x_1 \\
# w_0 + w_1 x_2 \\
# \vdots \\
# w_0 + w_1 x_n
# \end{array}
# \right]
# \quad
# \textrm{ and }
# \quad
# y =
# \left[
# \begin{array}{c}
# y_1 \\
# y_2 \\
# \vdots \\
# y_n
# \end{array}
# \right]
# $$
#
# Implement the vectorized version of the linear regression loss function. <br>
#
# Check your code correctness: The loss values is $399.75$ for $w_0=0.2, w_1=-1.4$. <br>
#
# Hint: Respect the sizes of $X$, $w$ and $y$ when carrying out linear algebra
# multiplications. You may use numpy function transpose `.T`.

# +
# loss function definition
def loss_mse(y_pred, y):
    # Mean squared error L(w) = (1/n) * sum((f_w(x_i) - y_i)^2),
    # i.e. (1/n)(Xw-y)^T(Xw-y) collapsed to a scalar.
    n = len(y)
    loss = np.sum((y_pred - y) ** 2) / n
    return loss


# Test loss function
y = y_train[:, None]  # label
#print(y.shape)
y_pred = f_pred(X, w)  # prediction
loss = loss_mse(y_pred, y)
print(loss)
# -

# ## 5. Define the gradient of the linear regression loss
# <hr>
#
# $\bullet$ Unvectorized implementation:
# \begin{eqnarray}
# \frac{\partial}{\partial w_0} L(w)&=&\frac{2}{n} \sum_{i=1}^n \ ( w_0+w_1x_i - y_i )\\
# \frac{\partial}{\partial w_1} L(w)&=&\frac{2}{n} \sum_{i=1}^n \ ( w_0+w_1x_i - y_i )x_i
# \end{eqnarray}
#
# $\bullet$ Vectorized implementation: Given the loss
# $$
# L(w)=\frac{1}{n} (Xw-y)^T(Xw-y)
# $$
# The gradient is given by
# $$
# \frac{\partial}{\partial w} L(w) = \frac{2}{n} X^T(Xw-y)
# $$
#
# Implement the vectorized version of the gradient of the linear regression
# loss function. <br>
#
# Check your code correctness: The gradient value is [-34.12,-355.32] for
# $w_0=0.2, w_1=-1.4$. <br>
#
# Hint: Respect the sizes of $X$, $w$ and $y$ when carrying out linear algebra
# multiplications.

# +
# gradient function definition
def grad_loss(y_pred, y, X):
    # Vectorized gradient dL/dw = (2/n) X^T (Xw - y); result shape (2,1).
    n = len(y)
    grad = (2 / n) * X.T.dot(y_pred - y)
    return grad


# Test grad function
y_pred = f_pred(X, w)
grad = grad_loss(y_pred, y, X)
print(grad)
# -

# ## 6. Implement the gradient descent algorithm
# <hr>
#
# $\bullet$ Unvectorized implementation:
# \begin{eqnarray}
# w_0^{k+1} &= w_0^{k}& - \tau \frac{2}{n} \sum_{i=1}^n \ ( w_0^{k}+w_1^{k}x_i - y_i ) \\
# w_1^{k+1} &= w_1^{k}& - \tau \frac{2}{n} \sum_{i=1}^n \ ( w_0^{k}+w_1^{k}x_i - y_i ) x_i
# \end{eqnarray}
#
# $\bullet$ Vectorized implementation:
# $$
# w^{k+1} = w^{k} - \tau \frac{2}{n} X^T(Xw^{k}-y)
# $$
#
# **6.1** Implement the vectorized version of the gradient descent function.
<br> # Check your code correctness: The $w^{k}$ value after $20$ iterations is [0.116,0.789] for initial values $w_0^{k=0}=0.2, w_1^{k=0}=-1.4$ and the loss $L$ is 11.90.<br> # # # **6.2** Plot the loss values $L(w^k)$ w.r.t. iteration $k$ the number of iterations.<br> # # Hint: You may use a table to store the values of $L(w^k)$ at each iteration. # # + # gradient descent function definition def grad_desc(X, y , w_init=np.array([0,0])[:,None] ,tau=0.01, max_iter=500): L_iters = np.zeros([max_iter]) # record the loss values w_iters = np.zeros([max_iter,2]) # record the loss values w = w_init # initialization for i in range(max_iter): # loop over the iterations y_pred = f_pred(X,w) # linear predicition function #YOUR CODE HERE grad_f = grad_loss(y_pred,y,X) # gradient of the loss #YOUR CODE HERE w = w - tau* grad_f # update rule of gradient descent #YOUR CODE HERE L_iters[i] = loss_mse(y_pred,y) # save the current loss value w_iters[i,:] = w[0],w[1] # save the current w value return w, L_iters, w_iters # run gradient descent algorithm start = time.time() w_init = np.array([0.2,-1.4])[:,None] tau = 0.01 max_iter = 20 w, L_iters, w_iters = grad_desc(X,y,w_init,tau,max_iter) print('Time=',time.time() - start) print(L_iters[-1]) print(w) # plot plt.figure(2) plt.plot(np.array(range(max_iter)), L_iters) plt.xlabel('Iterations') plt.ylabel('Loss') plt.show() # - # ## 7. Plot the linear prediction function # <hr> # $$ # f_w(x) = w_0 + w_1 x # $$ # # Hint: You may use numpy function `linspace`. # # + # linear regression model x_pred = #YOUR CODE HERE y_pred = #YOUR CODE HERE # plot plt.figure(3) plt.scatter(x_train, y_train, s=30, c='r', marker='x', linewidths=1) plt.plot(x_pred, y_pred,label='gradient descent optimization'.format(i=1)) plt.legend(loc='best') plt.title('Training data') plt.xlabel('Population size (x 10k)') plt.ylabel('Profit $(x 10k)') plt.show() # - # ## 8. 
Comparison with Scikit-learn linear regression algorithm # <hr> # **8.1** What is the loss value of the Scikit-learn solution? <br> # Compare with the loss value given by gradient descent?<br> # What do we need to do to get a better loss (and solution) with gradient descent? # + # run linear regression with scikit-learn start = time.time() lin_reg_sklearn = LinearRegression() lin_reg_sklearn.fit( ) # learn the model parameters #YOUR CODE HERE print('Time=',time.time() - start) # compute loss value w_sklearn = np.zeros([2,1]) w_sklearn[0,0] = lin_reg_sklearn.intercept_ w_sklearn[1,0] = lin_reg_sklearn.coef_ print(w_sklearn) loss_sklearn = loss_mse(f_pred(X,w_sklearn),y_train[:,None]) print('loss sklearn=',loss_sklearn) print('loss gradient descent=',L_iters[-1]) # plot y_pred_sklearn = w_sklearn[0] + w_sklearn[1]* x_pred plt.figure(3) plt.scatter(x_train, y_train, s=30, c='r', marker='x', linewidths=1) plt.plot(x_pred, y_pred,label='gradient descent optimization'.format(i=1)) plt.plot(x_pred, y_pred_sklearn,label='Scikit-learn optimization'.format(i=2)) plt.legend(loc='best') plt.title('Training data') plt.xlabel('Population size (x 10k)') plt.ylabel('Profit $(x 10k)') plt.show() # - # **8.2** What do we need to do to get a better loss (and solution) with gradient descent?<br> # We need more gradient descent iterations.<br> # Run gradient descent with 1,000 iterations. 
# Check the loss value.<br>

# +
# run gradient descent algorithm for 1,000 iterations (same start as before)
w_init = np.array([0.2,-1.4])[:,None]
tau = 0.01
max_iter = 1000
w, L_iters, w_iters = grad_desc(X,y,w_init,tau,max_iter)
print(L_iters[-1])
print(w)

# plot the fitted line against the training scatter and the sklearn solution
y_pred = w[0] + w[1]* x_pred
plt.figure(4)
plt.scatter(x_train, y_train, s=30, c='r', marker='x', linewidths=1)
plt.plot(x_pred, y_pred,label='gradient descent optimization'.format(i=1))
plt.plot(x_pred, y_pred_sklearn,label='Scikit-learn optimization'.format(i=2))
plt.legend(loc='best')
plt.title('Training data')
plt.xlabel('Population size (x 10k)')
plt.ylabel('Profit $(x 10k)')
plt.show()
# -

# ## 9. Predict profit for a city with population of 45,000?
# <hr>

# Predict profit for a city with population of 45000
print('Profit would be', ) #YOUR CODE HERE

# ## 10. [Bonus] Plot the loss surface, the contours of the loss and the gradient descent steps
# <hr>
#
# Hint: Use function *plot_gradient_descent(X,y,w_init,tau,max_iter)*.
#

# plot gradient descent
def plot_gradient_descent(X,y,w_init,tau,max_iter):
    """Run gradient descent and visualize it: left subplot shows the loss
    contours in (w0, w1) space with the iterate path; right subplot shows the
    3D loss surface with the same path.

    Args:
        X: (n, 2) design matrix (column of ones + feature).
        y: (n, 1) label vector.
        w_init: (2, 1) initial parameter vector.
        tau: gradient descent step size.
        max_iter: number of iterations.
    """
    # Local copies of the prediction / loss helpers so the plot is
    # self-contained.
    def f_pred(X,w):
        f = X.dot(w)
        return f
    def loss_mse(y_pred,y):
        n = len(y)
        loss = 1/n* (y_pred - y).T.dot(y_pred - y)
        return loss
    def grad_desc(X, y , w_init=np.array([0,0])[:,None] ,tau=0.01, max_iter=500):
        # NOTE: this inner copy still calls the *global* grad_loss defined in
        # section 5 -- it must have been completed/run first.
        L_iters = np.zeros([max_iter]) # record the loss values
        w_iters = np.zeros([max_iter,2]) # record the w values
        w = w_init # initialization
        for i in range(max_iter): # loop over the iterations
            y_pred = f_pred(X,w) # linear predicition function
            grad_f = grad_loss(y_pred,y,X) # gradient of the loss
            w = w - tau* grad_f # update rule of gradient descent
            L_iters[i] = loss_mse(y_pred,y) # save the current loss value
            w_iters[i,:] = w[0],w[1] # save the current w value
        return w, L_iters, w_iters

    # run gradient descent
    w, L_iters, w_iters = grad_desc(X,y,w_init,tau,max_iter)

    # Create grid coordinates for plotting a range of L(w0,w1)-values
    B0 = np.linspace(-10, 10, 50)
    B1 = np.linspace(-1, 4, 50)
    xx, yy = np.meshgrid(B0, B1, indexing='xy')
    Z = np.zeros((B0.size,B1.size))

    # Calculate loss values based on L(w0,w1)-values
    for (i,j),v in np.ndenumerate(Z):
        Z[i,j] = loss_mse(f_pred(X,w=[[xx[i,j]],[yy[i,j]]]),y)

    # 3D visualization
    fig = plt.figure(figsize=(15,6))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122, projection='3d')

    # Left plot: loss contours (log-spaced levels) with final w marked in red
    CS = ax1.contour(xx, yy, Z, np.logspace(-2, 3, 20), cmap=plt.cm.jet)
    ax1.scatter(w[0],w[1], c='r')
    ax1.plot(w_iters[:,0],w_iters[:,1])

    # Right plot: loss surface
    ax2.plot_surface(xx, yy, Z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)
    ax2.set_zlabel('Loss $L(w_0,w_1)$')
    ax2.set_zlim(Z.min(),Z.max())
    #ax2.view_init(elev=10, azim=-120)

    # plot gradient descent path lifted onto the loss surface
    Z2 = np.zeros([max_iter])
    for i in range(max_iter):
        w0 = w_iters[i,0]
        w1 = w_iters[i,1]
        Z2[i] = loss_mse(f_pred(X,w=[[w0],[w1]]),y)
    ax2.plot(w_iters[:,0],w_iters[:,1],Z2)
    ax2.scatter(w[0],w[1],loss_mse(f_pred(X,w=[w[0],w[1]]),y), c='r')

    # settings common to both plots
    for ax in fig.axes:
        ax.set_xlabel(r'$w_0$', fontsize=17)
        ax.set_ylabel(r'$w_1$', fontsize=17)

# run plot_gradient_descent function
w_init = np.array([0.2,-1.4])[:,None]
tau = 0.01
max_iter = 200
plot_gradient_descent(X,y,w_init,tau,max_iter)

# run plot_gradient_descent function
w_init = np.array([0.2,-1.4])[:,None]
tau = 0.01
max_iter = 2000
plot_gradient_descent(X,y,w_init,tau,max_iter)
tutorials/tutorial03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # importing Libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from matplotlib import pyplot as plt  # NOTE(review): redundant re-import of pyplot under the same alias
from sklearn import linear_model , model_selection
from sklearn.metrics import confusion_matrix , mean_squared_error
from sklearn.preprocessing import StandardScaler , MinMaxScaler
from sklearn.model_selection import train_test_split , TimeSeriesSplit
from keras.models import Sequential
from keras.layers import Dense , LSTM , Dropout
import math
from IPython.display import display

# ## importing Dataset | Training Data (Train-Test Split)

# Length of the look-back window fed to the LSTM, and the row index that
# splits the train period from the test period.  These used to be hard-coded
# in several places (10, 930 and 305), which silently breaks when the CSV
# grows or shrinks; they are now defined once.
TIMESTEPS = 10
TRAIN_ROWS = 930

data = pd.read_csv('Stock_Data.csv')
# Column 1 is kept as a DataFrame for the train/test concat below; column 3 is
# the series the model is actually trained on.
# NOTE(review): training uses column 3 but the test-time inputs below are built
# from the 'Open' column (column 1) -- confirm both refer to the same price
# series, otherwise train and test are fed different features.
dataset_train=data.iloc[0:TRAIN_ROWS,1:2]
dataset_test=data.iloc[TRAIN_ROWS:,1:2]
training_set = data.iloc[0:TRAIN_ROWS, 3:4].values
testing_set=data.iloc[TRAIN_ROWS:,3:4].values
data.head()

# ### Removing uncessary Data

data.drop('Last', axis=1, inplace=True)
data.drop('Total Trade Quantity', axis=1, inplace=True)
data.drop('Turnover (Lacs)', axis=1, inplace=True)
print(data.head())
data.to_csv('tata_preprocessed.csv',index= False)
# NOTE(review): rows are reversed (presumably newest-first CSV) only AFTER the
# train/test slices above were taken -- verify the intended row order.
data = data.iloc[::-1]

# ## Visualising Data

plt.figure(figsize = (18,9))
plt.plot(range(data.shape[0]),(data['Close']))
plt.xticks(range(0,data.shape[0],500),data['Date'].loc[::500],rotation=45)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price',fontsize=18)
plt.show()

# ## Data Normalization

# Scale the training series into [0, 1]; the same fitted scaler is reused for
# the test inputs so both live in the same range.
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)

# ## Incorporating Timesteps Into Data

# +
len(training_set_scaled)
X_train = []
y_train = []
# Each sample is the previous TIMESTEPS scaled values; the label is the value
# that immediately follows the window.
for i in range(TIMESTEPS, len(training_set_scaled)):
    X_train.append(training_set_scaled[i-TIMESTEPS:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Keras LSTMs expect (samples, timesteps, features).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# -

# ## Creating LSTM Model

# +
regressor = Sequential()
regressor.add(LSTM(units = 75, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 75))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))

regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 200, batch_size = 64)
# -

# # Making Predictions on Test Set

real_stock_price = testing_set

# Keep TIMESTEPS rows of history before the first test row so that the first
# test window is complete.
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - TIMESTEPS:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
# Was hard-coded as range(10, 305), which only works when the test set has
# exactly 295 rows; derive the bound from the data instead.
for i in range(TIMESTEPS, len(inputs)):
    X_test.append(inputs[i-TIMESTEPS:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
# Map predictions back from [0, 1] to price units.
predicted_stock_price = sc.inverse_transform(predicted_stock_price)

# # Plotting The Results

# %matplotlib inline
plt.plot(real_stock_price, color = 'red', label = 'TATA Stock Price')
plt.plot(predicted_stock_price, color = 'green', label = 'Predicted TATA Stock Price')
plt.title('TATA Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('TATA Stock Price')
plt.legend()
plt.show()
sTock_Prediction NSE TATA-GLOBAL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: desdeo-problem
#     language: python
#     name: desdeo-problem
# ---

# +
import numpy as np
import matplotlib.pyplot as plt

from desdeo_problem.testproblems.DBMOPP.DBMOPP_generator import DBMOPP_generator
from desdeo_problem.testproblems.DBMOPP.Region import AttractorRegion, Attractor, Region
from desdeo_problem.testproblems.DBMOPP.utilities import get_2D_version, euclidean_distance, get_random_angles, between_lines_rooted_at_pivot, assign_design_dimension_projection
# -

plt.rcParams["figure.figsize"] = (10,8)

# ## Create DBMOPP generator
# #### Parameters:
# - n_objectives is number of the objectives from 2 to 10
#
# - n_variables is number of the variables from 2 to 10
#
# - n_local_pareto_regions, number of local pareto regions can be 0 or more. Plotted in green.
#
# - n_dominance_res_regions, number of dominance regions can be 0 or more. Plotted in blue.
#
# - n_global_pareto_regions, number of global pareto regions can be 1 or more. Plotted in red.
#
# - const_space is the proportion of constrained 2D space, 1.0 means whole space is constrained,
#   0.0 means no amount of space is constrained. If set to anything else than 0.0 constraint type
#   must be 4 or 8.
#
# - pareto_set_type. A set type for global pareto set. If set to 1, n_global_pareto_regions must
#   be bigger than 1.
#     - 0: duplicate performance
#     - 1: partially overlapping performance
#     - 2: non-intersecting performance
#
# - constraint_type. Is the type of constraints applied.
#     - 0: no constraints
#     - 1-4: Hard vertex, centre, moat, extended checker type.
#     - 5-8: Soft vertex, centre, moat, extended checker type.
#
# - prop_neutral (float): Proportion of neutral space. Defaults to 0
#
# The following parameters are suggested to be left to their default values since using different
# combinations of them in DBMOPP_generator has not been tested yet.
#
# - ndo (int): number of regions to apply whose cause discontinuities in objective functions. Default 0.
#
# - vary_sol_density (bool): Should solution density vary in maping down to each of the two
#   visualized dimensions. Default to False
#
# - vary_objective_scales (bool): Are objective scale varied. Defaults to False
#
# - nm (int): Number of samples used for approximation checker and neutral space coverage.
#   Defaults to 10000

# ## Simple problem
#
# - 4 objectives and 2 variables.
#   (the prose previously said "3 objectives", contradicting the code below, which sets n_objectives = 4)
# - hard vertex constraints, 2 local pareto regions and 3 global of type 0.

# +
# parameters
n_objectives = 4
n_variables = 2
n_local_pareto_regions = 2
n_dominance_res_regions = 0
n_global_pareto_regions = 3
const_space = 0.0
pareto_set_type = 0
constraint_type = 1  # hard vertex
ndo = 0
vary_sol_density = False
vary_objective_scales = False
prop_neutral = 0
nm = 10000

simple_problem = DBMOPP_generator(
    n_objectives, n_variables, n_local_pareto_regions,
    n_dominance_res_regions, n_global_pareto_regions, const_space,
    pareto_set_type, constraint_type, ndo, vary_sol_density,
    vary_objective_scales, prop_neutral, nm
)
print(simple_problem._print_params())
print("Initializing works!")

# Plot the generated problem
simple_problem.plot_problem_instance()
# -

# ### Plot Pareto set members
#
# Pareto set can be plotted either by using plot_pareto_set_members function or by getting
# individual Pareto set members and plotting them.
#
# #### Get individual Pareto set member.
# This returns random Pareto set member uniformly from the Pareto set and the point in 2D that
# it maps to. Should not be used during optimisation.

po_member, point_in_2d = simple_problem.get_Pareto_set_member()
print(f"Pareto set member: {po_member} and the point in 2D: {point_in_2d} ")

# +
## for example getting 50 points and then plotting them in 2d
n_of_points = 50

po_list = np.zeros((n_of_points, simple_problem.n))
po_points = np.zeros((n_of_points, 2))
for i in range(n_of_points):
    result = simple_problem.get_Pareto_set_member()
    po_list[i] = result[0]
    po_points[i] = result[1]

plt.scatter(x=po_points[:,0], y=po_points[:,1], s=5, c="r", label="Pareto set members")
plt.title(f"Pareto set members")
plt.xlabel("F1")
plt.xlim([-1,1])
plt.ylim([-1,1])
plt.ylabel("F2")
plt.legend()
# -

# #### Plotting the set using plot_pareto_set_members
#
# The bigger the resolution the more defined plot we get, and need more time to compute.
# This one just checks each point if it belongs to Pareto set or not.

resolution = 300
# function returns the plotted dots as a list. If list has too little members, make resolution bigger.
pareto_optimal_set = simple_problem.plot_pareto_set_members(resolution)

print(pareto_optimal_set[:5]) # print first five

# ### Plot problem landscape in for single objective
#
# One can plot the problem landscape for each of the objectives one by one. White areas are the
# constrained areas.

obj = 0 # just pick first objective as an example
resolution = 500
simple_problem.plot_landscape_for_single_objective(obj, resolution)

# ## More complex problem
#
# - 6 objectives and 10 variables.
# - soft extended checker, 3 local pareto regions and 5 global of type 2. 4 dominance resistance regions.
# - constraint space is set 0.3, neutral space is set to 0.1.

# +
# parameters
n_objectives = 6
n_variables = 10
n_local_pareto_regions = 3
n_dominance_res_regions = 4
n_global_pareto_regions = 5
const_space = 0.3
pareto_set_type = 2
constraint_type = 8  # soft extended checker
ndo = 0
vary_sol_density = False
vary_objective_scales = False
prop_neutral = 0.1
nm = 10000

complex_problem = DBMOPP_generator(
    n_objectives, n_variables, n_local_pareto_regions,
    n_dominance_res_regions, n_global_pareto_regions, const_space,
    pareto_set_type, constraint_type, ndo, vary_sol_density,
    vary_objective_scales, prop_neutral, nm
)
#print(complex_problem._print_params())
print("Initializing works!")

# Plot the generated problem
complex_problem.plot_problem_instance()
# -

# ### Generate DESDEO's MOProblem

# use DBMOPP_generators generate problem
moproblem = simple_problem.generate_problem()
#moproblem = complex_problem.generate_problem()

# You can evaluate certain set of decision vectors with the generated MOproblem.

# +
import random
n_of_variables = simple_problem.n
x = np.array(np.random.rand(5, n_of_variables))
evaluation_result = moproblem.evaluate(x)
print(evaluation_result)
docs/notebooks/DBMOPP_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgbm
import gc

from sklearn.linear_model import Lasso, ElasticNet, RANSACRegressor, Ridge
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier,ExtraTreesClassifier
from sklearn.ensemble import RandomForestRegressor,AdaBoostRegressor,GradientBoostingRegressor,ExtraTreesRegressor
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LinearRegression,LogisticRegression
from sklearn.svm import LinearSVR,SVR
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import SGDRegressor
# was: from sklearn.cross_validation import KFold -- sklearn.cross_validation
# was removed in scikit-learn 0.20; model_selection provides the same KFold.
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold
from itertools import product

import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
# %matplotlib inline
import seaborn as sns
### make Korean text render correctly in seaborn plots
sns.set(font="New Gulim")
import zipfile
import stacking
from sklearn import preprocessing
# -

# # Load Data

train = pd.read_csv('train_2016.csv')
prop = pd.read_csv('properties_2016.csv')
sample = pd.read_csv('sample_submission.csv')

print(prop['taxdelinquencyyear'])

# was: from imp import reload -- the imp module is deprecated since Python 3.4;
# importlib.reload is the supported replacement.
from importlib import reload
reload(stacking)

df_train = train.merge(prop, how='left', on='parcelid')

sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')

# +
# Flag-like columns: treat missing as "not present" / "no delinquency".
df_train.loc[df_train['hashottuborspa'].isnull(),'hashottuborspa'] = False
df_test.loc[df_test['hashottuborspa'].isnull(),'hashottuborspa'] = False

df_train.loc[df_train['taxdelinquencyflag'].isnull(),'taxdelinquencyflag'] = 'N'
df_test.loc[df_test['taxdelinquencyflag'].isnull(),'taxdelinquencyflag'] = 'N'

df_train.loc[df_train['fireplaceflag'].isnull(),'fireplaceflag'] = False
df_test.loc[df_test['fireplaceflag'].isnull(),'fireplaceflag'] = False

# Date-derived features (train only; the test month is set later).
df_train['transactiondate_month'] = pd.to_datetime(df_train['transactiondate']).dt.month
df_train['month_bedrooms'] = df_train['bedroomcnt'] * df_train['transactiondate_month']
del df_train['transactiondate']
# -

def feature_fillna(df_data):
    """Impute missing values in place.

    Counts/areas default to 0 and flags to False/'N'; columns whose missingness
    is ambiguous get a -1 sentinel so tree models can treat "missing" as its
    own category.
    """
    # should be filled
    df_data.loc[df_data['decktypeid'].isnull(),'decktypeid'] = 0.
    df_data.loc[df_data['fireplaceflag'].isnull(),'fireplaceflag'] = False
    df_data.loc[df_data['fireplacecnt'].isnull(),'fireplacecnt'] = 0.
    df_data.loc[df_data['hashottuborspa'].isnull(),'hashottuborspa'] = False
    df_data.loc[df_data['poolcnt'].isnull(),'poolcnt'] = 0.
    df_data.loc[df_data['poolsizesum'].isnull(),'poolsizesum'] = 0.
    df_data.loc[df_data['pooltypeid10'].isnull(),'pooltypeid10'] = 0.
    df_data.loc[df_data['pooltypeid2'].isnull(),'pooltypeid2'] = 0.
    df_data.loc[df_data['pooltypeid7'].isnull(),'pooltypeid7'] = 0.
    # BUG FIX: this line previously wrote to 'pooltypeid7' while testing
    # 'storytypeid', which zeroed real pooltypeid7 values on every row where
    # only storytypeid was missing.
    df_data.loc[df_data['storytypeid'].isnull(),'storytypeid'] = 0.
    df_data.loc[df_data['taxdelinquencyflag'].isnull(),'taxdelinquencyflag'] = 'N'
    df_data.loc[df_data['basementsqft'].isnull(),'basementsqft'] = 0.
    df_data.loc[df_data['garagecarcnt'].isnull(),'garagecarcnt'] = 0.
    df_data.loc[df_data['garagetotalsqft'].isnull(),'garagetotalsqft'] = 0.
    df_data.loc[df_data['yardbuildingsqft17'].isnull(),'yardbuildingsqft17'] = 0.
    df_data.loc[df_data['yardbuildingsqft26'].isnull(),'yardbuildingsqft26'] = 0.
    df_data.loc[df_data['structuretaxvaluedollarcnt'].isnull(),'structuretaxvaluedollarcnt'] = 0.
    # may be filled
    df_data.loc[df_data['airconditioningtypeid'].isnull(),'airconditioningtypeid'] = 0.
    df_data.loc[df_data['numberofstories'].isnull(),'numberofstories'] = 0.
    df_data.loc[df_data['storytypeid'].isnull(),'storytypeid'] = 0.
    df_data.loc[df_data['threequarterbathnbr'].isnull(),'threequarterbathnbr'] = 0.
    df_data.loc[df_data['taxdelinquencyyear'].isnull(),'taxdelinquencyyear'] = 0.
    # may not but,,
    df_data.loc[df_data['unitcnt'].isnull(),'unitcnt'] = -1.
    df_data.loc[df_data['heatingorsystemtypeid'].isnull(),'heatingorsystemtypeid'] = -1.

feature_fillna(df_train)
feature_fillna(df_test)

# When label encoding, always fit on the combined train+test values and then
# transform each set separately, so both share one consistent mapping.
for col in df_train.columns:
    if df_train[col].dtype == 'object':
        print(col)
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(df_train[col].values) + list(df_test[col].values))
        df_train[col] = lbl.transform(list(df_train[col].values))
        df_test[col] = lbl.transform(list(df_test[col].values))

#drop outlier
df_train=df_train[ df_train.logerror > -0.4 ]
df_train=df_train[ df_train.logerror < 0.419 ]

def drop_features(df_data):
    """Drop sparse / low-value columns in place."""
    # drop!
    df_data.drop('finishedsquarefeet13', axis=1, inplace=True)
    df_data.drop('architecturalstyletypeid', axis=1, inplace=True)
    df_data.drop('typeconstructiontypeid', axis=1, inplace=True)
    df_data.drop('finishedsquarefeet6', axis=1, inplace=True)
    # maybe not but.
    df_data.drop('buildingclasstypeid', axis=1, inplace=True)
    df_data.drop('finishedsquarefeet15', axis=1, inplace=True)
    df_data.drop('finishedfloor1squarefeet', axis=1, inplace=True)
    df_data.drop('finishedsquarefeet50', axis=1, inplace=True)
    df_data.drop('regionidneighborhood', axis=1, inplace=True)

drop_features(df_train)
drop_features(df_test)

# +
x_train = df_train.drop(['parcelid', 'logerror'], axis=1)
train_columns = x_train.columns
y_train = df_train['logerror']
y_mean = np.mean(y_train)

# October is used as the (fixed) prediction month for the test set.
df_test['transactiondate_month'] = 10
df_test['month_bedrooms'] = df_test['transactiondate_month']*df_test['bedroomcnt']
x_test = df_test[train_columns]
print(x_train.shape, y_train.shape, x_test.shape)

del df_train,df_test
gc.collect()
# -

# # Stacking Model

# +
# NOTE(review): "et_parmas" is a typo of "et_params"; the name is kept because
# later cells reference it.
et_parmas = {
    'criterion':'mse', 'max_leaf_nodes':30, 'n_estimators':1000,
    'min_impurity_split':0.0000001, 'max_features':0.6, 'max_depth':10,
    'min_samples_leaf':20, 'min_samples_split':2, 'min_weight_fraction_leaf':0.0,
    'bootstrap':True, 'random_state':1, 'verbose':False
}
# -

# +
# BUG FIX: was params=rf_params, which is not defined until the "Param" cell
# below (NameError when run top-to-bottom) and carries the wrong settings;
# et_parmas, defined just above, is the intended extra-trees configuration.
et_model = stacking.SklearnWrapper(clf = ExtraTreesRegressor,params=et_parmas)
# -

# +
et_train,et_test = stacking.get_oof(et_model,x_train.fillna(-1),y_train,x_test.fillna(-1),
                                    mean_absolute_error,NFOLDS=5)
# -

# NOTE(review): this cell duplicates the "Second Layer" cell further below and
# references base-model outputs (lgbm1_train, xgb1_train, ...) that are only
# produced by the "Run!" section -- it fails unless those cells ran first.
x_train_second_layer = np.concatenate((lgbm1_train, lgbm2_train, lgbm3_train, lgbm4_train,
                                       xgb1_train, xgb2_train, gbm1_train,ridge_train,
                                       lasso_train, rf_train,et_train), axis=1)
x_test_second_layer = np.concatenate((lgbm1_test, lgbm2_test, lgbm3_test, lgbm4_test,
                                      xgb1_test,xgb2_test, gbm1_test,ridge_test,
                                      lasso_test,rf_test,et_test), axis=1)

# ### Param

# +
# LightGBM, MAE (L1) objective.
lgbm_params1 = {
    'boosting':'gbdt', 'num_leaves':10, 'learning_rate':0.01,
    'min_sum_hessian_in_leaf':0.1, 'max_depth':4, 'feature_fraction':0.5,
    'min_data_in_leaf':4, 'poission_max_delta_step':0.7, 'bagging_fraction':0.8,
    'min_gain_to_split':0, 'scale_pos_weight':1.0, 'lambda_l2':0.1, 'lambda_l1':0.1,
    'huber_delta':1.0, 'bagging_freq':1, 'objective':'regression_l1', 'seed':1,
    'categorical_feature':0, 'xgboost_dart_mode':False, 'drop_rate':0.1,
    'skip_drop':0.5, 'max_drop':50, 'top_rate':0.1, 'other_rate':0.1,
    'max_bin':255, 'min_data_in_bin':50, 'bin_construct_sample_cnt':1000000,
    'two_round':False, 'uniform_drop':False, 'metric': 'mae', 'threads':6
}

# LightGBM, Huber objective.
lgbm_params2 = {
    'boosting':'gbdt', 'num_leaves':24, 'learning_rate':0.03,
    'min_sum_hessian_in_leaf':0.1, 'max_depth':6, 'feature_fraction':0.5,
    'min_data_in_leaf':50, 'poission_max_delta_step':0.7, 'bagging_fraction':0.8,
    'min_gain_to_split':0, 'scale_pos_weight':1.0, 'lambda_l2':0.1, 'lambda_l1':0.1,
    'huber_delta':0.05, 'bagging_freq':1, 'objective':'huber', 'seed':1,
    'categorical_feature':0, 'xgboost_dart_mode':False, 'drop_rate':0.1,
    'skip_drop':0.5, 'max_drop':50, 'top_rate':0.1, 'other_rate':0.1,
    'max_bin':255, 'min_data_in_bin':50, 'bin_construct_sample_cnt':1000000,
    'two_round':False, 'uniform_drop':False, 'metric': 'mae', 'threads':6
}

# LightGBM, Fair-loss objective.
lgbm_params3 = {
    'boosting':'gbdt', 'num_leaves':28, 'learning_rate':0.03,
    'min_sum_hessian_in_leaf':0.1, 'max_depth':7, 'feature_fraction':0.6,
    'min_data_in_leaf':70, 'poission_max_delta_step':0.7, 'bagging_fraction':0.8,
    'min_gain_to_split':0, 'scale_pos_weight':1.0, 'lambda_l2':0.1, 'lambda_l1':0.1,
    'fair_c':0.01, 'bagging_freq':1, 'objective':'fair', 'seed':1,
    'categorical_feature':0, 'xgboost_dart_mode':False, 'drop_rate':0.1,
    'skip_drop':0.5, 'max_drop':50, 'top_rate':0.1, 'other_rate':0.1,
    'max_bin':255, 'min_data_in_bin':50, 'bin_construct_sample_cnt':1000000,
    'two_round':False, 'uniform_drop':False, 'metric': 'mae', 'threads':6
}

# LightGBM, squared-error (L2) objective.
lgbm_params4 = {
    'boosting':'gbdt', 'num_leaves':16, 'learning_rate':0.003,
    'min_sum_hessian_in_leaf':0.1, 'max_depth':7, 'feature_fraction':0.5,
    'min_data_in_leaf':70, 'poission_max_delta_step':0.7, 'bagging_fraction':0.8,
    'min_gain_to_split':0, 'scale_pos_weight':1.0, 'lambda_l2':0.1, 'lambda_l1':0.1,
    'bagging_freq':1, 'objective':'regression', 'seed':1,
    'categorical_feature':0, 'xgboost_dart_mode':False, 'drop_rate':0.1,
    'skip_drop':0.5, 'max_drop':50, 'top_rate':0.1, 'other_rate':0.1,
    'max_bin':255, 'min_data_in_bin':50, 'bin_construct_sample_cnt':1000000,
    'two_round':False, 'uniform_drop':False, 'metric': 'mae', 'threads':6
}

# XGBoost, tree booster.
xgb_params1 = {
    'booster':'gbtree', 'objective':'reg:linear', 'max_leaves':0, 'eta':0.02,
    'gamma':1, 'max_depth':4, 'colsample_bylevel':1.0, 'min_child_weight':4.0,
    'max_delta_step':0.0, 'subsample':0.8, 'colsample_bytree':0.5,
    'scale_pos_weight':1.0, 'alpha':1.0, 'lambda':5.0, 'seed':1
}

# XGBoost, linear booster.
xgb_params2 = {
    'booster':'gblinear', 'objective':'reg:linear', 'max_leaves':0, 'eta':0.1,
    'gamma':1, 'max_depth':4, 'colsample_bylevel':1.0, 'min_child_weight':4.0,
    'max_delta_step':0.0, 'subsample':0.8, 'colsample_bytree':0.5,
    'scale_pos_weight':1.0, 'alpha':10.0, 'lambda':1.0, 'seed':1
}

sgd_param = {
    'loss':'huber', 'penalty':'l2', 'alpha':1, 'l1_ratio':0.15, 'eta0':0.001,
    'fit_intercept':True, 'shuffle':True, 'random_state':1,
}

gbm_param = {
    'n_estimators':100, 'learning_rate':0.1, 'min_samples_split':0.00001,
    'subsample':1.0, 'max_depth':5, 'max_features':0.4, 'min_samples_leaf':4.0,
    'random_state':1
}

lasso_params = {
    'alpha':0.003, 'normalize':True, 'max_iter':200, 'fit_intercept':True,
    'tol':0.007, 'warm_start':True
}

ridge_params = {
    'alpha':0.2, 'normalize':True, 'max_iter':200, 'fit_intercept':False,
    'solver':'auto'
}

rf_params = {
    'criterion':'mse', 'max_leaf_nodes':30, 'n_estimators':1000,
    'min_impurity_split':0.0000001, 'max_features':0.25, 'max_depth':6,
    'min_samples_leaf':20, 'min_samples_split':2, 'min_weight_fraction_leaf':0.0,
    'bootstrap':True, 'random_state':1, 'verbose':False
}

# Duplicate of the et_parmas cell above; kept so later cells see the same values.
et_parmas = {
    'criterion':'mse', 'max_leaf_nodes':30, 'n_estimators':1000,
    'min_impurity_split':0.0000001, 'max_features':0.6, 'max_depth':10,
    'min_samples_leaf':20, 'min_samples_split':2, 'min_weight_fraction_leaf':0.0,
    'bootstrap':True, 'random_state':1, 'verbose':False
}
# -

# NOTE(review): despite its name this wraps the sklearn ExtraTreesRegressor
# with the random-forest params -- looks like a leftover experiment; confirm
# before relying on it.
keras_model = stacking.kerasWrapper(clf = ExtraTreesRegressor,params=rf_params)

# ### Model

# +
lgbm_model1 = stacking.LgbmWrapper(params=lgbm_params1, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) lgbm_model2 = stacking.LgbmWrapper(params=lgbm_params2, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) lgbm_model3 = stacking.LgbmWrapper(params=lgbm_params3, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) lgbm_model4 = stacking.LgbmWrapper(params=lgbm_params4, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) xgb_model1 = stacking.XgbWrapper(params=xgb_params1, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) xgb_model2 = stacking.XgbWrapper(params=xgb_params2, num_rounds = 1500, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) gbm_model = stacking.SklearnWrapper(clf = GradientBoostingRegressor,params=gbm_param) ridge_model = stacking.SklearnWrapper(clf = Ridge,params=ridge_params) lasso_model = stacking.SklearnWrapper(clf = Lasso,params=lasso_params) rf_model = stacking.SklearnWrapper(clf = RandomForestRegressor,params=rf_params) et_model = stacking.SklearnWrapper(clf = ExtraTreesRegressor,params=rf_params) # - # ### Run! 
lgbm1_train,lgbm1_test = stacking.get_oof(lgbm_model1,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) lgbm2_train,lgbm2_test = stacking.get_oof(lgbm_model2,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) lgbm3_train,lgbm3_test = stacking.get_oof(lgbm_model3,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) lgbm4_train,lgbm4_test = stacking.get_oof(lgbm_model4,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) gc.collect() xgb1_train,xgb1_test = stacking.get_oof(xgb_model1,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) xgb2_train,xgb2_test = stacking.get_oof(xgb_model2,x_train,y_train,x_test, mean_absolute_error,NFOLDS=5) gc.collect() gbm1_train,gbm1_test = stacking.get_oof(gbm_model,x_train.fillna(-1),y_train,x_test.fillna(-1), mean_absolute_error,NFOLDS=5) ridge_train,ridge_test = stacking.get_oof(ridge_model,x_train.fillna(-1),y_train,x_test.fillna(-1), mean_absolute_error,NFOLDS=5) lasso_train,lasso_test = stacking.get_oof(lasso_model,x_train.fillna(-1),y_train,x_test.fillna(-1), mean_absolute_error,NFOLDS=5) rf_train,rf_test = stacking.get_oof(rf_model,x_train.fillna(-1),y_train,x_test.fillna(-1), mean_absolute_error,NFOLDS=5) et_train,et_test = stacking.get_oof(et_model,x_train.fillna(-1),y_train,x_test.fillna(-1), mean_absolute_error,NFOLDS=5) gc.collect() # ### Second Layer x_train_second_layer = np.concatenate((lgbm1_train, lgbm2_train, lgbm3_train, lgbm4_train, xgb1_train, xgb2_train, gbm1_train,ridge_train, lasso_train, rf_train,et_train), axis=1) x_test_second_layer = np.concatenate((lgbm1_test, lgbm2_test, lgbm3_test, lgbm4_test,xgb1_test,xgb2_test, gbm1_test,ridge_test,lasso_test,rf_test,et_test), axis=1) # + lgbm_ex_no = 9 lgbm_meta_params = { 'boosting':'gbdt', 'num_leaves':28, 'learning_rate':0.03, 'min_sum_hessian_in_leaf':0.1, 'max_depth':7, 'feature_fraction':0.6, 'min_data_in_leaf':70, 'poission_max_delta_step':0.7, 'bagging_fraction':0.8, 'min_gain_to_split':0, 'scale_pos_weight':1.0, 'lambda_l2':0.1, 'lambda_l1':0.1, 
'fair_c':1.0, 'bagging_freq':1, 'objective':'fair', 'seed':1, 'categorical_feature':0, 'xgboost_dart_mode':False, 'drop_rate':0.1, 'skip_drop':0.5, 'max_drop':50, 'top_rate':0.1, 'other_rate':0.1, 'max_bin':255, 'min_data_in_bin':50, 'bin_construct_sample_cnt':1000000, 'two_round':False, 'uniform_drop':False,'metric': 'mae','threads':6 } lgbm_meta_model = stacking.LgbmWrapper(params=lgbm_meta_params, num_rounds = 2000, ealry_stopping=100, verbose_eval=False, base_score=True, maximize=False, y_value_log=False) lgbm_cv_score,best_round = stacking.kfold_test(lgbm_meta_model, pd.DataFrame(x_train_second_layer), y_train, mean_absolute_error,NFOLDS=5 ) d_train_all = lgbm.Dataset(pd.DataFrame(x_train_second_layer), label=y_train) bst = lgbm.train(lgbm_params,d_train_all,best_round) predictions = bst.predict(pd.DataFrame(x_test_second_layer)) # + print("fscore result") fscore_df = pd.concat([pd.DataFrame(bst.feature_name()),pd.DataFrame(bst.feature_importance())],axis=1) fscore_df.columns = ['column','fscore'] fscore_df['fscore'] = fscore_df['fscore'].astype(int) fscore_df.sort_values(by='fscore',ascending=False,inplace=True) fscoe_output = 'fscore\\ex_'+str(lgbm_ex_no)+'_lgbm_fscore_'+ str(lgbm_cv_score)+ '.csv' fscore_df.to_csv(fscoe_output) fig, ax = plt.subplots(figsize=(12,18)) lgbm.plot_importance(bst, height=0.8, ax=ax) plt.show() # + print("Predict") sub = pd.read_csv('input/sample_submission.csv') for c in sub.columns[sub.columns != 'ParcelId']: print(c) sub[c] = predictions print("Wrting Files") sub_output = 'output\\ex_'+str(lgbm_ex_no)+'_lightgbm_'+str(lgbm_cv_score) sub.to_csv(sub_output+'.csv', index=False, float_format='%.4f') # Thanks to @inversion print("File Zip") jungle_zip = zipfile.ZipFile(sub_output +'.zip', 'w') jungle_zip.write(sub_output + '.csv', compress_type=zipfile.ZIP_DEFLATED) jungle_zip.close() print("Done")
Titanic/StackingModel.ipynb
def generate_signal(n, w_max, N, max_A=1.0, print_info=True):
    """Build a random test signal as the sum of ``n`` sinusoidal harmonics.

    :param int n: number of harmonics.
    :param float w_max: maximum frequency; harmonics are evenly spaced up to it.
    :param int N: number of generation steps (samples).
    :param float max_A: upper bound for the random per-harmonic amplitudes.
    :param boolean print_info: whether to print frequences, amplitudes and phases of harmonics.
    :return np.ndarray result_signal: shape = (N, )
    """
    # Evenly spaced frequencies: w_max/n, 2*w_max/n, ..., w_max (column vector
    # so it broadcasts across the time axis below).
    step = w_max / n
    freqs = (np.arange(1, n + 1) * step).reshape(n, 1)

    # Random draws kept in the original order: amplitudes first, then integer
    # phases in [-15, 15), so a fixed np.random seed reproduces the same signal.
    amps = np.random.random((n, 1)) * max_A
    phases = np.random.randint(-15, 15, size=(n, 1))

    # One shared time axis replicated per harmonic, then summed across rows.
    t = np.linspace(0, N, num=N)
    grid = np.tile(t, (n, 1))
    result_signal = (amps * np.sin(freqs * grid + phases)).sum(axis=0, keepdims=False)

    if print_info:
        print("Frequencies: \n", freqs)
        print("Amplitudes :\n", amps)
        print("Phases :\n", phases)

    return result_signal
class DFT:
    """Discrete Fourier transform backed by a precomputed cos/sin table.

    The table trades O(N^2) memory for skipping the trig calls that the
    naive ``dft`` function repeats on every transform.
    """

    def __init__(self, N):
        """
        :param int N: Number of discrete points
        """
        self.N = N
        self.table = self.create_table(N)

    def create_table(self, N):
        """Precompute W[p, k] = (cos(2*pi*p*k/N), sin(2*pi*p*k/N))."""
        W = np.zeros((N, N, 2))
        for p in range(N):
            for k in range(N):
                re = np.cos(2 * np.pi * p * k / N)
                im = np.sin(2 * np.pi * p * k / N)
                W[p, k] = (re, im)
        return W

    def dft(self, signal):
        """Return the magnitude spectrum of ``signal``.

        ``signal`` must have length ``self.N`` (the size the table was built
        for). The explicit double loop is kept on purpose: the notebook times
        this method against the table-free ``dft`` function.

        BUG FIX: this method previously read the module-level global ``N``
        instead of ``self.N``, so an instance constructed with a different
        size silently used the wrong length (or raised NameError when no
        global ``N`` existed).
        """
        n_pts = self.N
        F = np.zeros((n_pts, 2))
        for p in range(n_pts):
            re, im = 0, 0
            for k in range(n_pts):
                re += signal[k] * self.table[p, k, 0]
                im += signal[k] * self.table[p, k, 1]
            F[p, :] = (re, im)
        # Magnitude of each frequency bin.
        dft_x = np.sqrt(F[:, 0] ** 2 + F[:, 1] ** 2)
        return dft_x
ax.plot(time_table, color=(0, 0, 1), linewidth=3, ls="--", label="Table dft time") ax.set(title="Transformation time of N", xlabel="num trensforms", ylabel="time (s)") ax.legend(fontsize=16, loc=0) ax.grid() #ax.set(xlim=[0, N], ylim=[signal_x.min(), signal_x.max()]) plt.show() # -
RTS_lab3_discrete_fourier_transform.ipynb
def mean(values):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    :param values: sequence (e.g. a list) of numeric values.
    :return: sum(values) / len(values) as a float.
    :raises ValueError: if ``values`` is empty, instead of the opaque
        ZeroDivisionError the bare division would raise.
    """
    if not values:
        raise ValueError("mean() requires at least one value")
    return sum(values) / len(values)
day2_2_package&module.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.2.0 # language: julia # name: julia-1.2 # --- # # Linear Algebra - solving least squares problems # ### <NAME> # # # Linear algebra and statistical models # # `Julia` provides one of the best, if not _the best_, environments for numerical linear algebra. # # The `Base` package provides basic array (vector, matrix, etc.) construction and manipulation; `*`, `/`, `\`, `'`. The `LinearAlgebra` package provides many definitions of matrix types (`Diagonal`, `UpperTriangular`, ...) and factorizations. The `SparseArrays` packages provides types and methods for sparse matrices. A = rand(4,3) # simulate a matrix with elements selected at random from (0,1) B = rand(3, 4) A' # "lazy" transpose A*B # ## Solving least squares problems # # A least squares solution is one of the building blocks for statistical models. # # If `X` is an $n\times p$ model matrix and `y` is an $n$-dimensional vector of observed responses, a _linear model_ is of the form # $$ # \mathcal{Y}\sim\mathcal{N}(\mathbf{X}\beta, \sigma^2\mathbf{I}) # $$ # # That is, the _mean response_ is modeled as a _linear predictor_ , $\mathbf{X}\beta$, depending on the _parameter vector_, $\beta$ (also called _coefficients_) with the covariance matrix, $\sigma^2\mathbf{I}$. In general the probability density for a [multivariate normal distribution](https://en.wikipedia.org/wiki/Multivariate_normal_distribution) with mean $\mu$ and covariance matrix $\Sigma$ is # $$ # f(\mathbf{y}|\mu,\Sigma) = \frac{1}{(2\pi)^{n/2}|\Sigma|^{1/2}} # \exp\left(-\frac{(\mathbf{y}-\mu)'\Sigma^{-1}(\mathbf{y}-\mu)}{2}\right) # $$ # where $|\Sigma|$ denotes the determinant of $\Sigma$. 
# # When the covariance matrix is of the form $\sigma^2\mathbf{I}$ the distribution is called a _spherical_ normal # distribution because the contours of constant density are $n$-dimensional spheres centered at $\mu$. # In these cases the probability density can be simplified to # $$ # \begin{aligned} # f(\mathbf{y}|\beta,\sigma)&= \frac{1}{(2\pi\sigma^2)^{n/2}} \exp\left(-\frac{(\mathbf{y}-\mathbf{X}\beta)'(\mathbf{y}-\mathbf{X}\beta)}{2\sigma^2}\right)\newline # &= \frac{1}{(2\pi\sigma^2)^{n/2}} \exp\left(-\frac{\|\mathbf{y}-\mathbf{X}\beta)\|^2}{2\sigma^2}\right) . # \end{aligned} # $$ # # The _likelihood_ of the parameters, $\beta$ and $\sigma$, given the data, $\mathbf{y}$ (and, implicitly, $\mathbf{X}$), is the same expression as the density but with the roles of the parameters and the observations reversed # $$ # L(\beta,\sigma|\mathbf{y})=\frac{1}{(2\pi\sigma^2)^{n/2}} \exp\left(-\frac{\|\mathbf{y}-\mathbf{X}\beta)\|^2}{2\sigma^2}\right) . # $$ # # The _maximum likelihood estimates_ of the parameters are the values that maximize the likelihood given the data. It is convenient to maximize the logarithm of the likelihood, called the _log-likelihood_, instead of the likelihood. # $$ # \ell(\beta,\sigma|\mathbf{y})=\log L(\beta,\sigma|\mathbf{y})= # -\frac{n}{2}\log(2\pi\sigma^2)-\frac{\|\mathbf{y}-\mathbf{X}\beta)\|^2}{2\sigma^2} # $$ # # (Because the logarithm function is monotone increasing, the values of $\beta$ and $\sigma$ that maximize the log-likelihood also maximize the likelihood.) # # For any value of $\sigma$ the value of $\beta$ that maximizes the log-likelihood is the value that minimizes the sum of squared residuals, # $$ # \widehat{\beta}=\arg\min_\beta \|\mathbf{y} - \mathbf{X}\beta\|^2 # $$ # # ## A simple linear regression model # # Data from a calibration experiment on the optical density versus the concentration of Formaldehyde are available as the `Formaldehyde` data in `R`. We use the `RCall` package to retrieve these data from `R`. 
using LinearAlgebra, RCall, StatsModels, Tables Formaldehyde = rcopy(R"Formaldehyde") R""" library(ggplot2) qplot(x=carb, y=optden, data=Formaldehyde, geom="point") """ # In a _simple linear regression_ the model matrix, $\mathbf{X}$, consists of a column of 1's and a column of the covariate values; `carb`, in this case. X = hcat(ones(size(Formaldehyde, 1)), Formaldehyde.carb) y = Formaldehyde.optden β = X\y # least squares estimate r = y - X*β #residual # One of the conditions for $\hat{\beta}$ being the least squares estimate is that the residuals must be orthogonal to the columns of $\mathbf{X}$ X'r # not exactly zero but very small entries # ## Creating the model matrix from a formula # # Creating model matrices from a data table can be a tedious and error-prone operation. In addition, _statistical inference_ regarding a linear model often considers groups of columns generated by model _terms_. The `GLM` package provides methods to fit and analyze linear models and generalized linear models (described later) using a _formula/data_ specification similar to that in _R_. using GLM m1 = fit(LinearModel, @formula(optden ~ 1 + carb), Formaldehyde) # The evaluation of the formula is performed by the `StatsModels` package in stages. f1 = @formula(optden ~ 1 + carb) y, X = modelcols(apply_schema(f1, schema(Formaldehyde)), Formaldehyde) X # ## Matrix decompositions for least squares # # According to the formulas given in text books, the least squares estimates are calculated as # $$ # \widehat{\mathbf{\beta}}=\mathbf{X^\prime X}^{-1}\mathbf{X^\prime y} # $$ # # In practice, this formula is not the way the estimates are calculated, because it is wasteful to evaluate the inverse of a matrix if you just want to solve a system of equations. # # Recall that the least squares estimate satisfies the condition that the residual is orthogonal to the columns of $\mathbf{X}$. 
# $$ # \mathbf{X^\prime (y - X\widehat{\beta})} = \mathbf{0} # $$ # which can be re-written as # $$ # \mathbf{X^\prime X}\widehat{\mathbf{\beta}}=\mathbf{X^\prime y} # $$ # These are called the _normal equations_ - "normal" in the sense of orthogonal, not in the sense of the normal distribution. # # The matrix $\mathbf{X^\prime X}$ is symmetric and _positive definite_. The latter condition means that # $$ # \mathbf{v^\prime(X^\prime X)v}=\mathbf{(Xv)^\prime Xv} = \|\mathbf{Xv}\|^2 > 0\quad\forall \mathbf{v}\ne\mathbf{0} # $$ # if $\mathbf{X}$ has full column rank. # # We will assume that the model matrices $\mathbf{X}$ we will use do have full rank. It is possible to handle rank-deficient model matrices but we will not cover that here. # # A positive-definite matrix has a "square root" in the sense that there is a $p\times p$ matrix $\mathbf{A}$ such that # $$\mathbf{A^\prime A}=\mathbf{X^\prime X} .$$ # In fact, when $p>1$ there are several. A specific choice of $\mathbf{A}$ is an upper triangular matrix with positive elements on the diagonal, usually written $\mathbf{R}$ and called the upper Cholesky factor of $\mathbf{X^\prime X}$. X xpx = X'X ch = cholesky(xpx) ch.U'ch.U ch.U'ch.U ≈ xpx # Because the Cholesky factor is triangular, it is possible to solve systems of equations of the form # $$\mathbf{R^\prime R}\widehat{\mathbf{\beta}}=\mathbf{X^\prime y}$$ # in place in two stages. First solve for $\mathbf{v}$ in # $$\mathbf{R^\prime v}=\mathbf{X^\prime y}$$ v = ldiv!(ch.U', X'y) # then solve for $\widehat{\mathbf{\beta}}$ in # $$ # \mathbf{R}\widehat{\mathbf{\beta}}=\mathbf{v} # $$ βc = ldiv!(ch.U, copy(v)) # solution from the Cholesky factorization βc ≈ β # These steps are combined in one of the many `LinearAlgebra` methods for solutions of equations. ldiv!(ch, X'y) # ## Sum of squared residuals as a quadratic form # # Another way of approaching the least squares problem is to write the sum of squared residuals as what is called a _quadratic form_. 
# $$ # \begin{aligned} # r^2(\mathbf{\beta}) & = \|\mathbf{y} - \mathbf{X\beta}\|^2\\ # &=\left\|\begin{bmatrix}\mathbf{X}&\mathbf{y}\end{bmatrix}\begin{bmatrix}\mathbf{-\beta}\\ 1\end{bmatrix}\right\|^2\\ # &=\begin{bmatrix}\mathbf{-\beta}&1\end{bmatrix}\begin{bmatrix}\mathbf{X^\prime X} & \mathbf{X^\prime y}\\ # \mathbf{y^\prime X}&\mathbf{y^\prime y}\end{bmatrix} # \begin{bmatrix}\mathbf{-\beta}\\ 1\end{bmatrix}\\ # &=\begin{bmatrix}\mathbf{-\beta}&1\end{bmatrix} # \begin{bmatrix} # \mathbf{R_{XX}}^\prime&\mathbf{0}\\ # \mathbf{r_{Xy}}^\prime&r_{\mathbf{yy}} # \end{bmatrix} # \begin{bmatrix} # \mathbf{R_{XX}}&\mathbf{r_{Xy}}\\ # \mathbf{0}&r_{\mathbf{yy}} # \end{bmatrix} # \begin{bmatrix}\mathbf{-\beta}\\ 1\end{bmatrix}\\ # &= \left\| \begin{bmatrix} # \mathbf{R_{XX}}&\mathbf{r_{Xy}}\\ # \mathbf{0}&r_{\mathbf{yy}} # \end{bmatrix} # \begin{bmatrix}\mathbf{-\beta}\\ 1\end{bmatrix}\right\|^2\\ # &= \|\mathbf{r_{Xy}}-\mathbf{R_{XX}\beta}\|^2 + r_{\mathbf{yy}}^2 # \end{aligned} # $$ # where $\begin{bmatrix}\mathbf{R_{XX}}&\mathbf{r_{Xy}}\\ \mathbf{0}&r_{\mathbf{yy}}\end{bmatrix}$ is the upper Cholesky factor of the augmented matrix # $\begin{bmatrix}\mathbf{X^\prime X} & \mathbf{X^\prime y}\\ \mathbf{y^\prime X}&\mathbf{y^\prime y}\end{bmatrix}$. Xy = hcat(X, y) # augmented model matrix cha = cholesky(Xy'Xy) # augmented Cholesky factor # Note that $\mathbf{R_{XX}}$ is just the Cholesky factor of $\mathbf{X^\prime X}$ which was previously calculated and the vector $\mathbf{r_{Xy}}$ is the solution to $\mathbf{R^\prime v}=\mathbf{X^\prime y}$. 
The minimum sum of squares is $r^{\mathbf{yy}}$ which is attained when $\mathbf{\beta}$ is the solution to # $$\mathbf{R_{XX}}\widehat{\beta}=\mathbf{r_{Xy}}$$ RXX = UpperTriangular(view(cha.U, 1:2, 1:2)) RXX ≈ ch.U rXy = cha.U[1:2, end] # creates a copy ("view" doesn't copy) rXy ≈ v βac = ldiv!(RXX, copy(rXy)) # least squares solution from the augmented Cholesky βac ≈ β abs2(cha.U[end,end]) ≈ sum(abs2, y - X*β) # check on residual sum of squares # One reason for writing the least squares solution in this way is that we will use a similar decomposition for linear mixed models later. Another is that when dealing with very large data sets we may wish to parallelize the calculation over many cores or over many processors. The natural way to parallelize the calculation is in blocks of rows and the augmented Cholesky can be formed row by row using a `lowrankupdate`. # # ### A row-wise approach to least squares # # To show this we create a row-oriented table from our `Formaldehyde` data set, which is in a column-oriented format. Formrows = Tables.rowtable(Formaldehyde) # then initialize a Cholesky factor and zero out its contents chr = cholesky(zeros(3, 3) + I) # initialize fill!(chr.factors, 0); # zero out the contents chr # Update by rows fill!(chr.factors, 0) for r in Formrows lowrankupdate!(chr, [1.0, r.carb, r.optden]) end chr # This is the same augmented Cholesky factor as before but obtained in a different way. For generalized linear mixed models and for nonlinear mixed models it can be an advantage to work row-wise when performing some of the least squares calculations. # # ### Other decompositions for least squares solutions # # There are other ways of solving a least squares problem, such as using an _orthogonal-triangular_ decomposition, also called a _QR_ decomposition, or a singular value decomposition. 
The bottom line is that we decompose $\mathbf{X}$ or $\mathbf{X^\prime X}$ into some convenient product of orthogonal or triangular or diagonal matrices and work with those. Just to relate these ideas, the _QR_ decomposition of $\mathbf{X}$ is qrX = qr(X) # Notice that the `R` factor is the upper Cholesky factor of $\mathbf{X^\prime X}$ with the first row multiplied by -1 so its transposed product with itself is $\mathbf{X^\prime X}$. qrX.R'qrX.R # That is, qrX.R'qrX.R ≈ xpx # The original solution for $\widehat{\mathbf{\beta}}$ from the expression `X\y` is actually performed by taking a QR decomposition of `X`.
notebook/linearalgebra/leastsquares.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings import numpy as np import pandas as pd from progress.bar import Bar from gensim.models.ldamodel import LdaModel import topic_modelling as tm # - # Disable any warnings that appear warnings.filterwarnings('ignore') # Define range of alpha and beta hyperparameters NUM_TOPICS = 6 ALPHA = list(np.arange(0.01, 1, 0.2)) + [1, 'symmetric', 'asymmetric'] ALPHA = [round(a, 2) if isinstance(a, float) else a for a in ALPHA] BETA = list(np.arange(0.01, 1, 0.2)) + [1, 'symmetric'] BETA = [round(b, 2) if isinstance(b, float) else b for b in BETA] # Retrieve data from the dataset df = pd.read_csv('resources/bbc-news-data.csv', sep='\t') data = df.content.values.tolist() # Perform data preprocessing data, id2word, corpus = tm.preprocess_data(data) # + # Collect results results = {} with Bar('Final', max=(len(ALPHA) * len(BETA)), check_tty=False) as bar: for a in ALPHA: results[a] = {'alpha': [], 'beta': [], 'coherence': [], 'model': []} for b in BETA: # Execute model model = LdaModel(corpus=corpus, id2word=id2word, num_topics=NUM_TOPICS, alpha=a, eta=b) # Calculate coherence coherence_score = tm.calculate_coherence(model, data, id2word) # Append to results results[a]['alpha'].append(a) results[a]['beta'].append(b) results[a]['coherence'].append(coherence_score) results[a]['model'].append(model) bar.next() # + # Convert results to dataframe dataframes = {} for a, result in results.items(): df = pd.DataFrame(result) # Export the dataframe export_df = df.drop(['model'], axis=1) export_df.to_excel(f'results/results_final_{a}.xlsx', sheet_name='Final', index=False) dataframes[a] = df # Combine dataframes overall_df = pd.concat([v for _, v in dataframes.items()]) # - # Plot the results for a, adf in dataframes.items(): tm.graph_results( f'Coherence 
Scores for LDA by Beta with Alpha {a}', adf['beta'].tolist(), adf['coherence'].tolist() ) # + # Calculate additional information max_result = overall_df[ overall_df['coherence'] == max(overall_df['coherence'])] best_coherence = float(max_result['coherence']) best_alpha = max_result['alpha'].item() best_beta = max_result['beta'].item() best_model = max_result['model'].item() print(f'Highest coherence score was {best_coherence} ' f'using alpha {best_alpha} and beta {best_beta}') # - # Create wordclouds for final LDA for i, topic in best_model.show_topics(formatted=False): tm.form_wordcloud(f'LDA Topic {i}', topic) # + # Assign labels to topics if possible labels = {} for i, topic in best_model.show_topics(formatted=False, num_words=5): print(f'Final LDA Topic {i}: ', end='') print(' '.join([word for word, _ in topic])) label = input('\tEnter topic label: ') labels[i] = label print() # + # Perform quantitative analysis on qualitative results total_unlabelled = len([s for _, s in labels.items() if not len(s)]) unlabelled_percent = round((total_unlabelled / len(labels)) * 100) print(f'Total topics unlabelled for final LDA: {unlabelled_percent}%') all_labels = set([s for _, s in labels.items() if s]) covered_labels = len([s for s in all_labels if s in tm.DEFAULT_LABELS]) covered_percent = round((covered_labels / len(tm.DEFAULT_LABELS)) * 100) print(f'Total label coverage for final LDA: {covered_percent}%')
evaluate_final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from tqdm import tqdm from sklearn.metrics import roc_auc_score from numpy.testing import assert_almost_equal from myfunc.my_roc_auc import my_roc_auc # - # # Scikit-learnと自作のAUC比較 # ## サンプルデータで一致確認 # + # 実装がsklearnのAUCと一致するかテスト n_samples = 1000 np.random.seed(n_samples) # generate sample data true_label = np.random.choice([0,1], size=n_samples) pred_proba = np.random.choice(np.linspace(0, 1, 100), size=n_samples) # + # calc ROCAUC roc_auc_sklearn = roc_auc_score(true_label, pred_proba) roc_auc_mine = my_roc_auc(true_label, pred_proba) print("ROCAUC\n\tsklearn:\t%.15f\n\tmine:\t%.15f"%(roc_auc_sklearn, roc_auc_mine)) assert_almost_equal(roc_auc_sklearn, roc_auc_mine, decimal=10) # - # ## assert_almost_equalの挙動確認 # + # 指定したprediction (decimal)での一致を確認できない場合Errorがでる try: assert_almost_equal(roc_auc_sklearn, roc_auc_mine, decimal=17) # 小数点 16位までの一致を確認 except AssertionError: # raise print("AssertionError Raised") print("ROCAUC\n\tsklearn:\t%.16f\n\tmine:\t%.16f"%(roc_auc_sklearn, roc_auc_mine)) # - # ## 複数のデータに対しての一致を確認 # + # 実装がsklearnのAUCと一致するかテスト, エラーがでなければ一致している n_trials = 1000 n_samples = 200 for n in tqdm(range(n_trials)): np.random.seed(n) # generate sample data true_label = np.random.choice([0,1], size=n_samples) pred_proba = np.random.choice(np.linspace(0, 1, 100), size=n_samples) # calc ROCAUC roc_auc_sklearn = roc_auc_score(true_label, pred_proba) roc_auc_mine = my_roc_auc(true_label, pred_proba) # check almost equal assert_almost_equal(roc_auc_sklearn, roc_auc_mine, decimal=10) # - # # 不均衡データにおいてのROC AUCの挙動確認 # ## すべて同じスコアが入る場合 # example 1 y_true_label = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]) y_pred_proba = np.array([0.5] * 10) # + # calc ROCAUC roc_auc_sklearn = roc_auc_score(y_true_label, 
y_pred_proba) roc_auc_mine = my_roc_auc(y_true_label, y_pred_proba) print("ROCAUC\n\tsklearn:\t%.5f\n\tmine:\t%.5f"%(roc_auc_sklearn, roc_auc_mine)) # - # ## 少数クラスのサンプルを1サンプル分類できるようになった場合の精度 # example 2 y_true_label = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]) y_pred_proba = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0]) # + # calc ROCAUC roc_auc_sklearn = roc_auc_score(y_true_label, y_pred_proba) roc_auc_mine = my_roc_auc(y_true_label, y_pred_proba) print("ROCAUC\n\tsklearn:\t%.5f\n\tmine:\t%.5f"%(roc_auc_sklearn, roc_auc_mine)) # - # ## 多数クラスのサンプルを1サンプル分類できるようになった場合の精度 # example 3 y_true_label = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]) y_pred_proba = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, 0.5, 0.5]) # + # calc ROCAUC roc_auc_sklearn = roc_auc_score(y_true_label, y_pred_proba) roc_auc_mine = my_roc_auc(y_true_label, y_pred_proba) print("ROCAUC\n\tsklearn:\t%.5f\n\tmine:\t%.5f"%(roc_auc_sklearn, roc_auc_mine)) # - # ## ROCAUC上昇幅の比率を見る # + # 1サンプルを分類成功することによるROC AUC上昇幅の比率を確認 # (少数クラスの上昇幅) / (多数クラスの上昇幅) (0.75000 - 0.5) / (0.56250 - 0.5) # - # 不均衡データの比率を計算する n_major = np.sum(y_true_label == 0) n_minor = np.sum(y_true_label == 1) minor_ratio = n_minor / n_major print("少数クラスの比率 :\t\t\t %.3f"%minor_ratio) print("少数クラスの比率の逆数 :\t %.3f"%(1/minor_ratio)) # 少数クラスの1サンプル分類成功することによるROC AUC上昇幅は、その少数クラスの比率の逆数倍だけ多数クラスの上昇幅より大きいことがわかる。 # (ただし、予測スコアの不等号が完全に成立する場合) # # つまり、ROC AUCはクラス比率の逆数によって、各クラス重みづけて評価していることがわかる。
notebooks/202105_roc_auc_understanding.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Apache Toree - Scala // language: scala // name: apache_toree_scala // --- // # Toree Magics<a name="top"></a> // Magics are special "functions" which enable features or execute some special code. Magics can receive input arguments when they are invoked. There are two types of magics: `cell` magics and `line` magics. Magics invocations are not case sensitive. // // **Table of Contents** // // 1. [Line Magics](#line-magics) // 1. [LsMagic](#lsmagic) // 1. [Truncation](#truncation) // 1. [ShowTypes](#showtypes) // 1. [AddJar](#addjar) // 1. [AddDeps](#adddeps) // 1. [Cell Magics](#cell-magics) // 1. [DataFrame](#dataframe) // 1. [Html](#html) // 1. [JavaScript](#javascript) // 1. [SparkSQL](#sparksql) // ## Line Magics<a name="line-magics"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // Line magics are run on a single line and can have other code and line magics within the same cell. Line magics use the following syntax: // // ``` // // %magicname [args] // ``` // // // ### %LsMagic<a name="lsmagic"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The `LsMagic` is a magic to list all the available magics. // + jupyter={"outputs_hidden": false} // %LsMagic // - // ### %Truncation<a name="truncation"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // Toree will, by default, truncate results from statements. This can be managed through the `%Truncation` magic. To see the current state of the truncation setting you can invoke the magic. 
// + jupyter={"outputs_hidden": false} // invoke the truncation magic to see if truncation is on or off // %Truncation // + jupyter={"outputs_hidden": false} // return a value to see the truncation (1 to 200) // + jupyter={"outputs_hidden": false} // %Truncation off (1 to 200) // + jupyter={"outputs_hidden": false} // %Truncation on (1 to 200) // - // ### %ShowTypes<a name="showtypes"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The type information for a result is hidden by default. This behavior can be changed by using the `%ShowTypes` magic. You can view the current state of `%ShowTypes` by invoking it with no arguments. // + jupyter={"outputs_hidden": false} // %ShowTypes // + jupyter={"outputs_hidden": false} "Hello types!" // + jupyter={"outputs_hidden": false} // %ShowTypes on "Hello types!" // + jupyter={"outputs_hidden": false} (1, "Hello types!") // + jupyter={"outputs_hidden": false} // %ShowTypes off "Hello types!" // - // ### %AddJar<a name="addjar"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // `AddJar` is a magic that allows the addition of jars to Torree's environment. You can see the arguments for `AddJar` by invoking it with no arguments. // + jupyter={"outputs_hidden": false} // %AddJar // + jupyter={"outputs_hidden": false} // %AddJar https://repo1.maven.org/maven2/org/lwjgl/lwjgl/3.0.0b/lwjgl-3.0.0b.jar // + jupyter={"outputs_hidden": false} org.lwjgl.Version.getVersion() // - // ## %AddDeps<a name="adddeps"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // `AddDeps` is a magic to add dependencies from a maven repository. You can see the arguments for `AddDeps` by invoking it with no arguments. // + jupyter={"outputs_hidden": false} // %AddDeps // - // Note, that by default the `AddDeps` magic will only retrieve the specified dependency. If you want the transitive dependencies provide the `--transitive` flag. 
// + jupyter={"outputs_hidden": false} // %AddDeps org.joda joda-money 0.11 --transitive --trace --verbose // + jupyter={"outputs_hidden": false} org.joda.money.CurrencyUnit.AUD // - // ## Cell Magics<a name="cell-magics"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // Cell magics are magics which take the whole cell as their argument. They take the following form: // // ``` // // %%magicname // line1 // line2 // ... // ``` // ### %%DataFrame<a name="dataframe"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The `%%DataFrame` magic is used to convert a Spark SQL DataFrame into various formats. Currently, `json`, `html`, and `csv` are supported. The magic takes an expression, which evauluates to a dataframe, to perform the conversion. So, we first need to create a DataFrame object for reference. // + jupyter={"outputs_hidden": true} case class DFRecord(key: String, value: Int) val sqlc = spark import sqlc.implicits._ val df = sc.parallelize(1 to 10).map(x => DFRecord(x.toString, x)).toDF() // - // The default output is `html` // + jupyter={"outputs_hidden": false} // %%dataframe // + jupyter={"outputs_hidden": false} // %%dataframe df // - // You can specify the `--output` argument to change the output type. // + jupyter={"outputs_hidden": false} // %%dataframe --output=csv df // - // There is also an option to limit the number of records returned. // + jupyter={"outputs_hidden": false} // %%dataframe --limit=3 df // - // ### %%Html<a name="html"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The `%%HTML` magic allows you to return HTML. // + jupyter={"outputs_hidden": false} language="html" // <p> // Hello, <strong>Magics</strong>! // </p> // - // ### %%JavaScript<a name="javascript"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The `%%JavaScript` magic allows to return JavaScript. The JavaScript code will run in the notebook. 
// + jupyter={"outputs_hidden": false} // %%JavaScript alert("Hello, Magics!") // - // ### %%SparkSQL<a name="sparksql"></a><span style="float: right; font-size: 0.5em"><a href="#top">Top</a></span> // The `%%SparkSQL` magic allows for SQL queries to be performed against tables saved in spark. // + jupyter={"outputs_hidden": false} val sqlc = spark import sqlc.implicits._ case class Record(key: String, value: Int) val df = sc.parallelize(1 to 10).map(x => Record(x.toString, x)).toDF() df.registerTempTable("MYTABLE") // + jupyter={"outputs_hidden": false} // %%SQL SELECT * FROM MYTABLE WHERE value >= 6 // + jupyter={"outputs_hidden": false} // %%SQL SELECT * FROM MYTABLE WHERE value >= 4
spark-flavor/resources/tutorials/toree-scala-kernel-tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] Collapsed="false" slideshow={"slide_type": "slide"} # <img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/> # # # MAT281 # ### Aplicaciones de la Matemática en la Ingeniería # + [markdown] Collapsed="false" slideshow={"slide_type": "slide"} # ## Módulo 04 # ## Laboratorio Clase 04: Métricas y selección de modelos # + [markdown] Collapsed="false" # ### Instrucciones # # # * Completa tus datos personales (nombre y rol USM) en siguiente celda. # * La escala es de 0 a 4 considerando solo valores enteros. # * Debes _pushear_ tus cambios a tu repositorio personal del curso. # * Como respaldo, debes enviar un archivo .zip con el siguiente formato `mXX_cYY_lab_apellido_nombre.zip` a <EMAIL>, debe contener todo lo necesario para que se ejecute correctamente cada celda, ya sea datos, imágenes, scripts, etc. # * Se evaluará: # - Soluciones # - Código # - Que Binder esté bien configurado. # - Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error. # + [markdown] Collapsed="false" # __Nombre__:<NAME> # # __Rol__:201510005-5 # + [markdown] Collapsed="false" # En este laboratorio utilizaremos el conjunto de datos _Abolone_. # + [markdown] Collapsed="false" # **Recuerdo** # # La base de datos contiene mediciones a 4177 abalones, donde las mediciones posibles son sexo ($S$), peso entero $W_1$, peso sin concha $W_2$, peso de visceras $W_3$, peso de concha $W_4$, largo ($L$), diametro $D$, altura $H$, y el número de anillos $A$. 
# + Collapsed="false"
import pandas as pd
import numpy as np

# + Collapsed="false"
abalone = pd.read_csv(
    "data/abalone.data",
    header=None,
    names=["sex", "length", "diameter", "height", "whole_weight", "shucked_weight", "viscera_weight", "shell_weight", "rings"]
)
# Encode sex numerically, keep only rows whose numeric columns are all
# strictly positive (several of them are log-transformed below), and cast
# everything to float.
# FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `float` is the documented, behaviorally identical replacement.
abalone_data = (
    abalone.assign(sex=lambda x: x["sex"].map({"M": 1, "I": 0, "F": -1}))
    .loc[lambda x: x.drop(columns="sex").gt(0).all(axis=1)]
    .astype(float)
)
abalone_data.head()

# + [markdown] Collapsed="false"
# #### Model A
# We consider 9 parameters, called $\alpha_i$, for the following model:
# $$ \log(A) = \alpha_0 + \alpha_1 W_1 + \alpha_2 W_2 +\alpha_3 W_3 +\alpha_4 W_4 + \alpha_5 S + \alpha_6 \log L + \alpha_7 \log D+ \alpha_8 \log H$$

# + Collapsed="false"
def _design_matrix_A(data):
    """Model-A design matrix: intercept, the four weights, sex, and the
    logs of length, diameter and height (shared by train and test)."""
    return (
        data.assign(
            intercept=1.,
            length=lambda x: x["length"].apply(np.log),
            diameter=lambda x: x["diameter"].apply(np.log),
            height=lambda x: x["height"].apply(np.log),
        )
        .loc[: , ["intercept", "whole_weight", "shucked_weight", "viscera_weight", "shell_weight", "sex", "length", "diameter", "height"]]
        .values
    )


def train_model_A(data):
    """Fit model A by least squares on log(rings).

    Returns the array of 9 coefficients (alpha_0 .. alpha_8).
    """
    y = np.log(data.loc[:, "rings"].values.ravel())
    X = _design_matrix_A(data)
    coeffs = np.linalg.lstsq(X, y, rcond=None)[0]
    return coeffs


def test_model_A(data, coeffs):
    """Predict the ring count for *data* from model-A coefficients."""
    ln_anillos = np.dot(_design_matrix_A(data), coeffs)
    # The model is linear in log(rings), so exponentiate to recover rings.
    return np.exp(ln_anillos)


# + [markdown] Collapsed="false"
# #### Model B
# We consider 6 parameters, called $\beta_i$, for the following model:
# $$ \log(A) = \beta_0 + \beta_1 W_1 + \beta_2 W_2 +\beta_3 W_3 +\beta W_4 + \beta_5 \log( L D H ) $$

# + Collapsed="false"
def _design_matrix_B(data):
    """Model-B design matrix: intercept, the four weights and log(L*D*H)."""
    return (
        data.assign(
            intercept=1.,
            ldh=lambda x: (x["length"] * x["diameter"] * x["height"]).apply(np.log),
        )
        .loc[: , ["intercept", "whole_weight", "shucked_weight", "viscera_weight", "shell_weight", "ldh"]]
        .values
    )


def train_model_B(data):
    """Fit model B by least squares on log(rings); returns 6 coefficients."""
    y = np.log(data.loc[:, "rings"].values.ravel())
    X = _design_matrix_B(data)
    coeffs = np.linalg.lstsq(X, y, rcond=None)[0]
    return coeffs


def test_model_B(data, coeffs):
    """Predict the ring count for *data* from model-B coefficients."""
    ln_anillos = np.dot(_design_matrix_B(data), coeffs)
    return np.exp(ln_anillos)


# + [markdown] Collapsed="false"
# #### Model C
# We consider 12 parameters, called $\theta_i^{k}$, with $k \in \{M, F, I\}$, for the following model:
#
# If $S=male$:
# $$ \log(A) = \theta_0^M + \theta_1^M W_2 + \theta_2^M W_4 + \theta_3^M \log( L D H ) $$
#
# If $S=female$:
# $$ \log(A) = \theta_0^F + \theta_1^F W_2 + \theta_2^F W_4 + \theta_3^F \log( L D H ) $$
#
# If $S=undefined$:
# $$ \log(A) = \theta_0^I + \theta_1^I W_2 + \theta_2^I W_4 + \theta_3^I \log( L D H ) $$

# + Collapsed="false"
def _design_frame_C(data):
    """Model-C frame: intercept, shucked/shell weight, log(L*D*H), plus the
    sex and rings columns that the per-sex fit/predict loops group on."""
    return (
        data.assign(
            intercept=1.,
            ldh=lambda x: (x["length"] * x["diameter"] * x["height"]).apply(np.log),
        )
        .loc[: , ["intercept", "shucked_weight", "shell_weight", "ldh", "sex", "rings"]]
    )


def train_model_C(data):
    """Fit one least-squares model per sex on log(rings).

    Returns a dict mapping each sex code (-1, 0, 1) to its 4 coefficients.
    """
    df = _design_frame_C(data)
    coeffs_dict = {}
    for sex, df_sex in df.groupby("sex"):
        X = df_sex.drop(columns=["sex", "rings"])
        y = np.log(df_sex["rings"].values.ravel())
        coeffs_dict[sex] = np.linalg.lstsq(X, y, rcond=None)[0]
    return coeffs_dict


def test_model_C(data, coeffs_dict):
    """Predict ring counts per sex group.

    Returns a dict mapping each sex code to the predictions for its rows,
    in the same groupby order used by train_model_C.
    """
    df = _design_frame_C(data)
    pred_dict = {}
    for sex, df_sex in df.groupby("sex"):
        X = df_sex.drop(columns=["sex", "rings"])
        ln_anillos = np.dot(X, coeffs_dict[sex])
        pred_dict[sex] = np.exp(ln_anillos)
    return pred_dict


# + [markdown] Collapsed="false"
# ### 1. 
Split Data (1 pto) # # Crea dos dataframes, uno de entrenamiento (80% de los datos) y otro de test (20% restante de los datos) a partir de `abalone_data`. # # _Hint:_ `sklearn.model_selection.train_test_split` funciona con dataframes! # + Collapsed="false" from sklearn.model_selection import train_test_split abalone_train, abalone_test = train_test_split(abalone_data,test_size=0.2,random_state = 42) abalone_train.head() # + [markdown] Collapsed="false" # ### 2. Entrenamiento (1 pto) # # Utilice las funciones de entrenamiento definidas más arriba con tal de obtener los coeficientes para los datos de entrenamiento. Recuerde que para el modelo C se retorna un diccionario donde la llave corresponde a la columna `sex`. # + Collapsed="false" coeffs_A = train_model_A(abalone_train) coeffs_B = train_model_B(abalone_train) coeffs_C = train_model_C(abalone_train) # + [markdown] Collapsed="false" # ### 3. Predicción (1 pto) # # Con los coeficientes de los modelos realize la predicción utilizando el conjunto de test. El resultado debe ser un array de shape `(835, )` por lo que debes concatenar los resultados del modelo C. # # **Hint**: Usar `np.concatenate`. # + Collapsed="false" y_pred_A = test_model_A(abalone_test,coeffs_A) y_pred_B = test_model_B(abalone_test,coeffs_B) y_pred_C = np.concatenate(list(test_model_C(abalone_test,coeffs_C).values())) # + [markdown] Collapsed="false" # ### 4. Cálculo del error (1 pto) # # Se utilizará el Error Cuadrático Medio (MSE) que se define como # # $$\textrm{MSE}(y,\hat{y}) =\dfrac{1}{n}\sum_{t=1}^{n}\left | y_{t}-\hat{y}_{t}\right |^2$$ # # Defina una la función `MSE` y el vectores `y_test_A`, `y_test_B` e `y_test_C` para luego calcular el error para cada modelo. # # **Ojo:** Nota que al calcular el error cuadrático medio se realiza una resta elemento por elemento, por lo que el orden del vector es importante, en particular para el modelo que separa por `sex`. 
# + Collapsed="false"
def MSE(y_real, y_pred):
    """Mean Squared Error between two equally-ordered 1-D arrays.

    FIX: the previous version wrapped the difference in np.absolute before
    squaring; |d|**2 == d**2 for real-valued data, so the abs was redundant.
    np.mean replaces the manual sum / len(y_real) with the same result.
    """
    return np.mean(np.square(y_real - y_pred))


# + Collapsed="false"
y_test_A = abalone_test['rings'].values
y_test_B = abalone_test['rings'].values
# Model C's predictions were concatenated in groupby order (sex = -1, 0, 1),
# so the ground truth must be assembled in that same group order.
y_test_C = np.concatenate(list((abalone_test.groupby('sex').get_group(n)['rings'] for n in (-1,0,1))))

# + Collapsed="false"
error_A = MSE(y_test_A,y_pred_A)
error_B = MSE(y_test_B,y_pred_B)
error_C = MSE(y_test_C,y_pred_C)

# + Collapsed="false"
print(f"Error modelo A: {error_A:.2f}")
print(f"Error modelo B: {error_B:.2f}")
print(f"Error modelo C: {error_C:.2f}")

# + [markdown] Collapsed="false"
# **Which model is best under this metric?**

# + [markdown] Collapsed="false"
# Under the `MSE` metric the best model is model **B**.
# -
m04_machine_learning/m04_c04_metrics_and_model_selection/m04_c04_lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="U4gLwsfnn9jY" # # Menampilkan peta gempa hari ini # - # <p style="text-align:justify">Sebagai penutup, penyusun akan mengajak sidang pembaca (yang umumnya dari kalangan geosains) untuk melakukan kegiatan pemetaan gempa selama 24 jam terakhir dari data USGS. Diharapkan modul ini akan membantu pembaca untuk mengaplikasikan materi pembelajaran Cartopy pada bidang keilmuannya masing - masing.</p> # <p style="text-align:justify">Untuk memulai proyek mini ini, kita wajib mengimpor tiga buah pustaka Python, yakni: pandas (untuk membaca data tabular), matplotlib (untuk visualisasi), dan Cartopy (untuk pemetaan).</p> import pandas as pd import matplotlib import matplotlib.pyplot as plt import cartopy.crs as ccrs # Selain itu, kita juga perlu mengatur tampilan plot agar tampak lebih estetik. # %matplotlib inline matplotlib.rcParams['figure.figsize'] = (14,10) # Kita membaca data tabular secara *remote* dengan menggunakan pandas. df = pd.read_csv('http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/1.0_week.csv') df.head() # Karena kolom time belum berupa objek datetime, maka kita perlu melakukan konversi sebagai berikut: df['time'] = pd.to_datetime(df['time']) type(df['time'][1]) df.head() # <p style="text-align:justify">Untuk mendapatkan data gempa hari ini, kita perlu melakukan operasi <i>masking</i>. Sebagai catatan, Notebook ini dibuat pada tanggal 8 Juli 2020. 
Oleh karena itu, penyusun akan melakukan <i>masking</i> waktu dari tanggal 7 hingga 8 Juli 2020 (hal ini patut disesuaikan oleh pembaca).</p> mask = ((df['time'] >= '2020-07-07') & (df['time'] < '2020-07-08')) gempaHariIni = df.loc[mask] gempaHariIni.head() # Kita dapat mengetahui besaran gempa maksimum dan minimum yang terjadi secara global pada hari ini dengan menggunakan perintah sebagai berikut: print(df[df['mag'] == df['mag'].min()]) # besaran gempa minimum print(df[df['mag'] == df['mag'].max()]) # Sesudah itu, kita akan mengekstraksi data bujur, lintang, dan besaran gempa (dalam skala Richter) dalam bentuk objek *list*: bujur = list(df['longitude']) lintang = list(df['latitude']) besaran = list(df['mag']) # <p style="text-align:justify">Kemudian kita akan mengklasifikasikan titik - titik gempa dengan menggunakan warna - warna tertentu (hijau untuk gempa di bawah 3 SR, kuning untuk gempa dengan rentang 3 - 5 SR, dan merah untuk gempa di atas 5 SR) dengan menggunakan fungsi sebagai berikut:</p> def warna(besaran): if besaran < 3.0: return 'g' elif 3.0 <= besaran < 5.0: return 'y' else: return 'r' # Kemudian kita tinggal melakukan pemetaan dengan menggunakan Cartopy: # + ax = plt.axes(projection = ccrs.PlateCarree()) ax.coastlines(resolution='50m') ax.stock_img() for i in range(len(besaran)): warnaEpi = warna(besaran[i]) plt.scatter(bujur[i], lintang[i], s=besaran[i]*10, c=warnaEpi) plt.title('Peta gempa bumi global pada 7 - 8 Juli 2020');
notebooks/mini_proyek_gempa.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import warnings
warnings.filterwarnings("ignore")
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from tqdm import tqdm
from nltk import word_tokenize
# NOTE: this rebinds the name `stopwords` from the nltk module to a plain
# list; later cells (e.g. get_stemmed_words) rely on it being the list.
stopwords = stopwords.words('english')
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

# %matplotlib inline

data = pd.read_csv('./sentiment_train', sep='\t')
data.info()
data.head()

data.text.str.split(" ")

data.text.apply(lambda x: word_tokenize(x.lower()))

# FIX: compile the cleaning pattern once at module level instead of on every
# clean_text call (the function is applied to every row of the corpus).
_SPLCHARS = re.compile(r'[^A-Za-z ]', re.IGNORECASE)


def clean_text(x):
    """Lowercase *x*, drop every non-letter/non-space character, tokenize,
    and remove English stopwords; returns the kept tokens joined by spaces."""
    x = _SPLCHARS.sub('', x)
    x = word_tokenize(x.lower())
    x = [w for w in x if w not in stopwords]
    return ' '.join(x)


# FIX: clean_text already lowercases its input, so the extra x.lower() in
# the original lambda was redundant -- apply the function directly.
data['text_clean'] = data.text.apply(clean_text)

count_vec = CountVectorizer()
count_vec_dict = count_vec.fit(data.text)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out(); kept as-is to match the environment
# this notebook was written for -- confirm the installed sklearn version.
count_vec_dict.get_feature_names()

reviews_text_vec = count_vec.transform(data.text)
reviews_text_vec.toarray().shape

df_reviews = pd.DataFrame(reviews_text_vec.toarray())
df_reviews

df_reviews.columns = count_vec_dict.get_feature_names()

df_reviews[['just','awesome', 'nice']][:1]

# Per-term corpus frequency = column sums of the document-term matrix.
feature_freq = np.sum(reviews_text_vec.toarray(), axis = 0)
feature_freq

feature_df = pd.DataFrame({'Feature_Name':count_vec_dict.get_feature_names(), 'Frequency':feature_freq})
feature_df.sort_values('Frequency', ascending = False)

from sklearn.feature_extraction import text

# + active=""
# stopwords = list(text.ENGLISH_STOP_WORDS)
# -

stopwords

# Drop terms appearing in >70% or <5 documents and add bigrams; this is the
# vocabulary actually used for modelling below.
count_vec_v1 = CountVectorizer(stop_words=stopwords, max_df= .7, min_df=5, ngram_range=(1,2))
count_vec_dict = count_vec_v1.fit(data.text)
reviews_text_vec = count_vec_v1.transform(data.text) df_reviews = pd.DataFrame(reviews_text_vec.toarray()) df_reviews.columns = count_vec_dict.get_feature_names() df_reviews.head() df_reviews.shape reviews_text_vec.getnnz() # # Train Test Split from sklearn.naive_bayes import BernoulliNB from sklearn.model_selection import train_test_split y = data.sentiment.values X_train, X_test, y_train, y_test = train_test_split(df_reviews.values, y, train_size = 0.7, random_state = 42) X_train.shape # # Bayes Classifier bayes_clf = BernoulliNB() bayes_clf.fit(X_train, y_train) pred = bayes_clf.predict(X_test) # # Performance # + from sklearn.metrics import confusion_matrix, roc_auc_score, precision_score, recall_score, precision_recall_curve confusion_matrix(y_test, pred, (1,0)) # - roc_auc_score(y_test, pred) precision_score(y_test, pred) recall_score(y_test, pred) from sklearn.metrics import classification_report print(classification_report(y_test, pred)) # # RF from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(max_depth=6, n_estimators= 100) rf.fit(X_train, y_train) pred = rf.predict(X_test) from sklearn.model_selection import GridSearchCV paramList = {'n_estimators': [100], 'max_depth': [6,9], 'max_features':[.5, .8]} rf = RandomForestClassifier() gridSearch = GridSearchCV(cv=5, estimator=rf, param_grid=paramList, scoring='roc_auc') gridSearch.fit(X_train, y_train) pred = gridSearch.predict(X_test) confusion_matrix(y_test, pred, (1,0)) print(classification_report(y_test, pred)) gridSearch.best_estimator_ gridSearch.grid_scores_ featureImp = pd.DataFrame({'Feature_Name': df_reviews.columns, 'Importance':gridSearch.best_estimator_.feature_importances_}) featureImp.sort_values('Importance', ascending = False,inplace=True) featureImp['cumsum'] = featureImp.Importance.cumsum() featureImp # # STEMMING # + stemmer = PorterStemmer() analyzer = CountVectorizer().build_analyzer() def get_stemmed_words(doc): stemmed_words = [stemmer.stem(word) for word 
in analyzer(doc)] final_words = [word for word in stemmed_words if word not in stopwords] return final_words # - get_stemmed_words('the player played') # + from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vec_v2 = TfidfVectorizer(tokenizer=get_stemmed_words, max_df=.7, min_df=5, ngram_range=(1,2)) # - tfidf_vec_v2.fit(data.text) tfidf_dict = list(tfidf_vec_v2.get_feature_names()) tfidf_dict len(tfidf_dict) # - Rest are same as above # # # DL for Text Processing data = pd.read_csv('./labeledTrainData.tsv', sep='\t') data.head() from keras.preprocessing import text, sequence tokenizer = text.Tokenizer(num_words=20000) tokenizer.fit_on_texts(data.review) tokenizer.word_counts imdb_seq_df = tokenizer.texts_to_sequences(data.review) imdb_seq_df[:10] np.mean(list(map(lambda x: len(x), imdb_seq_df)))+2*np.std(list(map(lambda x: len(x), imdb_seq_df))) sns.distplot(list(map(lambda x: len(x), imdb_seq_df))) train_seq = sequence.pad_sequences(imdb_seq_df, 200, truncating='pre', padding='pre') y = data.sentiment # # Dense DN - Not Done right, Input should be TFIDF / COUNTVEC and not padded Sequence from keras.models import Sequential from keras.layers import Dense, Activation model_v1 = Sequential() model_v1.add(Dense(256, input_shape = (200,))) model_v1.add(Activation('relu')) model_v1.add(Dense(32)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) model_v1.summary() from keras.optimizers import Adam optim = Adam() model_v1.compile(optimizer=optim, loss='binary_crossentropy', metrics=['accuracy']) BATCH_SIZE = 64 EPOCHS = 10 x_train_seq, x_test_seq, y_train, y_test = train_test_split(train_seq, y, train_size = 0.8, random_state = 42) # + from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, 
verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') # - model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks = [checkpoint, earlystopping, reduce_lr]) # # Embedding Matrix # # ## IMP why we need one more word in the Matrix and how to process it # # In the method fit_on_texts of Tokenizer class (keras.preprocessing.text.Tokenizer - line 209), there is a comment shown below on line 4: # # ``` # wcounts = list(self.word_counts.items()) # wcounts.sort(key=lambda x: x[1], reverse=True) # sorted_voc = [wc[0] for wc in wcounts] # # note that index 0 is reserved, never assigned to an existing word # self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1))))) # ``` # I am interested for what is 0 reserved for? The only logical answer, implied from the comment as well, would be the uknown word token (given by parameter oov_token), but this is not true. The index of oov_token is 1 + word_count from the input texts. If this is somehow a mistake, and the comment is a legacy which is out of order, then I suggest index 0 becomes reserved for the oov_token. # # Because if you use the pad_sequence to process the sequence, you will find the 0 is used as the padding value. In order to distinguish between PAD and UNKNOWN, keras use `word_count+1` as the index of UNKNOWN. 
# # ``` # num_words = 3 # tk = Tokenizer(num_words=num_words+1, oov_token='UNK') # texts = ["my name is far faraway asdasd", "my name is","your name is"] # tk.fit_on_texts(texts) # # see #8092 below why I do these two line # tk.word_index = {e:i for e,i in tk.word_index.items() if i <= num_words} # <= because tokenizer is 1 indexed # tk.word_index[tk.oov_token] = num_words + 1 # print(tk.word_index) # print(tk.texts_to_sequences(texts)) # ``` # # Learning Self Embeddings c_embedding_matrix = np.random.rand(nb_words,8) from keras.layers import Embedding, Flatten model_v1 = Sequential() model_v1.add(Embedding(20000,8, input_shape = (200,), weights=[c_embedding_matrix], trainable=True)) model_v1.add(Flatten()) model_v1.add(Dense(800)) model_v1.add(Activation('relu')) model_v1.add(Dense(800)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) optim = Adam() model_v1.compile(optimizer=optim, loss='binary_crossentropy', metrics=['accuracy']) model_v1.summary() # + checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks = [checkpoint, earlystopping, reduce_lr]) # - # # Using GLOVE Embeddings EMBEDDING_FILE_GLOVE='../../../data-kaggle/ref/pretrained-models/glove.840B.300d.txt' #EMBEDDING_FILE_GLOVE def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32') embeddings_index_tw = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(EMBEDDING_FILE_GLOVE,encoding='utf-8')) word_index = tokenizer.word_index nb_words = min(20000, len(word_index)) embedding_matrix = np.random.rand(nb_words,300) embedding_matrix.shape def 
embed_word(embedding_matrix,i,word): embedding_vector_ft = embeddings_index_tw.get(word) if embedding_vector_ft is not None: embedding_matrix[i,:300] = embedding_vector_ft # Glove vector is used by itself if there is no glove vector but not the other way around. for word, i in word_index.items(): if i >= 20000: continue if embeddings_index_tw.get(word) is not None: embed_word(embedding_matrix,i,word) # + model_v1 = Sequential() model_v1.add(Embedding(20000,300, input_shape = (200,), weights=[embedding_matrix], trainable=True)) model_v1.add(Flatten()) model_v1.add(Dense(800)) model_v1.add(Activation('relu')) model_v1.add(Dense(800)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) optim = Adam() model_v1.compile(optimizer=optim, loss='binary_crossentropy', metrics=['accuracy']) model_v1.summary() # + checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), epochs=2, batch_size=256, callbacks = [checkpoint, earlystopping, reduce_lr]) # - # # RNN with custom Embeddings # + from keras.layers import SimpleRNN model_v1 = Sequential() model_v1.add(Embedding(20000,8, input_shape = (200,), weights=[c_embedding_matrix], trainable=True)) model_v1.add(SimpleRNN(32)) model_v1.add(Dense(200)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) model_v1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model_v1.summary() # + checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, verbose=2) earlystopping = 
EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), epochs=2, batch_size=256, callbacks = [checkpoint, earlystopping, reduce_lr]) # - # # RNN with GLOVE Embeddings # + from keras.layers import SimpleRNN model_v1 = Sequential() model_v1.add(Embedding(20000,300, input_shape = (200,), weights=[embedding_matrix], trainable=False)) model_v1.add(SimpleRNN(32)) model_v1.add(Dense(200)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) model_v1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model_v1.summary() # + checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), epochs=2, batch_size=256, callbacks = [checkpoint, earlystopping, reduce_lr]) # - # # LSTM with GLOVE Embeddings # + from keras.layers import LSTM model_v1 = Sequential() model_v1.add(Embedding(20000,300, input_shape = (200,), weights=[embedding_matrix], trainable=False, name='e1')) model_v1.add(LSTM(32)) model_v1.add(Dense(200)) model_v1.add(Activation('relu')) model_v1.add(Dense(1)) model_v1.add(Activation('sigmoid')) model_v1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model_v1.summary() # + checkpoint = ModelCheckpoint('./chkptr.hdf5', monitor='val_loss', verbose=2, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0.0001, verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') model_v1.fit(x_train_seq, y_train, validation_data=(x_test_seq, y_test), 
epochs=2, batch_size=256, callbacks = [checkpoint, earlystopping, reduce_lr]) # - # # Word Embeddings from NN word_embed_NN = model_v1.get_layer('e1').get_weights() word_embed_NN tokenizer.word_index['good'] word_embed_NN[0][49] from scipy.spatial.distance import cdist, cosine # # Word Similarity def get_distance(word1, word2): first_1 = tokenizer.word_index[word1] first_2 = tokenizer.word_index[word2] return 1- cdist([word_embed_NN[0][first_1]], [word_embed_NN[0][first_2]], metric='correlation') get_distance('great', 'excellent') get_distance('honest', 'honesty')
others/Sentiment_Analysis-Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7 (tensorflow)
#     language: python
#     name: tensorflow
# ---

import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import matthews_corrcoef, roc_auc_score
from sklearn.model_selection import cross_val_score, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import category_encoders as ce

# # LOAD DATA

# Load pickle file with the selected categorical features
import pickle
with open('C://Users/luisgasco/Documents/bosh_kaggle_comp/data/list_sel_cat_feat_10k.pkl', 'rb') as f:
    cat_features = pickle.load(f)

# Read a sample of the rest of the dataset chunk by chunk, so the three huge
# CSVs never have to be fully resident in memory at once.
date_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_date.csv",
                          chunksize=100000, dtype=np.float32)
num_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_numeric.csv",
                         usecols=list(range(970)), chunksize=100000, dtype=np.float32)
cat_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_categorical.csv",
                         usecols = cat_features, chunksize=100000)

# Keep a 5% random sample of every aligned chunk triple.
X = pd.concat([pd.concat([dchunk, nchunk, cchunk], axis=1).sample(frac=0.05)
               for dchunk, nchunk, cchunk in zip(date_chunks, num_chunks, cat_chunks)])

y = X.Response
y.head()

X = X.drop(["Id","Response"],axis=1)
X.shape

# # Transformation of categorical features
# Since we plan to train models such as XGBoost and RandomForest, we need to transform the categorical variables in the same way we do in previous notebooks

# Load the previously created encoder:
import category_encoders as ce  # version 1.2.8
filename = "C://Users/luisgasco/Documents/bosh_kaggle_comp/data/encoder_categorical_model_FULL_FINAL_TRUE.sav"
# FIX: the original `pickle.load(open(filename, 'rb'))` never closed the
# file handle; a `with` block releases it deterministically.
with open(filename, 'rb') as fh:
    ce_target = pickle.load(fh)
ce_target

cat_fet = X.loc[:, cat_features]
cat_fet.shape

# %%time
# Transform the categorical columns with the pre-fitted target encoder.
cat_fet_trans = ce_target.transform(cat_fet)
cat_fet_trans.shape

# Concat the transformed categorical features back with the rest of the dataframe:
X_otros = X.drop(cat_features, axis=1)
X_end = pd.concat([X_otros,cat_fet_trans],axis=1)
X_end.shape

# We have 2199 features. We need to reduce the feature space prior to fine tune a model. For that reason we are going to select features by its importance by using XGBoost classifier.

# +
from sklearn.model_selection import train_test_split, StratifiedKFold

X2 = X_end.values
y = y.ravel()

clf = XGBClassifier(learning_rate=0.1, max_depth=3, n_estimators=100, min_child_weight=1, nthread=-1)

cv = StratifiedKFold(n_splits=3)
preds = np.ones(y.shape[0])
# FIX: split on the same matrix that is indexed below (X2); the original
# passed the pre-encoding dataframe X, which happens to have the same row
# count (so the fold indices were identical) but is a different object --
# an accident waiting to happen if either frame is ever filtered.
for (train, test) in cv.split(X2, y):
    preds[test] = clf.fit(X2[train], y[train]).predict_proba(X2[test])[:,1]
    print(" ROC AUC: {:.3f}".format( roc_auc_score(y[test], preds[test])))

print(roc_auc_score(y, preds))
# -

most_important_features = pd.Series(clf.feature_importances_, index=X_end.columns).nlargest(100)
most_important_features

df_importance = pd.DataFrame({'feature_name':X_end.columns, 'importance':clf.feature_importances_})
df_importance = df_importance.sort_values(by=['importance'], ascending=False)
df_importance_150 = df_importance[:150]
df_importance_150.shape

df_importance_150.to_pickle("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/final_selected_features.pkl")
2_TotalFeature selection_sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: haiku # language: python # name: haiku # --- # + ##TODO: Randomize seed / seasonize seed (don't base it off last one - overconverges) # + # default_exp core # - # # jetson-haiku # # > API details. # # + # -*- coding: utf-8 -*- """Top-level package for jetson-haiku, GPT2 haikubot for Jetson Nano""" __author__ = """LemurTime""" __email__ = "<EMAIL>" __version__ = "0.0.2" #0.0.2: added bad word list, filter for invalid characters, seasonal haiku seed, datestamp and haiku seed to log. #Todo: Update with new GPT2 model #Todo: fine-tine GPT2 output #Todo: add Twitter output? # + #export #define list of all haikus global finished_haiku global finished_haiku_list #global GPT2_seed_text verse_one_string="" verse_two_string="" verse_three_string="" haiku_seed="" # - #export import numpy as np import syllapy import re import random #import gpt2Pytorch as gp2py #rather, let's just incorporate the gp2pytorch code, for now: # + #export #Initial arguments go here: #GPT2_seed_text="Cherry trees in the summer." 
args_nsamples = 1
args_batch_size = -1
args_length = 1
args_unconditional = 0
args_temperature = 0.9
args_top_k = 40
args_quiet = 1
verse_input = ""

# +
#export
# GPT-2 sampling code vendored from gpt-2-Pytorch (@graykode) rather than
# imported as a package.
# TODO: replace with a proper import of a maintained GPT-2 implementation.
'''
code by <NAME>(@graykode)
Original Paper and repository here : https://github.com/openai/gpt-2
GPT2 Pytorch Model : https://github.com/huggingface/pytorch-pretrained-BERT
'''
import os
import sys
import torch
import random
import argparse
import numpy as np

from GPT2.model import GPT2LMHeadModel
from GPT2.utils import load_weight
from GPT2.config import GPT2Config
from GPT2.sample import sample_sequence
from GPT2.encoder import get_encoder


def text_generator(state_dict):
    """Sample text from GPT-2 seeded with the global ``GPT2_seed_text``.

    Reads the module-level ``args_*`` settings (nsamples, batch size,
    temperature, top-k, ...) and stores the last generated sample in the
    global ``GPT2_output``.

    Args:
        state_dict: pre-trained GPT-2 weights, as loaded by ``torch.load``.
    """
    global GPT2_output

    # Resolve the -1 sentinel into a local instead of reassigning the
    # module-level args_* name (reassigning it inside the function made it
    # a local and raised UnboundLocalError before it was first read).
    batch_size = 1 if args_batch_size == -1 else args_batch_size
    assert args_nsamples % batch_size == 0

    if args_quiet is False:
        # `args` from argparse no longer exists; report the globals instead.
        print("nsamples=%d batch_size=%d temperature=%s top_k=%d"
              % (args_nsamples, batch_size, args_temperature, args_top_k))

    seed = random.randint(0, 2147483647)
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the model.
    enc = get_encoder()
    config = GPT2Config()
    model = GPT2LMHeadModel(config)
    model = load_weight(model, state_dict)
    model.to(device)
    model.eval()

    # Fixed sample length: half the context window, as in the upstream
    # sample script's default.
    length = config.n_ctx // 2

    context_tokens = enc.encode(GPT2_seed_text)
    generated = 0
    text = ""
    for _ in range(args_nsamples // batch_size):
        out = sample_sequence(
            model=model,
            length=length,
            context=context_tokens if not args_unconditional else None,
            start_token=enc.encoder['<|endoftext|>'] if args_unconditional else None,
            batch_size=batch_size,
            temperature=args_temperature,
            top_k=args_top_k,
            device=device,
        )
        # Strip the seed tokens; keep only the newly generated continuation.
        out = out[:, len(context_tokens):].tolist()
        for i in range(batch_size):
            generated += 1
            text = enc.decode(out[i])
            if args_quiet is False:
                print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
            GPT2_output = text
    print(text)


if __name__ == '__main__':
    if os.path.exists('gpt2-pytorch_model.bin'):
        state_dict = torch.load(
            'gpt2-pytorch_model.bin',
            map_location='cpu' if not torch.cuda.is_available() else None)
        # text_generator(state_dict)
    else:
        print('Please download gpt2-pytorch_model.bin')
        sys.exit()

# +
#export
##UPDATING VERSE GEN CODE TO ADD WORD FILTER - REMOVE ARTICLES AT END OF LINE
##TODO - REMOVE GLOBAL VARIABLES
##TODO - add Kigo / seasonal word


def verse_gen(verse_input, syllable_length):
    """Extract a phrase with exactly ``syllable_length`` syllables.

    Scans ``verse_input`` word by word, growing a candidate phrase until
    it reaches the requested syllable count.  Phrases containing numerals
    or special characters, or ending in an article/preposition, are
    rejected and the scan restarts one word further into the input.
    After 40 attempts a fresh GPT-2 sample is requested and the search
    recurses on it.

    Args:
        verse_input: candidate text to mine for a haiku line.
        syllable_length: exact number of syllables required (5 or 7).

    Returns:
        The accepted phrase as a string.
    """
    global verse_words
    global verse_string
    global verse_syllable_count

    # Go to first whitespace, count syllables.  Continue until
    # "syllable_length" syllables.  If over the required amount of
    # syllables, try with new input.
    # Window [y:x) over the word list; z counts attempts before giving up
    # on this input and sampling a new one.
    y = 0
    x = 1
    z = 0
    verse_syllable_count = 0
    verse_words = verse_input.split(' ')

    while z < 40:
        if verse_syllable_count < syllable_length:
            print("Adding next word to the string")
            verse_string = ' '.join(verse_words[y:x])
            # Only count syllables for plain alphabetic phrases; anything
            # with numerals or special characters is discarded.
            if re.match(r"^[a-zA-Z\s-]*$", verse_string):
                verse_syllable_count = syllapy.count(verse_string)
                x = x + 1
                z = z + 1
                print(verse_string)
            else:
                print(verse_string, "Invalid characters. Moving up in string.")
                verse_string = ""
                verse_syllable_count = 0
                y = y + 1

        if verse_syllable_count == syllable_length:
            verse_string = verse_string.strip()
            last_word = verse_string.split(" ")[-1]
            # Reject phrases ending in an article or other weak word.
            bad_word_list = 'as of the and if or at by our your my'
            bad_word_set = set(bad_word_list.split())
            if last_word in bad_word_set:
                print(verse_string, ": String ends in article or other undesirable word. Moving up in string")
                verse_string = ""
                verse_syllable_count = 0
                y = y + 1
            else:
                print("Haiku line found:\n", verse_string)
                return verse_string

        if verse_syllable_count > syllable_length:
            # String overshot the target count; restart one word further in.
            print(verse_string, ": String too long. Moving up in string.")
            verse_string = ""
            verse_syllable_count = 0
            y = y + 1

    # Out of attempts on this input: sample new text and start over.
    if z == 40:
        print("Getting new input.")
        text_generator(state_dict)
        verse_input = GPT2_output
        verse_string = verse_gen(verse_input, syllable_length)
        return verse_string
#END OF VERSE ONE GEN

# +
#export
## Code to run the module


def haiku_gen():
    """Generate one complete 5-7-5 haiku and write it to disk.

    Seeds GPT-2 with a random saijiki (season-word) line, extracts three
    verses with ``verse_gen``, prints the result, and appends it (with a
    timestamp) to haikulist.txt; the latest haiku alone is written to
    latesthaiku.txt for the GUI.
    """
    import random
    global GPT2_seed_text
    global haiku_seed

    # Seed from the saijiki word list
    # (http://jti.lib.virginia.edu/japanese/haiku/saijiki/full.html -> sajiki.txt).
    with open('sajiki.txt') as f:
        lines = f.read().splitlines()
    GPT2_seed_text = random.choice(lines)
    haiku_seed = GPT2_seed_text
    text_generator(state_dict)

    # Verse 1: five syllables.
    verse_input = GPT2_output
    verse_one_string = verse_gen(verse_input, 5)

    # Verse 2: seven syllables, re-seeded with verse 1.
    GPT2_seed_text = verse_one_string
    text_generator(state_dict)
    verse_input = GPT2_output
    verse_two_string = verse_gen(verse_input, 7)

    # Verse 3: five syllables, also seeded with verse 1 (seeding with
    # verse 2 led to too much convergence).
    GPT2_seed_text = verse_one_string
    text_generator(state_dict)
    verse_input = GPT2_output
    verse_three_string = verse_gen(verse_input, 5)

    # Print finished haiku.
    print("Here is the haiku seed:")
    print(haiku_seed)
    print("Here is the haiku:")
    finished_haiku = '\n'.join([verse_one_string, verse_two_string, verse_three_string])
    print(finished_haiku)

    # Append the haiku and a timestamp to the running log.
    from datetime import datetime
    haiku_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
    with open("haikulist.txt", "a") as f:
        f.write("\n\nHaiku seed:\n" + haiku_seed + "\n\nHaiku:\n" + finished_haiku + "\n" + haiku_time)
    # Place finished haiku in an input file for the GUI (overwrite).
    with open("latesthaiku.txt", "w") as f:
        f.write(finished_haiku)


import random
from IPython.display import Audio

# Audible beep (plays when the cell result is displayed).
wave = np.sin(2*np.pi*400*np.arange(10000*2)/10000)
Audio(wave, rate=30000, autoplay=True)

# +
#export
##Run the module:
#Initial arguments go here:
from IPython.display import Audio

#GPT2_seed_text="Gorillas in the mist."
args_nsamples = 1
args_batch_size = -1
args_length = 1
args_unconditional = 0
args_temperature = 0.9
args_top_k = 40
args_quiet = 1
verse_input = ""

z = 0
while z < 100:
    haiku_gen()
    # Beep after each iteration.  NOTE(review): Audio() only sounds when
    # its value is displayed; inside a loop this object is discarded.
    wave = np.sin(2*np.pi*400*np.arange(10000*2)/10000)
    Audio(wave, rate=30000, autoplay=True)
    z += 1  # fixed: was `z + 1`, a no-op that never advanced the counter

# Beep when all done.
wave = np.sin(2*np.pi*400*np.arange(10000*2)/10000)
Audio(wave, rate=40000, autoplay=True)

# +
#hide
#from nbdev.showdoc import *

# +
#from nbdev.export import *
#notebook2script()
# -

#Todo:
#Training feature - what is a good/bad haiku

# +
##Todo: make ananthropic
#Remove: wordlist indicating persons or personification (I, his, hers, mine, ours, who)
# -

# Scratch cells below were used while developing the end-of-line word filter.

#Testing
finished_haiku = ''

verse_one_string

# import random
# lines = open('sajiki.txt').read().splitlines()
# myline =random.choice(lines)
# print(myline)

myline

#test regex
word = "Welcome "

import re

print("Valid") if re.match(r"^[a-zA-Z\s]*$", word) else "Invalid"

word

word.strip()

word

verse_string = "think it would go as far as plant"

verse_string[-1]

verse_string = verse_string.strip()

verse_string[-1]

last_word = verse_string.split(" ")[-1]

last_word

if any(word in 'as of the' for word in last_word):
    print("bad word detected")
else:
    print("OK")

long_word_list = 'some one long two phrase three about above along after against plant'
long_word_set = set(long_word_list.split())

set('word along river'.split()) & long_word_set

set(['along'])

# +
if last_word in long_word_set:
    print("WORD FOUND")
else:
    print("Word not found")

# +
bad_word_list = 'as of the and if or'
bad_word_set = set(bad_word_list.split())

# fixed: this cell originally re-tested long_word_set instead of the
# bad_word_set it just built.
if last_word in bad_word_set:
    print("WORD FOUND")
else:
    print("Word not found")
# -

# +
from datetime import datetime

# get current date
now = datetime.now()

# convert current date into timestamp
timestamp = datetime.timestamp(now)
# -

timestamp

print(timestamp)

print(now)

haikutime = ""
haikutime = now

haikutime

dt_object = datetime.fromtimestamp(timestamp)

dt_object

print(dt_object)

date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
print("date and time:", date_time)

date_time
00_core.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import numpy as np

# # Item XIV
#
# Show that the following Laurent expansion is valid in $1 < |z| < 2$:
# $$
# \frac{-1}{(z-1)(z-2)} = \sum_{n=0}^\infty \frac{z^n}{2^{n+1}} + \sum_{n=1}^\infty \frac{1}{z^n} \,,
# $$
# and draw a sketch of the region. Does an expansion exist when $|z|>2$? If so, please compute it.
#
# \* **Note**: I had to change the $1$ with $-1$, for the exercise to be correct.
#
# ---

# We have that:
# \begin{align*}
# \sum_{n=0}^{\infty} \frac{z^n}{2^{n+1}} &= \frac{1}{2} \sum_{n=0}^{\infty} \frac{z^n}{2^n}
# \\ &= \frac{1}{2} \sum_{n=0}^{\infty} \left( z/2 \right)^n
# \\ &= \frac{1}{2} \frac{1}{1-z/2} \quad \text{as long as $|z/2|<1$.}
# \\ &= \frac{1}{2-z}
# \end{align*}

# And for the other sum:
# \begin{align*}
# \sum_{n=1}^{\infty} \frac{1}{z^n}
# &= -1 + \sum_{n=0}^{\infty} \frac{1}{z^n}
# \\ &= -1 + \sum_{n=0}^{\infty} (1/z)^n
# \\ &= -1 + \frac{1}{1-(1/z)} \quad \text{as long as $(|1/z|<1) \Leftrightarrow (|z|>1)$.}
# \\ &= \frac{1}{z-1}
# \end{align*}

# Adding both results:
# \begin{align}
# \frac{1}{2-z} + \frac{1}{z-1} &= \frac{-1}{(z-1)(z-2)}
# \end{align}


# Function to plot a function
def plot_fun(fn, xi, xf, vectorized=False, n=101, **args):
    """Plot ``fn`` over the interval [xi, xf] using ``n`` sample points.

    If ``vectorized`` is True, ``fn`` is applied to the whole sample array
    at once; otherwise it is evaluated point by point.  Any extra keyword
    ``args`` are forwarded to ``plt.plot``.
    """
    xs = np.linspace(xi, xf, num=n)
    if vectorized:
        ys = fn(xs)
    else:
        ys = [fn(x) for x in xs]
    plt.plot(xs, ys, '-', **args)


# The sums, limiting n:
def sums(z, n_max):
    """Partial sums (up to ``n_max``) of the Laurent series valid on 1<|z|<2.

    Returns sum_{n=0}^{n_max} z^n / 2^(n+1)  +  sum_{n=1}^{n_max} 1 / z^n.
    """
    ns = np.arange(n_max + 1)
    sum1 = np.sum(z**ns / 2**(ns + 1))
    sum2 = np.sum(1 / z**ns[1:])  # inner series starts at n = 1
    return sum1 + sum2


# We plot the region
plt.figure(figsize=(8, 6))
# Label fixed to match the plotted function, -1/((z-1)(z-2)).
plot_fun(lambda x: -1/((x-1)*(x-2)), 0, 3, vectorized=True, label="$\\frac{-1}{(z-1)(z-2)}$")
plot_fun(lambda x: sums(x, 2), 0, 3, label="sums up to $n=2$", c=(.6, .6, .6))
plot_fun(lambda x: sums(x, 4), 0, 3, label="sums up to $n=4$", c=(.4, .4, .4))
plot_fun(lambda x: sums(x, 8), 0, 3, label="sums up to $n=8$", c=(.2, .2, .2))
plot_fun(lambda x: sums(x, 16), 0, 3, label="sums up to $n=16$", c=(.0, .0, .0))
plt.ylim((-20, 20))
plt.legend()
plt.grid()
plt.show()

# We can see that it converges in $1<|z|<2$ as $n \rightarrow \infty$.

# ---
#
# We can get the Laurent expansion of the series when $|z|>2$, if we make use of:
# $$
# \frac{1}{1-z} = -\sum_{n=1}^\infty \frac{1}{z^n} \quad \text{if $|z|>1$} \,.
# $$
# We proceed:
# \begin{align*}
# \frac{-1}{(z-1)(z-2)} &= \frac{1}{2-z} + \frac{1}{z-1}
# \\ &= \frac{1}{2} \cdot \frac{1}{1-\frac{z}{2}} - 1 \cdot \frac{1}{1-z}
# \\ &= \frac{-1}{2} \sum_{n=1}^\infty \frac{1}{(z/2)^n} + 1 \sum_{n=1}^\infty \frac{1}{z^n}
# \\ &= \frac{-1}{2} \sum_{n=1}^\infty \frac{2^n}{z^n} + 1 \sum_{n=1}^\infty \frac{1}{z^n}
# \end{align*}
#


# The new sums, limiting n:
def sums2(z, n_max):
    """Partial sums (up to ``n_max``) of the Laurent series valid on |z|>2.

    Returns -(1/2) sum_{n=1}^{n_max} 2^n / z^n  +  sum_{n=1}^{n_max} 1 / z^n.
    """
    ns = np.arange(n_max + 1)
    sum1 = -0.5 * np.sum(2**ns[1:] / z**ns[1:])
    sum2 = np.sum(1 / z**ns[1:])
    return sum1 + sum2


# We plot the region, again
plt.figure(figsize=(8, 6))
# Label fixed to match the plotted function, -1/((z-1)(z-2)).
plot_fun(lambda x: -1/((x-1)*(x-2)), 0, 3, vectorized=True, label="$\\frac{-1}{(z-1)(z-2)}$")
plot_fun(lambda x: sums2(x, 2), 0, 3, label="sums up to $n=2$", c=(.6, .6, .6))
plot_fun(lambda x: sums2(x, 4), 0, 3, label="sums up to $n=4$", c=(.4, .4, .4))
plot_fun(lambda x: sums2(x, 8), 0, 3, label="sums up to $n=8$", c=(.2, .2, .2))
plot_fun(lambda x: sums2(x, 16), 0, 3, label="sums up to $n=16$", c=(.0, .0, .0))
plt.ylim((-20, 20))
plt.legend()
plt.grid()
plt.show()

# We can see that it converges to the function when $|z|>2$ as $n \rightarrow \infty$.

# ## References
# * http://sym.lboro.ac.uk/resources/Handout-Laurent.pdf
t1_questions/item_14.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from tqdm import tnrange, tqdm_notebook import gc import operator sns.set_context('talk') pd.set_option('display.max_columns', 500) import warnings warnings.filterwarnings('ignore', message='Changing the shape of non-C contiguous array') # # Read the data dfXtrain = pd.read_csv('preprocessed_csv/train_4.csv', index_col='id', sep=';') dfXtest = pd.read_csv('preprocessed_csv/test_4.csv', index_col='id', sep=';') dfYtrain = pd.read_csv('preprocessed_csv/y_train_4.csv', header=None, names=['ID', 'COTIS'], sep=';') dfYtrain = dfYtrain.set_index('ID') # # Preprocessing # Вынесем var14, department и subreg. # + dropped_col_names = ['department', 'subreg', 'ext_dep'] def drop_cols(df): return df.drop(dropped_col_names, axis=1), df[dropped_col_names] # - train, dropped_train = drop_cols(dfXtrain) test, dropped_test = drop_cols(dfXtest) # Добавим инфу о величине города из subreg'a def add_big_city_cols(df, dropped_df): df['big'] = np.where(dropped_df['subreg'] % 100 == 0, 1, 0) df['average'] = np.where(dropped_df['subreg'] % 10 == 0, 1, 0) df['average'] = df['average'] - df['big'] df['small'] = 1 - df['big'] - df['average'] return df train = add_big_city_cols(train, dropped_train) test = add_big_city_cols(test, dropped_test) plt.scatter(np.power(train.var9, 1), train.var10) train[dfYtrain.COTIS < dfYtrain.COTIS.min() + 5].head(8) for i in dfYtrain[dfYtrain.COTIS < dfYtrain.COTIS.min() + 1].index: plt.plot([dfYtrain.COTIS[i], 0], [0, 100 * dfYtrain.COTIS[i] / train.crm[i]], color='b') for i in train[(train.marque == 'MAZDA') & (train.anc_veh == 6)].index[:50]: plt.plot([dfYtrain.COTIS[i], 0], [0, 100 * dfYtrain.COTIS[i] / train.crm[i]], color='b') 
train.loc[train[train.marque == 'MAZDA'].index[:2]] # Декодируем оставшиеся категориальные признаки numerical = list(train.select_dtypes(include=[np.number]).columns) numerical categorical = list(train.select_dtypes(exclude=[np.number]).columns) categorical list(test.select_dtypes(exclude=[np.number]).columns) for col in categorical: print(col, train[col].nunique(), test[col].nunique()) # energie_veh и var6 с помощью get_dummies train.energie_veh.unique() test.energie_veh.unique() small_cat = ['energie_veh', 'var6'] train = pd.get_dummies(train, columns=small_cat) test = pd.get_dummies(test, columns=small_cat) # Теперь посмотрим на остальные len(set(train.profession.values) - set(test.profession.values)) len(set(train.var8.values) - set(test.var8.values)) len(set(test.var8.values) - set(train.var8.values)) len(set(train.marque.values) - set(test.marque.values)) len(set(test.marque.values) - set(train.marque.values)) set(test.marque.values) - set(train.marque.values) test[test.marque == 'GEELY'] test[test.marque == 'SOVAM'] # profession и var8 тоже в dummy middle_cat = ['profession', 'var8', 'marque', 'var14'] bigX = pd.concat([train, test]) bigX.shape bigX = pd.get_dummies(bigX, columns=middle_cat) bigX.shape # Расположим столбцы в нужном порядке, добавим константный столбец bigX.crm /= 100 first_col_list = ['crm', 'puis_fiscale'] col_list = first_col_list + sorted(list(set(bigX.columns) - set(first_col_list))) bigX = bigX[col_list] # Разберёмся с нумериками numerical = set(numerical) numerical -= set(['big', 'average', 'small']) for col in numerical: treshold = 10 if bigX[col].nunique() <= treshold: print(col, bigX[col].nunique()) # Эти (что выше) можно ohe for col in numerical: treshold = 10 if bigX[col].nunique() > treshold: print(col, bigX[col].nunique()) # * crm выкидывается # * var1 порог 3 # * age порог 22 intercept = 50 base = 400 target = (dfYtrain.COTIS - intercept)/ train.crm * 100 / base target.describe() # 50 и 400 хорошо ложатся bigX.fillna(-9999, 
inplace=True) y_train = np.array(dfYtrain) train = bigX.loc[train.index] x_train = np.array(train) test = bigX.loc[test.index] x_test = np.array(test) x_train.shape x_test.shape # # Save routines dfYtest = pd.DataFrame({'ID': dfXtest.index, 'COTIS': np.zeros(test.shape[0])}) dfYtest = dfYtest[['ID', 'COTIS']] dfYtest.head() def save_to_file(y, file_name): dfYtest['COTIS'] = y dfYtest.to_csv('results/{}'.format(file_name), index=False, sep=';') model_name = 'divided' dfYtest_stacking = pd.DataFrame({'ID': dfXtrain.index, model_name: np.zeros(train.shape[0])}) dfYtest_stacking = dfYtest_stacking[['ID', model_name]] dfYtest_stacking.head() def save_to_file_stacking(y, file_name): dfYtest_stacking[model_name] = y dfYtest_stacking.to_csv('stacking/{}'.format(file_name), index=False, sep=';') # # Train model def plot_quality(grid_searcher, param_name): means = [] stds = [] for elem in grid_searcher.grid_scores_: means.append(np.mean(elem.cv_validation_scores)) stds.append(np.sqrt(np.var(elem.cv_validation_scores))) means = np.array(means) stds = np.array(stds) params = grid_searcher.param_grid plt.figure(figsize=(10, 6)) plt.plot(params[param_name], means) plt.fill_between(params[param_name], \ means + stds, means - stds, alpha = 0.3, facecolor='blue') plt.xlabel(param_name) plt.ylabel('MAPE') def mape(y_true, y_pred): return -np.mean(np.abs((y_true - y_pred) / y_true)) * 100 def mape_scorer(est, X, y): gc.collect() return mape(y, est.predict(X)) class MyGS(): class Element(): def __init__(self): self.cv_validation_scores = [] def add(self, score): self.cv_validation_scores.append(score) def __init__(self, param_grid, name, n_folds): self.param_grid = {name: param_grid} self.grid_scores_ = [MyGS.Element() for item in param_grid] self.est = None def add(self, score, param_num): self.grid_scores_[param_num].add(score) # + intercept = 50 base = 400 def scorer(y_true, y_pred, crm): y_true = inv_func(y_true, crm) y_pred = inv_func(y_pred, crm) return mape(y_true, y_pred) # - 
def func(y, crm): return (y - intercept) / crm / base def inv_func(y, crm): return y * crm * base + intercept train.crm.nunique() train.crm.hist(bins=149) test.crm.nunique() test.crm.hist(bins=104) dfYtrain[train.crm == 0.5].hist(bins=100) from sklearn.model_selection import train_test_split x_50train = np.array(train[train.crm == 0.5]) y_50train = np.array(dfYtrain[train.crm == 0.5].COTIS) x_subtrain, x_validation, y_subtrain, y_validation = train_test_split( x_50train, y_50train, test_size=0.2, random_state=42) y_subtrain.shape import xgboost as xgb def grid_search(x_train, y_train, x_validation, y_validation, scorer, weights=None): param = {'base_score':0.5, 'colsample_bylevel':1, 'colsample_bytree':1, 'gamma':0, 'eta':0.3, 'max_delta_step':0, 'max_depth':10, 'min_child_weight':1, 'nthread':-1, 'objective':'reg:linear', 'alpha':0, 'lambda':1, 'scale_pos_weight':1, 'seed':56, 'silent':True, 'subsample':1} diff_num_round_list = [10 for i in range(5)] diff_num_round_list[0] = 50 num_round_list = np.cumsum(diff_num_round_list) n_folds = 1 mygs = MyGS(num_round_list, 'num_round', n_folds=n_folds) #label_kfold = LabelKFold(np.array(dropped_train['department']), n_folds=n_folds) dtrain = xgb.DMatrix(x_train, label=y_train, missing=-9999, weight=weights) dvalidation = xgb.DMatrix(x_validation, missing=-9999) param['base_score'] = np.mean(y_train) bst = None for index, diff_num_round in enumerate(diff_num_round_list): bst = xgb.train(param, dtrain, diff_num_round, xgb_model=bst) y_pred = np.exp(bst.predict(dvalidation)) score = scorer(y_validation, y_pred) mygs.add(score, index) mygs.est = bst gc.collect() return mygs # %%time mygs = grid_search(x_subtrain, np.log(y_subtrain), x_validation, y_validation, mape, None) plot_quality(mygs, 'num_round') # + importance = mygs.est.get_fscore() importance = sorted(importance.items(), key=operator.itemgetter(1)) df_all = pd.DataFrame(importance, columns=['feature', 'fscore']) df_all['fscore'] = df_all['fscore'] / 
df_all['fscore'].sum() df_all['feature'] = df_all['feature'].apply(lambda x: train.columns[int(str(x)[1:])]) plt.figure() df_all.plot() df_all.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10)) plt.title('XGBoost Feature Importance') plt.xlabel('relative importance') # - norm_cols = df_all[df_all.fscore > 0.001].feature.values x_50train = np.array(train[train.crm == 0.5][norm_cols]) y_50train = np.array(dfYtrain[train.crm == 0.5].COTIS) x_subtrain, x_validation, y_subtrain, y_validation = train_test_split( x_50train, y_50train, test_size=0.2, random_state=42) x_subtrain.shape def grid_search(x_train, y_train, x_validation, y_validation, scorer, weights=None): param = {'base_score':0.5, 'colsample_bylevel':1, 'colsample_bytree':1, 'gamma':0, 'eta':0.3, 'max_delta_step':0, 'max_depth':10, 'min_child_weight':1, 'nthread':-1, 'objective':'reg:linear', 'alpha':0, 'lambda':1, 'scale_pos_weight':1, 'seed':56, 'silent':True, 'subsample':1} diff_num_round_list = [10 for i in range(5)] diff_num_round_list[0] = 50 num_round_list = np.cumsum(diff_num_round_list) n_folds = 1 mygs = MyGS(num_round_list, 'num_round', n_folds=n_folds) #label_kfold = LabelKFold(np.array(dropped_train['department']), n_folds=n_folds) dtrain = xgb.DMatrix(x_train, label=y_train, missing=-9999, weight=weights) dvalidation = xgb.DMatrix(x_validation, missing=-9999) param['base_score'] = np.mean(y_train) bst = None for index, diff_num_round in enumerate(diff_num_round_list): bst = xgb.train(param, dtrain, diff_num_round, xgb_model=bst) y_pred = np.exp(bst.predict(dvalidation)) score = scorer(y_validation, y_pred) mygs.add(score, index) mygs.est = bst gc.collect() return mygs # %%time mygs = grid_search(x_subtrain, np.log(y_subtrain), x_validation, y_validation, mape, None) plot_quality(mygs, 'num_round') # + importance = mygs.est.get_fscore() importance = sorted(importance.items(), key=operator.itemgetter(1)) df = pd.DataFrame(importance, columns=['feature', 'fscore']) 
df['fscore'] = df['fscore'] / df_all['fscore'].sum() df['feature'] = df['feature'].apply(lambda x: train[norm_cols].columns[int(str(x)[1:])]) plt.figure() df.plot() df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10)) plt.title('XGBoost Feature Importance') plt.xlabel('relative importance') # - plot_quality(mygs, 'num_round') validation_index = (dropped_train.ext_dep == 10) | (dropped_train.ext_dep > 900) train_index = ~validation_index # + subtrain, validation = train[train_index], train[validation_index] x_subtrain = np.array(subtrain) x_validation = np.array(validation) ysubtrain, yvalidation = dfYtrain[train_index], dfYtrain[validation_index] y_subtrain = np.array(ysubtrain).flatten() y_validation = np.array(yvalidation).flatten() # - validation.shape from sklearn.tree import LinearDecisionTreeRegressor as LDTR from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.model_selection import ShuffleSplit # + # %%time n_splits = 1000 rs = ShuffleSplit(n_splits=n_splits, test_size=None, train_size=1 / 1000, random_state=42) estimators = [] y_pred = 0 for train_index, test_index in rs.split(x_subtrain): estimators.append(LDTR(n_coefficients=1, n_first_dropped=1, const_term=True, max_depth=5, random_state=42).fit( x_subtrain[train_index], y_subtrain[train_index])) y_pred += estimators[-1].predict(x_validation) y_pred /= n_splits # - mape(y_validation, y_pred) plt.scatter(x_validation[:, 0], y_pred) # + # %%time intercept = 50 sample_weight_subtrain = np.power(y_subtrain, -1) n_estimators = 100 estimators = [] weights = [] diff = y_subtrain - 0 estimators.append(LDTR(n_coefficients=1, n_first_dropped=1, const_term=False, max_depth=25, random_state=42, min_samples_leaf=50).fit( x_subtrain, diff - intercept, sample_weight=sample_weight_subtrain)) diff -= estimators[-1].predict(x_subtrain) + intercept weights.append(1) rs = ShuffleSplit(n_splits=n_estimators, test_size=1 / 100, train_size=1 / 
1000, random_state=42) for random_state, (train_index, test_index) in enumerate(rs.split(x_subtrain)): grad = diff estimators.append(LDTR(n_coefficients=2, n_first_dropped=0, const_term=False, max_depth=1, random_state=random_state, min_samples_leaf=5, splitter='random').fit( x_subtrain, grad)) grad = estimators[-1].predict(x_subtrain) good_error = 100000 good_weight = 0 for coef in [0.001, 0.01, 0.1, 1, 10]: error = np.mean(np.abs(diff - grad * coef) / y_subtrain) if error < good_error: good_weight = coef good_error = error if good_weight > 0: weights.append(good_weight) diff -= good_weight * estimators[-1].predict(x_subtrain) # - y_pred = sum(weight * est.predict(x_validation) for weight, est in zip(weights, estimators)) + intercept mape(y_validation, y_pred) weights # %%time est = ExtraTreesRegressor(n_estimators=10, max_features=None, max_depth=None, n_jobs=-1, random_state=42).fit( X=x_subtrain, y=func(y_subtrain, x_subtrain[:, 0]), sample_weight=None) y_pred = inv_func(est.predict(x_validation), x_validation[:, 0]) mape(y_validation, y_pred) gc.collect() sample_weight_subtrain = np.power(y_subtrain, -1) # %%time est = DecisionTreeRegressor(max_features=None, max_depth=None, random_state=42).fit( X=x_subtrain, y=func(y_subtrain, x_subtrain[:, 0]), sample_weight=sample_weight_subtrain) y_pred = inv_func(est.predict(x_validation), x_validation[:, 0]) mape(y_validation, y_pred) gc.collect() import xgboost as xgb def grid_search(x_train, y_train, x_validation, y_validation, scorer, weights=None): param = {'base_score':0.5, 'colsample_bylevel':1, 'colsample_bytree':1, 'gamma':0, 'eta':0.15, 'max_delta_step':0, 'max_depth':15, 'min_child_weight':20, 'nthread':-1, 'objective':'reg:linear', 'alpha':0, 'lambda':1, 'scale_pos_weight':1, 'seed':56, 'silent':True, 'subsample':1} diff_num_round_list = [4 for i in range(5)] diff_num_round_list[0] = 60 num_round_list = np.cumsum(diff_num_round_list) n_folds = 1 mygs = MyGS(num_round_list, 'num_round', n_folds=n_folds) 
#label_kfold = LabelKFold(np.array(dropped_train['department']), n_folds=n_folds) dtrain = xgb.DMatrix(x_train, label=y_train, missing=-9999, weight=weights) dvalidation = xgb.DMatrix(x_validation, missing=-9999) param['base_score'] = np.mean(y_train) bst = None for index, diff_num_round in enumerate(diff_num_round_list): bst = xgb.train(param, dtrain, diff_num_round, xgb_model=bst) y_pred = bst.predict(dvalidation) score = scorer(y_validation, y_pred, x_validation[:, 0]) mygs.add(score, index) mygs.est = bst gc.collect() return mygs # %%time mygs = grid_search(x_subtrain, func(y_subtrain, x_subtrain[:, 0]), x_validation, func(y_validation, x_validation[:, 0]), scorer, None) plot_quality(mygs, 'num_round') # min_child_weight = 5 plot_quality(mygs, 'num_round') plot_quality(mygs, 'num_round') gc.collect() dvalidation = xgb.DMatrix(x_validation, missing=-9999) y_pred = inv_func(mygs.est.predict(dvalidation), x_validation[:, 0]) plt.scatter(x_validation[:, 0], y_validation) plt.scatter(x_validation[:, 0], y_pred, color='g') plt.show() # # Save # %%time est = LDTR(max_features=None, max_depth=15, random_state=42, n_coefficients=2, n_first_dropped=2, const_term=True, min_samples_leaf=40).fit( X=x_train, y=y_train, sample_weight=np.power(y_train.flatten(), -1)) y_pred = est.predict(x_test) save_to_file(y_pred, 'ldtr.csv')
divided_line_parrot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:larval_gonad] # language: python # name: conda-env-larval_gonad-py # --- # # Quick Numbers for Paper # In today's meeting we are going through the paper looking for holes. I am just taking a quick look and filling some of them. # + import os import sys import re from pathlib import Path from io import StringIO from yaml import load from IPython.display import display, HTML, Markdown import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns # Project level imports from larval_gonad.notebook import Nb # - # Setup notebook nbconfig = Nb.setup_notebook(seurat_dir='../output/scrnaseq-wf/scrnaseq_combine_force') biomakers = nbconfig.seurat.get_biomarkers('res.0.6') biomarker_genes = biomakers.index.unique() # + ptrap_str = """gene_symbol SP ES MS LS C EC MC LC PC TE H ADD1 1 2 3 3 1 0 0 0 0 1 0 Ance 1 2 2 2 1 2 2 2 1 1 1 ATP8A 0 0 0 0 0 0 0 0 0 0 0 bol 1 1 2 3 0 0 ND ND 0 0 0 CadN 0 0 0 0 0 0 0 0 0 0 0 CadN 0 0 0 0 0 0 0 0 0 0 0 CadN 0 0 0 0 0 0 0 0 0 0 0 CadN_Avg 0 0 0 0 0 0 0 0 0 0 0 CG17349 0 0 0 0 0 0 0 0 0 0 0 CG17646 0 0 0 0 1 1 1 1 1 1 0 CG3277 (receptor protein-tyrosine kinase) 0 0 0 0 0 0 0 0 0 0 0 CG8100 0 0 0 0 0 0 0 0 0 0 0 CG9747 0 1 1 1 0 1 1 1 1 1 0 Cht5 0 0 0 0 0 1 1 2 0 0 0 cindr 1 1 1 1 1 1 1 1 2 2 2 cmpy 0 0 0 0 0 1 1 1 0 0 0 Dek 2 2 2 2 1 1 1 1 2 2 2 Dh31-R 0 0 0 0 0 0 0 0 0 0 0 dpr17 1 1 1 0 0 0 0 0 0 0 0 e(y)3 2 2 2 2 1 1 1 1 2 2 1 Eaat2 0 0 0 0 0 0 0 0 0 0 0 Efa6 2 2 1 1 0 0 0 0 1 2 0 Efa6 1 1 1 1 1 2 2 2 1 1 1 Efa6 2 1 1 1 2 2 2 2 1 1 2 Efa6_Avg 1.666666667 1.333333333 1 1 1 1.333333333 1.333333333 1.333333333 1 1.333333333 1 Fas3 1 1 1 1 1 1 1 1 1 3 3 Fas3 0 0 0 0 0 0 0 0 0 2 3 Fas3_Avg 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 2.5 3 fln 1 1 1 1 2 2 2 2 1 1 ND foxo 2 2 2 2 1 1 1 1 1 2 1 Fs(2)Ket 2 2 2 2 1 1 
1 1 1 1 1 grim 0 0 0 0 0 0 0 0 0 0 0 haf 0 0 0 0 0 0 0 0 0 0 0 kkv, CG14668 1 1 1 1 1 1 2 2 1 1 1 klu 0 0 0 0 0 0 0 0 0 0 0 Mapmodulin 2 2 2 1 1 1 1 1 2 2 1 mbl 1 2 2 2 1 0 0 0 1 3 1 Mi-2 1 1 1 1 2 2 2 2 1 0 ND Su(Tpl) 1 1 1 1 2 2 2 2 1 0 ND Mipp1 0 0 0 0 0 0 0 0 0 0 0 Mlc2 0 0 0 0 0 0 0 0 0 0 0 NFAT 0 0 0 0 0 0 0 0 0 0 0 nkd 0 0 0 0 0 0 0 0 0 0 0 Nlg3 1 1 1 1 1 2 2 1 ND 1 1 Nlg3 0 0 0 0 0 1 1 1 0 0 0 Nlg3_Avg 0.5 0.5 0.5 0.5 0.5 1.5 1.5 1 0 0.5 0.5 nord 1 1 1 1 0 0 0 0 0 2 0 Np 0 0 0 0 0 0 0 0 0 0 0 Nrg 2 1 1 1 2 2 2 2 2 2 2 osa 0 0 0 0 0 0 0 0 0 0 0 osa 1 1 1 1 2 2 2 2 2 2 ND osa_Avg 0.5 0.5 0.5 0.5 1 1 1 1 1 1 0 p53 2 2 1 0 0 0 0 0 ND 1 0 Pdcd4 3 3 3 3 1 2 2 2 3 3 1 Pde11 0 0 0 0 0 0 0 0 0 0 0 Piezo 0 0 0 0 0 0 0 0 0 3 0 ppk19 0 0 0 0 0 0 0 0 0 0 0 ppk30 0 0 0 0 0 0 0 0 0 0 0 rdo 1 1 1 1 2 2 2 2 1 1 1 rdo 1 1 1 1 2 2 3 3 1 1 3 rdo 1 1 1 1 2 2 3 3 1 1 3 rdo_Avg 1 1 1 1 2 2 2.666666667 2.666666667 1 1 2.333333333 RunxA 1 1 1 1 1 1 1 1 1 1 1 Sap-r 1 1 1 1 2 3 3 3 2 3 1 sca 0 0 0 0 0 0 0 0 0 0 0 SNF4gamma 1 1 1 1 1 1 1 1 1 1 1 Snmp1 1 1 1 1 1 1 1 1 1 1 1 sosie 1 1 1 1 1 2 2 2 1 1 1 spir 1 1 1 1 0 0 0 0 0 0 0 SRPK 2 2 2 2 0 0 0 0 1 1 1 stai 3 2 2 2 2 2 2 2 1 3 2 stg 0 0 0 0 0 0 0 0 0 0 0 Syn 1 1 1 1 1 1 1 2 1 1 1 Syn 0 0 0 0 0 0 0 0 0 0 0 Syn 1 1 1 1 1 1 1 1 1 1 0 Syn_Avg 0.6666666667 0.6666666667 0.6666666667 0.6666666667 0.6666666667 0.6666666667 0.6666666667 1 0.6666666667 0.6666666667 0.3333333333 Tep2 0 1 1 1 0 2 2 2 2 0 0 tok 1 1 1 1 1 0 0 0 1 2 1 tutl 1 1 1 1 0 0 0 0 1 1 0 twin 1 1 1 1 0 0 0 0 0 0 0 VGlut 0 0 0 0 0 0 0 0 0 0 0 """ gene_mapper = nbconfig.symbol2fbgn.copy() gene_mapper.update( { 'ATP8A': 'FBgn0259221', 'CG3277 (receptor protein-tyrosine kinase)': 'FBgn0031518', 'kkv, CG14668': 'FBgn0037320', 'SNF4gamma': 'FBgn0264357', } ) ptrap = ( pd.read_csv(StringIO(ptrap_str), sep='\t') .query('not gene_symbol.str.contains("Avg")', engine='python') .assign(FBgn=lambda df: df.gene_symbol.map(gene_mapper)) .set_index('FBgn') .drop(columns=['gene_symbol', 'H', 
'C']) .replace('ND', 0) .pipe(lambda df: df[df.sum(axis=1) > 0]) .astype(int) ) # - ptrap.index.unique().shape[0], ptrap.index.unique().intersection(biomarker_genes).shape[0] sorted(ptrap.index.unique().intersection(biomarker_genes).map(nbconfig.fbgn2symbol), key=lambda x: x.lower()) # + lit_genes = { 'germline': ['vas', 'bam', 'Phf7', 'CG11697', 'p53', 'nos', 'bgcn', 'tut', 'Rbp9', 'peb', 'tej', 'Marf',], 'late_spermatocytes': ['aly', 'nht', 'soti', 'dj', 'ocn', 'can', 'fzo', 'bol', 'mle', 'mia', 'CG3927', 'sunz', 'sowi', 'd-cup', 'c-cup', 'wa-cup', 'p-cup', 'r-cup', 'oys', 'topi', 'sa', 'CG8368',], 'cyst': ['tj', 'eya', 'zfh1', 'vn', 'foxo', 'apt', 'ImpL2', 'Wnt4', 'Nrt', 'bnb', 'neur', 'robo2', 'EcR', 'gbb', 'spict', 'puc', 'sev', 'hui', 'sano', 'glob1', 'Eip93F', 'fax', 'kek1', 'so',], 'te': ['nord', 'retn', 'abd-A', 'Abd-B', 'Wnt2', 'Six4', 'CG18628', 'MtnA', 'N',], 'pc': ['vkg', 'Sox100B', 'bw', 'ems',], } gene_annot = [] for k, v in lit_genes.items(): for gene in v: fbgn = nbconfig.symbol2fbgn[gene] gene_annot.append((fbgn, gene, k)) lit = pd.DataFrame(gene_annot, columns=['FBgn', 'gene_symbol', 'literature']).set_index('FBgn') lit.literature.value_counts().to_frame() # + cluster_genes = biomakers.cluster.map(nbconfig.short_cluster_annot).to_frame() ( cluster_genes.join(lit, how='right') .groupby('literature') .cluster.value_counts() .to_frame() .sort_index() .rename(columns={'cluster': 'Genes in BioMarker'}) ) # - zscore_max = ( pd.read_parquet('../output/scrnaseq-wf/tpm_zscore.parquet', columns=nbconfig.cluster_order[:9]) .rename(columns=dict(zip(nbconfig.cluster_order[:9], nbconfig.short_cluster_order))) .idxmax(axis=1) .rename('best') ) ( lit.join(zscore_max, how='left') .groupby('literature') .best.value_counts() .rename('Highest Expressed Cluster') .rename_axis(['literature', 'cluster'], axis=0) .to_frame() )
notebook/2019-02-08_quick_numbers_for_paper.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Two-Layer QG Model Example
# This notebook gives a quick tour of the two-layer model; the
# :py:class:`pyqg.QGModel` api documentation has the full details.
#
# Start by importing numpy, matplotlib, and pyqg:

# +
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline

import pyqg
from pyqg import diagnostic_tools as tools
# -

# ## Initialize and Run the Model ##
#
# The model below integrates for 10 years and begins accumulating
# averages after 5 years.  Every other parameter could be passed as a
# keyword argument, but the defaults are used here.

day = 24 * 60 * 60.        # seconds per day
year = 360. * day          # one model year = 360 days
m = pyqg.QGModel(tmax=10*year, twrite=10000, tavestart=5*year)
m.run()

# ## Convert Model Output to an xarray Dataset ##
#
# Model variables, coordinates, attributes, and metadata can be stored
# conveniently as an xarray Dataset.  (This requires xarray to be
# installed; see
# http://xarray.pydata.org/en/stable/getting-started-guide/installing.html#instructions
# for instructions.)

m_ds = m.to_dataset().isel(time=-1)
m_ds

# ## Visualize Output ##
#
# Define a new data variable, ``q_upper``, for the **upper layer PV
# anomaly**: the two-level PV field ``m_ds.q`` plus the contribution of
# the background PV gradient ``m_ds.Qy``.

q_top = m_ds.q.isel(lev=0)
m_ds['q_upper'] = q_top + m_ds.Qy.isel(lev=0) * m_ds.y
m_ds['q_upper'].attrs = {'long_name': 'upper layer PV anomaly'}
m_ds.q_upper.plot.contourf(levels=18, cmap='RdBu_r');

# ## Plot Diagnostics ##
#
# The model accumulates averages of certain diagnostics automatically.
# The available ones can be listed with

m.describe_diagnostics()

# The `KEspec` diagnostic gives the wavenumber energy spectrum.
# (Summing over the l-axis, as below, does not produce a true
# *isotropic* wavenumber spectrum.)

# +
kr, ke_top = tools.calc_ispec(m, m_ds.KEspec.isel(lev=0).data)
kr, ke_bottom = tools.calc_ispec(m, m_ds.KEspec.isel(lev=1).data)

plt.loglog(kr, ke_top, 'b.-', label='upper layer')
plt.loglog(kr, ke_bottom, 'g.-', label='lower layer')
plt.legend(loc='lower left')
plt.ylim([1e-14, 1e-8])
plt.grid()
plt.xlabel(r'k (m$^{-1}$)');
plt.title('Kinetic Energy Spectrum');
# -

# The spectral fluxes of energy and enstrophy can be plotted the same way.

# +
budget = []
for var in ('APEgenspec', 'APEflux', 'KEflux', 'KEfrictionspec', 'Dissspec'):
    kr, spec = tools.calc_ispec(m, m_ds[var].data)
    budget.append(spec)
budget.append(-np.vstack(budget).sum(axis=0))  # residual closes the budget

budget_labels = ['APE gen', 'APE flux', 'KE flux', 'Bottom drag', 'Diss.', 'Resid.']
for term in budget:
    plt.semilogx(kr, term)
plt.legend(budget_labels, loc='upper right')
plt.grid()
plt.xlabel(r'k (m$^{-1}$)');
plt.title('Spectral Energy Transfer');
# -

# +
ens_budget = []
for var in ('ENSgenspec', 'ENSflux', 'ENSDissspec', 'ENSfrictionspec'):
    kr, spec = tools.calc_ispec(m, m_ds[var].data.squeeze())
    ens_budget.append(spec)
ens_budget.append(-np.vstack(ens_budget).sum(axis=0))  # residual closes the budget

ens_labels = ['ENS gen', 'ENS flux div.', 'Dissipation', 'Friction', 'Resid.']
for term in ens_budget:
    plt.semilogx(kr, term)
plt.legend(ens_labels, loc='best')
plt.grid()
plt.xlabel(r'k (m$^{-1}$)');
plt.title('Spectral Enstrophy Transfer');
docs/examples/two-layer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:notebook] * # language: python # name: conda-env-notebook-py # --- # + import os import rasterio as rio from rasterio.mask import mask from glob import glob import matplotlib.pyplot as plt import pandas as pd from shapely import geometry import geojson import json import numpy as np import gcsfs GCP_PROJECT_ID = '170771369993' OISST_GCP = 'oisst/oisst.zarr' import seaborn as sns import xarray as xr from dask.distributed import Client import dask from functools import partial import pyproj from shapely.ops import transform import cartopy.crs as ccrs from functools import partial import pyproj from shapely.ops import transform project = partial( pyproj.transform, pyproj.Proj(init='epsg:4326'), # source coordinate system pyproj.Proj(init='epsg:3857')) # destination coordinate system # - client = Client() # ## Process Sentinel3 Chlorophyll imagery_root = '/tmp/nepacs3/' image_files = glob(os.path.join(imagery_root, "*/*nn*.tif")) def get_date_from_file(file): return(pd.to_datetime(os.path.splitext(file)[0].split('-')[1])) dates = list(map(get_date_from_file, image_files)) xr_ds = [ rio.open(image_file) for image_file in image_files ] len(xr_ds) # **Crop** each one to a buffered zone 'round the chosen isolate (id:240, the nepac one) plankton = pd.read_csv( '../data/Phytoplankton_temperature_growth_rate_dataset_2016_01_29/traits_derived_2016_01_29.csv', engine='python' ) isolate_code = 240 isolate = plankton[plankton['isolate.code'] == isolate_code] isolate # + project_merc = partial( pyproj.transform, pyproj.Proj(init='epsg:4326'), # source coordinate system pyproj.Proj(init='epsg:3857')) # mercator, to compute area buffer project_wgs = partial( pyproj.transform, pyproj.Proj(init='epsg:3857'), # source coordinate system pyproj.Proj(init='epsg:4326')) # mercator, to compute area buffer 
# - isolate_point_wgs = geometry.Point( isolate['isolation.longitude'], isolate['isolation.latitude'] ) isolate_point_meters = transform(project, isolate_point_wgs) merc_500m_buffer = isolate_point_meters.buffer(20000) # + clipped_data = [] for ds in xr_ds: try: clipped_data.append(rio.mask.mask(ds, [merc_500m_buffer], crop=True)) except Exception as e: print(e) clipped_data.append(e) # - len(clipped_data) # Turn them all to masked arrays ma_clipped = [] for data in clipped_data: print(type(data)) if isinstance(data, Exception): ma_clipped.append(data) continue _ma = np.ma.MaskedArray( data = data[0][0], mask = (data[0][1] == 0) ) ma_clipped.append(_ma) # ## Compute Mean chl-$\alpha$ chl = [] for ma in ma_clipped: print(isinstance(ma, Exception)) if isinstance(ma, Exception): chl.append(np.nan) continue chl.append(np.mean(ma)) chl_df = pd.DataFrame({ 'date': dates, 'chl': chl }) chl_df.loc[chl_df.chl.apply(lambda x: isinstance(x, np.ma.core.MaskedConstant)), 'chl'] = np.nan chl_df=chl_df.sort_index() chl_df.index plt.scatter(chl_df.date, chl_df.chl) plt.xticks(rotation=30, ha='right') # + len(chl_df[~chl_df.chl.isna()]) # - # ## Can we look at performance around this time? 
# Load the precomputed isolate MHW-performance dataset and select the time
# window that overlaps the Sentinel-3 chlorophyll observations.
isolate_performance_perf = xr.open_zarr(
    "../data/isolate_mhw_performance.zarr/isolate_mhw_performance.zarr/"
)

mhw_performance = isolate_performance_perf.sel(
    isolate=isolate_code,
    time=slice(chl_df.date.min(), chl_df.date.max())
)

mhw_performance.time.min()

# Quick-look: isolate performance with the chl-a observations on a twin axis.
mhw_performance.performance.plot()
plt.ylim([0, 1])
ax2 = plt.twinx()
ax2.scatter(chl_df.date, chl_df.chl, color='red')
plt.xticks(rotation=30, ha='right')

# Days since the first observation, used as the regression abscissa below.
chl_df['date_delta'] = (
    chl_df.reset_index()['date'] - chl_df.reset_index()['date'].min()
) / np.timedelta64(1, 'D')

fg = sns.lmplot(x='date_delta', y='chl', data=chl_df.reset_index().dropna())

# FIX: the original called `mhw_performance.perf`, which is not a variable of
# the dataset (the DataArray is named `performance`) and raised
# AttributeError.
sns.scatterplot(mhw_performance.performance)

fg.axes[0][0]

# FIX: this cell was corrupted in the original (`chl_df[''(...)` — a syntax
# error); it appears to have duplicated the `date_delta` computation above,
# restored here as that (idempotent) assignment.
chl_df['date_delta'] = (
    chl_df.reset_index()['date'] - chl_df.reset_index()['date'].min()
) / np.timedelta64(1, 'D')


def lowess_smooth(data):
    """Return a LOWESS-smoothed copy of a datetime-indexed Series.

    The index is passed to statsmodels' ``lowess`` as the exogenous variable
    (datetime64 values are used via their integer representation), and the
    smoothed values are returned re-indexed by datetime.
    """
    from statsmodels.nonparametric.smoothers_lowess import lowess

    endog = data.values
    exog = data.index.values
    smooth = lowess(endog, exog)
    index, xformed = np.transpose(smooth)
    return pd.Series(xformed, index=pd.to_datetime(index))


plt.plot(lowess_smooth(chl_df.dropna(subset=['chl']).set_index('date').chl))

# +
# Validation figure: isolate performance vs. Sentinel-3 chl-a (+ LOWESS fit),
# with a legend merged across both y-axes.
plt.figure(dpi=150)
sns.set(style='ticks')
ax1 = plt.axes()
_p0 = mhw_performance.performance.plot(label='Isolate Performance', ax=ax1)
ax1.set_ylim([0, 1])
ax2 = ax1.twinx()
_p1 = ax2.scatter(chl_df.date, chl_df.chl, color='none', alpha=1,
                  edgecolor='red', label=r'Sentinel-3 Chl-$\alpha$')
plt.xticks(rotation=30, ha='right')
ax2.set_ylabel(r"Chl-$\alpha$ (oc4me)")
_p2 = plt.plot(
    lowess_smooth(chl_df.dropna(subset=['chl']).set_index('date').chl),
    linewidth=2,
    color='red',
    linestyle='--',
    label=r'Sentinel-3 Chl-$\alpha$ LOWESS'
)

lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='lower right')
sns.despine(right=False)
plt.savefig("nepac_s3_validate.pdf", bbox_inches='tight')
# -

# ## Create Paired plot

fs = gcsfs.GCSFileSystem(project=GCP_PROJECT_ID, token="../gc-pangeo.json")
oisst = xr.open_zarr(fs.get_mapper(OISST_GCP))
# Re-centre longitudes from [0, 360) to [-180, 180) so .sel() works in
# conventional coordinates.
oisst = oisst.assign_coords(lon=(((oisst.lon + 180) % 360) - 180)).sortby('lon')

selected_sst = oisst.sel(
    lat = mhw_performance.lat.values.item(),
    lon = mhw_performance.lon.values.item(),
    time = mhw_performance.time
)

# +
fig, axes = plt.subplots(1, 2, figsize=(13, 4.5), dpi=150)

# Panel A: observed SST against the climatological seasonal cycle and the
# MHW threshold.
sst_ax = axes[0]
selected_sst.sst.plot(ax=sst_ax, color='black')
mhw_performance.clim_seas.plot(ax=sst_ax, color='blue')
mhw_performance.clim_thresh.plot(ax=sst_ax, color='green')
sst_ax.set_ylabel("SST [deg C]")
sst_ax.set_title("")
sst_ax.set_title("A) Marine Heat Wave", loc='left')

# Panel B: performance + chl-a validation (same content as the single
# validation figure above).
chl_ax = axes[1]
_p0 = mhw_performance.performance.plot(label='Isolate Performance', ax=chl_ax)
chl_ax.set_ylim([0, 1])
chl_twinax = chl_ax.twinx()
_p1 = chl_twinax.scatter(chl_df.date, chl_df.chl, color='none', alpha=1,
                         edgecolor='red', zorder=-1,
                         label=r'Sentinel-3 Chl-$\alpha$')
# NOTE(review): set_xticklabels(get_xticks()) freezes the current numeric tick
# positions as labels — fragile if the locator changes; kept as in original.
chl_ax.set_xticklabels(chl_ax.get_xticks(), rotation=30, ha='right')
chl_twinax.set_ylabel(r"Chl-$\alpha$ (oc4me)")
_p2 = chl_twinax.plot(
    lowess_smooth(chl_df.dropna(subset=['chl']).set_index('date').chl),
    linewidth=2,
    color='red',
    linestyle='--',
    label=r'Sentinel-3 Chl-$\alpha$ LOWESS'
)
lines, labels = chl_ax.get_legend_handles_labels()
lines2, labels2 = chl_twinax.get_legend_handles_labels()
chl_twinax.legend(lines + lines2, labels + labels2, loc='center right',
                  fancybox=True, bbox_to_anchor=(1.75, 0.5))
chl_ax.set_title("")
chl_ax.set_title("B) MHW Performance + Validation", loc='left')
sns.despine(right=False)
plt.savefig("nepac_s3_combined.pdf", bbox_inches='tight')
# -
validation/nwpacific-example-sentinel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Лабораторная работа № 1. Линейная регрессия
#
# На основе набора данных об измерениях роста и веса решите следующую задачу.
#
# Обучите модель линейной регрессии оценивать вес человека. Для начала постройте одномерную модель, где в качестве единственного признака будет только вес, который человек сообщил о себе сам. Для обучения линейной модели используйте класс из библиотеки scikit-learn - [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html)
#
# Файл с данными - [Davis.csv](https://github.com/sdukshis/ml-intro/blob/master/datasets/Davis.csv)
#
# Расширьте набор признаков, добавив данные об измеренном и сообщенном весе, а также данные о поле человека.
#
# Обратите внимание, что в некоторых строках встречаются пропуски. Это обычная ситуация при работе с реальными данными. Эти пропуски необходимо убрать из таблицы вместе со всей строкой. Это можно сделать с помощью метода [pandas.DataFrame.dropna](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html)

import pandas as pd

# %pylab inline

ds = pd.read_csv('https://raw.githubusercontent.com/sdukshis/ml-intro/master/datasets/Davis.csv', index_col=0)
ds.head()

from sklearn.linear_model import LinearRegression

# ## Задание
#
# 1. Обучите модель линейной регрессии на данных, используя в качестве целевой переменной рост (height), а в качестве признака вес (weight).
# 2. Вычислите значение среднеквадратичной ошибки для построенной модели.
# 3. Постройте прямую регрессии и точки обучающей выборки.
# 4. Расширьте пространство признаков, добавив туда пол и repwt.
# 5. Оцените, как изменилась среднеквадратичная ошибка.
assignments/01_LinearRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sympy as sp import numpy as np print(f"SymPy Version: {sp.__version__}") # 数式をキレイに表示する。 sp.init_printing() # - # ### 論文中の数式をSymPyで再現する # # #### 対象論文 # # [Three-dimensional cascaded lattice Boltzmann method: Improved implementation and consistent forcing scheme](https://journals.aps.org/pre/pdf/10.1103/PhysRevE.97.053309) # # ##### [参考] 論文要約 # # ``` # Phys. Rev. E 73, 066705 (2006)]で提案されたカスケード法または中心モーメントベースの格子ボルツマン法(CLBM)は、非常に優れた数値安定性を持っています。しかし、3次元CLBMのシミュレーションには2つの制約があります。第一に、従来の3次元CLBMの実装は、単一緩和時間(SRT)LBMに比べて煩雑な操作を必要とし、計算コストが非常に高くなります。第二に、一般的な力場を3次元CLBMに正確に組み込むことは困難である。本論文では、3DでCLBMを実装するための改良された手法を紹介します。その主な戦略は、簡略化された中心モーメントセットを採用し、一般的な多緩和時間(GMRT)フレームワークに基づいて中心モーメントベースの衝突演算子を実行することである。次に、最近提案されたCLBMのための一貫した強制スキーム[Fei and Luo, Phys. Rev. E 96, 053307 (2017)]を拡張して、一般的な力場を3D CLBMに組み込む。最近開発された非直交CLBM[Rosis, Phys. Rev. 
E 95, 013310 (2017)]と比較して、我々の実装は計算コストを大幅に削減することが証明された。非直交CLBMにおいて離散的な平衡分布関数を採用することの不整合を分析し、検証しています。ここで開発された3D CLBMと一貫した強制スキームは、いくつかのカノニカルな力駆動の流れの数値シミュレーションによって検証され、精度、収束性、ノンスリップルールとの整合性の点で非常に優れた特性が強調されている。最後に,ここで開発した3次元CLBMの技術は,3次元MRT-LBMの実装と実行をより効率的にするために応用できる。 # ``` # # #### 再現内容 # # 論文中、**式(2)** で定義されている2つのベクトル、 # # $$ # k_{nmp} = \langle f_i | e^m_x e^n_y e^p_y \rangle, \hspace{5mm} \tilde{k}_{nmp} = \langle f_i | (e_x - u_x)^m (e_y - u_y)^n (e_z - u_z)^p \rangle # $$ # # について、以下で定義される変換行列 $N$ の各要素を計算する。 # # $$ # \tilde{k}_{nmp} = N k_{nmp} # $$ # # また、添字$\boldsymbol{nmp}$の順序は、次のようになる。 # # $$ # k_{nmp} = \left[ k_{000},k_{100},k_{010},k_{001},k_{110},k_{101},k_{011},k_{200},k_{020},k_{002},k_{120},k_{102},k_{210},k_{201},k_{012},k_{021},k_{111},k_{220},k_{202},k_{022},k_{211},k_{121},k_{112},k_{122},k_{212},k_{221},k_{222} \right] \\ # \tilde{k}_{nmp} = \left[ \tilde{k}_{000},\tilde{k}_{100},\tilde{k}_{010},\tilde{k}_{001},\tilde{k}_{110},\tilde{k}_{101},\tilde{k}_{011},\tilde{k}_{200},\tilde{k}_{020},\tilde{k}_{002},\tilde{k}_{120},\tilde{k}_{102},\tilde{k}_{210},\tilde{k}_{201},\tilde{k}_{012},\tilde{k}_{021},\tilde{k}_{111},\tilde{k}_{220},\tilde{k}_{202},\tilde{k}_{022},\tilde{k}_{211},\tilde{k}_{121},\tilde{k}_{112},\tilde{k}_{122},\tilde{k}_{212},\tilde{k}_{221},\tilde{k}_{222} \right] # $$ # ### 数式で使用するシンボルの定義 # # - $u_x$, $u_y$, $u_z$ # - $e_x$, $e_y$, $e_z$ ux, uy, uz = sp.symbols(r"u_x, u_y, u_z") ux, uy, uz ex, ey, ez = sp.symbols(r"e_x, e_y, e_z") ex, ey, ez # ### ベクトルの添字の順序を定義する k_list = "k000,k100,k010,k001,k110,k101,k011,k200,k020,k002,k120,\ k102,k210,k201,k012,k021,k111,k220,k202,k022,k211,k121,\ k112,k122,k212,k221,k222".split(',') print(k_list) # ### 変換行列 $N$ の各要素を計算する # + # 変換行列のサイズ n_rows, n_column = (27, 27) # 求めるべき変換行列 N N = sp.zeros(n_rows, n_column) for row, k in enumerate(k_list): m, n, p = int(k[1]), int(k[2]), int(k[3]) k_expand = (ex - ux) ** m * (ey - uy) ** n * (ez - uz) ** p k_expand = sp.simplify(sp.expand(k_expand)) for column, 
k2 in enumerate(k_list): m2, n2, p2 = int(k2[1]), int(k2[2]), int(k2[3]) N[row, column] = k_expand.coeff(ex, n=m2).coeff(ey, n=n2).coeff(ez, n=p2) N # - # あまり意味がないが、逆行列も確認しておく N.inv() # ### 変換行列 $N$ を使用したベクトルの変換をC言語として出力する # まずシンボル定義 k = sp.MatrixSymbol(r'k_nmp', n_rows, 1) k k_tilde = N * k k_tilde for i in range(n_rows): code = sp.ccode(k_tilde[i], assign_to=f"k_tilde[{i}]") print(code)
notebooks/05-Metaprogramming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={}
# # Theory and Practice of Visualization Exercise 2

# + [markdown] nbgrader={}
# ## Imports

# + nbgrader={}
from IPython.display import Image

# + [markdown] nbgrader={}
# ## Violations of graphical excellence and integrity

# + [markdown] nbgrader={}
# Find a data-focused visualization on one of the following websites that is a *negative* example of the principles that Tufte describes in *The Visual Display of Quantitative Information*.
#
# * [CNN](http://www.cnn.com/)
# * [Fox News](http://www.foxnews.com/)
# * [Time](http://time.com/)
#
# Upload the image for the visualization to this directory and display the image inline in this notebook.

# + deletable=false nbgrader={"checksum": "bd4340d93d2efdf5c3864b5caca1f6ba", "grade": true, "grade_id": "theorypracticeex02a", "points": 2}
# Add your filename and uncomment the following line:
Image(filename='StockPicture.png')

# + [markdown] nbgrader={}
# Describe in detail the ways in which the visualization violates graphical *integrity* and *excellence*:

# + [markdown] deletable=false nbgrader={"checksum": "51d112a58baebcf9db9e75eb596a408c", "grade": true, "grade_id": "theorypracticeex02b", "points": 8, "solution": true}
# I do not even know what is going on in this graph. The x and y axes are not labeled. I wish there were a grid to see the slopes relative to one another. I have no idea what "Open" means. I guess the frame is necessary, but it looks ugly. It's also small... I even tried going to the link for just the image, but it was still tiny. I just wish this graph were bigger!
assignments/assignment04/TheoryAndPracticeEx02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Unit Testing `GiRaFFE_NRPy`: Fluxes of $\tilde{S}_i$ # # ### Author: <NAME> # # This notebook validates our new, NRPyfied HLLE solver against the function from the original `GiRaFFE` that calculates the flux for $\tilde{S}_i$ according to the method of Harten, Lax, von Leer, and Einfeldt (HLLE), assuming that we have calculated the values of the flux on the cell faces according to the piecewise-parabolic method (PPM) of [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf), modified for the case of GRFFE. # # **Module Status:** <font color=green><b> Validated: </b></font> This code has passed unit tests against the original `GiRaFFE` version. # # **Validation Notes:** This demonstrates the validation of [Tutorial-GiRaFFE_NRPy-Stilde-flux](../Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb). # # It is, in general, good coding practice to unit test functions individually to verify that they produce the expected and intended output. Here, we expect our functions `GRFFE__S_i__flux_in_dir_*.h` to produce identical output to the function `GRFFE__S_i__flux.C` in the original `GiRaFFE`. It should be noted that the two codes handle the parameter `flux_dirn` (the direction in which the code is presently calculating the flux through the cell) differently; in the original `GiRaFFE`, the function `GRFFE__S_i__flux()` expects a parameter `flux_dirn` with value 1, 2, or 3, corresponding to the functions `GRFFE__S_i__flux_in_dir_0()`, `GRFFE__S_i__flux_in_dir_1()`, and `GRFFE__S_i__flux_in_dir_2()`, respectively, in `GiRaFFE_NRPy`. 
# # ### NRPy+ Source Code for this module: # * [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb) Generates the code to compute the fluxes. # # ## Introduction: # # This notebook validates the NRPyfied Stilde_flux solver against the original `GiRaFFE` code. This will be done at a point with a random but realistic spacetime and a variety of magnetic fields and Valencia velocities to test edge cases. # # We'll write this in C because the codes we want to test are already written that way, and we would like to avoid modifying the files as much as possible. To do so, we will print the C code to a file. We will begin by including core functionality. We will also define standard parameters needed for GRFFE and NRPy+. # # When this notebook is run, the significant digits of agreement between the old `GiRaFFE` and new `GiRaFFE_NRPy` versions of the algorithm will be printed to the screen right after the code is run [here](#compile_run). # # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows # # 1. [Step 1](#setup): Set up core functions and parameters for unit testing the Stilde flux algorithm # 1. [Step 1.a](#c_flux) Write the C functions to compute the flux # 1. [Step 1.b](#free_params) Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` # 1. [Step 1.c](#download) Download files from `GiRaFFE` for comparison # 1. [Step 2](#mainc): `Stilde_flux_unit_test.c`: The Main C Code # 1. [Step 2.a](#compile_run): Compile and run the code to validate the output # 1. 
[Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='setup'></a> # # # Step 1: Set up core functions and parameters for unit testing the Stilde flux algorithm \[Back to [top](#toc)\] # # $$\label{setup}$$ # # We'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We will also declare the gridfunctions that are needed for this portion of the code. import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # The last things NRPy+ will require are the definition of type `REAL` and, of course, the functions we are testing. These files are generated on the fly. # + import shutil, os, sys # Standard Python modules for multiplatform OS-level functions nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) from outputC import outCfunction, lhrh # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import loop as lp # NRPy+: Generate C code loops import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface out_dir = "Validation" cmd.mkdir(out_dir) thismodule = "Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux" import GiRaFFE_NRPy.Stilde_flux as Sf # We will pass values of the gridfunction on the cell faces into the function. This requires us # to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix. 
alpha_face,gammadet_face = gri.register_gridfunctions("AUXEVOL",["alpha_face","gammadet_face"]) gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01") beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU") # We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU # on the right and left faces Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3) B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3) Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3) B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3) ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Stilde_flux_HLLED") gri.register_gridfunctions("AUXEVOL","cmax_x") gri.register_gridfunctions("AUXEVOL","cmin_x") gri.register_gridfunctions("AUXEVOL","cmax_y") gri.register_gridfunctions("AUXEVOL","cmin_y") gri.register_gridfunctions("AUXEVOL","cmax_z") gri.register_gridfunctions("AUXEVOL","cmin_z") sqrt4pi = par.Cparameters("REAL",thismodule,"sqrt4pi","sqrt(4.0*M_PI)") # And the function to which we'll write the output data: Stilde_fluxD = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Stilde_fluxD",DIM=3) # - # <a id='c_flux'></a> # # ## Step 1.a: Write the C functions to compute the flux \[Back to [top](#toc)\] # # $$\label{c_flux}$$ # Here, we will write out each of the three files that we need to generate to calculate the flux of $\tilde{S}_i$ in NRPy+. # + # And now, we'll write the files # In practice, the C functions should only loop over the interior; here, however, we can loop over # all points and set fewer parameters. 
subdir = "RHSs" cmd.mkdir(os.path.join(out_dir,subdir)) Sf.generate_C_code_for_Stilde_flux(os.path.join(out_dir,subdir),True, alpha_face, gamma_faceDD, beta_faceU, Valenciav_rU, B_rU, Valenciav_lU, B_lU, sqrt4pi, outCparams = "outCverbose=False,CSE_sorting=none",write_cmax_cmin=True) # - # <a id='free_params'></a> # # ## Step 1.b: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\] # # $$\label{free_params}$$ # # Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`. # # Then we output `free_parameters.h`, which sets some basic grid parameters as well as the speed limit parameter we need for this function. # + # Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h # par.generate_Cparameters_Ccodes(os.path.join(out_dir)) # Step 3.d.ii: Set free_parameters.h with open(os.path.join(out_dir,"free_parameters.h"),"w") as file: file.write(""" // Set free-parameter values. 
params.Nxx0 = 1; params.Nxx1 = 1; params.Nxx2 = 1; params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.0,-1.0,-1.0}; const REAL xxmax[3] = { 1.0, 1.0, 1.0}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx_plus_2NGHOSTS0-1.0); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx_plus_2NGHOSTS1-1.0); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx_plus_2NGHOSTS2-1.0); //printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; \n""") # Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(out_dir)) # - # <a id='download'></a> # # ## Step 1.c: Download files from `GiRaFFE` for comparison \[Back to [top](#toc)\] # # $$\label{download}$$ # # We'll also need to download the files in question from the `GiRaFFE` bitbucket repository. This code was originally written by <NAME> in the IllinoisGRMHD documentation; we have modified it to download the files we want. Of note is the addition of the `for` loop since we need three files (The function `GRFFE__S_i__flux()` depends on two other files for headers and functions). 
# + # First download the original IllinoisGRMHD source code import urllib original_file_url = ["https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/GiRaFFE_headers.h", "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/inlined_functions.C", "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/GiRaFFE/src/GRFFE__S_i__flux.C" ] original_file_name = ["GiRaFFE_headers.h", "inlined_functions.C", "GRFFE__S_i__flux.C" ] for i in range(len(original_file_url)): original_file_path = os.path.join(out_dir,original_file_name[i]) # Then download the original IllinoisGRMHD source code # We try it here in a couple of ways in an attempt to keep # the code more portable try: original_file_code = urllib.request.urlopen(original_file_url[i]).read().decode('utf-8') except: original_file_code = urllib.urlopen(original_file_url[i]).read().decode('utf-8') # Write down the file the original IllinoisGRMHD source code with open(original_file_path,"w") as file: file.write(original_file_code) # - # <a id='mainc'></a> # # # Step 2: `Stilde_flux_unit_test.c`: The Main C Code \[Back to [top](#toc)\] # # $$\label{mainc}$$ # # Now that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access.Now we can write our C code. First, we will import our usual libraries and define the various constants and macros we need, taking care to imitate CCTK functionality wherever necessary. In the main function, we will fill all relevant arrays with (appropriate) random values. That is, if a certain gridfunction should never be negative, we will make sure to only generate positive numbers for it. 
# We must also contend with the fact that in NRPy+, we chose to use the Valencia 3-velocity $v^i_{(n)}$, while in ETK, we used the drift velocity $v^i$; the two are related by $$v^i = \alpha v^i_{(n)} - \beta^i.$$

# +
# %%writefile $out_dir/Stilde_flux_unit_test.C
// Unit test: fill the face-value gridfunctions with random (but mutually
// consistent) data, then compute the Stilde flux contributions with both the
// NRPy+-generated kernels and the original ETK GiRaFFE routine so the two
// implementations can be compared digit-by-digit at the end.

// These are common packages that we are likely to need.
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "string.h" // Needed for strncmp, etc.
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#include <time.h>   // Needed to set a random seed.

// Standard GRFFE parameters:
const double GAMMA_SPEED_LIMIT = 2000.0;
#define NGHOSTS 1

// Standard NRPy+ memory access:
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
// Standard CCTK memory access:
#define CCTK_GFINDEX3D(thing,i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * (k) ) )

// Let's also #define the NRPy+ gridfunctions
#define ALPHA_FACEGF 0
#define GAMMA_FACEDD00GF 1
#define GAMMA_FACEDD01GF 2
#define GAMMA_FACEDD02GF 3
#define GAMMA_FACEDD11GF 4
#define GAMMA_FACEDD12GF 5
#define GAMMA_FACEDD22GF 6
#define BETA_FACEU0GF 7
#define BETA_FACEU1GF 8
#define BETA_FACEU2GF 9
#define VALENCIAV_RU0GF 10
#define VALENCIAV_RU1GF 11
#define VALENCIAV_RU2GF 12
#define B_RU0GF 13
#define B_RU1GF 14
#define B_RU2GF 15
#define VALENCIAV_LU0GF 16
#define VALENCIAV_LU1GF 17
#define VALENCIAV_LU2GF 18
#define B_LU0GF 19
#define B_LU1GF 20
#define B_LU2GF 21
#define CMAX_XGF 22
#define CMIN_XGF 23
#define CMAX_YGF 24
#define CMIN_YGF 25
#define CMAX_ZGF 26
#define CMIN_ZGF 27
#define STILDE_FLUX_HLLED0GF 28
#define STILDE_FLUX_HLLED1GF 29
#define STILDE_FLUX_HLLED2GF 30
#define NUM_AUXEVOL_GFS 31
#define STILDED0GF 0
#define STILDED1GF 1
#define STILDED2GF 2
#define NUM_EVOL_GFS 3

// The NRPy+ versions of the function. These should require relatively little modification.
// We will need this define, though:
#define REAL double
#include "declare_Cparameters_struct.h"
#include "RHSs/calculate_Stilde_flux_D0.h"
#include "RHSs/calculate_Stilde_flux_D1.h"
#include "RHSs/calculate_Stilde_flux_D2.h"

// Some needed workarounds to get the ETK version of the code to work
#define CCTK_REAL double
#define DECLARE_CCTK_PARAMETERS //
struct cGH{};
const cGH cctkGH;

// And include the code we want to test against
#include "GiRaFFE_headers.h"
#include "inlined_functions.C"
#include "GRFFE__S_i__flux.C"
// Include this one later because it requries kronecker_delta from GiRaFFE_headers.h
#include "RHSs/calculate_Stilde_rhsD.h"

int main() {

    paramstruct params;
#include "set_Cparameters_default.h"

    // Step 0c: Set free parameters, overwriting Cparameters defaults
    //          by hand or with command-line input, as desired.
#include "free_parameters.h"
#include "set_Cparameters-nopointer.h"

    // This is the array to which we'll write the NRPy+ variables.
    // NOTE(review): total_num_gridpoints already includes the NUM_AUXEVOL_GFS
    // factor, so the two mallocs below (and the CCTK_REAL arrays sized with it)
    // over-allocate by that factor. Harmless for a unit test, but confirm
    // before tightening.
    int total_num_gridpoints = NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0;
    REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * total_num_gridpoints);
    REAL *rhs_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * total_num_gridpoints);
    // These are the arrays to which we will write the ETK variables.
    CCTK_REAL dxi[4] = {0,invdx0,invdx1,invdx2}; // Note that we have to match flux_dirn=1:3
    CCTK_REAL METRIC_LAP_PSI4[NUMVARS_METRIC_AUX];
    CCTK_REAL Ur[MAXNUMVARS];
    CCTK_REAL Ul[MAXNUMVARS];
    CCTK_REAL FACEVAL[NUMVARS_FOR_METRIC_FACEVALS];
    CCTK_REAL cmax[total_num_gridpoints];
    CCTK_REAL cmin[total_num_gridpoints];
    CCTK_REAL st_x_flux[total_num_gridpoints];
    CCTK_REAL st_y_flux[total_num_gridpoints];
    CCTK_REAL st_z_flux[total_num_gridpoints];
    CCTK_REAL st_x_rhs[total_num_gridpoints];
    CCTK_REAL st_y_rhs[total_num_gridpoints];
    CCTK_REAL st_z_rhs[total_num_gridpoints];

    // Zero out the RHS accumulators for both implementations before summing
    // the three flux directions into them.
    for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) {
        for(int gf = 0; gf < NUM_EVOL_GFS; gf++) {
            // Initialize the RHSs to 0:
            rhs_gfs[IDX4S(gf,i0,i1,i2)] = 0;
        }
        st_x_rhs[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)] = 0.0;
        st_y_rhs[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)] = 0.0;
        st_z_rhs[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)] = 0.0;
    }

    // Note that we use the CCTK-style flux_dirn indexing, and run from 1 to 3. This is for compatibility with
    // the old code, since GiRaFFE_NRPy uses this integer for fewer things.
    for(int flux_dirn = 1; flux_dirn <= 3; flux_dirn++) {
        for (int i2 = NGHOSTS; i2 < NGHOSTS+Nxx2+1; i2++) for (int i1 = NGHOSTS; i1 < NGHOSTS+Nxx1+1; i1++) for (int i0 = NGHOSTS; i0 < NGHOSTS+Nxx0+1; i0++) {
        // for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) {
            // Now, it's time to make the random numbers.
            //const long int seed = time(NULL);
            // seed = 1570632212; is an example of a seed that produces
            // bad agreement for high speeds
            //const long int seed = 1574393335;
            //srand(seed); // Set the seed
            //printf("seed for random number generator = %ld; RECORD IF AGREEMENT IS BAD\n\n",seed);

            // We take care to make sure the corresponding quantities have the SAME value.
            // NOTE(review): the rand() calls below are order-critical -- the NRPy+
            // array entries and the ETK locals must receive identical draws, so
            // do not reorder any of these assignments.
            // NOTE(review): the other metric-like entries use *0.2-0.1; the
            // "-0.2-0.1" here looks like a typo for "*0.2-0.1" (the lapse still
            // lands in a usable range either way) -- confirm intent before changing.
            auxevol_gfs[IDX4S(ALPHA_FACEGF, i0,i1,i2)] =1.0+(double)rand()/RAND_MAX-0.2-0.1;
            const double alpha = auxevol_gfs[IDX4S(ALPHA_FACEGF, i0,i1,i2)];
            METRIC_LAP_PSI4[LAPSE] = alpha;
            //METRIC_LAP_PSI4[LAPM1] = METRIC_LAP_PSI4[LAPSE]-1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD00GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*0.2-0.1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD01GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD02GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD11GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*0.2-0.1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD12GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            auxevol_gfs[IDX4S(GAMMA_FACEDD22GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*0.2-0.1;

            // Generated by NRPy+:
            const double gammaDD00 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF, i0,i1,i2)];
            const double gammaDD01 = auxevol_gfs[IDX4S(GAMMA_FACEDD01GF, i0,i1,i2)];
            const double gammaDD02 = auxevol_gfs[IDX4S(GAMMA_FACEDD02GF, i0,i1,i2)];
            const double gammaDD11 = auxevol_gfs[IDX4S(GAMMA_FACEDD11GF, i0,i1,i2)];
            const double gammaDD12 = auxevol_gfs[IDX4S(GAMMA_FACEDD12GF, i0,i1,i2)];
            const double gammaDD22 = auxevol_gfs[IDX4S(GAMMA_FACEDD22GF, i0,i1,i2)];
            /*
             * NRPy+ Finite Difference Code Generation, Step 2 of 1: Evaluate SymPy expressions and write to main memory:
             */
            const double tmp0 = gammaDD11*gammaDD22;
            const double tmp1 = pow(gammaDD12, 2);
            const double tmp2 = gammaDD02*gammaDD12;
            const double tmp3 = pow(gammaDD01, 2);
            const double tmp4 = pow(gammaDD02, 2);
            const double tmp5 = gammaDD00*tmp0 - gammaDD00*tmp1 + 2*gammaDD01*tmp2 - gammaDD11*tmp4 - gammaDD22*tmp3;
            const double tmp6 = 1.0/tmp5;
            METRIC_LAP_PSI4[PSI6] = sqrt(tmp5);
            METRIC_LAP_PSI4[PSI2] = pow(METRIC_LAP_PSI4[PSI6],1.0/3.0);
            METRIC_LAP_PSI4[PSI4] = METRIC_LAP_PSI4[PSI2]*METRIC_LAP_PSI4[PSI2];
            const double Psim4 = 1.0/METRIC_LAP_PSI4[PSI4];
            METRIC_LAP_PSI4[PSIM4] = Psim4;

            // Copied from the ETK implementation
            CCTK_REAL gtxxL = gammaDD00*Psim4;
            CCTK_REAL gtxyL = gammaDD01*Psim4;
            CCTK_REAL gtxzL = gammaDD02*Psim4;
            CCTK_REAL gtyyL = gammaDD11*Psim4;
            CCTK_REAL gtyzL = gammaDD12*Psim4;
            CCTK_REAL gtzzL = gammaDD22*Psim4;

            /*********************************
             * Apply det gtij = 1 constraint *
             *********************************/
            const CCTK_REAL gtijdet = gtxxL * gtyyL * gtzzL + gtxyL * gtyzL * gtxzL + gtxzL * gtxyL * gtyzL
                                    - gtxzL * gtyyL * gtxzL - gtxyL * gtxyL * gtzzL - gtxxL * gtyzL * gtyzL;
            /*const CCTK_REAL gtijdet_Fm1o3 = fabs(1.0/cbrt(gtijdet));
            gtxxL = gtxxL * gtijdet_Fm1o3;
            gtxyL = gtxyL * gtijdet_Fm1o3;
            gtxzL = gtxzL * gtijdet_Fm1o3;
            gtyyL = gtyyL * gtijdet_Fm1o3;
            gtyzL = gtyzL * gtijdet_Fm1o3;
            gtzzL = gtzzL * gtijdet_Fm1o3;*/

            FACEVAL[GXX] = gtxxL;
            FACEVAL[GXY] = gtxyL;
            FACEVAL[GXZ] = gtxzL;
            FACEVAL[GYY] = gtyyL;
            FACEVAL[GYZ] = gtyzL;
            FACEVAL[GZZ] = gtzzL;
            FACEVAL[GUPXX] = ( gtyyL * gtzzL - gtyzL * gtyzL )/gtijdet;
            FACEVAL[GUPYY] = ( gtxxL * gtzzL - gtxzL * gtxzL )/gtijdet;
            FACEVAL[GUPZZ] = ( gtxxL * gtyyL - gtxyL * gtxyL )/gtijdet;
            /*auxevol_gfs[IDX4S(GAMMA_FACEUU00GF, i0,i1,i2)] = FACEVAL[GUPXX];
            auxevol_gfs[IDX4S(GAMMA_FACEUU11GF, i0,i1,i2)] = FACEVAL[GUPYY];
            auxevol_gfs[IDX4S(GAMMA_FACEUU22GF, i0,i1,i2)] = FACEVAL[GUPZZ];*/

            auxevol_gfs[IDX4S(BETA_FACEU0GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            const double betax = auxevol_gfs[IDX4S(BETA_FACEU0GF, i0,i1,i2)];
            FACEVAL[SHIFTX] = betax;
            auxevol_gfs[IDX4S(BETA_FACEU1GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            const double betay = auxevol_gfs[IDX4S(BETA_FACEU1GF, i0,i1,i2)];
            FACEVAL[SHIFTY] = betay;
            auxevol_gfs[IDX4S(BETA_FACEU2GF, i0,i1,i2)] = (double)rand()/RAND_MAX*0.2-0.1;
            const double betaz = auxevol_gfs[IDX4S(BETA_FACEU2GF, i0,i1,i2)];
            FACEVAL[SHIFTZ] = betaz;

            /* Generate physically meaningful speeds */
            auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            /* Superluminal speeds for testing */
            /*auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;
            auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;
            auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;*/
            // Convert Valencia 3-velocity (NRPy+ convention) to the ETK drift
            // velocity: v^i = alpha*v^i_(n) - beta^i.
            Ur[VX] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)]-betax;
            Ur[VY] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)]-betay;
            Ur[VZ] = alpha*auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)]-betaz;

            auxevol_gfs[IDX4S(B_RU0GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            Ur[BX_CENTER] = auxevol_gfs[IDX4S(B_RU0GF, i0,i1,i2)];
            auxevol_gfs[IDX4S(B_RU1GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            Ur[BY_CENTER] = auxevol_gfs[IDX4S(B_RU1GF, i0,i1,i2)];
            // Set Bz to enforce orthogonality of u and B; lower index on drift v^i, then set
            // Bz = -(ux*Bx+uy*By)/uz
            REAL ux,uy,uz;
            ux = gammaDD00*(Ur[VX]+betax) + gammaDD01*(Ur[VY]+betay) + gammaDD02*(Ur[VZ]+betaz);
            uy = gammaDD01*(Ur[VX]+betax) + gammaDD11*(Ur[VY]+betay) + gammaDD12*(Ur[VZ]+betaz);
            uz = gammaDD02*(Ur[VX]+betax) + gammaDD12*(Ur[VY]+betay) + gammaDD22*(Ur[VZ]+betaz);
            auxevol_gfs[IDX4S(B_RU2GF, i0,i1,i2)] = -(ux*Ur[BX_CENTER] + uy*Ur[BY_CENTER])/uz;
            Ur[BZ_CENTER] = auxevol_gfs[IDX4S(B_RU2GF, i0,i1,i2)];

            /* Generate physically meaningful speeds */
            auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            /* Superluminal speeds for testing */
            /*auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;
            auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;
            auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)] = 1.0+(double)rand()/RAND_MAX*9.0;*/
            Ul[VX] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)]-betax;
            Ul[VY] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)]-betay;
            Ul[VZ] = alpha*auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)]-betaz;

            auxevol_gfs[IDX4S(B_LU0GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            Ul[BX_CENTER] = auxevol_gfs[IDX4S(B_LU0GF, i0,i1,i2)];
            auxevol_gfs[IDX4S(B_LU1GF, i0,i1,i2)] = (double)rand()/RAND_MAX*2.0-1.0;
            Ul[BY_CENTER] = auxevol_gfs[IDX4S(B_LU1GF, i0,i1,i2)];
            // Set Bz to enforce orthogonality of u and B; lower index on drift v^i, then set
            // Bz = -(ux*Bx+uy*By)/uz
            ux = gammaDD00*(Ul[VX]+betax) + gammaDD01*(Ul[VY]+betay) + gammaDD02*(Ul[VZ]+betaz);
            uy = gammaDD01*(Ul[VX]+betax) + gammaDD11*(Ul[VY]+betay) + gammaDD12*(Ul[VZ]+betaz);
            uz = gammaDD02*(Ul[VX]+betax) + gammaDD12*(Ul[VY]+betay) + gammaDD22*(Ul[VZ]+betaz);
            auxevol_gfs[IDX4S(B_LU2GF, i0,i1,i2)] = -(ux*Ul[BX_CENTER] + uy*Ul[BY_CENTER])/uz;
            Ul[BZ_CENTER] = auxevol_gfs[IDX4S(B_LU2GF, i0,i1,i2)];

            // Compute the fluxes for the ETK version
            const int index = CCTK_GFINDEX3D(cctkGH,i0,i1,i2);
            GRFFE__S_i__flux(i0,i1,i2,flux_dirn,Ul,Ur,FACEVAL,METRIC_LAP_PSI4,cmax[index],cmin[index],st_x_flux[index],st_y_flux[index],st_z_flux[index]);
        }
        // Compute the fluxes for the NRPy+ version. Note the one-offset between flux_dirn and the function names,
        // since we set flux_dirn for backwards compatibility.
        if(flux_dirn==1) calculate_Stilde_flux_D0(&params,auxevol_gfs,rhs_gfs);
        if(flux_dirn==2) calculate_Stilde_flux_D1(&params,auxevol_gfs,rhs_gfs);
        if(flux_dirn==3) calculate_Stilde_flux_D2(&params,auxevol_gfs,rhs_gfs);
        calculate_Stilde_rhsD(flux_dirn,&params,auxevol_gfs,rhs_gfs);

        // Finite-difference the ETK fluxes into the ETK RHS accumulators,
        // stepping one point along the current flux direction via kronecker_delta.
        for(int i0=0;i0<Nxx_plus_2NGHOSTS0-1;i0++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1-1;i1++) for(int i2=0;i2<Nxx_plus_2NGHOSTS2-1;i2++) {
            const int index = CCTK_GFINDEX3D(cctkGH,i0,i1,i2);
            const int indexp1 = CCTK_GFINDEX3D(cctkGH,i0+kronecker_delta[flux_dirn][0],i1+kronecker_delta[flux_dirn][1],i2+kronecker_delta[flux_dirn][2]);
            st_x_rhs[index] += (st_x_flux[index] - st_x_flux[indexp1] ) * dxi[flux_dirn];
            st_y_rhs[index] += (st_y_flux[index] - st_y_flux[indexp1] ) * dxi[flux_dirn];
            st_z_rhs[index] += (st_z_flux[index] - st_z_flux[indexp1] ) * dxi[flux_dirn];
        }
    }

    // Significant Digits of Agreement between two doubles a and b.
#define SDA(a,b) 1.0-log10(2.0*fabs(a-b)/(fabs(a)+fabs(b))+1.0e-15)
    //printf("Valencia 3-velocity (right): %.4e, %.4e, %.4e\n",auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)]);
    //printf("Valencia 3-velocity (left): %.4e, %.4e, %.4e\n\n",auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)],auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)]);
    // We'll compare the output in-between each
    int i0 = 1, i1 = 1, i2 = 1;
    const int index = CCTK_GFINDEX3D(cctkGH,i0,i1,i2);
    printf("%.1f %.1f %.1f\n",SDA(rhs_gfs[IDX4S(STILDED0GF,i0,i1,i2)],st_x_rhs[index]),
                              SDA(rhs_gfs[IDX4S(STILDED1GF,i0,i1,i2)],st_y_rhs[index]),
                              SDA(rhs_gfs[IDX4S(STILDED2GF,i0,i1,i2)],st_z_rhs[index]));
    //printf("NRPy+ Results: %.16e, %.16e, %.16e\n",rhs_gfs[IDX4S(STILDED0GF, i0, i1, i2)],rhs_gfs[IDX4S(STILDED1GF, i0,i1,i2)],rhs_gfs[IDX4S(STILDED2GF, i0,i1,i2)]);
    //printf("ETK Results: %.16e, %.16e, %.16e\n",st_x_rhs[index],st_y_rhs[index],st_z_rhs[index]);
}
# -

# <a id='compile_run'></a>
#
# ## Step 2.a: Compile and run the code to validate the output \[Back to
# [top](#toc)\]
#
# $$\label{compile_run}$$
#
# And now, we will compile and run the C code. We also make python calls to time how long each of these steps takes.

# +
import cmdline_helper as cmd

results_file = "out.txt"

# Build the unit test, then execute it with stdout redirected into the
# results file so the comparison cell below can inspect it.
unit_test_src = os.path.join(out_dir, "Stilde_flux_unit_test.C")
unit_test_exe = os.path.join(out_dir, "Stilde_flux_unit_test")
cmd.C_compile(unit_test_src, unit_test_exe)
cmd.Execute(unit_test_exe, file_to_redirect_stdout=os.path.join(out_dir, results_file))
# -

# Below are the numbers we care about. These are the Significant Digits of Agreement between the HLLE fluxes computed by NRPy+ and ETK. Each row represents a flux direction; each entry therein corresponds to a component of StildeD. Each pair of outputs should show at least 14 significant digits of agreement.
#
# Note that in the case of very high velocities, numerical error will accumulate
# and reduce agreement significantly due to a catastrophic cancellation.

print("Now comparing the significant digits of agreements in each direction...")
with open(os.path.join(out_dir, results_file), "r") as file:
    sda_values = file.readline().split()
for i in range(3):
    # Report each StildeD component; abort with a nonzero exit code if the
    # agreement drops below 14 significant digits.
    print("SDA in Stilde_rhsD"+str(i)+" = "+sda_values[i])
    if float(sda_values[i]) < 14:
        sys.exit(1)

# <a id='latex_pdf_output'></a>
#
# # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file.
# After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)

import cmdline_helper as cmd  # NRPy+: Multi-platform Python command-line interface

# Render this notebook to a LaTeX-ed PDF, using the template one directory up.
notebook_basename = "Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux"
cmd.output_Jupyter_notebook_to_LaTeXed_PDF(notebook_basename,
                                           location_of_template_file=os.path.join(".."))
in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Stilde_flux.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="9V2cVyfp2P3L" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') # + id="2D63YHk12P3Q" #brfss_total = pd.read_csv('./csv_data/brfss_total.csv') brfss_total = pd.read_csv('./brfss_total.csv', index_col=0) # + id="Z2kdkZdgGMNM" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="2b7285d5-508f-42a0-9d5a-63a8e2fe5757" brfss_total.head() # + jupyter={"outputs_hidden": true} id="o0DYXE6F2P4D" colab={"base_uri": "https://localhost:8080/"} outputId="0448ae1c-0f04-44e7-9b78-798d8295d4d8" brfss_total.info() # + id="NeQqoOq22P37" colab={"base_uri": "https://localhost:8080/"} outputId="3b789883-6f84-4097-eb07-e4f75fb731fe" brfss_total.shape # + id="wQm3-uM79JFH" # changing value to value labels for sex, race, education level, and income brfss_total['SEX'] = brfss_total['SEX'].map({1:'Male', 2:'Female'}) brfss_total['_PRACE'] = brfss_total['_PRACE'].map({0:'Unknown', 1:'White', 2:'Black', 3:'Asian', 4:'Native Hawaiian or Other Pacific Islander', 5:'American Indian or Alaskan Native', 6:'Other race'}) brfss_total['_EDUCAG'] = brfss_total['_EDUCAG'].map({0:'Unknown', 1:'Did not graduate high school', 2:'Graduated high school', 3:'Attended college or technical school', 4: 'Graduated from College or Technical School'}) brfss_total['_INCOMG'] = brfss_total['_INCOMG'].map({0:'Unknown', 1:'Less than $15000', 2:'$15,000 to $25,000', 3:'$25,000 to $35,000', 4:'$35,000 to $50,000', 5:'$50,000 or more'}) # + id="L0tEZ7yZ563g" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="ff5729ff-df43-4ee4-e725-5af14f6770e4" brfss_total['SEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="OcgkcthPwGwP" # getting rid of all 
# unknown values so they don't appear in visualizations
# got guidance from https://stackoverflow.com/a/50698043
# Drop every row labeled 'Unknown' in any of the three demographic columns,
# mutating brfss_total in place exactly as the three original drop calls did.
for demo_col in ['_PRACE', '_EDUCAG', '_INCOMG']:
    brfss_total.drop(labels=brfss_total[brfss_total[demo_col] == 'Unknown'].index,
                     axis=0, inplace=True)

# + id="qGHeHPwAto3b" colab={"base_uri": "https://localhost:8080/", "height": 451} outputId="704b39fa-6c6d-478f-e903-4aaa11556108"
brfss_total.head()

# + id="PeI2Q0Rh-ow7" colab={"base_uri": "https://localhost:8080/", "height": 753} outputId="347b307b-d3ca-48ae-efae-927dadad8651"
# Shared settings for the side-by-side ("dodged") probability histograms.
dodge_kwargs = dict(multiple='dodge', stat='probability', common_norm=False,
                    discrete=True, shrink=0.8, palette='Spectral')

plt.figure(figsize=(15, 12))
sns.histplot(data=brfss_total, x='SEX', hue='_PRACE', **dodge_kwargs)
plt.xticks(rotation=45)
plt.title('Probability of Race for Each Sex')
plt.xlabel('Sex');
#plt.legend(title='Race', labels=brfss_total['_PRACE'].unique());
plt.savefig('sex_race.jpg')

# + id="GOWSwK6t74-U" colab={"base_uri": "https://localhost:8080/", "height": 227} outputId="995cdd36-35ea-4a5c-caf6-916dde7b2536"
brfss_total['_PRACE'].value_counts(normalize=True, ascending=False).to_frame()

# + id="BVNCxFlc-wiZ" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="adec91da-2f26-49ad-f640-a6d1e0aa5c80"
plt.figure(figsize=(21, 21))
sns.histplot(data=brfss_total, x='_EDUCAG', hue='_PRACE', **dodge_kwargs)
plt.xticks(rotation=45)
plt.title('Probability of Race for Each Education Category')
plt.xlabel('Education Category')
#plt.legend(title='Race', labels=brfss_total['_PRACE'].unique());
plt.savefig('race_education.jpg')

# + id="qQITh6oTaCA1" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="d9795269-6987-4c85-e8a6-d0af6127e822"
brfss_total.groupby('SEX')['_PRACE'].value_counts(normalize=True, ascending=False).to_frame()

# + id="Dp6hW4sL8ocG" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="87905ab6-40fe-49b7-9432-03d8572ae2a4"
brfss_total['_EDUCAG'].value_counts(normalize=True, ascending=False).to_frame()

# + id="q_WYO410_IHL" colab={"base_uri": "https://localhost:8080/", "height": 882} outputId="6fb8b837-8c70-49d0-fc21-5d99e32e12c3"
# Income mix within each education category.
plt.figure(figsize=(15, 12))
sns.histplot(data = brfss_total, x = '_EDUCAG', hue = '_INCOMG', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8, palette='Spectral')
plt.xticks(rotation=45)
plt.title('Probability of Income Category for Each Education Category')
plt.xlabel('Education Category')
#plt.legend(title='Income Category')
#labels=brfss_total['_INCOMG'].values);
plt.savefig('income_education.jpg')

# + id="Ai1tN8THbhBo" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="ff4a87ce-da3d-499c-c4f9-efda4696803e"
brfss_total.groupby('_EDUCAG')['_PRACE'].value_counts(normalize=True, ascending=False).to_frame()

# + id="6rBgezVW8u4A" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="a41ca420-241e-458f-8fdb-22996c81f527"
brfss_total['_INCOMG'].value_counts(normalize=True, ascending=False).to_frame()

# + id="pFcT9QA-_VJ0" colab={"base_uri": "https://localhost:8080/", "height": 790} outputId="ba36d33c-c15b-4efd-c6f3-d24bc1c4fa4c"
# Race mix within each income category.
plt.figure(figsize=(15, 12))
sns.histplot(data = brfss_total, x = '_INCOMG', hue='_PRACE', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8, palette='Spectral')
plt.xticks(rotation=45)
plt.title('Probability of Race for Each Income Category')
plt.xlabel('Income Category')
#plt.legend(title='Race', labels=brfss_total['_PRACE'].unique());
# BUGFIX: this figure was previously saved as 'income_education.jpg', silently
# overwriting the education-vs-income chart saved above; give the race-by-income
# chart its own filename.
plt.savefig('income_race.jpg');

# + id="vbSjZsOBbpya" colab={"base_uri": "https://localhost:8080/", "height": 977} outputId="7920012e-c57e-4397-ae02-e27c2b15c7bc"
brfss_total.groupby('_INCOMG')['_PRACE'].value_counts(normalize=True, ascending=False).to_frame()

# + id="khgW4UBuV_8M" colab={"base_uri": "https://localhost:8080/"} outputId="17597b26-29ca-407b-9d33-0d0446a58527"
# want to see which states are included by checking their values
brfss_total['_STATE'].value_counts(normalize=True)

# + id="1JSL0SIkWGRw"
# changing value to value labels for states
brfss_total['_STATE'] = brfss_total['_STATE'].map({53:'Washington', 27:'Minnesota', 55:'Wisconsin', 50:'Vermont', 37:'North Carolina', 30:'Montana', 22:'Louisiana', 47:'Tennessee', 19:'Iowa', 15:'Hawaii', 40:'Oklahoma', 5:'Arkansas', 11:'District of Columbia', 32:'Nevada'})

# + id="inHDMFeNAVfM" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4384bd06-e991-459c-8548-ddc3434a0179"
plt.figure(figsize=(19, 19))
sns.histplot(data = brfss_total, x='_STATE', hue='SEX', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8)
plt.xticks(rotation=45)
plt.title('Probability of Males and Females for Each State')
plt.xlabel('State')
#plt.legend(title='Sex', labels=brfss_total['SEX'].unique());
plt.savefig('sex_state.jpg');

# + id="FFjpdgD8Y68R"
# creating a separate ACE dataset for analysis
# BUGFIX: take an explicit copy -- a column subset of brfss_total may be a
# view, and the ACE recoding cells below assign into these columns, which is
# exactly the ambiguous chained-assignment pattern pandas' SettingWithCopy
# machinery warns about (a warning the notebook's warnings filter was hiding).
ace = brfss_total[['ACEDEPRS', 'ACEDRINK', 'ACEDRUGS', 'ACEPRISN', 'ACEDIVRC', 'ACEPUNCH',
                   'ACEHURT', 'ACESWEAR', 'ACETOUCH', 'ACETTHEM', 'ACEHVSEX']].copy()

# + id="IqZIEW1BMuez" colab={"base_uri": "https://localhost:8080/", "height": 642} outputId="a7cfdb8f-334f-437f-bf1b-79fde72309d6"
plt.figure(figsize=(12, 10))
sns.heatmap(ace.corr(), annot=True);

# + id="TSIvaOT0ZKek" colab={"base_uri": "https://localhost:8080/", "height": 587} outputId="23e2584d-4661-4e43-d33a-3797ebce1af0"
# updating ACE columns to be a count depending on the question
# first 5 questions are yes or no, so yes will be counted as 1 and no will be counted as 0
# last 6 are questions of frequency, never = 0, once = 1, more than once = 2 (since not given an exact number)
# Recode each ACE question to a numeric score so the answers can be summed.
ace['ACEDEPRS'] = ace['ACEDEPRS'].map({1:1, 2:0, 0:0})
ace['ACEDRINK'] = ace['ACEDRINK'].map({1:1, 2:0, 0:0})
ace['ACEDRUGS'] = ace['ACEDRUGS'].map({1:1, 2:0, 0:0})
ace['ACEPRISN'] = ace['ACEPRISN'].map({1:1, 2:0, 0:0})
ace['ACEDIVRC'] = ace['ACEDIVRC'].map({1:1, 2:0, 0:0})
ace['ACEPUNCH'] = ace['ACEPUNCH'].map({1:0, 2:1, 3:2, 0:0})
ace['ACEHURT'] = ace['ACEHURT'].map({1:0, 2:1, 3:2, 0:0})
ace['ACESWEAR'] = ace['ACESWEAR'].map({1:0, 2:1, 3:2, 0:0})
ace['ACETOUCH'] = ace['ACETOUCH'].map({1:0, 2:1, 3:2, 0:0})
ace['ACETTHEM'] = ace['ACETTHEM'].map({1:0, 2:1, 3:2, 0:0})
ace['ACEHVSEX'] = ace['ACEHVSEX'].map({1:0, 2:1, 3:2, 0:0})

# creating a column for counting the number of ACEs an individual had
# (codes outside the maps become NaN, which DataFrame.sum skips by default)
ace['ACE_Count'] = ace.sum(axis = 1)
# Copy the per-respondent total back onto the main frame, aligned by index.
brfss_total['ACE_Count'] = ace['ACE_Count']

# put in EDA notebook and create visualization(s)
ace['ACE_Count'].value_counts(normalize=True, ascending=False).to_frame()

# + id="VReg8CpHGU8B"
# changing value to value labels for ACE questions
brfss_total['ACEDEPRS'] = brfss_total['ACEDEPRS'].map({0:'Unknown', 1:'Yes', 2:'No'})
brfss_total['ACEDRINK'] = brfss_total['ACEDRINK'].map({0:'Unknown', 1:'Yes', 2:'No'})
brfss_total['ACEDRUGS'] = brfss_total['ACEDRUGS'].map({0:'Unknown', 1:'Yes', 2:'No'})
brfss_total['ACEPRISN'] = brfss_total['ACEPRISN'].map({0:'Unknown', 1:'Yes', 2:'No'})
brfss_total['ACEDIVRC'] = brfss_total['ACEDIVRC'].map({0:'Unknown', 1:'Yes', 2:'No'})
brfss_total['ACEPUNCH'] = brfss_total['ACEPUNCH'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})
brfss_total['ACEHURT'] = brfss_total['ACEHURT'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})
brfss_total['ACESWEAR'] = brfss_total['ACESWEAR'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})
brfss_total['ACETOUCH'] = brfss_total['ACETOUCH'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})
brfss_total['ACETTHEM'] = brfss_total['ACETTHEM'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})
brfss_total['ACEHVSEX'] = brfss_total['ACEHVSEX'].map({0:'Unknown', 1:'Never', 2:'Once', 3:'More than once'})

# + id="diXOtO8-hGvk"
# Persist the labeled dataset for the EDA notebook.
brfss_total.to_csv('brfss_total_value_labels.csv')

# + id="gp2w-nUxBFwB" colab={"base_uri": "https://localhost:8080/", "height": 743} outputId="c120bfcb-5991-482e-e115-4cc909bd3fe4"
plt.figure(figsize=(15, 12))
sns.histplot(data = brfss_total, x = 'ACE_Count', hue = 'SEX', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8)
plt.xticks(rotation=45)
plt.title('Distribution of ACE Count')
plt.xlabel('ACE Count');
plt.savefig('ACE_count.jpg')

# + id="qxpI3RMjBuGe" colab={"base_uri": "https://localhost:8080/", "height": 581} outputId="532dcb35-556d-41a5-8960-84e4996bc302"
plt.figure(figsize=(15, 9))
sns.histplot(brfss_total['ACEDEPRS'])
plt.xticks(rotation=45);

# + id="8bNNLicwXe56" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="1cb841b2-38a1-4166-ddb7-19774e7bf4e2"
# Per-group answer shares for each ACE question, broken down in turn by
# state, race, sex, education, and income. Same pattern repeats below.
# got guidance for this from https://re-thought.com/pandas-value_counts/
brfss_total.groupby('_STATE')['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="1kwDChhdFQMq" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="d9f2474f-d902-4005-d3f5-33c6de0628c6"
brfss_total.groupby('_PRACE')['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="UPoUlbt7FVA2" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="8565b0c5-9a91-4bf3-dd03-06a9f23747b3"
brfss_total.groupby('SEX')['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="JqD-ksnMFkHL" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="6feae682-0a44-41fc-b195-16661add6493"
brfss_total.groupby('_EDUCAG')['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="nyObkrUCFkO1" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="b5ca7d26-6cf9-4963-a215-c5743eb2caf3"
brfss_total.groupby('_INCOMG')['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="fr-f3yxpX0O8" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="92805180-5196-47a4-9926-291f00addd03"
brfss_total.groupby('_STATE')['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame()

# + id="6RtbiIFaGWUQ" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="17968e82-d9fb-46fa-da30-1970e7d29860"
brfss_total.groupby('_PRACE')['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame()

# + id="5A7pN21gGWav" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="0a81d6d0-34fe-43e9-f260-a94339d1ba03"
brfss_total.groupby('SEX')['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame()

# + id="A5SyoGQHGWhC" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="b00c976b-62d5-4c55-97cd-fb3089339474"
brfss_total.groupby('_EDUCAG')['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame()

# + id="M4_2ykq_GWrJ" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="dc3a75cc-d493-4d4d-83f7-05122f5f1195"
brfss_total.groupby('_INCOMG')['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame()

# + id="F1G-hLvLX5Gh" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="bac91edd-db5d-409a-9dfe-ea709dc1a02c"
brfss_total.groupby('_STATE')['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="cZQhRBYy7J11" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="dc3b7f2e-29d0-4226-b60f-7224e86a5a7c"
brfss_total.groupby('_PRACE')['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="8CQbDzIKGrNb" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="a032bd6d-bb81-47ec-aa05-4adeaaec85d5"
brfss_total.groupby('SEX')['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="zCgC53BYGrUu" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="807c6455-412c-4ead-8030-2045ac168ce9"
brfss_total.groupby('_EDUCAG')['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="pTUoTI5AGrcQ" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="8e83ef05-5d9a-42f6-dae0-1b1cc05ee91b"
brfss_total.groupby('_INCOMG')['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame()

# + id="CUmUtEzqX9ik" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="73d416a4-fd94-4c4c-c156-5b85136a4097"
brfss_total.groupby('_STATE')['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame()

# + id="bVX5IXktG7T3" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="f852e74f-2bac-492a-d9fa-1ae481496178"
brfss_total.groupby('_PRACE')['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame()

# + id="WvRYHBMCG7e4" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="50966144-92b6-4ad8-b79c-363123fd8bc3"
brfss_total.groupby('SEX')['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame()

# + id="kYfra3RlG7o3" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="a83bfa99-a61d-415b-940b-9b6e331703a5"
brfss_total.groupby('_EDUCAG')['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame()

# + id="ECh6rbf57J8k" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="9cf5e2e0-b040-41e3-f3c9-fdb09066857b"
brfss_total.groupby('_INCOMG')['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame()

# + id="OboO-bu4YBJd" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="aa78e179-6a4a-42c1-e3b5-59d99e00b11a"
brfss_total.groupby('_STATE')['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame()

# + id="oW7Yb-EeHEhF" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="1b3344b2-82cc-4a6a-dc32-1dcfcc965cd3"
brfss_total.groupby('_PRACE')['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame()

# + id="dW-uGBIxHEos" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="04a36367-da66-4e1e-eada-799a5277d810"
brfss_total.groupby('SEX')['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame()

# + id="pnKtaLalHEwR" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="932e7a1d-d032-439f-ec76-54bbea98ea6a"
brfss_total.groupby('_EDUCAG')['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame()

# + id="Stx50sG27KDt" colab={"base_uri": "https://localhost:8080/", "height": 527} outputId="e4733813-5e08-496b-ab1e-94411361506f"
brfss_total.groupby('_INCOMG')['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame()

# + id="Yt7EnwfsYFMx" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="52e78d34-e552-482e-8f0d-0ab768742fed"
brfss_total.groupby('_STATE')['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="e08DigZ0HR0m" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="03dab240-3ad3-4ac3-9587-2916c9b7f242"
brfss_total.groupby('_PRACE')['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="ZfW2jw6SHR7k" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="54747e17-466d-4984-87cf-745665881aa5"
brfss_total.groupby('SEX')['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="EIwx47FlHSCP" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="0644e756-9967-4b4c-c3db-75b9844d057b"
brfss_total.groupby('_EDUCAG')['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="7bFrRt2a7KJ6" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="aff092d5-2995-4f6e-8792-4f34061eed73"
brfss_total.groupby('_INCOMG')['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="4NJPUx0aYI-S" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="95ef2cb7-b2cf-41f1-fdf9-7d3c10187feb"
brfss_total.groupby('_STATE')['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame()

# + id="2Zar-0IxHitE" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="5c62ef87-9100-4f2e-9680-4500a591c6c5"
brfss_total.groupby('_PRACE')['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame()

# + id="Ab-8z4p1Hi0k" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="7fb9ea95-b722-4b61-baaf-7381f2eacfbc"
brfss_total.groupby('SEX')['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame()

# + id="nPzji3ibHi8X" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="c824266d-eb6f-410a-bfd8-2b1d357c969e"
brfss_total.groupby('_EDUCAG')['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame()

# + id="JDKCSXiW7KQ4" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="5d1629ca-7856-4be4-e2d8-b897b737e064"
brfss_total.groupby('_INCOMG')['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame()

# + id="JEVeQqyvYMyY" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="59801465-c5a0-4c4c-a1db-d11ef911ca3f"
brfss_total.groupby('_STATE')['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame()

# + id="_OJ5lhn5Htwb" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="f6751597-bd10-41bb-dbf5-21b1361450fd"
brfss_total.groupby('_PRACE')['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame()

# + id="eXecy4_SHt4T" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="b6762811-fe47-47e3-bf3d-2baf3ae4903b"
brfss_total.groupby('SEX')['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame()

# + id="xjv4BWVwHuAF" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="23721c07-6493-4d43-eb72-3cc3cc5efc9b"
brfss_total.groupby('_EDUCAG')['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame()

# + id="lml2d2Kd7KYH" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="12122ae4-2659-4b90-e3f4-f82b5e1fc599"
brfss_total.groupby('_INCOMG')['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame()

# + id="wxQhapVyYPvP" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8f5014f6-ee5e-4fb1-c609-99bb44ed68dd"
brfss_total.groupby('_STATE')['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="trr-Yi1EH_7U" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="fd78f749-5f6e-482f-e597-5495514a8657"
brfss_total.groupby('_PRACE')['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="zZGvhJ6MIADG" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="f6330b89-8246-4bd4-9123-dd96e19938ff"
brfss_total.groupby('SEX')['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="CBjB2HrZIAKk" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="7ec4d371-f813-44a2-d6b3-cf6e8cf2c170"
brfss_total.groupby('_EDUCAG')['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="rmmit-4q7Kih" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="e259fe6f-bc02-41d0-9b00-8751e4ed6adb"
brfss_total.groupby('_INCOMG')['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame()

# + id="qRnGf-UHYS5H" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7a061237-9087-48ce-cd78-4612cbe1f41f"
brfss_total.groupby('_STATE')['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame()

# + id="TpbVtTtNINim" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="ea50ade5-d7c8-43fb-bf7c-e060928625e8"
brfss_total.groupby('_PRACE')['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame()

# + id="jpxxOdoHINqC" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="6b99116e-3f79-4f2d-b633-39467f3f095b"
brfss_total.groupby('SEX')['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame()

# + id="lDv89VfEINwY" colab={"base_uri":
"https://localhost:8080/", "height": 557} outputId="5e5654f4-4118-4c37-adb7-30d30c6ea3a2" brfss_total.groupby('_EDUCAG')['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame() # + id="dNVFLXKh7jc2" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="e43f32e5-d304-4be2-e02c-17e0e824d73c" brfss_total.groupby('_INCOMG')['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame() # + id="Q9iJuyCEYYAj" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="63652f21-152a-4651-b992-57dfb6a4aaac" brfss_total.groupby('_STATE')['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="iXyWsLwpIaye" colab={"base_uri": "https://localhost:8080/", "height": 797} outputId="10fba0ef-5539-4c39-b5e8-e5bc7115794d" brfss_total.groupby('_PRACE')['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="difaY0t3Ia5P" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="77dca60b-980b-496c-eeb6-e327678cec1c" brfss_total.groupby('SEX')['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="03C0RQ1mIbJE" colab={"base_uri": "https://localhost:8080/", "height": 557} outputId="43061528-6401-4bc3-e57b-73899095dff8" brfss_total.groupby('_EDUCAG')['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="4gzlEx9-7jiq" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="68bec2bb-aa46-4acb-af8c-f7c4b80221ac" brfss_total.groupby('_INCOMG')['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="Y1WmPWk02P4M" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="27de7b38-5007-45fd-9c74-76a1ff1a2af7" brfss_total['ACEDEPRS'].value_counts(normalize=True, ascending=False).to_frame() # + id="DWrcJ1kJI7CH" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="8e596eea-0366-4063-8caf-a5af146cda8f" brfss_total['ACEDRINK'].value_counts(normalize=True, ascending=False).to_frame() # + 
id="4_sWhpI1I7OV" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="6fb66f0d-4a70-4ff3-eb11-1268c5a9219b" brfss_total['ACEDRUGS'].value_counts(normalize=True, ascending=False).to_frame() # + id="kXJSQpo7JE7y" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="5e238654-2486-4e3a-a4b8-ed91307034a4" brfss_total['ACEPRISN'].value_counts(normalize=True, ascending=False).to_frame() # + id="_Zj56DlfJFGX" colab={"base_uri": "https://localhost:8080/", "height": 137} outputId="d9980ce2-a670-4736-fd13-cc08693b4fa0" brfss_total['ACEDIVRC'].value_counts(normalize=True, ascending=False).to_frame() # + id="DLgzPwAmJFMK" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="bb9cbed4-d6ac-43d4-a6b7-cc408b9fd575" brfss_total['ACEPUNCH'].value_counts(normalize=True, ascending=False).to_frame() # + id="7RKZdYHgJFSA" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="3ba21817-3fea-49a3-f899-d87c23250bc7" brfss_total['ACEHURT'].value_counts(normalize=True, ascending=False).to_frame() # + id="E9c0xg6oJFYD" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="e5151206-11c9-4d9c-8983-ebedb4e1feef" brfss_total['ACESWEAR'].value_counts(normalize=True, ascending=False).to_frame() # + id="3aTS9EwKJVuZ" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="e6930999-b394-4c6a-daf3-068b1376969c" brfss_total['ACETOUCH'].value_counts(normalize=True, ascending=False).to_frame() # + id="Hy7w4hdBJV18" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="2485c465-afea-4e33-97c0-761c574febbe" brfss_total['ACETTHEM'].value_counts(normalize=True, ascending=False).to_frame() # + id="_kpEoHXiJWBs" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="7505ca7c-6df1-4868-b647-848aa7b18cbd" brfss_total['ACEHVSEX'].value_counts(normalize=True, ascending=False).to_frame() # + id="EV_kG7n3IGdu" colab={"base_uri": "https://localhost:8080/", "height": 623} 
outputId="593239d4-1c26-4fdf-cfe3-2e20c637c6f7" plt.figure(figsize=(15, 10)) sns.histplot(data=brfss_total, x='ACEDEPRS', stat='probability', discrete=True, shrink=0.8) # + id="8tbTTD992P4Q" colab={"base_uri": "https://localhost:8080/", "height": 583} outputId="cf23b4e4-ccdc-4383-f2db-083626cc4ac3" plt.figure(figsize=(12, 9)) plt.title('Household Challenges') plt.hist(x=[brfss_total['ACEDEPRS'], brfss_total['ACEDRINK'], brfss_total['ACEDRUGS'], brfss_total['ACEPRISN'], brfss_total['ACEDIVRC']]) plt.ylabel('Count') plt.legend(labels=['Mental Illness', 'Substance Abuse: Drinking', 'Substance Abuse: Drugs', 'Incarcerated Household Member', 'Parental Separation or Divorce'], loc='best') plt.xticks(rotation=45); plt.savefig('household_challenges.jpg') # + id="2r9ooj572P4a" colab={"base_uri": "https://localhost:8080/", "height": 604} outputId="4fdb4c8d-de5a-49fa-c8d7-1839c7be39ec" plt.figure(figsize=(12, 9)) plt.title('Abuse') plt.hist(x=[brfss_total['ACEHURT'], brfss_total['ACEHVSEX'], brfss_total['ACEPUNCH'], brfss_total['ACESWEAR'], brfss_total['ACETOUCH'], brfss_total['ACETTHEM']]) plt.ylabel('Count') plt.legend(labels=['Physical Abuse', 'Sexual Abuse: Rape', 'Intimate Partner Violence', 'Emotional Abuse', 'Sexual Abuse: Assaulted', 'Sexual Abuse: Forced to Touch']) plt.xticks(rotation=45); plt.savefig('abuse.jpg') # + id="90RQMZbVFBZ0" colab={"base_uri": "https://localhost:8080/", "height": 621} outputId="518e82bb-fa47-490c-a1b2-8b66b4c35e2b" plt.figure(figsize=(15, 10)) sns.histplot(data=brfss_total, x='ACEDEPRS', hue='_PRACE', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8) plt.title('Probability of having a family member with mental illness by Race'); plt.savefig('depr_race.jpg') # + id="5g8sJjteFIr4" colab={"base_uri": "https://localhost:8080/", "height": 623} outputId="9ba9cb96-2221-4607-a7bd-9bd2a0cb7142" plt.figure(figsize=(15, 10)) sns.histplot(data=brfss_total, x='ACEDEPRS', hue='SEX', multiple='dodge', 
stat='probability', common_norm = False, discrete=True, shrink=0.8) # + id="e5lujY4kHw58" colab={"base_uri": "https://localhost:8080/", "height": 623} outputId="a2821a86-0d93-4e1f-b788-63d098f0cf6e" plt.figure(figsize=(15, 10)) sns.histplot(data=brfss_total, x='ACEDEPRS', hue='_INCOMG', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8) # + id="NvsEfhdqH3jx" colab={"base_uri": "https://localhost:8080/", "height": 623} outputId="84b4c325-9744-4e46-cc88-da8985bb662a" plt.figure(figsize=(15, 10)) sns.histplot(data=brfss_total, x='ACEDEPRS', hue='_EDUCAG', multiple='dodge', stat='probability', common_norm = False, discrete=True, shrink=0.8) # + id="VcpSc661IBOm"
3_eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: torch # language: python # name: torch # --- import numpy as np import agent import environment import matplotlib.pyplot as plt from importlib import reload # to change the modules and reload them updated reload(agent) reload(environment) episodes = 2000 # number of training episodes episode_length = 50 # maximum episode length x = 10 # horizontal size of the box y = 10 # vertical size of the box goal = [0, 3] # objective point discount = 1.-1./episode_length # exponential discount factor softmax = True # set to true to use Softmax policy sarsa = False # set to true to use the Sarsa algorithm R0 = 0 # Changes made: # 1. Consider a discount factor of $\delta = 1 - \frac{1}{T}$, where T is the length of an episode. This is equivalent to consider future payoffs without discount in T-1 out of T cases and equal to 0 in just 1 out of T cases (that is the case when the episode ends). # 2. Introduced a baseline negative reward for actions. This can be thought as a punishment for an agent that decides to stay still and it would be useful especially in cases where the episode ends when the agent reaches the goal (because there is a just a single positive reward in the entire episode, so to maximize the reward it has to reach the goal as soon as possible). 
# +
# Alternative (constant / linear) schedules kept for reference:
#alpha = np.ones(episodes) * 0.25
#epsilon = np.linspace(0.8, 0.01, episodes)

EPS_START = 0.8
eps = EPS_START # starting value of epsilon
# Adaptive epsilon-greedy schedule: exponential decay calibrated so that
# epsilon has dropped by a factor 10^-2 at the last epoch.
epsilon = np.array(list(map(lambda i : eps*np.exp(-i*2*np.log(10)/episodes), np.arange(0,episodes+1))))

ALPHA_START = 1
a = ALPHA_START # starting value of alpha
# Adaptive learning rate: exponential decay calibrated so that alpha has
# dropped by a factor 10^-1 at the last epoch.
alpha = np.array(list(map(lambda i : a*np.exp(-i*1*np.log(10)/episodes), np.arange(0,episodes+1))))
# -

# Changes made:
#
# Exponential decaying schedule both for the exploration factor $\epsilon$ and
# the learning rate $\alpha$, in order to train fast and get a stable result.

# initialize the agent
learner = agent.Agent((x * y), 5, discount, max_reward=1, softmax=softmax, sarsa=sarsa)


def train_agent(learner, alpha, epsilon, R0, episodes=2000, episode_length=50,
                x = 10, y = 10, goal = [0, 3], verbose = False, print_every=10):
    """Train `learner` in the sandbox Environment.

    Parameters
    ----------
    learner : agent.Agent
        Agent to train; updated in place.
    alpha, epsilon : array-like
        Per-episode learning-rate and exploration schedules; entry `index`
        is used for the whole of episode `index`.
    R0 : float
        Baseline per-move reward forwarded to the environment.
    episodes, episode_length : int
        Number of episodes and maximum steps per episode.
    x, y : int
        Grid dimensions; a state [r, c] is flattened as r * y + c.
    goal : list[int]
        Objective cell (mutable default kept for interface compatibility;
        it is never modified here).
    verbose : bool
        If True, print the running average reward every `print_every` episodes.

    Returns
    -------
    (learner, reward_log) where reward_log holds the per-turn average reward
    of each episode.
    """
    reward_log = []
    # perform the training
    for index in range(0, episodes):
        # start each episode from a random cell
        initial = [np.random.randint(0, x), np.random.randint(0, y)]
        # initialize environment
        state = initial
        env = environment.Environment(x, y, state, goal, R0)
        reward = 0
        # run episode
        for step in range(0, episode_length):
            # find state index
            state_index = state[0] * y + state[1]
            # choose an action
            action = learner.select_action(state_index, epsilon[index])
            # the agent moves in the environment
            result = env.move(action)
            # Q-learning update
            next_index = result[0][0] * y + result[0][1]
            learner.update(state_index, action, result[1], next_index,
                           alpha[index], epsilon[index])
            # update state and reward
            reward += result[1]
            state = result[0]
        reward /= episode_length
        if verbose and (index+1) % print_every == 0:
            print("Average reward of %.3f at episode %d"%(reward,index+1))
        reward_log.append(reward)
    return learner, reward_log


learner, reward_log = train_agent(learner, alpha, epsilon, R0, verbose=True)

# save everything about a simulation
learner_dict = dict(discount=discount, softmax=softmax, sarsa=sarsa)
training_dict = dict(alpha=alpha, epsilon=epsilon)
env_dict = dict(episodes=episodes, episode_length=episode_length, x=x, y=y, goal=goal)
reward_dict = dict(reward_log=reward_log)
simulation = [learner_dict, training_dict, env_dict, reward_dict]

window = 50
# FIX: this previously read the undefined name `reward_logQ` (NameError); the
# log of the training run above is `reward_log`.
mean_rewardQ = [np.mean(reward_log[i*window:(i+1)*window]) for i in range(int(len(reward_log)/window))]

np.save("Results/Q_soft_d098_exp_alpha_eps", simulation)

# # Analysis

# ## Q-learning

# +
# load and unpack
simulation_b = np.load("Results/initial_Q-learning.npy", allow_pickle=True) # basic
simulation_s = np.load("Results/initial_Q_soft.npy", allow_pickle=True) # basic+softmax
simulation_f = np.load("Results/Q_soft_d098_exp_alpha_eps.npy", allow_pickle=True) # final configuration

[learner_dict_b, training_dict_b, env_dict_b, reward_dict_b] = simulation_b
[learner_dict_s, training_dict_s, env_dict_s, reward_dict_s] = simulation_s
# FIX: the first target here previously was `learner_dict_s`, silently
# clobbering the softmax run's learner dict.
[learner_dict_f, training_dict_f, env_dict_f, reward_dict_f] = simulation_f

reward_logQ_b = reward_dict_b['reward_log']
reward_logQ_s = reward_dict_s['reward_log']
reward_logQ_f = reward_dict_f['reward_log']

window = 50
T = int(len(reward_logQ_b)/window)
n_episodes2 = np.arange(T)*window
mean_rewardQ_b = [np.mean(reward_logQ_b[i*window:(i+1)*window]) for i in range(T)]
mean_rewardQ_s = [np.mean(reward_logQ_s[i*window:(i+1)*window]) for i in range(T)]
mean_rewardQ_f = [np.mean(reward_logQ_f[i*window:(i+1)*window]) for i in range(T)]
# -

# Raw (per-episode) learning curve of the basic Q-learning run.
n_episodes = np.arange(len(reward_logQ_b))
plt.figure(figsize = (8,6))
plt.plot(n_episodes, reward_logQ_b)
plt.xlabel("Number of episodes", fontsize=16)
plt.ylabel("Average reward per turn", fontsize=16)
if learner_dict_b['sarsa']:
    plt.title("SARSA learning - Softmax: %r - Discount: %.2f"%(learner_dict_b['softmax'],learner_dict_b['discount']), fontsize=16)
else:
    plt.title("Q-learning - Softmax: %r - Discount: %.2f"%(learner_dict_b['softmax'],learner_dict_b['discount']), fontsize=16)

# Windowed (smoothed) comparison of the three Q-learning configurations.
plt.figure(figsize = (8,6))
plt.plot(n_episodes2, mean_rewardQ_b, label = 'Q-learning basic')
plt.plot(n_episodes2, mean_rewardQ_s, label = 'Q-learning softmax')
plt.plot(n_episodes2, mean_rewardQ_f, label = 'Q-learning final')
plt.legend(fontsize=13)
plt.xlabel("Number of episodes", fontsize=16)
plt.ylabel("Average reward per turn", fontsize=16)
plt.title("Different implementations of Q-learning", fontsize=16)
plt.show()

# ## SARSA

# +
# load and unpack
simulation_b = np.load("Results/initial_SARSA.npy", allow_pickle=True) # basic
simulation_s = np.load("Results/initial_SARSA_soft.npy", allow_pickle=True) # basic+softmax
simulation_f = np.load("Results/SARSA_soft_d098_exp_alpha_eps.npy", allow_pickle=True) # final configuration

[learner_dict_b, training_dict_b, env_dict_b, reward_dict_b] = simulation_b
[learner_dict_s, training_dict_s, env_dict_s, reward_dict_s] = simulation_s
# FIX: same unpacking typo as in the Q-learning section (`learner_dict_s`).
[learner_dict_f, training_dict_f, env_dict_f, reward_dict_f] = simulation_f

reward_logS_b = reward_dict_b['reward_log']
reward_logS_s = reward_dict_s['reward_log']
reward_logS_f = reward_dict_f['reward_log']

window = 50
T = int(len(reward_logS_b)/window)
n_episodes2 = np.arange(T)*window
mean_rewardS_b = [np.mean(reward_logS_b[i*window:(i+1)*window]) for i in range(T)]
mean_rewardS_s = [np.mean(reward_logS_s[i*window:(i+1)*window]) for i in range(T)]
mean_rewardS_f = [np.mean(reward_logS_f[i*window:(i+1)*window]) for i in range(T)]
# -

# FIX: this cell referenced the undefined names `reward_logS` and the stale
# `learner_dict` from the training section; it plots the *basic* SARSA run,
# so the `_b` variables are the intended ones.
n_episodes = np.arange(len(reward_logS_b))
plt.figure(figsize = (8,6))
plt.plot(n_episodes, reward_logS_b)
plt.xlabel("Number of episodes", fontsize=16)
plt.ylabel("Average reward per turn", fontsize=16)
if learner_dict_b['sarsa']:
    plt.title("SARSA learning - Softmax: %r - Discount: %.2f"%(learner_dict_b['softmax'],learner_dict_b['discount']), fontsize=16)
else:
    plt.title("Q-learning - Softmax: %r - Discount: %.2f"%(learner_dict_b['softmax'],learner_dict_b['discount']), fontsize=16)

plt.figure(figsize = (8,6))
plt.plot(n_episodes2, mean_rewardS_b, label = 'SARSA basic')
plt.plot(n_episodes2, mean_rewardS_s, label = 'SARSA softmax')
plt.plot(n_episodes2, mean_rewardS_f, label = 'SARSA final')
plt.legend(fontsize=13)
plt.title("Different implementations of SARSA", fontsize=16)
plt.xlabel("Number of episodes", fontsize=16)
plt.ylabel("Average reward per turn", fontsize=16)
plt.show()

# ## Confronting the two

plt.figure(figsize = (8,6))
plt.plot(n_episodes2, mean_rewardQ_b, label = 'Q-learning basic')
plt.plot(n_episodes2, mean_rewardS_b, label = 'SARSA basic')
plt.plot(n_episodes2, mean_rewardQ_s, label = 'Q-learning softmax')
plt.plot(n_episodes2, mean_rewardS_s, label = 'SARSA softmax')
plt.plot(n_episodes2, mean_rewardQ_f, label = 'Q-learning final')
plt.plot(n_episodes2, mean_rewardS_f, label = 'SARSA final')
plt.legend(fontsize=13)
plt.title("Q-learning vs SARSA", fontsize=16)
plt.xlabel("Number of episodes", fontsize=16)
plt.ylabel("Average reward per turn", fontsize=16)
plt.show()

# # Value map and rendering
#
# A value map is a useful and simple way to visualize what an agent has
# learned. The idea is to take the Q-values table, for each state choose the
# best action (optimal/greedy policy) and that is the expected return from
# the episode for being in a given state.
#
# Notice that for the goal cell, that value is equal to the reward of a
# single turn for staying in that cell multiplied by the number of turns.

# %matplotlib inline
value_map = learner.qtable.max(axis=1)
plt.figure(figsize=(8,6))
plt.imshow(value_map.reshape(x,y))
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.title("Learned values of the environment", fontsize=18)
plt.tight_layout()
plt.show()

# The rendering instead shows the actual movements of an agent from the
# initial position untill when it reaches the goal (then it's interrupted
# because it's kind of boring to see it still).
#
# I usually use red pixel for the agent, white for the goal and custom colors to highlight the different regions of the environment (sometimes it's just to make it a bit more comprehensible).

# +
# Render one episode of the trained agent in the sandbox environment, saving
# one PNG per turn into .raw_gif/ for later gif assembly.
import time
# %matplotlib notebook
save = True
index = -1 # use the last (smallest) entry of the epsilon/alpha schedules
fig = plt.figure(figsize = (8,6))
initial = [9,9]
# initialize environment
state = initial
goal = np.array([0,3])
env = environment.Environment(x, y, state, goal, R0)
reward = 0
# RGB picture of the grid: background, goal in white, agent in red.
rgb_map = np.full((x,y,3), [199,234,70])/255.
rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
rgb_map[initial[0],initial[1],:] = np.array([225,30,100])/255.
plt.imshow(rgb_map) # show map
plt.title("Sandbox Env - Turn: %d"%(0))
plt.yticks([])
plt.xticks([])
fig.show()
time.sleep(0.75) # slow down for visualization purposes
if save: plt.savefig('.raw_gif/turn%.3d.png'%0)
# run episode
for step in range(0, episode_length):
    # find state index
    state_index = state[0] * y + state[1]
    # choose an action
    action = learner.select_action(state_index, epsilon[index])
    # the agent moves in the environment
    result = env.move(action)
    # Q-learning update (NOTE(review): the learner keeps being updated while
    # rendering — confirm this is intended rather than a pure-greedy replay)
    next_index = result[0][0] * y + result[0][1]
    learner.update(state_index, action, result[1], next_index, alpha[index], epsilon[index])
    # update state and reward
    reward += result[1]
    state = result[0]
    plt.cla() # clear current axis from previous drawings -> prevents matplotlib from slowing down
    rgb_map = np.full((x,y,3), [199,234,70])/255.
    rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
    rgb_map[state[0],state[1],:] = np.array([225,30,100])/255.
    plt.imshow(rgb_map)
    plt.title("Sandbox Env - Turn: %d "%(step+1))
    plt.yticks([]) # remove y ticks
    plt.xticks([]) # remove x ticks
    fig.canvas.draw() # update the figure
    time.sleep(0.75) # slow down for visualization purposes
    if save: plt.savefig('.raw_gif/turn%.3d.png'%(step+1))
    # stop once the goal cell is reached
    if state[0] == goal[0] and state[1] == goal[1]: break
# -

import os
filenames = os.listdir('.raw_gif') # get the names of all the files in .raw_gif directory
filenames.sort() # sort them by name (i.e. by turn in our specific case)

# # ! pip install imageio
import imageio
images = []
for filename in filenames:
    images.append(imageio.imread('.raw_gif/'+filename))
imageio.mimsave('play_episode.gif', images, duration=0.75) # make gif

# <img src="gifs/play_episode.gif">

# # Different environments

# ## Seaside

def train_agent_seaside(learner, alpha, epsilon, R0, episodes=2000, episode_length=50, x = 10, y = 10, goal = [9, 9], verbose = False, print_every=10):
    """Train `learner` in environment.SeasideEnv and return (learner, reward_log).

    Same loop as `train_agent`, except every episode starts from the fixed
    cell [0, 0], the environment is SeasideEnv and the default goal is
    [9, 9].  `reward_log` holds the per-turn average reward of each episode.
    """
    reward_log = []
    # perform the training
    for index in range(0, episodes):
        # start every episode from the corner cell (fixed, not random)
        initial = [0,0]
        # initialize environment
        state = initial
        env = environment.SeasideEnv(x, y, state, goal, R0)
        reward = 0
        # run episode
        for step in range(0, episode_length):
            # find state index
            state_index = state[0] * y + state[1]
            # choose an action
            action = learner.select_action(state_index, epsilon[index])
            # the agent moves in the environment
            result = env.move(action)
            # Q-learning update
            next_index = result[0][0] * y + result[0][1]
            learner.update(state_index, action, result[1], next_index, alpha[index], epsilon[index])
            # update state and reward
            reward += result[1]
            state = result[0]
        reward /= episode_length
        if verbose:
            if (index+1) % print_every == 0:
                print("Average reward of %.3f at episode %d"%(reward,index+1))
        reward_log.append(reward)
    return learner, reward_log

# Seaside experiment configuration.
episodes = 1000 # number of training episodes
episode_length = 50 # maximum episode length
x = 10 # horizontal size of the box
y = 10 # vertical size of the box
# NOTE(review): this `goal` is never passed to train_agent_seaside below,
# which therefore uses its default goal = [9, 9] — confirm which is intended.
goal = [0, 3] # objective point
discount = 1.-1./episode_length # exponential discount factor
softmax = True # set to true to use Softmax policy
sarsa = True # set to true to use the Sarsa algorithm
R0 = -0.1 # baseline negative reward per move

# +
# TODO alpha and epsilon profile
#alpha = np.ones(episodes) * 0.25
#epsilon = np.linspace(0.8, 0.01, episodes)
EPS_START = 0.8
eps = EPS_START # starting value of epsilon
# adaptive epsilon-greedy schedule: exponential decay by a factor 10^-2 over the run
epsilon = np.array(list(map(lambda i : eps*np.exp(-i*2*np.log(10)/episodes), np.arange(0,episodes+1))))
ALPHA_START = 1
a = ALPHA_START # starting value of alpha
# adaptive learning rate: exponential decay by a factor 10^-1 over the run
alpha = np.array(list(map(lambda i : a*np.exp(-i*1*np.log(10)/episodes), np.arange(0,episodes+1))))
# -

# initialize the agent
learner = agent.Agent((x * y), 5, discount, max_reward=1, softmax=softmax, sarsa=sarsa)
reload(environment)
learner, reward_log = train_agent_seaside(learner, alpha, epsilon, R0, episodes=episodes, verbose=True)

# %matplotlib inline
# Value map: best achievable Q-value for every cell of the grid.
value_map = learner.qtable.max(axis=1).reshape(x,y)
plt.figure(figsize=(8,6))
plt.imshow(value_map)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.title("Value map - Seaside Env", fontsize=16)
plt.show()

# +
# Render one episode in the seaside environment (frames go to .raw_gif/).
import time
# %matplotlib notebook
save = True
index = -1 # use the last (smallest) entry of the epsilon/alpha schedules
fig = plt.figure(figsize = (8,8))
initial = [0,0]
# initialize environment
state = initial
goal = np.array([9,9])
env = environment.SeasideEnv(x, y, state, goal, R0)
reward = 0
# RGB picture: blue background with the first five rows recolored yellow
# (presumably sand vs sea — confirm against SeasideEnv's layout);
# goal in white, agent in red.
rgb_map = np.full((x,y,3), [30,100,225])/255.
rgb_map[:5,:,:] = np.array([252, 225, 102])/255.
rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
rgb_map[initial[0],initial[1],:] = np.array([225,30,100])/255.
plt.imshow(rgb_map) # show map
plt.title("Seaside Env - Turn: %d"%(0), fontsize = 16)
plt.yticks([])
plt.xticks([])
fig.show()
time.sleep(0.75) # slow down for visualization purposes
if save: plt.savefig('.raw_gif/Seaside_turn%.3d.png'%0)
# run episode
for step in range(0, episode_length):
    # find state index
    state_index = state[0] * y + state[1]
    # choose an action
    action = learner.select_action(state_index, epsilon[index])
    # the agent moves in the environment
    result = env.move(action)
    # Q-learning update (the learner keeps being updated while rendering)
    next_index = result[0][0] * y + result[0][1]
    learner.update(state_index, action, result[1], next_index, alpha[index], epsilon[index])
    # update state and reward
    reward += result[1]
    state = result[0]
    plt.cla() # clear current axis from previous drawings -> prevents matplotlib from slowing down
    rgb_map = np.full((x,y,3), [30,100,225])/255.
    rgb_map[:5,:,:] = np.array([252, 225, 102])/255.
    rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
    rgb_map[state[0],state[1],:] = np.array([225,30,100])/255.
    plt.imshow(rgb_map)
    plt.title("Seaside Env - Turn: %d "%(step+1), fontsize = 16)
    plt.yticks([]) # remove y ticks
    plt.xticks([]) # remove x ticks
    fig.canvas.draw() # update the figure
    time.sleep(0.75) # slow down for visualization purposes
    if save: plt.savefig('.raw_gif/Seaside_turn%.3d.png'%(step+1))
    # stop once the goal cell is reached
    if state[0] == goal[0] and state[1] == goal[1]: break
# -

filenames = os.listdir('.raw_gif') # get the names of all the files in .raw_gif directory
filenames.sort() # sort them by name (i.e. by turn in our specific case)
images = []
for filename in filenames:
    images.append(imageio.imread('.raw_gif/'+filename))
imageio.mimsave('play_episode_seaside.gif', images, duration=0.75) # make gif

# <img src="gifs/play_episode_seaside.gif">

# ## Two bridges suspended on void

def train_agent_bridge(learner, alpha, epsilon, R0, episodes=2000, episode_length=50, x = 10, y = 10, verbose = False, print_every=10):
    """Train `learner` in environment.BridgeEnv and return (learner, reward_log).

    Same loop as `train_agent`, except every episode starts from a random row
    of the leftmost column and the environment is BridgeEnv, which takes no
    explicit goal (it defines its own internally).  `reward_log` holds the
    per-turn average reward of each episode.
    """
    reward_log = []
    # perform the training
    for index in range(0, episodes):
        # start from a random row of the leftmost column
        initial = [np.random.randint(x),0]
        # initialize environment
        state = initial
        env = environment.BridgeEnv(x, y, state, R0)
        reward = 0
        # run episode
        for step in range(0, episode_length):
            # find state index
            state_index = state[0] * y + state[1]
            # choose an action
            action = learner.select_action(state_index, epsilon[index])
            # the agent moves in the environment
            result = env.move(action)
            # Q-learning update
            next_index = result[0][0] * y + result[0][1]
            learner.update(state_index, action, result[1], next_index, alpha[index], epsilon[index])
            # update state and reward
            reward += result[1]
            state = result[0]
        reward /= episode_length
        if verbose:
            if (index+1) % print_every == 0:
                print("Average reward of %.3f at episode %d"%(reward,index+1))
        reward_log.append(reward)
    return learner, reward_log

# Bridge experiment configuration.
episodes = 500 # number of training episodes
episode_length = 50 # maximum episode length
x = 10 # horizontal size of the box
y = 10 # vertical size of the box
discount = 1.-1./episode_length # exponential discount factor
softmax = True # set to true to use Softmax policy
sarsa = True # set to true to use the Sarsa algorithm
R0 = -0.1 # baseline negative reward per move

# +
EPS_START = 0.8
eps = EPS_START # starting value of epsilon
# adaptive epsilon-greedy schedule: exponential decay by a factor 10^-2 over the run
epsilon = np.array(list(map(lambda i : eps*np.exp(-i*2*np.log(10)/episodes), np.arange(0,episodes+1))))
ALPHA_START = 1
a = ALPHA_START # starting value of alpha
# adaptive learning rate: exponential decay by a factor 10^-1 over the run
alpha = np.array(list(map(lambda i : a*np.exp(-i*1*np.log(10)/episodes), np.arange(0,episodes+1))))
# -

# initialize the agent
learner = agent.Agent((x * y), 5, discount, max_reward=1, softmax=softmax, sarsa=sarsa)
reload(environment)
learner, reward_log = train_agent_bridge(learner, alpha, epsilon, R0, episodes=episodes, verbose=True)

# Value map: best achievable Q-value for every cell of the grid.
value_map = learner.qtable.max(axis=1).reshape(x,y)
plt.figure(figsize=(8,6))
plt.imshow(value_map)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.title("Value map - Bridge Env", fontsize=18)
plt.tight_layout()
plt.show()

# +
# Render one episode in the bridge environment (frames go to .raw_gif/).
import time
# %matplotlib notebook
save = True
index = -1 # use the last (smallest) entry of the epsilon/alpha schedules
fig = plt.figure(figsize = (8,8))
initial = [6,0]
# initialize environment
state = initial
# goal is used only for drawing here; BridgeEnv takes no goal argument —
# NOTE(review): confirm [4, 9] matches the goal BridgeEnv defines internally.
goal = np.array([4,9])
env = environment.BridgeEnv(x, y, state, R0)
reward = 0
# RGB picture: green background, black centre block ("void") spanning rows
# 1-8 / columns 2-7, with brown strips on rows 0 and 9 (the two bridges);
# goal in white, agent in red.
rgb_map = np.full((x,y,3), [199,234,70])/255.
rgb_map[1:9,2:8,:] = np.array([0, 0, 0])/255.
rgb_map[0,2:8,:] = np.array([181,101,29])/255.
rgb_map[9,2:8,:] = np.array([181,101,29])/255.
rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
rgb_map[initial[0],initial[1],:] = np.array([225,30,100])/255.
plt.imshow(rgb_map) # show map
plt.title("Bridge Env - Turn: %d"%(0), fontsize = 16)
plt.yticks([])
plt.xticks([])
fig.show()
time.sleep(0.75) # slow down for visualization purposes
if save: plt.savefig('.raw_gif/Bridge_turn%.3d.png'%0)
# run episode
for step in range(0, episode_length):
    # find state index
    state_index = state[0] * y + state[1]
    # choose an action
    action = learner.select_action(state_index, epsilon[index])
    # the agent moves in the environment
    result = env.move(action)
    # Q-learning update (the learner keeps being updated while rendering)
    next_index = result[0][0] * y + result[0][1]
    learner.update(state_index, action, result[1], next_index, alpha[index], epsilon[index])
    # update state and reward
    reward += result[1]
    state = result[0]
    plt.cla() # clear current axis from previous drawings -> prevents matplotlib from slowing down
    rgb_map = np.full((x,y,3), [199,234,70])/255.
    rgb_map[1:9,2:8,:] = np.array([0, 0, 0])/255.
    rgb_map[0,2:8,:] = np.array([181,101,29])/255.
    rgb_map[9,2:8,:] = np.array([181,101,29])/255.
    rgb_map[goal[0],goal[1],:] = np.array([255,255,255])/255.
    rgb_map[state[0],state[1],:] = np.array([225,30,100])/255.
    plt.imshow(rgb_map)
    plt.title("Bridge Env - Turn: %d "%(step+1), fontsize = 16)
    plt.yticks([]) # remove y ticks
    plt.xticks([]) # remove x ticks
    fig.canvas.draw() # update the figure
    time.sleep(0.75) # slow down for visualization purposes
    if save: plt.savefig('.raw_gif/Bridge_turn%.3d.png'%(step+1))
    # stop once the goal cell is reached
    if state[0] == goal[0] and state[1] == goal[1]: break
# -

filenames = os.listdir('.raw_gif') # get the names of all the files in .raw_gif directory
filenames.sort() # sort them by name (i.e. by turn in our specific case)
images = []
for filename in filenames:
    images.append(imageio.imread('.raw_gif/'+filename))
imageio.mimsave('play_episode_bridge.gif', images, duration=0.75) # make gif

# <img src="gifs/play_episode_bridge.gif">
Nicola_Dainese_Ex5/Nicola_Dainese_Ex5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: bm
#     language: python
#     name: bm
# ---

import networkx as nx
import numpy as np
import scipy
import scipy.sparse  # FIX: load the subpackage explicitly; `import scipy` alone does not guarantee scipy.sparse is importable

adjacency = scipy.sparse.random(100, 100)
# FIX: nx.from_numpy_matrix was removed in networkx 3.0; from_numpy_array is
# the drop-in replacement.  .toarray() replaces the deprecated .A attribute.
G = nx.from_numpy_array(adjacency.toarray())

import matplotlib.pyplot as plt

# Map an edge weight to its sign indicator.
act_fn = lambda x: "-" if x < 0 else "+"
act_fn(-1)

from datetime import datetime
import pandas as pd


def generate_network(num_nodes, name):
    """Generate a random sparse graph and save its signed edge list.

    Writes a tab-separated file `name` with one row per edge:
    source node, destination node and a "+"/"-" sign indicator for the edge
    weight (no header, no index column).

    NOTE(review): scipy.sparse.random draws its nonzero values uniformly from
    [0, 1), so with the default distribution every indicator comes out "+" —
    confirm whether negative weights were intended.
    """
    adjacency = scipy.sparse.random(num_nodes, num_nodes).toarray()
    # FIX: from_numpy_matrix and Graph.selfloop_edges() were removed in
    # networkx 3.0; these are the supported equivalents.
    graph = nx.from_numpy_array(adjacency)
    graph.remove_edges_from(nx.selfloop_edges(graph))
    act_fn = lambda x: "-" if x < 0 else "+"
    # FIX: DataFrame.append was removed in pandas 2.0 (and appending row by
    # row is quadratic); collect the rows first and build the frame once.
    rows = [{"Source": source, "Dest": dest, "Indicator": act_fn(data['weight'])}
            for source, dest, data in graph.edges.data()]
    network_df = pd.DataFrame(rows, columns=["Source", "Dest", "Indicator"])
    with open(name, "w") as network_file:
        network_df.to_csv(network_file, sep='\t', index=False, header=False)


# +
n_entities = 20      # nodes per generated network
patients = [10, 20]  # one network file is generated per entry (index = class label)


def get_network_strings():
    """Generate one network file per class in `patients` and return the file names."""
    exp_name = datetime.now().strftime("%H-%M-%S")
    network_strings = list()
    for idx, _ in enumerate(patients):
        network_string = "exp_name_{}_class_{}".format(exp_name, idx)
        generate_network(num_nodes=n_entities, name=network_string)
        network_strings.append(network_string)
    return network_strings
# -

get_network_strings()
sandbox/Data-Generation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="jvh6jVccVuDz"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, preprocessing
from sklearn.naive_bayes import GaussianNB, MultinomialNB

# + id="JUx7jPpPlxpx"
# Weather/play dataset with categorical feature columns and a 'Play' target.
dataset = pd.read_csv("./Dataset1.csv")
# print(dataset)

# + id="4ZiqCqJGmhPD"
# One-hot encode every feature column and label-encode the 'Play' target.
label_encoder = preprocessing.LabelEncoder()
Y_row = None
# Iterate over a snapshot of the column names: the frame is mutated
# (columns dropped, dummy columns appended) inside the loop body.
for data_heading in list(dataset.columns):
    if data_heading != 'Play':
        print(f"\n\nHeading :- {data_heading}")
        dummy = pd.get_dummies(dataset[data_heading])
        # Drop the raw categorical column: MultinomialNB cannot consume
        # strings, only the numeric one-hot columns.
        dataset = dataset.drop([data_heading], axis=1)
        dataset = pd.concat([dataset, dummy], axis=1)
    else:
        Y_row = label_encoder.fit_transform(dataset[data_heading])
        dataset = dataset.drop([data_heading], axis=1)
        # print(dataset)

# + id="Jg-gJcw4woUr"
print(dataset)
print(Y_row)

# + id="3KWC9zFRxGjB"
from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(dataset, Y_row, test_size=0.2, random_state=42)
model = MultinomialNB()
model.fit(X_train, Y_train)
Y_predicted = model.predict(X_test)
print(X_test)
print(Y_predicted)

# + id="lKSMvEVSzOi5"
from sklearn import metrics

print(f"Accuracy is :- {metrics.accuracy_score(Y_test, Y_predicted)}")

# print precision and recall
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

precision = precision_score(Y_test, Y_predicted)
recall = recall_score(Y_test, Y_predicted)
print(f"precision :- {precision}")
print(f"recall :- {recall}")

# + id="AUUSzFtL3oW-"
# Manual spot checks: each row is the 11 one-hot features in dataset order.
output = model.predict([[0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0]])
print(f"final prediction :- {output}")

output = model.predict([[1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0]])
print(f"final prediction :- {output}")

output = model.predict([[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]])
print(f"final prediction :- {output}")
LAB 3/Lab_3_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # permutation_importances as reusable function

# ## function code

# nuclio: ignore
import nuclio

# +
import numpy as np
import pandas as pd
import numbers
import sklearn
from sklearn.base import clone
from sklearn.utils import check_random_state
import matplotlib.pyplot as plt
import seaborn as sns
from cloudpickle import load

from mlrun.execution import MLClientCtx
from mlrun.datastore import DataItem
from mlrun.artifacts import get_model, PlotArtifact

from typing import Union, Callable, List


def _get_n_samples_bootstrap(n_samples, max_samples) -> int:
    """get the number of samples in a bootstrap sample

    returns the total number of samples to draw for the bootstrap sample

    private api in sklearn >= v0.24, taken from sklearn.ensemble._forest.py

    :param n_samples:   Number of samples in the dataset.
    :param max_samples: The maximum number of samples to draw from the total
                        available:
                        - if float, this indicates a fraction of the total
                          and should be the interval `(0, 1)`;
                        - if int, this indicates the exact number of samples;
                        - if None, this indicates the total number of samples.
    :raises ValueError: if an int/float max_samples is out of range
    :raises TypeError:  if max_samples is neither None, int nor float
    """
    if max_samples is None:
        return n_samples

    if isinstance(max_samples, numbers.Integral):
        if not (1 <= max_samples <= n_samples):
            msg = "`max_samples` must be in range 1 to {} but got value {}"
            raise ValueError(msg.format(n_samples, max_samples))
        return max_samples

    if isinstance(max_samples, numbers.Real):
        if not (0 < max_samples < 1):
            msg = "`max_samples` must be in range (0, 1) but got value {}"
            raise ValueError(msg.format(max_samples))
        return int(round(n_samples * max_samples))

    msg = "`max_samples` should be int or float, but got type '{}'"
    raise TypeError(msg.format(type(max_samples)))


def _get_unsampled_ix(random_state, n_samples: int) -> np.array:
    """future-proof get unsampled indices

    Re-draws the bootstrap sample for a tree (seeded by its random_state)
    and returns the indices that were never drawn (the out-of-bag rows).
    """
    n_bootstrap = _get_n_samples_bootstrap(n_samples, n_samples)
    random_instance = check_random_state(random_state)
    sample_indices = random_instance.randint(0, n_samples, n_bootstrap)
    sample_counts = np.bincount(sample_indices, minlength=n_samples)
    return np.arange(n_samples)[sample_counts == 0]


def _oob_classifier_accuracy(rf, X_train, y_train) -> float:
    """Compute out-of-bag (OOB) accuracy for a scikit-learn forest classifier.

    https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L425

    Each tree votes (with predict_proba) only on the rows it did not see
    during fitting; the per-row argmax of the accumulated votes is compared
    against the ground truth.
    """
    X = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
    y = y_train.values if isinstance(y_train, pd.Series) else y_train

    n_samples = len(X)
    n_classes = len(np.unique(y))
    predictions = np.zeros((n_samples, n_classes))
    for tree in rf.estimators_:
        unsampled_indices = _get_unsampled_ix(tree.random_state, n_samples)
        tree_preds = tree.predict_proba(X[unsampled_indices, :])
        predictions[unsampled_indices] += tree_preds

    predicted_class_indexes = np.argmax(predictions, axis=1)
    predicted_classes = [rf.classes_[i] for i in predicted_class_indexes]

    oob_score = np.mean(y == predicted_classes)
    return oob_score


def permutation_importances(
    context: MLClientCtx,
    model: DataItem,
    dataset: DataItem,
    labels: str,
    figsz=(10, 5),
    plots_dest: str = "plots",
    fitype: str = "permute"
) -> None:
    """calculate change in metric

    type 'permute' uses a pre-estimated model
    type 'dropcol' uses a re-estimated model

    Results are logged as a bar-plot artifact and a table artifact on the
    context; nothing is returned.

    :param context:    the function's execution context
    :param model:      a trained model
    :param dataset:    features and ground truths, regression targets
    :param labels:     name of the ground truths column
    :param figsz:      matplotlib figure size
    :param plots_dest: path within artifact store
    :param fitype:     feature-importance type, 'permute' or 'dropcol'
    :raises ValueError: if fitype is neither 'permute' nor 'dropcol'
    """
    model_file, model_data, _ = get_model(model.url, suffix='.pkl')
    # close the pickle file deterministically instead of leaking the handle
    with open(str(model_file), "rb") as pkl:
        model = load(pkl)

    X = dataset.as_df()
    y = X.pop(labels)
    header = X.columns

    # this will be parametrized next version, and include regression
    metric = _oob_classifier_accuracy

    baseline = metric(model, X, y)

    imp = []
    for col in X.columns:
        # BUG FIX: the original compared with `is`, i.e. string identity,
        # which is implementation-dependent; use equality.
        if fitype == "permute":
            save = X[col].copy()
            X[col] = np.random.permutation(X[col])
            m = metric(model, X, y)
            X[col] = save
            imp.append(baseline - m)
        elif fitype == "dropcol":
            X_ = X.drop(col, axis=1)
            model_ = clone(model)
            # BUG FIX: `random_state` was an undefined name here; reuse the
            # fitted model's own seed (None if it has none).
            model_.random_state = getattr(model, "random_state", None)
            model_.fit(X_, y)
            o = model_.oob_score_
            imp.append(baseline - o)
        else:
            raise ValueError("unknown fitype, only 'permute' or 'dropcol' permitted")

    # create a feature importance table with desired labels
    zipped = zip(imp, header)
    feature_imp = pd.DataFrame(sorted(zipped), columns=["importance", "feature"])
    feature_imp.sort_values(by="importance", ascending=False, inplace=True)

    plt.clf()
    plt.figure(figsize=figsz)
    sns.barplot(x="importance", y="feature", data=feature_imp)
    plt.title(f"feature importances-{fitype}")
    plt.tight_layout()

    context.log_artifact(
        PlotArtifact(f"feature importances-{fitype}", body=plt.gcf()),
        local_path=f"{plots_dest}/feature-permutations.html")
    context.log_dataset(f"feature-importances-{fitype}-tbl", df=feature_imp, index=False)


# +
# nuclio: end-code
# -

# ## save function

# +
from mlrun import code_to_function
from mlrun.platforms.other import auto_mount

gpus = False

# create job function object from notebook code
fn_params = {
    "name":        "feature-perms",
    "handler":     "permutation_importances",
    "kind":        "job",
    "image":       "mlrun/ml-models" if not gpus else "mlrun/ml-models-gpu",
    "description": "estimate feature importances using permutations",
    "categories":  ["analysis"],
    "labels":      {"author": "yjb"}
}

perms_fn = code_to_function(**fn_params)
perms_fn.apply(auto_mount())
perms_fn.export("function.yaml")
# -

# ## tests

from mlrun import import_function
from mlrun import NewTask, mlconf

# #### get some data

# +
data_url = "https://raw.githubusercontent.com/parrt/random-forest-importances/master/notebooks/data/rent.csv"

fn = import_function("hub://arc_to_parquet", "a2p")
fn.apply(auto_mount())

params = {
    "name":   "tasks arc-to-parq",
    "params": {"key": "rent", "stats": True, "file_ext": "csv"}
}

acquire_run = fn.run(NewTask(**params),
                     inputs={"archive_url": data_url},
                     artifact_path=mlconf.artifact_path)
# -

# #### train a model

# +
fn = import_function("hub://sklearn_classifier", "skrf")
fn.apply(auto_mount())

# define model
params = {
    "name":   "tasks random forest",
    "params": {
        "sample":                -5_000,  # 5k random rows,
        "model_pkg_class":       "sklearn.ensemble.RandomForestClassifier",
        "label_column":          "interest_level",
        "CLASS_n_estimators":    100,
        "CLASS_min_samples_leaf": 1,
        "CLASS_n_jobs":          -1,
        "CLASS_oob_score":       True}
}

train_run = fn.run(NewTask(**params),
                   inputs={"dataset": acquire_run.outputs["rent"]},
                   artifact_path=mlconf.artifact_path)
# -

from IPython.display import HTML
HTML(filename=train_run.outputs['feature-importances'])

data = acquire_run.outputs["rent"]
labels = "interest_level"
model = train_run.outputs["model"]

fi_perms = perms_fn.run(
    NewTask(params={"labels": labels, "plots_dest": "plots"}),
    inputs={"model": model, "dataset": data},
    artifact_path=mlconf.artifact_path)

from IPython.display import HTML
HTML(filename=fi_perms.outputs['feature importances-permute'])
feature_perms/feature_perms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This tests algorithms to remove cosmic rays from multiepoch spectra (in particular from SDSS stripe 82
# spectra, which are too many for manual removal)

# Created 2021 May 10 by E.S.

# +
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from astropy.stats import sigma_clip

# %matplotlib inline
# -

file_list = glob.glob("/Users/bandari/Documents/git.repos/rrlyrae_metallicity/" +\
                      "notebooks_for_development/data/sdss_stripe_82_data/input/" + "*")

# +
# find all parent names (i.e., one name for each target, whether or not multiepoch observations were made)

parent_list = list(set([i.split("g00")[0] for i in file_list]))

# +
# initialize list to hold single-epoch spectra names

## TBD
# -

def plot_result(spec0, spec1):
    """Flag cosmic-ray hits in one epoch pair and save a diagnostic plot.

    Residuals between the two epochs are sigma-clipped (asymmetrically, so
    the sign tells which spectrum carries the positive cosmic-ray spike);
    clipped points and their immediate neighbours are masked, and a plot of
    residuals, line-blanked flux and masked flux is written to a randomly
    named junk_*.png file.

    :param spec0: DataFrame with "wavel" and "flux" columns (first epoch)
    :param spec1: DataFrame with "wavel" and "flux" columns (second epoch);
                  assumed to share spec0's wavelength grid — TODO confirm
    """
    # remove from consideration the regions around the absorption lines, which change with time and can
    # be misidentified as a cosmic ray hit (a spectrum with an actual hit will have to be discarded manually)
    # copy=True guarantees the NaN blanking below never writes through a
    # view into the caller's DataFrames
    spec0_flux_copy = spec0["flux"].to_numpy(copy=True)
    spec1_flux_copy = spec1["flux"].to_numpy(copy=True)

    half_width = 20  # half-window (same units as "wavel") around each line

    # Ca K, Ca H / H-epsilon, H-delta, H-gamma, H-beta
    cond_1 = np.logical_and(spec0["wavel"] > 3933.66-half_width, spec0["wavel"] < 3933.66+half_width)
    cond_2 = np.logical_and(spec0["wavel"] > 3970.075-half_width, spec0["wavel"] < 3970.075+half_width)
    cond_3 = np.logical_and(spec0["wavel"] > 4101.71-half_width, spec0["wavel"] < 4101.71+half_width)
    cond_4 = np.logical_and(spec0["wavel"] > 4340.472-half_width, spec0["wavel"] < 4340.472+half_width)
    cond_5 = np.logical_and(spec0["wavel"] > 4861.29-half_width, spec0["wavel"] < 4861.29+half_width)

    for cond in (cond_1, cond_2, cond_3, cond_4, cond_5):
        spec0_flux_copy[cond] = np.nan
        spec1_flux_copy[cond] = np.nan

    resids = np.subtract(spec0_flux_copy, spec1_flux_copy)

    # sigma clip
    # (note sigma lower is a large number, to keep track of which spectrum has the (+) cosmic ray)
    # NOTE(review): keyword renamed from 'iters' to 'maxiters' in astropy 3.0
    filtered_data = sigma_clip(resids, sigma_lower=50, sigma_upper=5, maxiters=1)

    # also remove points adjacent to those masked, by rolling spectra by two elements in each direction,
    # subtracting them and finding where difference is nan
    diff_roll_p1 = np.subtract(filtered_data, np.roll(filtered_data, 1))
    diff_roll_p2 = np.subtract(filtered_data, np.roll(filtered_data, 2))
    diff_roll_n1 = np.subtract(filtered_data, np.roll(filtered_data, -1))
    diff_roll_n2 = np.subtract(filtered_data, np.roll(filtered_data, -2))

    mark_bad_array = np.subtract(np.subtract(diff_roll_p1, diff_roll_p2),
                                 np.subtract(diff_roll_n1, diff_roll_n2))
    # BUG FIX: the mask was assigned to 'mask_bad_pre_line_restore' but then
    # used under the undefined name 'mask_bad', raising a NameError.
    mask_bad = np.ma.getmask(mark_bad_array)

    masked_flux_0 = np.ma.masked_array(spec0["flux"], mask=mask_bad)
    masked_wavel_0 = np.ma.masked_array(spec0["wavel"], mask=mask_bad)
    masked_flux_1 = np.ma.masked_array(spec1["flux"], mask=mask_bad)
    masked_wavel_1 = np.ma.masked_array(spec1["wavel"], mask=mask_bad)

    num_removed = np.subtract(len(resids), np.isfinite(filtered_data).sum())

    plt.clf()
    fig = plt.figure(figsize=(24, 9))
    # plot the function's own arguments, not the module-level globals the
    # original reached for (df_single_0 happens to equal spec0 at the call
    # site, but that coupling was accidental)
    plt.plot(spec0["wavel"], resids, color="red")
    plt.plot(spec0["wavel"], spec0_flux_copy)
    plt.plot(masked_wavel_0, masked_flux_0, color="k")
    #plt.title("pts removed: " + str(num_removed))
    #plt.show()
    string_rand = str(np.random.randint(low=0, high=10000))
    plt.savefig("junk_"+string_rand+".png")

    return

# +
# find the file names of spectra corresponding to each parent; if there is only 1, ignore;
# if >= 2, do median comparison to flag it for cosmic rays

for t in range(0, 60):  # len(parent_list)):

    #print("----------")
    #print(t)

    matching = list(filter(lambda x: parent_list[t] in x, file_list))

    if (len(matching) == 1):
        continue

    elif (len(matching) == 2):
        df_single_0 = pd.read_csv(matching[0], names=["wavel", "flux", "noise"], delim_whitespace=True)
        df_single_1 = pd.read_csv(matching[1], names=["wavel", "flux", "noise"], delim_whitespace=True)

        plot_result(df_single_0, df_single_1)

    elif (len(matching) == 3):
        # NOTE(review): three-epoch targets are read but never compared —
        # presumably a pairwise/median comparison is still TODO here.
        df_single_0 = pd.read_csv(matching[0], names=["wavel", "flux", "noise"], delim_whitespace=True)
        df_single_1 = pd.read_csv(matching[1], names=["wavel", "flux", "noise"], delim_whitespace=True)
        df_single_2 = pd.read_csv(matching[2], names=["wavel", "flux", "noise"], delim_whitespace=True)

    elif (len(matching) > 3):
        continue

# +
# read in spectra for which continuum has been calculated

stem_s82_norm = "/Users/bandari/Documents/git.repos/rrlyrae_metallicity/rrlyrae_metallicity/realizations_output/norm/"
notebooks_for_development/cosmic_ray_removal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # **How to plot the map of India using Python**
#
# > In this notebook, we will learn an easy way to plot a state-wise map of India. The python libraries required are :
# 1. pandas
# 2. matplotlib
# 3. geopandas
#
# > We know that pandas and matplotlib are commonly used python libraries for data analysis and plotting graphs. Now lets see what is *geopandas*?
#
# > **GeoPandas** is an open source project to make working with geospatial data in python easier. GeoPandas extends the datatypes used by **'pandas'** to allow spatial operations on geometric types. Geometric operations are performed by **'shapely'**. Geopandas further depends on 'fiona' for file access and **'descartes'** and **'matplotlib'** for plotting. More details can be found [here](https://geopandas.org/)
#
# > How to install geopandas :
# > pip install geopandas
#
# > Don't forget to install all other dependencies required for geopandas!
#
# > Now that we are all set, one more thing that we require is a *shape file*.
# > > A **shapefile** is a simple, nontopological format for storing the geometric location and attribute information of geographic features. Geographic features in a shapefile can be represented by points, lines, or polygons (areas)
#
# > > You can download the shape file to plot the map of India with state boundaries from [this link](https://map.igismap.com/share-map/export-layer/Indian_States/06409663226af2f3114485aa4e0a23b4)
#
# > We are going to plot on the map - the number of **Paramedical Staffs** available at various district hospitals in India as of 31st March 2019. The data was collected from Open Government Data, India. Follow [this link](https://data.gov.in/) to know more.

# + [markdown]
# # **1) Importing libraries**

# +
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt

# + [markdown]
# # **2) Reading data file**

# +
# Staff counts per state; expected columns include 'States' and 'Staff'.
df = pd.read_excel('../input/paramedical-staff-in-india/paramed/paramedical_staff.xlsx')
df.head()

# + [markdown]
# # **3) Reading shape file**

# +
# State-boundary polygons; 'st_nm' holds the state name used for joining.
shp_gdf = gpd.read_file('../input/india-gis-data/India States/Indian_states.shp')
shp_gdf.head()

# + [markdown]
# # **4) Merging data file and shape file based on names of Indian states**

# +
# Left join on state name: geometry from the shapefile, staff counts from
# the spreadsheet. States whose names do not match end up with NaN staff.
merged = shp_gdf.set_index('st_nm').join(df.set_index('States'))
merged.head()

# + [markdown]
# # **5) Plotting map of India**
#
# > Now we are going to plot a *choropleth map* to represent the number of paramedical staff in India.
#
# > A **choropleth map** is a type of thematic map in which areas are shaded or patterned in proportion to a statistical variable that represents an aggregate summary of a geographic characteristic within each area, such as population density or per-capita income.

# +
# Choropleth: colour each state polygon by its 'Staff' value.
fig, ax = plt.subplots(1, figsize=(12, 12))
ax.axis('off')
ax.set_title('Paramedical Staffs at District Hospitals in India as of 31st March 2019',
             fontdict={'fontsize': '15', 'fontweight' : '3'})
fig = merged.plot(column='Staff', cmap='RdYlGn', linewidth=0.5, ax=ax, edgecolor='0.2',legend=True)

# + [markdown]
# > Here we can see that the states in **red** have less number of staff and those in **green** have more number.
how-to-plot-map-of-india-using-python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''buddhalight'': conda)'
#     name: python3
# ---

# # Lab 8-2: XOR with Neural Net
#
# **<NAME> 2021**
#
# **[Deep Learning By Torch] End to End study scripts of Deep Learning by implementing code practice with Pytorch.**
#
# If you have an any issue, please PR below.
#
# [[Deep Learning By Torch] - Github @JonyChoi](https://github.com/jonychoi/Deep-Learning-By-Torch)
#
# Here, we are going to learn about how the multi-layer perceptron can solve the XOR Problem.

# ## Imports

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# +
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# fix the seed for reproducible runs
torch.manual_seed(1)
if device == 'cuda':
    torch.cuda.manual_seed_all(1)
# -

# ## Define the XOR Problem

X = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]).to(device)
Y = torch.FloatTensor([[0], [1], [1], [0]]).to(device)

# ## Create the Multi Layer Perceptron (Multi Linear Layer)

# nn Layers
linear1 = nn.Linear(2, 2)
linear2 = nn.Linear(2, 1)
sigmoid = nn.Sigmoid()

#model
model = nn.Sequential(linear1, sigmoid, linear2, sigmoid).to(device)

#define cost & optimizer
criterion = nn.BCELoss().to(device)
optimizer = optim.SGD(model.parameters(), lr=1)

# ## Train the multi layer perceptron(Neural Network)
#
# We can say multi layer(over 1 layer) perceptron with backpropagation as **'Neural Network'**.
#
# Actually, the MLP (Multi Layer Perceptron) is the subset of the DNN (Neural Network).
#
# About the DNN, NN, and MLP, please check additional writes at 08.0 - About the Neural Network.md

for step in range(10001):
    #prediction
    pred = model(X)

    #cost
    cost = criterion(pred, Y)

    #Reduce cost
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()

    if step % 1000 == 0:
        # NOTE(review): pred is already the sigmoid output of the network, so
        # this prints sigmoid(sigmoid(z)); kept as-is to preserve the
        # original printout.
        result = sigmoid(pred).squeeze().detach().cpu().numpy()
        print('Epoch:{:2d}/10000, result: {} cost: {:.6f}'.format(step, result, cost.item()))

# ## Results
#
# As below, the prediction shows the multi layer perceptron solved the XOR problem.
#
# We can say this as ***non-linear*** function, that multi layering can act as non-linear function, otherwise the single layer perceptron can only be as ***linear*** function.

# +
#Accuracy computation
#True if hypothesis > 0.5 else False
with torch.no_grad():
    prediction = model(X)
    predicted = (prediction > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('Prediction: {} \nPredicted: {}\nAccuracy: {}'.format(prediction.squeeze().detach().cpu().numpy(), predicted.squeeze().detach().cpu().numpy(), accuracy))
# -

# ## High-level Implementation with ```nn.Module```

class XOR_MultiLayer(nn.Module):
    """Two-layer perceptron (2 -> 2 -> 1) with sigmoid activations for XOR."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # BUG FIX: the original built nn.Sequential from the *module-level*
        # linear1/linear2/sigmoid objects, so the layers registered on this
        # instance (whose parameters the optimizer below receives) were never
        # used in the forward pass and the model could not learn.
        return self.sigmoid(self.linear2(self.sigmoid(self.linear1(x))))

# move the model to the same device as X and Y (the original left it on CPU,
# which fails when CUDA is available)
model = XOR_MultiLayer().to(device)
optimizer = optim.SGD(model.parameters(), lr=1)

# ### Take a Moment!
#
# just writing as ```nn.Sigmoid(pred)``` makes an error of "TypeError: __init__() takes 1 positional argument but 2 were given."
#
# => You are using it as an instance method so you must include self as the first argument
#
# https://stackoverflow.com/questions/50275814/sigmoid-takes-1-positional-argument-but-2-were-given
#
# ---
#
# So we should use torch.sigmoid if we want to apply the sigmoid.

# +
nb_epochs = 10001
for epoch in range(nb_epochs):
    #prediction
    pred = model(X)

    #cost function
    cost = F.binary_cross_entropy(pred, Y)

    #Reduce cost
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        # NOTE(review): double sigmoid again, kept for output parity with the
        # first training loop above.
        result = torch.sigmoid(pred).squeeze().detach().cpu().numpy()
        print('Epoch: {:2d}/10000, result: {}, cost: {:.6f}'.format(epoch, result, cost.item()))

# +
#Accuracy computation
#True if hypothesis > 0.5 else False
with torch.no_grad():
    prediction = model(X)
    predicted = (prediction > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('Prediction: {} \nPredicted: {}\nAccuracy: {}'.format(prediction.squeeze().detach().cpu().numpy(), predicted.squeeze().detach().cpu().numpy(), accuracy))
09. XOR to Neural Network/9-3. XOR with Neural Net.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import argparse from typing import Union, List import cv2 import numpy import scipy from matplotlib import pyplot as plt from matplotlib.patches import Circle from scipy import ndimage from demo import Demonstration from ply import write_xyz_rgb_as_ply, Ply from trainer import Trainer # %matplotlib inline # + pycharm={"name": "#%%\n"} def read_image(path: Union[str, List[str]], img_type: str, history=False): """ Reads image into numpy array @param path: Path to image @param img_type: One of 'color', 'depth' @param history: Whether or not to read history for depth images @return: Array containing image contents """ # This is repeated several times in the code and should ideally be refactored into a function if img_type == "color": return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB) elif img_type == "depth": if history: return numpy.stack([cv2.imread(file, -1).astype(numpy.float32)/100000 for file in path], axis=-1) else: return numpy.stack([cv2.imread(path, -1)]*3, axis=-1).astype(numpy.float32)/100000 return None # + pycharm={"name": "#%% Read test color and depth height maps\n"} # Workspace limits workspace_limits = numpy.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.5]]) # Test images test_color_heightmap = read_image("logs/2021-02-28-17-19-14_Real-Stack-Imitation-Masked-Testing-Three-Step-History/data/color-heightmaps/000015.0.color.png", 'color') # test_depth_heightmap = read_image("logs/2021-02-28-17-19-14_Real-Stack-Imitation-Masked-Testing-Three-Step-History/data/depth-heightmaps/000015.0.depth.png", 'depth') test_depth_heightmap = read_image(["logs/2021-02-28-17-19-14_Real-Stack-Imitation-Masked-Testing-Three-Step-History/data/depth-heightmaps/000015.0.depth.png", 
"logs/2021-02-28-17-19-14_Real-Stack-Imitation-Masked-Testing-Three-Step-History/data/depth-heightmaps/000014.0.depth.png", "logs/2021-02-28-17-19-14_Real-Stack-Imitation-Masked-Testing-Three-Step-History/data/depth-heightmaps/000013.0.depth.png"], 'depth', True) print(test_depth_heightmap.shape) # + # stack_snapshot_file = 'logs/base_models/best_unstacking/snapshot.reinforcement_trial_success_rate_best_value.pth' # stack_snapshot_file = 'logs/base_models/rows_hist_densenet/snapshot.reinforcement_action_efficiency_best_value.pth' # stack_snapshot_file = 'logs/base_models/stacking_hist_densenet/snapshot.reinforcement_action_efficiency_best_value.pth' stack_snapshot_file = 'logs/base_models/unstacking_hist_densenet/snapshot.reinforcement_action_efficiency_best_value.pth' # stack_snapshot_file = 'logs/base_models/vertical_square_hist_densenet/snapshot.reinforcement_trial_success_rate_best_value.pth' # policy_name = 'row' # policy_name = 'stack' policy_name = 'unstack' # policy_name = 'square' # + pycharm={"name": "#%%\n"} # Demo files demo = Demonstration("logs/demos/stack_demos", 1, None) demo_color_heightmap, demo_depth_heightmap = demo.get_heightmaps("place", 11) stack_trainer = Trainer(method='reinforcement', push_rewards=True, future_reward_discount=0.5, is_testing=True, snapshot_file=stack_snapshot_file, force_cpu=False, goal_condition_len=0, place=True, pretrained=True, flops=False, network='densenet', common_sense=True, place_common_sense=True, show_heightmap=False, place_dilation=0.01, common_sense_backprop=True, trial_reward='spot', num_dilation=0) # + pycharm={"name": "#%% Compute index of demo action\n"} # get demo action index vector action_vector = demo.action_dict[2][2] # convert rotation angle to index best_rot_ind = numpy.around((numpy.rad2deg(action_vector[-2]) % 360) * 16 / 360).astype(int) # test_rot_ind = 15 # convert robot coordinates to pixel workspace_pixel_offset = workspace_limits[:2, 0] * -1 * 1000 best_action_xy = ((workspace_pixel_offset + 
1000 * action_vector[:2]) / 2).astype(int)
print(best_action_xy)
print(best_rot_ind)

# + pycharm={"name": "#%% Compute demo features\n"}
# Forward pass on the demonstration heightmaps, keeping the per-pixel action
# feature maps instead of only Q-values (demo_mask=True presumably masks
# pixels outside the demo object region -- TODO confirm against
# stack_trainer.forward).
_, _, demo_features = stack_trainer.forward(demo_color_heightmap, demo_depth_heightmap, is_volatile=True, keep_action_feat=True, demo_mask=True)[:3]
# Masked array -> dense array, zeros at masked pixels.
demo_features = demo_features.filled(0.0)

# + pycharm={"name": "#%% Compute test features\n"}
# Same forward pass for the test scene.
_, _, test_features = stack_trainer.forward(test_color_heightmap, test_depth_heightmap, is_volatile=True, keep_action_feat=True, demo_mask=True)[:3]
test_features = test_features.filled(0.0)

# + pycharm={"name": "#%% Compute rematch distance\n"}
# Keep only the demo features at the demo's best rotation index.
demo_features_rot = demo_features[best_rot_ind,:,:,:]
# test_features_rot = test_features[test_rot_ind,:,:,:]
# Compute mask
# A pixel whose entire feature vector is zero was masked out above.
demo_mask = (demo_features_rot == 0).all(axis=0)
# test_mask = (test_features_rot == 0).all(axis=0)
test_mask = (test_features==0).all(axis=1)

# + pycharm={"name": "#%% Compute match distance\n"}
# match_dist = numpy.zeros([16, 224, 224])
# Squared L2 distance between the demo feature vector at the demo's best
# action pixel and every (rotation, y, x) feature vector of the test scene.
match_dist = numpy.sum(numpy.square(numpy.expand_dims(demo_features_rot[:, best_action_xy[1], best_action_xy[0]],(0,2,3)) - test_features), axis=1)
# Masked test pixels get the current maximum so they cannot win the argmin.
match_dist[test_mask] = numpy.max(match_dist)
matched_action = numpy.unravel_index(numpy.argmin(match_dist), (16, 224, 224))
test_rot_ind = matched_action[0]
test_features_rot = test_features[test_rot_ind,:,:,:]
test_mask_rot = test_mask[test_rot_ind,:,:]

# + pycharm={"name": "#%% Plot match distance\n"}
fig, ax = plt.subplots(1)
ax.imshow(match_dist[test_rot_ind,:,:], cmap="gray")
# matched_action is (rot, row, col); Circle takes (x, y) = (col, row).
circle = Circle((matched_action[2], matched_action[1]))
ax.add_patch(circle)
print(matched_action)

# + pycharm={"name": "#%%\n"}
# For every test pixel, find its nearest-neighbour pixel (in feature space)
# in the demo map and record how far that match lands from the demo's best
# action pixel. O(224^2) nearest-neighbour searches -- slow but offline.
rematch_squared_distance = numpy.zeros([224, 224])
for i in range(0, 224):
    for j in range(0, 224):
        x = numpy.expand_dims(test_features_rot[:,i,j], (1,2))
        displacement = demo_features_rot - x
        distances = numpy.sum(numpy.square(displacement), axis=0)
        # Smooth the distance field before taking the argmin.
        distances = ndimage.gaussian_filter(distances, sigma=(3,3))
        # Exclude masked demo pixels from the argmin.
        distances[demo_mask] = numpy.max(distances) * 1.1
        match_index = numpy.unravel_index(numpy.argmin(distances), (224, 224))
        # best_action_xy[[1, 0]] swaps (x, y) -> (row, col) to match match_index.
        rematch_squared_distance[i,j] = numpy.sum(numpy.square(match_index - best_action_xy[[1, 0]]))
rematch_distance = numpy.sqrt(rematch_squared_distance)

# + pycharm={"name": "#%% Generate demo plot\n"}
fig, ax = plt.subplots(1)
ax.imshow(demo_color_heightmap)
circle = Circle(best_action_xy)
ax.add_patch(circle)
# fig, ax = plt.imshow(demo_color_heightmap)

# + pycharm={"name": "#%% Generate test plot\n"}
plt.imshow(test_color_heightmap)

# + pycharm={"name": "#%%\n"}
plt.imshow(demo_mask, cmap='gray')

# + pycharm={"name": "#%%\n"}
plt.imshow(test_mask_rot, cmap='gray')

# + pycharm={"name": "#%% Rematch plot\n"}
rematch_distance[test_mask_rot] = numpy.max(rematch_distance)
plt.imshow(rematch_distance, cmap='gray')

# + pycharm={"name": "#%% 3d bar plot\n"}
# Invert so that "good" (low-distance) pixels become tall bars; masked
# pixels get zero height.
inverse_heights = numpy.max(rematch_distance) - rematch_distance
inverse_heights[test_mask_rot] = 0
# inverse_heights[~test_mask] = 200

# + pycharm={"name": "#%%\n"}
# Standardize to zero mean / unit variance for the softmax variants below.
inverse_heights_scaled = (inverse_heights - numpy.mean(inverse_heights))/numpy.std(inverse_heights)

# +
# JET colormap visualizations; the "reverse" image flips the color ramp.
inverse_heights_image = cv2.applyColorMap(((1-inverse_heights/numpy.max(inverse_heights))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
plt.imshow(inverse_heights_image)
inverse_heights_image_reverse = cv2.applyColorMap(((inverse_heights/numpy.max(inverse_heights))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
cv2.imwrite("figures/fig4/inverse_heights_" + policy_name + ".png", inverse_heights_image_reverse)
# cv2.imwrite("figures/fig4/inverse_heights_stack.png", inverse_heights_image_reverse)
# cv2.imwrite("figures/fig4/inverse_heights_unstack.png", inverse_heights_image_reverse)
# cv2.imwrite("figures/fig4/inverse_heights_square.png", inverse_heights_image_reverse)
blended = cv2.addWeighted(inverse_heights_image_reverse, 0.5, test_color_heightmap, 0.5, 0)
cv2.imwrite("figures/fig4/inverse_heights_blended_" + policy_name + ".png", blended)

# +
# inverse_heights_softmax = numpy.log(numpy.exp(inverse_heights_scaled)/numpy.sum(numpy.exp(inverse_heights_scaled)))
# Softmax over the whole map, then rescale to [0, 1].
inverse_heights_softmax = scipy.special.softmax(inverse_heights_scaled)
inverse_heights_softmax = (inverse_heights_softmax - numpy.min(inverse_heights_softmax))/(numpy.max(inverse_heights_softmax-numpy.min(inverse_heights_softmax)))
inverse_heights_image_softmax = cv2.applyColorMap(((inverse_heights_softmax/numpy.max(inverse_heights_softmax))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
cv2.imwrite("figures/fig4/inverse_heights_softmax_" + policy_name + ".png", inverse_heights_image_softmax)
blended = cv2.addWeighted(inverse_heights_image_softmax, 0.5, test_color_heightmap, 0.5, 0)
cv2.imwrite("figures/fig4/inverse_heights_softmax_blended_" + policy_name + ".png", blended)
inverse_heights_image_softmax = cv2.applyColorMap(((1-inverse_heights_softmax/numpy.max(inverse_heights_softmax))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
plt.imshow(inverse_heights_image_softmax)
# -

print(numpy.max(inverse_heights_softmax))
print(numpy.min(inverse_heights_softmax))

# +
# inverse_heights_softmax = numpy.log(numpy.exp(inverse_heights_scaled)/numpy.sum(numpy.exp(inverse_heights_scaled)))
# Log-softmax variant, rescaled to [0, 1], then summed with the softmax map.
inverse_heights_log_softmax = scipy.special.log_softmax(inverse_heights_scaled)
inverse_heights_log_softmax = (inverse_heights_log_softmax - numpy.min(inverse_heights_log_softmax))/(numpy.max(inverse_heights_log_softmax-numpy.min(inverse_heights_log_softmax)))
# inverse_heights_avg_softmax_log_softmax = inverse_heights_softmax + inverse_heights_log_softmax * (inverse_heights_log_softmax < 0.5)
inverse_heights_avg_softmax_log_softmax = inverse_heights_softmax + inverse_heights_log_softmax
inverse_heights_avg_softmax_log_softmax_image = cv2.applyColorMap(((inverse_heights_avg_softmax_log_softmax/numpy.max(inverse_heights_avg_softmax_log_softmax))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
cv2.imwrite("figures/fig4/inverse_heights_avg_softmax_log_softmax_" + policy_name + ".png", inverse_heights_avg_softmax_log_softmax_image)
blended = cv2.addWeighted(inverse_heights_avg_softmax_log_softmax_image, 0.5, test_color_heightmap, 0.5, 0)
cv2.imwrite("figures/fig4/inverse_heights_avg_softmax_log_softmax_blended_" + policy_name + ".png", blended)
inverse_heights_avg_softmax_log_softmax_image = cv2.applyColorMap(((1-inverse_heights_avg_softmax_log_softmax/numpy.max(inverse_heights_avg_softmax_log_softmax))*255).astype(numpy.uint8), cv2.COLORMAP_JET)
plt.imshow(inverse_heights_avg_softmax_log_softmax_image)
# -

print(numpy.shape(inverse_heights_avg_softmax_log_softmax))

# + pycharm={"name": "#%%\n"}
plt.imshow(test_color_heightmap)

# + pycharm={"name": "#%%\n"}
# Compute colors
# colors = [test_color_heightmap[i,j,:]/255 for j in range(0, 224) for i in range(0, 224)]
# Flattened pixel coordinates in column-major visit order (j outer, i inner).
x = [i for j in range(0, 224) for i in range(0, 224)]
y = [j for j in range(0, 224) for i in range(0, 224)]
# top = [inverse_heights[i,j] for j in range(0, 224) for i in range(0, 224)]
# top = [inverse_heights_softmax[i,j] for j in range(0, 224) for i in range(0, 224)]
top = [inverse_heights_avg_softmax_log_softmax[i,j] for j in range(0, 224) for i in range(0, 224)]
best_test_action_index = numpy.argmax(top)
best_test_action = (y[best_test_action_index], x[best_test_action_index])
# inverse_heights_image = cv2.circle(inverse_heights_image, best_test_action, 2, (255, 0, 0), 2)
# blended = cv2.addWeighted(inverse_heights_image, 0.25, test_color_heightmap, 0.75, 0)
# blended = cv2.addWeighted(inverse_heights_image_softmax, 0.5, test_color_heightmap, 0.5, 0)
blended = cv2.addWeighted(inverse_heights_avg_softmax_log_softmax_image, 0.5, test_color_heightmap, 0.5, 0)
blended = cv2.circle(blended, best_test_action, 2, (255, 0, 0), 2)
plt.imshow(blended)
colors = [blended[i,j,:]/255 for j in range(0, 224) for i in range(0, 224)]
# _x = numpy.arange(224)
# _y = numpy.arange(224)
# _xx, _yy = numpy.meshgrid(_x, _y)
# x, y = _xx.ravel(), _yy.ravel()
# top = inverse_heights.flatten('C')
bottom = numpy.zeros(len(top))

# + pycharm={"name": "#%%\n"}
# 3D bar chart of the (softmax + log-softmax) height map, colored by the
# blended heatmap/heightmap image.
figure = plt.figure(figsize=(6, 4), dpi=600)
ax = figure.add_subplot(111, projection='3d')
ax.view_init(elev=15., azim=90)
# ax.view_init(elev=30., azim=0)
# ax.plot_surface(numpy.array(x), numpy.array(y), numpy.array(top), color=colors)
ax.bar3d(x, y, bottom, 1, 1, top, shade=False, color=colors)
# ax.set_zlim(0, 500)
ax.set_zlim(0, numpy.max(top)*5)
plt.axis('off')
plt.show()

# + pycharm={"name": "#%%\n"}
figure.savefig("figures/fig4/test_cc_figure_4_" + policy_name + ".png", transparent=True)
# figure.savefig("figures/fig4/test_cc_figure_4_stack.png", transparent=True)
# figure.savefig("figures/fig4/test_cc_figure_4_unstack.png", transparent=True)
# figure.savefig("figures/fig4/test_cc_figure_4_square.png", transparent=True)

# + pycharm={"name": "#%%\n"}
masked_test_color_heightmap = test_color_heightmap.copy()
masked_test_color_heightmap[test_mask_rot,:] = 0
# best_test_action = numpy.unravel_index(numpy.argmin(rematch_distance), (224, 224))
best_test_action_index = numpy.argmax(top)
best_test_action = (y[best_test_action_index], x[best_test_action_index])
print(numpy.max(top))
print(inverse_heights[best_test_action])
print(test_mask_rot[best_test_action])
print(best_test_action)
fig, ax = plt.subplots(1)
# ax.imshow(masked_test_color_heightmap)
# ax.imshow(inverse_heights, cmap='gray')
ax.imshow(test_color_heightmap)
# ax.imshow(test_mask)
ax.add_patch(Circle(best_test_action))

# + pycharm={"name": "#%% Save ply\n"}
# Export the height map as a colored point cloud for external rendering.
scale_factor = 0.002
top_scale_factor = scale_factor * 10
points = numpy.stack((x,y,numpy.array(top)/top_scale_factor), -1) * scale_factor
print(points.shape)
rgb = (numpy.stack(colors) * 255).astype('uint8')
ply = Ply(points, rgb)
ply.write("figures/fig4/test_cc_figure_4_" + policy_name + ".ply")
# ply.write("figures/fig4/test_cc_figure_4_stack.ply")
# ply.write("figures/fig4/test_cc_figure_4_unstack.ply")
# ply.write("figures/fig4/test_cc_figure_4_square.ply")
# write_xyz_rgb_as_ply(points, blended, "figures/fig4/test_cc_figure_4_row.ply")

# + pycharm={"name": "#%%\n"}
# TODO:
# Check rotation
# Tighter mask - N/A
# Subtract minimum nonzero value - done
# Try adding the match distance
# Save image files for figures - done
# Refactor
cc_figure_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.4.6 # language: julia # name: julia-0.4 # --- # Setting up a custom stylesheet in IJulia file = open("style.css") # A .css file in the same folder as this notebook file styl = readall(file) # Read the file HTML("$styl") # Output as HTML # # Types # <h2>In this lesson</h2> # - [Introduction](#Introduction) # - [Importing the packages for this lesson](#Importing-the-packages-for-this-lesson) # - [Outcomes](#Outcomes) # - [The type system in Julia](#The-type-system-in-Julia) # - [Type creation](#Type-creation) # - [Conversion and promotion](#Convertion-and-promotion) # - [Parametrizing a type](#Parametrizing-a-type) # - [The equality of values](#The-equality-of-values) # - [Defining methods for functions that will use user-types](#Defining-methods-for-functions-that-will-use-user-types) # - [Constraining field values](#Constraining-field-values) # - [More complex parameters](#More-complex-parameters) # - [Screen output of a user-defined type](#Screen-output-of-a-user-defined-type) # <hr> # <h2>Introduction</h2> # A computer variable, which is a space in memory, holds values of different types, i.e. integers, floating point values, and strings. In some languages the type of the value to be held inside of a variable must be explicitely declared. These languages are termed *statically typed*. In *dynamically typed* languages nothing is known about the type of the value held inside the variable until runtime. Being able to write code operating on different types is termed *polymorphism*. # Julia is a dynamically typed language, yet it is possible to declare a type for values as well. Declaring a type allows for code that is clear to understand. As an assertion it can help to confirm that your code is working as expected. 
It can also allow for faster code execution by providing the compiler with extra information. # [Back to the top](#In-this-lesson) # <hr> # <h2>Outcomes</h2> # After successfully completing this lecture, you will be able to: # # - Understand the Julia type system # - Create your own user-defined types # - Parametrize your types # - Overload methods for Julia functions so that they can use your types # [Back to the top](#In-this-lesson) # <hr> # <h2>The type system in Julia</h2> # Julia holds a type hierarchy that flows like the branches of a tree. Right at the top we have a type called `Any`. All types are subtypes of this type. Right at the final tip of the branches we have concrete types. They can hold values. Supertypes of these concrete types are called abtsract types and they cannot hold values, i.e. we cannot create an instance of an abstract type. # We can use Julia to see if types are subtypes of a supertype. # Is Number a subtype of Any? Number <: Any # Is Float 64 a subtype of AbstractFloat? Float64 <: AbstractFloat # The subtypes of Any subtypes(Any) # Subtypes of AbstractString subtypes(AbstractString) # Subtypes of Number subtypes(Number) # Subtypes of Real subtypes(Real) # Subtypes of AbstractFloat subtypes(AbstractFloat) # Subtypes of Integer subtypes(Integer) # Subtypes of Signed subtypes(Signed) # [Back to the top](#In-this-lesson) # <hr> # <h2>Declaring a type</h2> # A type is declared by the double colon, `::`, sign. To the left we place the value (or placeholder for a variable, i.e. a variable name) and to the right the actual type. In the example below we want to express the fact that the value $ 2 + 2 $ is an instance of a 64-bit integer. 
(2 + 2)::Int64 # If we typed `(2 + 2)::Float64` we would get the following error: # ``` # LoadError: TypeError: typeassert: expected Float64, got Int64 # while loading In[3], in expression starting on line 1 # ``` # We used the declaration of a type as an assertion, which allowed us to see that there was something wrong with our code. We can imagine a program where the `+()` (or a more complicated user-defined) function is called and we need the arguments to be of a certain type. An error such as the one above can give us information about what went wrong. # Declaring a type of a local variable (inside of a function), we state that the type should always remain the same. This is more like what would happen in a statically typed language. It is really helpful if we want an error to be thrown should the type of a variable be changed by another part of our code. This can lead to type instability. It can really impact the speed of execution. # Creating a function with a local variable function static_local_variable() v::Int16 = 42 return v end # Calling the function static_local_variable() # Checking the type of the answer just give typeof(ans) # Remember that `v` is local to the function. If we try and look at it value by typing `v`, we would get the following error: # ``` # LoadError: UndefVarError: v not defined # while loading In[7], in expression starting on line 1 # ``` # Now that we know something about declaring a type, let's look at creating our own types. # [Back to the top](#In-this-lesson) # <hr> # <h2>Type creation</h2> # As mentioned, we can create our own types. Consider a Cartesian coordinate system along two perpendicular axes, say $ x $ and $ y $. A vector in the plane can be represented as a type. The keyword we use to create a type is `type`. If we want instances of our type to be immutable, we use the keyword `immutable`. 
# Creating a concrete type called Vector_2D type Vector_2D x::Float64 # x is a fieldname of the type and has an optional type y::Float64 # y is a fieldname of the type and has an optional type end # This is actually a composite type, since we have fields. For a type that is non-composite we can imagine a simple wrapper around an already defined type, such as we do below. # A non-composite type type NonComposite x::Float64 end my_non_composite = NonComposite(42) # Type typeof(ans) # Back to the more exciting composite types. We can now instantiate the concrete type `Vector_2D`. vector_1 = Vector_2D(2, 2) # The type of vector_1 typeof(vector_1) # Notice how we get floating point values even though we gave two integer values. The `convert()` functions was created to change allowable values to 64-bit floating point values. # Also notice that it looks like we called a function when we typed `Vector_2D(2, 2)`. When we define a type, constructors are created. They allow us to create an instance of that type (sometimes referred to as an *object* of that type). # As with functions, we can access the methods that were created with the type. methods(Vector_2D) # We can also access the fieldnames and their values. They are mutable, i.e. we can pass new values to them. # The available names (fields, fieldnames) # Note that they are of type Symbol fieldnames(Vector_2D) # Getting the value of the :x field vector_1.x # Alternative syntax using the field's symbol representation getfield(vector_1, :x) # Another alternative notation using the index number of the fields getfield(vector_1, 1) vector_1.x = 3 vector_1 # Another way to pass a value to a fieldname in a type is the `setfield()` function. We have to use the correct type for the value. 
If we use an integer such as `setfield!(vector_1, :x, 4)` we would get the follwoing error: # ``` # LoadError: TypeError: setfield!: expected Float64, got Int64 # while loading In[25], in expression starting on line 1 # ``` # Now we have to use a floating point value setfield!(vector_1, :x, 4.0) # vector_1 has been changed vector_1 # [Back to the top](#In-this-lesson) # <hr> # <h2>Convertion and promotion</h2> # Before we go any further, we must have a look behind the scenes. Above we saw that an integer was converted to a floating point value as specified for the fields of our new type. # Using the convert function convert(Float64, 10) # If precision is lost, convertion will result in an error. For instance, `convert(Int16, 10.1)` will return: # ``` # adError: InexactError() # while loading In[20], in expression starting on line 1 # ``` # Using `convert(Int16, 10.0)` will return a value of $ 10 $, though. # Julia has a type promotion system that will try to incorporate values into a single type. If we pass an integer and a floating point value, the integer will be promoted to a floating point value. promote(10, 10.0) typeof(10) typeof(10.0) # This promotion to a common type lifts the lid on multiple dispatch when a function is called with unspecified argument types (i.e. `Any`). # [Back to the top](#In-this-lesson) # <hr> # <h2>Parametrizing a type</h2> # When creating a user type, we need not specify the type explicitely. We could use a parameter. Have a look at the example below. type Vector_3D{T} x::T y::T z::T end # We use $ T $ as a parameter placeholder. When we instantiate the type we can use any appropriate type, as long as all the fields values are of the same type. 
# Using 64 bit integers vector_2 = Vector_3D(10, 12, 8) # If we were to execute `vector_2 = Vector_3D(10.1, 10, 8)`, we would get the following error: # ``` # LoadError: MethodError: `convert` has no method matching convert(::Type{Vector_3D{T}}, ::Float64, ::Int64, ::Int64) # This may have arisen from a call to the constructor Vector_3D{T}(...), # since type constructors fall back to convert methods. # Closest candidates are: # Vector_3D{T}(::T, !Matched::T, !Matched::T) # call{T}(::Type{T}, ::Any) # convert{T}(::Type{T}, !Matched::T) # while loading In[28], in expression starting on line 1 # ``` # We can constrain the parametric type. Below we allow all subtypes of of the abstract type `Real`. type Vector_3D_Real{T <: Real} x::T y::T z::T end # As an aside, we cannot redefine a type. If we would use: # ``` # type Vector_3D{T} # x::T # y::T # z::T # end # ``` # we would get the error: # ``` # LoadError: invalid redefinition of constant Vector_3D # while loading In[98], in expression starting on line 1 # ``` # Creating a new instance vector_3 = Vector_3D_Real(3, 3, 3) # [Back to the top](#In-this-lesson) # <hr> # <h2>The equality of values</h2> # When are two values equal? We use a double equal sign to return a Boolean value. # Using the functional notation ==(5, 5.0) # Numbers are immutable and are compared at the bit level. This includes their types. We can use the `===` sign or the `is()` function to check for equality. is(5, 5.0) # Where does this leave our user-defined types? We will see below that the address in memory is checked when dealing with more complex objects such as our user-defined, composite types. vector_a = Vector_2D(1.0, 1.0) vector_b = Vector_2D(1.0, 1.0) is(vector_a, vector_b) # [Back to the top](#In-this-lesson) # <hr> # <h2>Defining methods for functions that will use user-types</h2> # The summation function, `+()` has methods for adding different types. What if we want to add two instances of our `Vector_2D` user-type? 
If we were to add `vector_a` to `vector_b` we would get the following error: # ``` # LoadError: MethodError: `+` has no method matching +(::Vector_2D, ::Vector_2D) # Closest candidates are: # +(::Any, ::Any, !Matched::Any, !Matched::Any...) # ``` # Base methods for the +() function methods(+) # We have to create a method. import Base.+ +(u::Vector_2D, v::Vector_2D) = Vector_2D(u.x + v.x, u.y + v.y) +(vector_a, vector_b) # [Back to the top](#In-this-lesson) # <hr> # <h2>Constraining field values</h2> # We can well imagine needing to constain the values that a type can hold. Below we create the Bloodpressure type with two fields that hold integer values. They cannot be negative and the systolic blood pressure must be higher than the diastolic blood pressure. We solve this problem by creating an inner constructor. type BloodPressure # Don't leave as Any systolic::Int16 diastolic::Int16 function BloodPressure(s, d) # Using short-circuit evaluations && and || s < 0 && throw(ArgumentError("Negative pressures are not allowed!")) s <= d && throw(ArgumentError("The systolic blood pressure must be higher than the diastolic blood pressure!")) isa(s, Integer) || throw(ArgumentError("Only integer values allowed!")) isa(d, Integer) || throw(ArgumentError("Only integer values allowed!")) new(s, d) end end bp_1 = BloodPressure(120, 80) # Using `bp_2 = BloodPressure(-1, 90)` will result in the error: # ``` # LoadError: ArgumentError: Negative pressures are not allowed! # while loading In[32], in expression starting on line 1 # ``` # Using `bp_2 = BloodPressure(80, 120)` will result in the error: # ``` # LoadError: ArgumentError: The systolic blood pressure must be higher than the diastolic blood pressure # while loading In[56], in expression starting on line 1 # ``` # Using `bp_2 = BloodPressure(120.0, 80)` will result in the error: # ``` # LoadError: ArgumentError: Only integer values allowed! # while loading In[95], in expression starting on line 1 # ``` # Beware. 
Using inner constructors with parametrized types can lead to problems. type BloodPressureParametrized{T <: Real} # Don't leave as Any systolic::T diastolic::T function BloodPressureParametrized(s, d) s < 0 && throw(ArgumentError("Negative pressures are not allowed!")) s <= d && throw(ArgumentError("The systolic blood pressure must be higher than the diastolic blood pressure!")) isa(s, Integer) || throw(ArgumentError("Only integer values allowed!")) isa(d, Integer) || throw(ArgumentError("Only integer values allowed!")) new(s, d) end end # Using `bp_3 = BloodPressureParametrized(120, 80)` will result in the error; # ``` # LoadError: MethodError: `convert` has no method matching convert(::Type{BloodPressureParametrized{T<:Real}}, ::Int64, ::Int64) # This may have arisen from a call to the constructor BloodPressureParametrized{T<:Real}(...), # since type constructors fall back to convert methods. # Closest candidates are: # call{T}(::Type{T}, ::Any) # convert{T}(::Type{T}, !Matched::T) # while loading In[102], in expression starting on line 1 # # in call at essentials.jl:57 # ``` # Now we have to specify the type during the instantiation. bp_3 = BloodPressureParametrized{Int}(120, 80) type BloodPressureParametrizedFixed{T <: Real} systolic::T diastolic::T function BloodPressureParametrizedFixed(s, d) s < 0 && throw(ArgumentError("Negative pressures are not allowed!")) s <= d && throw(ArgumentError("The systolic blood pressure must be higher than the diastolic blood pressure!")) new(s, d) end end # We can fix this by the assignment below. # A bit of an effort BloodPressureParametrizedFixed{T}(systolic::T, diastolic::T) = BloodPressureParametrizedFixed{T}(systolic, diastolic) bp_4 = BloodPressureParametrizedFixed(120, 80) # We can get way more specific. In the code below we tell Julia that if we pass integers to the type, they should be expressed as floating point values. 
BloodPressureParametrizedFixed{T <: Int}(systolic::T, diastolic::T) = BloodPressureParametrizedFixed{Float64}(systolic, diastolic) bp_5 = BloodPressureParametrizedFixed(120, 80) # [Back to the top](#In-this-lesson) # <hr> # <h2>More complex parameters</h2> # Up until now we have constrained ourselves to a single parameter. It is possible, though, to create more than one. Below we create a type called `Relook`. It has one fieldname called `duration`, which must be of subtype, `Real`. There is also a second parameter. type Relook{N, T<:Real} duration::T end # Using `patient_1 = Relook(3, 60)` will result in the error: # ``` # LoadError: MethodError: `convert` has no method matching convert(::Type{Relook{N,T}}, ::Int64, ::Int64) # This may have arisen from a call to the constructor Relook{N,T}(...), # since type constructors fall back to convert methods. # Closest candidates are: # call{T}(::Type{T}, ::Any) # convert{T}(::Type{T}, !Matched::T) # while loading In[175], in expression starting on line 1 # # in call at essentials.jl:57 # ``` # We have to specify the type of the second parameter patient_1 = Relook{4, Int16}(60) patient_1.duration # We now want to add only objects (instances) with the same value in the first parameter. +{N, T}(u::Relook{N, T}, v::Relook{N, T}) = Relook{N, T}(u.duration + v.duration) patient_2 = Relook{4, Int16}(70) patient_1 + patient_2 patient_3 = Relook{3, Int16}(70) # Using `patient_1 + patient_3` will now result in the error: # ```LoadError: MethodError: `+` has no method matching +(::Relook{4,Int16}, ::Relook{3,Int16}) # Closest candidates are: # +(::Any, ::Any, !Matched::Any, !Matched::Any...) # +{N,T}(::Relook{N,T}, !Matched::Relook{N,T}) # while loading In[191], in expression starting on line 1 # ``` patient_4 = Relook{4.0, Int16}(70) # Using `patient_1 + patient_4` will also result in an error, because the types of `N` do not match. 
# The error would be:
# ```
# LoadError: MethodError: `+` has no method matching +(::Relook{4,Int16}, ::Relook{4.0,Int16})
# Closest candidates are:
# +(::Any, ::Any, !Matched::Any, !Matched::Any...)
# +{N,T}(::Relook{N,T}, !Matched::Relook{N,T})
# +{N,T1,T2}(::Relook{N,T1}, !Matched::Relook{N,T2})
# while loading In[210], in expression starting on line 1
# ```

# Below we fix the fieldname type mismatch.

+{N, T1, T2}(u::Relook{N, T1}, v::Relook{N, T2}) = Relook{N, promote_type(T1, T2)}(u.duration + v.duration)

patient_5 = Relook{4, Float64}(60)

# N = 4 for both patient_1 and patient_5
# T for patient_1 is Int16 and T for patient_2 is Float64
patient_1 + patient_5

# If we want to throw an error if the number of relooks are not equal when trying to add two objects of the `Relook` type, we can do the following.

+{N1, N2, T}(u::Relook{N1, T}, v::Relook{N2, T}) = throw(ArgumentError("Cannot add durations when the number of relooks do not match."))

# Using `patient_1 + patient_3` will now result in the error:
# ```
# LoadError: ArgumentError: Cannot add durations when the number of relooks do not match.
# while loading In[237], in expression starting on line 1
#
# in + at In[236]:1
# ```

# What about calculating the natural logarithm of the duration of a `Relook` object? We could just specify the field name.

log(patient_1.duration)

# Better still, we could specify what the `log` function actually does with a `Relook` object.

import Base.log

log(u::Relook) = log(u.duration)

log(patient_1)

# We can specify a `convert` method that will convert all our `Relook` objects to numerical values which we can pass to Julia functions.

import Base.convert

# The float() function tries to convert a value to a floating point value
convert(::Type{AbstractFloat}, u::Relook) = float(u.duration)

# Converting our Relook object and passing it to log10()
log10(convert(AbstractFloat, patient_1))

# [Back to the top](#In-this-lesson)

# <hr>

# <h2>Screen output of a user-defined type</h2>

# We can create some meaning to our types by the way an object of the type is represented on the screen. Above we just saw two values when we instantiated our type `Relook`. Let's change that a bit by overloading the `show` function.

import Base.show

show{N, T}(io::IO, u::Relook{N, T}) = print(io, "Patient with ", N, " relook procedures totalling ", u.duration, " minutes.")

patient_6 = Relook{4, Int16}(60)

# [Back to the top](#In-this-lesson)
JuliaCourseNotebooks-master/Week3_Honors1-Types.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Processing Board Game Data

# ## Background
#
# This dataset comes from the [Board Game Geek database](http://boardgamegeek.com/). The site's database has more than 90,000 games, with crowd-sourced ratings. This particular subset is limited to only games with at least 50 ratings which were published between 1950 and 2016. This still leaves us with 10,532 games! For more information please check out the [tidytuesday repo](https://github.com/rfordatascience/tidytuesday/tree/master/data/2019/2019-03-12) which is where this example was taken from.
#
#
# ## Data Cleaning

import pandas as pd
import janitor
import os

# ### One-Shot

# This cell demonstrates the cleaning process using the call chaining approach championed in pyjanitor

# NOTE(review): this URL contains a doubled '//' before the filename (the
# multi-step version below uses a single '/'); raw.githubusercontent appears
# to tolerate it, but it is worth normalizing.
cleaned_df = (
    pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-03-12//board_games.csv")  # ingest raw data
    .clean_names()  # removes whitespace, punctuation/symbols, capitalization
    .remove_empty()  # removes entirely empty rows / columns
    .drop(columns = ["image","thumbnail","compilation","game_id"])  # drops unnecessary columns
)

# ## Multi-Step

# These cells repeat the process in a step-by-step manner in order to explain it in more detail

# ### Read in the csv

df = pd.read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-03-12/board_games.csv")
df.head(3)

# ### Remove the whitespace, punctuation/symbols, and capitalization from columns

df = df.clean_names()
df.head(3)

# ### Remove all the empty rows and columns if present

df = df.remove_empty()
df.head(3)

# ### Check to see whether "min_playtime" and "max_playtime" columns are equal

len(df[df["min_playtime"] != df["max_playtime"]])

# ### Check to see what percentage of the values in the "compilation" column are not null

len(df[df['compilation'].notnull()]) / len(df)

# ### Drop unnecessary columns

# The 'compilation' column was demonstrated to have little value, the "image" and "thumbnail" columns
# link to images and are not a factor in this analysis. The "game_id" column can be replaced by using the index.

df = df.drop(columns=["image", "thumbnail", "compilation", "game_id"])
df.head(3)

# ## Sample Analysis

# +
import pandas as pd
import seaborn as sns

# allow plots to appear directly in the notebook
# %matplotlib inline
# -

# ### What Categories appear most often?

df['category'].value_counts().head(10)

# ### What is the relationship between games' player numbers, recommended minimum age, and the game's estimated length?

sns.pairplot(df, x_vars=['min_age', 'min_players', 'min_playtime'], y_vars='users_rated', height=7, aspect=0.7);

# ### Preliminary analysis

# Without digging into the data too much more it becomes apparent that there are some entries that were improperly entered e.g. having a minimum playtime of 60000 minutes. Otherwise we see some nice bell curves.
examples/notebooks/board_games.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Coding Exercise #0609

import nltk
from numpy.random import randint, seed
from sklearn.feature_extraction.text import CountVectorizer

# ### 1. n-Gram based autofill:

# Text data for training.
my_text = """Machine learning is the scientific study of algorithms and statistical models that computer systems use to effectively perform a specific task without using explicit instructions, relying on patterns and inference instead. It is seen as a subset of artificial intelligence. Machine learning algorithms build a mathematical model of sample data, known as "training data", in order to make predictions or decisions without being explicitly programmed to perform the task.[1][2]:2 Machine learning algorithms are used in the applications of email filtering, detection of network intruders, and computer vision, where it is infeasible to develop an algorithm of specific instructions for performing the task. Machine learning is closely related to computational statistics, which focuses on making predictions using computers. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a field of study within machine learning, and focuses on exploratory data analysis through unsupervised learning In its application across business problems, machine learning is also referred to as predictive analytics."""
my_text = [my_text.lower()]  # Convert to lowercase and make a list. => Required by the CountVectorizer().

# #### 1.1. n-Gram trial run:

n = 3  # Can be changed to a number equal or larger than 2.
n_min = n
n_max = n
n_gram_type = 'word'  # n-Gram with words.

# NOTE(review): get_feature_names() was removed in scikit-learn 1.2; on newer
# versions this notebook needs get_feature_names_out() instead.
vectorizer = CountVectorizer(ngram_range=(n_min,n_max), analyzer = n_gram_type)
n_grams = vectorizer.fit(my_text).get_feature_names()  # Get the n-Grams as a list.
n_gram_cts = vectorizer.transform(my_text).toarray()  # The output is an array of array.
n_gram_cts = list(n_gram_cts[0])  # Convert into a simple list.
list(zip(n_grams,n_gram_cts))  # Make a list of tuples and show.

# #### 1.2. Train by making a dictionary based on n-Grams:

n = 3  # Can be changed to a number equal or larger than 2.
n_min = n
n_max = n
n_gram_type = 'word'
vectorizer = CountVectorizer(ngram_range=(n_min,n_max), analyzer = n_gram_type)
n_grams = vectorizer.fit(my_text).get_feature_names()  # A list of n-Grams.

# my_dict maps each (n-1)-gram prefix to the list of words observed after it;
# repeated next-words stay duplicated so sampling is frequency-weighted.
my_dict = {}
for a_gram in n_grams:
    words = nltk.word_tokenize(a_gram)
    a_nm1_gram = ' '.join(words[0:n-1])  # (n-1)-Gram.
    next_word = words[-1]  # Word after the a_nm1_gram.
    if a_nm1_gram not in my_dict.keys():
        my_dict[a_nm1_gram] = [next_word]  # a_nm1_gram is a new key. So, initialize the dictionary entry.
    else:
        my_dict[a_nm1_gram] += [next_word]  # an_nm1_gram is already in the dictionary.

# View the dictionary.
my_dict

# #### 1.3. Predict the next word:

# Helper function that picks the following word.
def predict_next(a_nm1_gram: str) -> str:
    """Sample a next word for the given (n-1)-gram, weighted by how often
    each continuation was observed in the training text. The key must be
    present in my_dict."""
    value_list_size = len(my_dict[a_nm1_gram])  # length of the value corresponding to the key = a_nm1_gram.
    i_pick = randint(0, value_list_size)  # A random number from the range 0 ~ value_list_size.
    return(my_dict[a_nm1_gram][i_pick])  # Return the randomly chosen next word.

# Test.
input_str = 'order to'  # Has to be a VALID (n-1)-Gram!
predict_next(input_str)

# Another test.
# Repeat for 10 times and see that the next word is chosen randomly with a probability proportional to the occurrence.
input_str = 'machine learning'  # Has to be a VALID (n-1)-Gram!
for i in range(10):
    print(predict_next(input_str))

# #### 1.4. Predict a sequence:

# Initialize the random seed.
seed(123)

# A seed string has to be input by the user.
my_seed_str = 'machine learning'  # Has to be a VALID (n-1)-Gram!
# my_seed_str = 'in order'  # Has to be a VALID (n-1)-Gram!

# Walk the chain until we reach a prefix that was never seen in training.
a_nm1_gram = my_seed_str
output_string = my_seed_str  # Initialize the output string.
while a_nm1_gram in my_dict:
    output_string += " " + predict_next(a_nm1_gram)
    words = nltk.word_tokenize(output_string)
    a_nm1_gram = ' '.join(words[-n+1:])  # Update a_nm1_gram.

# Output the predicted sequence.
output_string
Rafay notes/Samsung Course/Chapter 7/Exercises/ex_0609.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.2 64-bit # name: python37264bitcd41b4a367e042778b9588e45cc7cf61 # --- # + tags=[] import numpy as np A = np.array([ [56.0, 0.0, 4.4, 68.0], [1.2, 104.0, 52.0, 8.0], [1.8, 135.0, 99.0, 0.9] ]) print(A) # + tags=[] cal = A.sum(axis = 0) print(cal) # + tags=[] percentage = 100 * A / cal.reshape(1, 4) print(percentage) # -
c1/c1m2/broadcasting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## DL model to predict secondary structure of proteins from sequence # + import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import pandas as pd from datetime import datetime from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import BatchNormalization from keras.regularizers import l2 from keras.layers import GaussianNoise from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from keras.optimizers import SGD, Adam from keras.constraints import maxnorm from keras.utils import to_categorical from keras.callbacks import ReduceLROnPlateau from keras.callbacks import EarlyStopping from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score, classification_report from sklearn.pipeline import Pipeline # - # ## One-hot encoder for amino acids def onehotencoder_for_aminoacids(x): """ generate a OneHotEncoder for amino acids """ aminoacids = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y'] categories = [aminoacids for i in range(x.shape[1])] encoder = OneHotEncoder(categories=categories, handle_unknown='ignore') # for all amino acids # encoder = OneHotEncoder(handle_unknown='ignore') # only for amino acids present in data encoder.fit(x) return encoder # ## Encode a protein sequence def encode_sequence(seq, encoder): """ take a single sequence and encode it seq: e.g. 
'AVGHY' encoder: already ceated by onehotencoder_for_aminoacids return encoded_seq """ nparray = np.array(list(seq)) encoded_seq = encoder.transform([nparray]) return encoded_seq def plot_history(history): fig, (ax1, ax2) = plt.subplots(1,2) ax1.plot(history.history['loss']) ax1.plot(history.history['val_loss']) ax1.set_title('model loss') ax1.set_ylabel('loss') ax1.set_xlabel('epoch') ax1.legend(['train', 'test']) ax2.plot(history.history['accuracy']) ax2.plot(history.history['val_accuracy']) ax2.set_title('model accuracy') ax2.set_ylabel('accuracy') ax2.set_xlabel('epoch') ax2.legend(['train', 'test']) plt.tight_layout() def evaluate_model(model, X_train, X_test, y_train, y_test): train_loss, train_accuracy = model.evaluate(X_train, y_train, verbose=0) print (f"Train Loss: {train_loss:.3f}, Train Accuracy: {train_accuracy:.3f}") test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0) print (f"Test Loss: {test_loss:.3f}, Test Accuracy: {test_accuracy:.3f}") return train_accuracy, test_accuracy # ## Read in data # # #### The secondary structure data was obtained from PDB https://www.rcsb.org/pages/download/http#ss # #### This was processed by parse_ss script to split our three secondary structures (helix, strand and coil). 
In this example the calculataion is limited to protein sequences of length 8 # + df = pd.read_csv("ss_data_length_8.csv") # df = pd.read_csv("ss_data_length_8_augmented.csv") # df2 = pd.read_csv("ss_data_length_8_augmented.csv") # df = pd.concat([df1,df2]).drop_duplicates() df = df.drop_duplicates() print (f"Number of samples: {df.shape[0]}") print (f"Top 5 rows of the data:\n {df.head(5)}") print (df.groupby('ss').count()) # - # ## Generate X and Y # Create data for Classification split_seq = df['sequence'].str.split('',expand=True) X = pd.DataFrame(split_seq) X = X[X!=''].dropna(axis=1).values Y = df['ss'].values # ## Encode label (Y) # + # encode the ss type # first convert labels to integers: this is a long way # le = LabelEncoder() # le.fit(Y) # encoded_Y = le.transform(Y) # # Now convert integers to one-hot vectors # encoded_Y = np_utils.to_categorical(encoded_Y) # encoded_Y[0] # - # encode the ss type # A shorter way Y = Y.reshape(-1,1) y_encoder = OneHotEncoder(handle_unknown='ignore') y_encoder.fit(Y) encoded_Y = y_encoder.transform(Y) # y_encoder.get_feature_names() # np.array(encoded_Y.toarray()) # ## Encode sequences (X) # encode the sequence to one hot values encoder = onehotencoder_for_aminoacids(X) encoded_X = encoder.transform(X) # check if the encoder works fine print (f"First sequence: {X[0]}") print(f"First encoded sequence inverse transformed to verify: {encoder.inverse_transform(encoded_X[0])}") print(f"Shapes of input and encoded sequences: {X.shape}, {encoded_X.shape}") input_dim = encoded_X.shape[1] print(f"Number of features: {input_dim}") # ## Test/train split # split into train test X_train, X_test, y_train, y_test = train_test_split(encoded_X, encoded_Y, test_size=0.3, random_state=0, stratify=encoded_Y.toarray()) # ## Create a model # create model def baseline_model(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(Dense(3, activation="softmax")) 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model # ## Cross_val_score with a baseline model results = [] epochs = 40 # + kfold = KFold(n_splits=5, shuffle=True) # kfold = StratifiedKFold(n_splits=5, shuffle=True) y_train_arr = np.array(y_train.toarray()) # This could be sampled better using GridSearchCV. # GridSearchCV takes a while, so just checking batch size manually # batch_sizes = [256, 512, 1024, 2048] batch_sizes = [1024] accuracies = [] for b in batch_sizes: start = datetime.now() estimator = KerasClassifier(build_fn=baseline_model, input_dim=input_dim, epochs=40, batch_size=b, verbose=0) cv_results = cross_val_score(estimator, X_train, y_train_arr, cv=kfold) end = datetime.now() time_taken = str(end-start) print (f"Batch size: {b}, Mean accuracy: {cv_results.mean():.3f} ({cv_results.std():.3f}), Time taken: {time_taken}") accuracies.append(cv_results.mean()) # plt.plot(batch_sizes, accuracies) # plt.plot(batch_sizes, accuracies, 'o') # plt.title('Batch Size Effect') # plt.ylabel('Accuracy') # plt.xlabel('Batch size'); # - # ## Fit a baseline model # + batch_size = 1024 model = baseline_model(input_dim) # print (model.summary()) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Basic',train_accuracy,test_accuracy)) # - # ## Test Accuracy = 78.3 % # ### Need to try a few things to see if it improves # ### Model seems over fitted. Add dropout. dropout rate of 0.3 seems to give reasonable results. 
# + # create model def model2(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) # Compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model2(input_dim) # print (model.summary()) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout',train_accuracy,test_accuracy)) # - # ## Test Accuracy = 79.3 % # ### Test Learning rate schedule # + # create model def model3(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) # Compile model # sgd = SGD(lr=lr, momentum=0.9, nesterov=False) # adam = Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=False) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model3(input_dim) # print (model.summary()) rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_delta=1E-7, verbose=1) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, callbacks=[rlrp], verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+LRS',train_accuracy,test_accuracy)) # - # ### Learning rate schedule does not make any difference. 
# # ## Try batch normalization # # + # create model def model4(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) # Compile model # sgd = SGD(lr=lr, momentum=0.9, nesterov=False) # adam = Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, amsgrad=False) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model4(input_dim) # print (model.summary()) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+BN',train_accuracy,test_accuracy)) # - # ### Batch Normalization improves Training accuracy at the expence of Test accuracy # ## Try regularization # + # create model def model5(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model5(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+L2_regul',train_accuracy,test_accuracy)) # - # ## Try adding noise # + # create model def model6(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(Dropout(0.3)) model.add(GaussianNoise(0.1)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model6(input_dim) 
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+Noise',train_accuracy,test_accuracy)) # - # ## Try noise + Deeper model # + # create model def model7(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu')) model.add(Dropout(0.3)) model.add(GaussianNoise(0.1)) model.add(Dense(80, activation='relu')) model.add(Dropout(0.3)) model.add(GaussianNoise(0.1)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model7(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+Noise+Layers',train_accuracy,test_accuracy)) # - # ## Try L2 regularization + Deeper model # + # create model def model8(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.3)) model.add(Dense(80, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model8(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+L2_regul+Layers',train_accuracy,test_accuracy)) # - # ## Try Batch Normalization + Deeper model # + # create model def model9(input_dim=100): model = Sequential() model.add(Dense(input_dim, 
input_dim=input_dim, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(80, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model9(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+BN+Layers',train_accuracy,test_accuracy)) # - # ## Try Batch Normalization + Deeper model; replace Dropout by l2_regularization # + # create model def model10(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu', kernel_regularizer=l2(0.01))) model.add(BatchNormalization()) model.add(Dense(80, activation='relu')) model.add(BatchNormalization()) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = model10(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('L2_regul+BN+Layers',train_accuracy,test_accuracy)) # - # ## Try Dropout + l2_regularization + Batch Normalization + Deeper model # + # create model def model11(input_dim=100): model = Sequential() model.add(Dense(input_dim, input_dim=input_dim, activation='relu', kernel_regularizer=l2(0.01))) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(80, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Dense(3, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', 
metrics=['accuracy']) return model model = model11(input_dim) history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=0) plot_history(history) train_accuracy, test_accuracy = evaluate_model(model, X_train, X_test, y_train, y_test) results.append(('Dropout+L2_regul+BN+Layers',train_accuracy,test_accuracy)) # - # ## Plot various models # + results_df = pd.DataFrame(results, columns=['Model','Train','Test']) models = results_df['Model'] train = results_df['Train'] test = results_df['Test'] print (results_df.to_string(index=False)) results_df.to_csv("results.csv", index=False) fig = plt.figure() ax = plt.subplot(111) ind = np.arange(len(models)) # the x locations for the groups width = 0.4 # the width of the bars rects1 = ax.bar(ind, train, width, label='Train') rects2 = ax.bar(ind+width, test, width, label='Test') ax.set_title("Model Evaluation", fontweight='bold') ax.set_xlabel('Model', fontweight='bold') ax.set_ylabel('Accuracy', fontweight='bold') ax.legend(loc='best') ax.yaxis.grid() ax.set_ylim((0.6,1)) ax.set_xlim((-1,len(models)+1)) ax.set_xticks(ind+width*0.5) ax.set_xticklabels(models, rotation=-90 ); # - # ## Loss and Accuracy train_loss, train_accuracy = model.evaluate(X_train, y_train, verbose=0) print (f"Train Loss: {train_loss:.3f}, Train Accuracy: {train_accuracy:.3f}") test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0) print (f"Test Loss: {test_loss:.3f}, Test Accuracy: {test_accuracy:.3f}") # ## Clasification Report pred = model.predict(X_test) y_obs = y_encoder.inverse_transform(y_test) y_pred = y_encoder.inverse_transform(pred) print(classification_report(y_obs,y_pred)) # ## Spot Check input_sequence = df['sequence'].head(20).to_list() ss = df['ss'].head(20).to_list() for seq, label in zip(input_sequence,ss): encoded_sequence = encode_sequence(seq, encoder) pred = model.predict(encoded_sequence) pred_label = y_encoder.inverse_transform(pred)[0][0] correct = '' if label 
== pred_label else 'X' print (f"sequence: {seq}, Secondary Structure: {label}, Predicted: {pred_label}, {correct}")
Secondary_Structure/ss_predict.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import casadi as ca impot matplotlib.pyplot as plt # $m \ddot{x} + c \dot{x} +kx=u$ # $\vec{x} = \begin{bmatrix} x \\ \dot{x} \end{bmatrix}$ # $\vec{u} = \begin{bmatrix} u \end{bmatrix}$ # $\vec{y} = \begin{bmatrix} x \end{bmatrix}$ # $\ddot{x} = (-c \dot{x} -kx + u)/m$ # $\dot{\vec{x}} = \begin{bmatrix} \dot{x} \\ (-c \dot{x} -kx + u)/m \end{bmatrix}$ # + m = ca.SX.sym('m') c = ca.SX.sym('c') k = ca.SX.sym('k') p = ca.vertcat(m,c,k) u = ca.SX.sym('u') xvec = ca.SX.sym('x', 2) x = xvec[0] xdot = xvec[1] # or, ''' x = ca.SX.sym('x') xd = ca.SX.sym('xd') ''' xv_dot = ca.vertcat(xdot, (-c*xdot - k*x + u)/m) xv_dot # - f_rhs = ca.Function('rhs', [xvec, u, p], [xv_dot], ['x','u','p'],['x_dot'], {'jit':True}) f_rhs f_rhs([1,2],[0],[1,2,3]) import scipy.integrate import numpy as np scipy.integrate.solve_ivp(fun = lambda t, x: np.array(f_rhs(x, 1, [1,2,3])).reshape(-1), t_span=[0,1], y0 =[0,0], t_eval=np.arange(0,1,0.1)) plt.plot(res['t'],res['y'][0,:]); A = ca.jacobian(xv_dot, xvec) A
lectures/casadi_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Look at some data import datetime import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as dates import numpy as np # %matplotlib widget def comp(df_true, df_test): s_true = df_true.sum() s_test = df_test.sum() err = s_test - s_true err_rel = err/s_true print("err") print(err) print("err rel") print(err_rel) # + df = pd.read_csv("log.2021-05-22.2021-05-28", delim_whitespace=True, index_col=0, parse_dates=True, header=None, names=['time','angle','volume_ul'], skiprows=0, memory_map=True, engine="c") print(df.shape) # - UL_PER_GALLON = 3785411.784 df['volume_gal']=df['volume_ul']/UL_PER_GALLON df=df.drop(columns=['angle', 'volume_ul']) df = df.resample('S').sum() # # Noise # + tags=[] plt.subplots() plt.xlim([dates.datestr2num('2021-05-26 16:30'), dates.datestr2num('2021-05-26 16:50')]) plt.ylim([-0.001,0.001]) plt.scatter(x=df.index, y=df['volume_gal'].to_numpy(), s=1) # - # The shutoff events produce significant backwards spikes, which i think is real. The angle measure is up to 40k which is >2 turns, but mostly under 10k i.e. half a turn. # # The noise at zero is mostly small, like 100 on the angle scale (2 degrees), but there are periods with more noise, like up to 1000 (20 degrees), which seems like a lot for a flow that's really zero, and then at some times of day it's much higher, like 40k, several turns. In terms of volume it's negligible, but what is it? # # I don't think it's magnetometer noise, it has a negative trend during some hours of the day. The meter isn't *accurate* for such low flows but it does *work* so i think it's real. # # I think it's the __water heater.__ It doesn't exist in the early morning, but it turns on after showers. The expansion has to go *somewhere*. 
df_med = df.rolling(3, center=True).median().fillna(0) comp(df, df_med) plt.subplots() plt.plot(df.index, df['volume_gal'].to_numpy(), label="true") plt.plot(df_med.index, df_med['volume_gal'].to_numpy(), label="median") plt.legend() # # Discretization df_med_disc = 2 * (df_med * 0.5).round(2) print(df_med_disc) comp(df, df_med_disc) plt.subplots() plt.plot(df.index, df['volume_gal'].to_numpy(), label="true") plt.plot(df_med_disc.index, df_med_disc['volume_gal'].to_numpy(), label='disc') plt.legend() plt.grid() # Holy mackerel, this is pretty good, <1% error. print(df_med_disc.nunique()) deltas = df_med_disc[df_med_disc['volume_gal'].shift() != df_med_disc['volume_gal']].diff().fillna(0) print(len(deltas)) print(deltas.nunique()) # So this is a vocabulary with 30 unique tokens, and a week is <4000 tokens long, so a day would be in the ballpark for a transformer model.
analysis/Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/MKY01/TensorFlow/blob/master/Copy_of_TensorFlow_Workshop_Image_Recognition_With_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="83rwhErrVRak" colab_type="text" # #### Import the libraries # + id="n__9FGO1VRan" colab_type="code" outputId="45f42892-bd13-420e-ff21-e9f43e77a4f2" colab={"base_uri": "https://localhost:8080/", "height": 35} import keras from keras import backend as K from keras.layers.core import Dense, Activation from keras.optimizers import Adam from keras.metrics import categorical_crossentropy from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing import image from keras.models import Model from keras.applications import imagenet_utils import numpy as np from IPython.display import Image # + [markdown] id="t0uk70_pHwEl" colab_type="text" # #### Import MobileNet - the image recognition model # # + id="Yi5kab7SVRa3" colab_type="code" outputId="39239810-13a9-491e-90f8-5ac77133d772" colab={"base_uri": "https://localhost:8080/", "height": 72} mobile = keras.applications.mobilenet.MobileNet() # + [markdown] id="TAbpjBHLGaXw" colab_type="text" # #### Function to Prepare Images # + id="3-MzxgAcVRa-" colab_type="code" colab={} def prepare_image(file): img_path = '' img = image.load_img(img_path + file, target_size=(224, 224)) img_array = image.img_to_array(img) img_array_expanded_dims = np.expand_dims(img_array, axis=0) return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims) # + [markdown] id="KH_4apwIGlev" colab_type="text" # #### Preview Image # + id="3YcPHpdRVRbF" colab_type="code" 
outputId="7bec744c-e2ad-4d62-f669-8d2dc8e46a44" colab={"base_uri": "https://localhost:8080/", "height": 467} Image(filename='german-shepherd1.jpg') # + [markdown] id="XQ9fE0NZGvEo" colab_type="text" # #### Image Prediction # + id="vh4zghuRVRbP" colab_type="code" outputId="8181d33d-f199-4b61-ee58-df02ffaa1f98" colab={"base_uri": "https://localhost:8080/", "height": 159} preprocessed_image = prepare_image('german-shepherd1.jpg') predictions = mobile.predict(preprocessed_image) results = imagenet_utils.decode_predictions(predictions) results # + [markdown] id="s7m1EqTMG5kW" colab_type="text" # #### Preview + Predict for Other Images # + id="_rHqw1hLVRbZ" colab_type="code" outputId="29c4b007-0994-4006-e847-744158628c98" colab={"base_uri": "https://localhost:8080/", "height": 367} Image(filename='labrador1.jpg') # + id="-hIH3UXGVRbi" colab_type="code" outputId="d0c15bda-b19d-4537-a411-75f05daa465c" colab={"base_uri": "https://localhost:8080/", "height": 104} preprocessed_image = prepare_image('labrador1.jpg') predictions = mobile.predict(preprocessed_image) results = imagenet_utils.decode_predictions(predictions) results # + id="X8DpmgaxVRbr" colab_type="code" outputId="3862e9ce-63f5-4999-8ef5-897b751456a2" colab={"base_uri": "https://localhost:8080/", "height": 417} Image(filename='poodle1.jpg') # + id="mWwH06GPVRb3" colab_type="code" outputId="86e39a86-feed-4872-c127-38cab755f387" colab={"base_uri": "https://localhost:8080/", "height": 104} preprocessed_image = prepare_image('poodle1.jpg') predictions = mobile.predict(preprocessed_image) results = imagenet_utils.decode_predictions(predictions) results
Python/TensorFlow/TFworkshop/TensorFlow_Workshop_Image_Recognition_With_Colab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib import matplotlib.pyplot as plt # jt -t monokai -cellw 95% -f dejavu -fs 12 from jupyterthemes import jtplot jtplot.style() import numpy as np import os import pandas as pd import random import pickle import bcolz from tqdm import tqdm from IPython.display import FileLink, FileLinks from IPython.display import SVG import scipy from sklearn import preprocessing from sklearn.metrics import fbeta_score, f1_score, precision_score, recall_score, accuracy_score from sklearn.ensemble import RandomForestClassifier from PIL import Image import cv2 os.environ["KERAS_BACKEND"] = "tensorflow" from keras.models import Sequential, Model, load_model from keras.layers import Dense, Dropout, Activation, Flatten, concatenate, Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D from keras import optimizers from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard from keras.utils.vis_utils import model_to_dot from keras import backend as K K.set_image_dim_ordering('tf') from keras.applications.xception import Xception, preprocess_input def get_raw(df, data_path): im_features = df.copy() rgb = [] for image_name in tqdm(im_features.image_name.values, mininterval=10): img = Image.open(data_path + image_name + '.jpg') img = img.resize((imagesize,imagesize)) img = np.array(img)[:,:,:3] # im = np.hstack( ( img[:,:,0].ravel(), img[:,:,1].ravel(), img[:,:,2].ravel() )) rgb.append( img ) return np.array(rgb) def getEdges(df, data_path): im_features = df.copy() edgeArr = [] for image_name in tqdm(im_features.image_name.values, mininterval=10): img = cv2.imread( data_path + image_name + '.jpg' , 0) img = cv2.resize(img, (imagesize, imagesize)) edges = cv2.Canny( img, 5, 25) edgeArr.append( np.sum(edges) ) return 
np.array(edgeArr) def getDistance(xypair): x_delta = abs(xypair[0] - xypair[2]) y_delta = abs(xypair[1] - xypair[3]) hypotenuse = (x_delta**2 + y_delta**2)**0.5 return hypotenuse def getLines(df, data_path): im_features = df.copy() lineArr = [] for image_name in tqdm(im_features.image_name.values, mininterval=10): img = cv2.imread( data_path + image_name + '.jpg' , 0) img = cv2.resize(img, (imagesize, imagesize)) edges = cv2.Canny( img, 100, 125) lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=50) zeros = np.zeros((imagesize, imagesize)) if lines is None: lineArr.append( np.sum(zeros) ) else: for line in lines: x1,y1,x2,y2 = line[0] cv2.line(zeros,(x1,y1),(x2,y2),(255),1) lineArr.append( np.sum(zeros) ) return np.array(lineArr) def getCorners(df, data_path): im_features = df.copy() cornerArr = [] for image_name in tqdm(im_features.image_name.values, mininterval=10): img = cv2.imread( data_path + image_name + '.jpg' , 0) img = cv2.resize(img, (imagesize, imagesize)) img = np.float32(img) dst = cv2.cornerHarris(img,2,3,0.04) thresholdIndices = dst > 0.05 * dst.max() matrix = np.zeros(shape=(dst.shape[0],dst.shape[1])) matrix[thresholdIndices] = 1 cornerArr.append( np.sum(matrix) ) return np.array(cornerArr) def extract_features(df, data_path): im_features = df.copy() r_mean = [] g_mean = [] b_mean = [] r_std = [] g_std = [] b_std = [] r_max = [] g_max = [] b_max = [] r_min = [] g_min = [] b_min = [] r_kurtosis = [] g_kurtosis = [] b_kurtosis = [] r_skewness = [] g_skewness = [] b_skewness = [] for image_name in tqdm(im_features.image_name.values, mininterval=10): im = Image.open(data_path + image_name + '.jpg') im = np.array(im)[:,:,:3] r_mean.append(np.mean(im[:,:,0].ravel())) g_mean.append(np.mean(im[:,:,1].ravel())) b_mean.append(np.mean(im[:,:,2].ravel())) r_std.append(np.std(im[:,:,0].ravel())) g_std.append(np.std(im[:,:,1].ravel())) b_std.append(np.std(im[:,:,2].ravel())) r_max.append(np.max(im[:,:,0].ravel())) 
g_max.append(np.max(im[:,:,1].ravel())) b_max.append(np.max(im[:,:,2].ravel())) r_min.append(np.min(im[:,:,0].ravel())) g_min.append(np.min(im[:,:,1].ravel())) b_min.append(np.min(im[:,:,2].ravel())) r_kurtosis.append(scipy.stats.kurtosis(im[:,:,0].ravel())) g_kurtosis.append(scipy.stats.kurtosis(im[:,:,1].ravel())) b_kurtosis.append(scipy.stats.kurtosis(im[:,:,2].ravel())) r_skewness.append(scipy.stats.skew(im[:,:,0].ravel())) g_skewness.append(scipy.stats.skew(im[:,:,1].ravel())) b_skewness.append(scipy.stats.skew(im[:,:,2].ravel())) im_features['r_mean'] = r_mean im_features['g_mean'] = g_mean im_features['b_mean'] = b_mean im_features['r_std'] = r_std im_features['g_std'] = g_std im_features['b_std'] = b_std im_features['r_max'] = r_max im_features['g_max'] = g_max im_features['b_max'] = b_max im_features['r_min'] = r_min im_features['g_min'] = g_min im_features['b_min'] = b_min im_features['r_kurtosis'] = r_kurtosis im_features['g_kurtosis'] = g_kurtosis im_features['b_kurtosis'] = b_kurtosis im_features['r_skewness'] = r_skewness im_features['g_skewness'] = g_skewness im_features['b_skewness'] = b_skewness return np.array(im_features.drop(['image_name', 'tags'], axis=1)) # def extract_features(df, data_path): # im_features = df.copy() # histArr = [] # for image_name in tqdm(im_features.image_name.values, mininterval=10): # img = cv2.imread( folderpath + 'train-jpg/train_0.jpg' ) # img = np.array(img) # img.shape # R = img[:,:,0] # G = img[:,:,1] # B = img[:,:,2] # RGBHistArr = [] # for channel in [R,G,B]: # placeholder = np.zeros( (256) ) # unique, counts = np.unique(R, return_counts=True) # placeholder[unique] = counts # RGBHistArr.append(placeholder) # histArr.append( np.hstack(tuple(RGBHistArr)) ) # histArr = np.array(histArr).astype('float32') # return histArr def splitSet(dataset, split1, split2): idx_split1 = int( len(dataset) * split1) idx_split2 = int( len(dataset) * split2) training = dataset[0:idx_split1] validation = dataset[idx_split1:idx_split2] 
test = dataset[idx_split2:] return [ training, validation, test ] def tf_th_ImgReshape(data): shapedData = [ np.array( [sample[:,:,0] , sample[:,:,1] , sample[:,:,2]] ) for sample in data] return np.array(shapedData) def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush() def load_array(fname): return bcolz.open(fname)[:] def xceptionPreprocess(rawFeatures): rawFeatures = rawFeatures.astype('float32') rawFeatures = preprocess_input(rawFeatures) return rawFeatures def shapingDataSet(rawFeatures, edgeFeatures): edgeFeaturesShaped = np.reshape(edgeFeatures, edgeFeatures.shape + (1,)) X = [ np.dstack((sampleRaw, sampleEdge)) for sampleRaw, sampleEdge in zip(rawFeatures, edgeFeaturesShaped) ] X = np.array(X) X = X.astype('float32') X -= 127 X /= 255 return X def dataGenerator(imgRGBArr, imgStatsArr, imgLabels, labelsBool=True, loopBool=True): batchsize = 32 datasetLength = len(imgRGBArr) while 1 and loopBool == True: for idx in range(0, datasetLength, batchsize): endIdx = idx+batchsize if endIdx > datasetLength: endIdx = datasetLength imgRGB = xceptionPreprocess(imgRGBArr[idx:idx+batchsize]) imgStat = imgStatsArr[idx:idx+batchsize] labels = imgLabels[idx:idx+batchsize] if labelsBool == True: yield ({'xception_input': imgRGB, 'aux_input': imgStat}, {'output': labels}) else: yield ({'xception_input': imgRGB, 'aux_input': imgStat}) def getLabelDistribution(labels, labelNameArray): labelCount = [ np.sum(labels[:,i]) for i in range(0, len(labels[0])) ] labelNameCount = {key: val for key, val in zip(labelNameArray, labelCount)} return labelNameCount, labelCount def getPrecision(labels, predictions): # False positive is a negative label but positive prediction Tp = float(0) Fp = float(0) for label, prediction in zip(labels, predictions): try: len(label) except: label = [label] prediction = [prediction] for idx in range(0, len(label)): if label[idx]==1 and prediction[idx]==1: Tp += 1 if label[idx]==0 and prediction[idx]==1: Fp += 1 if Tp+Fp == 0: 
return 0 return (Tp / ( Tp + Fp )) def getRecall(labels, predictions): # False negative is a positive label but negative prediction Tp = float(0) Fn = float(0) for label, prediction in zip(labels, predictions): try: len(label) except: label = [label] prediction = [prediction] for idx in range(0, len(label)): if label[idx]==1 and prediction[idx]==1: Tp += 1 if label[idx]==1 and prediction[idx]==0: Fn += 1 if Tp+Fn == 0: return 0 return (Tp / ( Tp + Fn )) assert_label = [ [0,0,0], [0,1,0], [0,1,0] ] assert_pred = [ [0,0,0], [0,0,1], [1,1,0] ] assert getPrecision(assert_label, assert_pred) == float(1)/3 assert getRecall(assert_label, assert_pred) == 0.5 assert_label2 = [[0], [1], [1]] assert_pred2 = [[0], [1], [0]] assert getPrecision(assert_label2, assert_pred2) == 1.0 assert getRecall(assert_label2, assert_pred2) == 0.5 def getStatistics(labels, predictions, labelNames): precision = [ getPrecision(labels[:, col], predictions[:, col]) for col in range(0, len(labels[0])) ] recall = [ getRecall(labels[:, col], predictions[:, col]) for col in range(0, len(labels[0])) ] f1 = [ f1_score(labels[:, col], predictions[:, col]) for col in range(0, len(labels[0])) ] precision = np.array(precision) recall = np.array(recall) labelPR = {labelName: (precision[idx], recall[idx]) for idx, labelName in enumerate(labelNames)} return labelPR, precision, recall, f1 def errorAnalyticsBarGraph(test_labels, test_predictions, labels): _, labelCounts = getLabelDistribution(test_labels, labels) labelPercentage = np.array( [ np.array([ count / np.sum(labelCounts) ]) for count in labelCounts ] ) _, precision, recall, f1 = getStatistics(test_labels, test_predictions, labels) plt.rcParams['figure.figsize'] = (14, 8) fig, ax = plt.subplots() index = np.arange(len(labels)) bar_width = 0.20 opacity = 0.8 rects1 = plt.bar(index, f1, bar_width, alpha=opacity, color='#6A93C6', label='F1') rects2 = plt.bar(index + bar_width, precision, bar_width, alpha=opacity, color='#C3C2BD', label='Precision') rects3 
= plt.bar(index + bar_width + bar_width, recall, bar_width, alpha=opacity, color='#DFDFE2', label='Recall') rects4 = plt.bar(index + bar_width + bar_width + bar_width, labelPercentage, bar_width, alpha=opacity, color='#7BE686', label='Percentage') plt.xlabel('Label') plt.ylabel('Scores') plt.title('Scores by Label') plt.xticks(rotation=70, fontsize=14, fontweight='bold') plt.xticks(index + bar_width, (label for label in labels)) plt.yticks(fontsize=14, fontweight='bold') plt.legend() plt.tight_layout() plt.show() # + # Setting random_seed = 0 random.seed(random_seed) npRandomSeed = np.random.seed(random_seed) imagesize = 299 cutOff = 0.25 # Load data folderpath = os.getcwd() + '/' train_path = folderpath+'train-jpg/' test_path = folderpath+'test-jpg/' train = pd.read_csv(folderpath+'train.csv') test = pd.read_csv(folderpath+'sample_submission_v2.csv') # + print('Extracting Dataset Features') rerun = False if rerun == True: train_ImgRaw = get_raw(train, train_path) # train_ImgEdge = getEdges(train, train_path) # train_ImgLine = getLines(train, train_path) # train_ImgCorner = getCorners(train, train_path) train_ImgStats = extract_features(train, train_path) data_dic = {'pickleImgRaw': train_ImgRaw, # 'pickleImgEdge': train_ImgEdge, # 'pickleImgLine': train_ImgLine, # 'pickleImgCorner': train_ImgCorner, 'pickleImgStats': train_ImgStats } for key in data_dic: save_array(folderpath+key, data_dic[key]) else: train_ImgRaw = load_array('pickleImgRaw') # train_ImgEdge = load_array('pickleImgEdge') # train_ImgLine = load_array('pickleImgLine') # train_ImgCorner = load_array('pickleImgCorner') train_ImgStats = load_array('pickleImgStats') # Image RGB Features X_img = xceptionPreprocess(train_ImgRaw) # X_img = train_ImgRaw train_ImgRaw = 0 # Image Statistics features X_stats = train_ImgStats.astype('float32') scaler = preprocessing.StandardScaler().fit(X_stats) pickle.dump(scaler, open(folderpath+'scaler', 'wb')) X_stats = scaler.transform(X_stats) # + # # Reviewing image 
# features
# imgidx = 62
# plt.subplot(131),plt.imshow(train_ImgRaw[imgidx] )
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(132),plt.imshow(train_ImgLine[imgidx] ,cmap = 'gray')
# plt.title('line Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(133),plt.imshow(train_ImgCorner[imgidx] ,cmap = 'gray')
# plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
# plt.show()
# +
# print('Setup Dataset Labels')
# Build the one-hot target matrix y (n_samples x 17) from the space-separated
# tag strings. The label order is fixed so runs are comparable.
y_train = []
# flatten = lambda l: [item for sublist in l for item in sublist]
# labels = np.array(list(set(flatten([l.split(' ') for l in train['tags'].values]))))
labels = np.array(['clear', 'partly_cloudy', 'cloudy', 'haze', 'primary',
                   'water', 'bare_ground', 'agriculture', 'cultivation',
                   'habitation', 'road', 'conventional_mine', 'artisinal_mine',
                   'selective_logging', 'slash_burn', 'blooming', 'blow_down'])
label_map = {l: i for i, l in enumerate(labels)}
inv_label_map = {i: l for l, i in label_map.items()}
for tags in train.tags.values:
    targets = np.zeros(17)
    for t in tags.split(' '):
        targets[label_map[t]] = 1
    y_train.append(targets)
y = np.array(y_train).astype('float32')
# -
# +
# Multi run averaging of random forest results: shuffle, split 80/10/10,
# fit, and collect thresholded test predictions per run.
X_stats = np.hstack((train_ImgStats,
                     train_ImgEdge.reshape(-1, 1),
                     train_ImgLine.reshape(-1, 1),
                     train_ImgCorner.reshape(-1, 1)))
# X_stats = train_ImgStats
numberRuns = 5
runResultsArr = []
for _ in range(numberRuns):
    randArr = np.array(range(len(y)))
    np.random.shuffle(randArr)
    X_shuffled = X_stats[randArr]
    y_shuffled = y[randArr]
    train_dataset_stats, valid_dataset_stats, test_dataset_stats = splitSet(X_shuffled, 0.8, 0.9)
    train_labels, valid_labels, test_labels = splitSet(y_shuffled, 0.8, 0.9)
    clf = RandomForestClassifier(n_estimators=100)
    clf = clf.fit(train_dataset_stats, train_labels)
    test_predictions = [clf.predict(test_chip.reshape(1, -1))[0]
                        for test_chip in tqdm(test_dataset_stats, mininterval=10)]
    test_predictions_threshold = np.array(test_predictions).astype('int')
    runResultsArr.append({'prediction': test_predictions_threshold,
                          'labels': test_labels})
# Fix: close the pickle handle (the original open() call leaked it).
with open(folderpath + 'rf_rgbstats_results', 'wb') as f:
    pickle.dump(runResultsArr, f)
# -
# +
check = ModelCheckpoint("weights.{epoch:02d}-{val_acc:.5f}.hdf5",
                        monitor='val_acc', verbose=1, save_best_only=True,
                        save_weights_only=True, mode='auto')
earlyStop = EarlyStopping(monitor='val_loss')
tensorBoard = TensorBoard(log_dir='./logs')


def fbetaAccuracy(y_true, y_pred):
    """F2 score averaged per sample (the competition metric)."""
    return fbeta_score(y_true, y_pred, 2, average='samples')


def setupModel():
    """Frozen ImageNet Xception trunk + RGB-stats aux input -> 17 sigmoid outputs.

    Returns a compiled two-input Keras Model ('xception_input', 'aux_input').
    """
    base_model = Xception(include_top=False, weights='imagenet',
                          input_tensor=None, input_shape=(299, 299, 3))
    base_model.layers[0].name = 'xception_input'
    # Freeze the pretrained trunk; only the new head is trained.
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(2048, activation='relu', name='xception_output')(x)
    x = Dropout(0.5)(x)
    auxiliary_input = Input(shape=(18,), name='aux_input')
    x = concatenate([x, auxiliary_input])
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(17, activation='sigmoid', name='output')(x)
    model = Model(inputs=[base_model.input, auxiliary_input], outputs=predictions)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
# +
# Multi run averaging of CNN results
numberRuns = 1
runResultsArr = []
for _ in range(numberRuns):
    randArr = np.array(range(len(y)))
    np.random.shuffle(randArr)
    X_img = X_img[randArr]
    X_stats = X_stats[randArr]
    y = y[randArr]
    train_valid_split = 0.50
    valid_test_split = 0.75
    train_dataset_img, valid_dataset_img, test_dataset_img = splitSet(X_img, train_valid_split, valid_test_split)
    train_dataset_stats, valid_dataset_stats, test_dataset_stats = splitSet(X_stats, train_valid_split, valid_test_split)
    train_labels, valid_labels, test_labels = splitSet(y, train_valid_split, valid_test_split)
    model = setupModel()
    model.fit([train_dataset_img, train_dataset_stats], train_labels,
              batch_size=128, epochs=3,
              callbacks=[check, earlyStop, tensorBoard],
              validation_data=([valid_dataset_img, valid_dataset_stats], valid_labels))
    valid_predictions = model.predict([valid_dataset_img, valid_dataset_stats],
                                      batch_size=64, verbose=1)
    test_predictions = model.predict([test_dataset_img, test_dataset_stats],
                                     batch_size=64, verbose=1)
    # Hard-threshold the sigmoid outputs at the global cutOff.
    test_predictions_threshold = np.copy(test_predictions)
    test_predictions_threshold[test_predictions_threshold < cutOff] = 0
    test_predictions_threshold[test_predictions_threshold >= cutOff] = 1
    test_predictions_threshold = test_predictions_threshold.astype('int')
    test_labels = test_labels.astype('int')
    runResultsArr.append({'prediction': test_predictions_threshold,
                          'labels': test_labels})
with open(folderpath + 'cnn_rawrgb_results', 'wb') as f:
    pickle.dump(runResultsArr, f)
# model.save(folderpath+'my_model_yesrgbstats2.h5')
# model = load_model('my_model.h5')
# model.load_weights(folderpath+'weights.03-0.95024.hdf5')
# -
valid_predictions = model.predict([valid_dataset_img, valid_dataset_stats],
                                  batch_size=64, verbose=1)
test_predictions = model.predict([test_dataset_img, test_dataset_stats],
                                 batch_size=64, verbose=1)
thresholdSetData = {'valid_predictions': valid_predictions,
                    'valid_labels': valid_labels,
                    'test_predictions': test_predictions,
                    'test_labels': test_labels}
with open(folderpath + 'thresholdSetData', 'wb') as f:
    pickle.dump(thresholdSetData, f)
print(valid_predictions[0])
print(test_predictions[0])
with open(folderpath + 'thresholdSetData', 'rb') as f:
    data = pickle.load(f)
# +
def customThresholdEval(dataset, thresholdArr):
    """Binarize each prediction column with its own cutoff.

    Args:
        dataset: (n_samples, n_labels) probability matrix.
        thresholdArr: per-column cutoffs, len == n_labels.

    Returns:
        int 0/1 matrix of the same shape.
    """
    holding = []
    for idx, cutoff in enumerate(thresholdArr):
        datasetCol = np.copy(dataset[:, idx])
        datasetCol[datasetCol < cutoff] = 0
        datasetCol[datasetCol >= cutoff] = 1
        holding.append(datasetCol.reshape(-1, 1))
    return np.hstack(tuple(holding)).astype('int')


def basicThresholdEval(dataset, cutoff):
    """Binarize every prediction column with one shared cutoff."""
    test_predictions_threshold = np.copy(dataset)
    test_predictions_threshold[test_predictions_threshold < cutoff] = 0
    test_predictions_threshold[test_predictions_threshold >= cutoff] = 1
    return test_predictions_threshold.astype('int')
# +
def findBestCutoff(labels, predictions):
    """Greedy per-label cutoff search on a validation set.

    Scans cutoffs 0.00..0.99 and stops at the first accuracy drop
    (assumes a unimodal accuracy curve).

    Fix: the original appended the loop variable after `break`, i.e. the
    first cutoff whose score DROPPED (or 0.99 when the loop completed) —
    one step past the best. We now record the cutoff that actually
    achieved the best score.
    """
    thresholdArr = []
    for col in range(0, labels.shape[1]):
        bestScore = 0
        bestCutoff = 0.0
        for cutoff in np.arange(0, 1, 0.01):
            prediction_thresholded = customThresholdEval(
                predictions[:, col].reshape(-1, 1), [cutoff])
            FBeta_val = accuracy_score(labels[:, col], prediction_thresholded)
            # print(col, FBeta_val)
            if FBeta_val >= bestScore:
                bestScore = FBeta_val
                bestCutoff = cutoff
            else:
                break
        thresholdArr.append(bestCutoff)
    return thresholdArr


valid_labels = data['valid_labels']
valid_predictions = data['valid_predictions']
thresholdArr = findBestCutoff(valid_labels, valid_predictions)
print(thresholdArr)
# -
testidx = 105
print(test_labels[testidx])
print(basicThresholdEval(test_predictions, 0.1)[testidx])
print(1, fbeta_score(test_labels, basicThresholdEval(test_predictions, 0.5), 2, average='samples'))
print(2, fbeta_score(test_labels, customThresholdEval(test_predictions, thresholdArr), 2, average='samples'))
# Cutoffs frozen from an earlier findBestCutoff run.
thresholdArr = [0.14000000000000001, 0.67000000000000004, 0.92000000000000004,
                0.23000000000000001, 0.070000000000000007, 0.23000000000000001,
                0.44, 0.23000000000000001, 0.47999999999999998,
                0.29999999999999999, 0.23000000000000001, 0.98999999999999999,
                0.32000000000000001, 0.38, 0.98999999999999999,
                0.23999999999999999, 0.98999999999999999]
# +
# Analytics
fBetaArr = []
for run in runResultsArr:
    fBetaArr.append(fbeta_score(run['labels'], run['prediction'], 2, average='samples'))
combinedPredictions = np.vstack(tuple([run['prediction'] for run in runResultsArr]))
combinedLabels = np.vstack(tuple([run['labels'] for run in runResultsArr]))
errorAnalyticsBarGraph(combinedLabels, combinedPredictions, labels)
print([float("%.5f" % i) for i in fBetaArr])
print("%.5f" % fbeta_score(combinedLabels, combinedPredictions, 2, average='samples'))
# -
# Labels
# ['selective_logging', 'conventional_mine', 'partly_cloudy',
# 'artisinal_mine', 'haze', 'slash_burn', 'primary', 'clear',
# 'bare_ground', 'blooming', 'water', 'road', 'cloudy',
# 'habitation',
# 'agriculture', 'blow_down', 'cultivation']
#
# Training set label distribution
# {'slash_burn': 209.0, 'blooming': 332.0, 'water': 7262.0, 'cloudy': 2330.0, 'selective_logging': 340.0,
# 'road': 8076.0, 'primary': 37840.0, 'clear': 28203.0, 'haze': 2695.0, 'agriculture': 12338.0, 'cultivation': 4477.0,
# 'partly_cloudy': 7251.0, 'bare_ground': 859.0, 'conventional_mine': 100.0, 'artisinal_mine': 339.0,
# 'habitation': 3662.0, 'blow_down': 98.0}
#
# Run Notes
#
# Iter1.
# loss: 0.2231 - acc: 0.9143 Single epoch, 80% training data
#
# Iter2.
# loss: 0.2166 - acc: 0.9166 Modified scaling of inputs to subtract 127 and divide by 255
#
# Iter3.
# loss: 0.2028 - acc: 0.9234 Changed convolutions to 64, 64, 128, 128, while adding an additional 512 dense layer. Also switched to the RMSprop optimizer
#
# Iter4.
# val_loss: 0.1344 - val_acc: 0.9502 Using new model and RMSprop, trained for roughly 5 epochs. Likely due to better learning rate decay definition. Previously with SGD, it slowed down excessively towards the end of the epoch.
#
# Rerunning baseline with 80% data and one epoch got val_loss: 0.1750 - val_acc: 0.9341. Trained model for 5 epochs on all data and got a submission score of 0.86942.
#
# Iter5.
# Utilizing Xception model and fine-tuning the output layer. Final for standard 80%/single_epoch had accuracy of around ~0.93. But error analytics show much better results for bare ground, cultivation, habitation, artisanal mining, water, and road.
#
# Preprocessing the input using the provided preprocessor improved the result to val_loss: 0.1274 - val_acc: 0.9516
#
# Iter6.
# Training all data with the latest model provided a result of loss: 0.1074 - acc: 0.9585. The submission set scored at 0.89095
#
# Iter7.
# Increased last dense layer to four times the number of nodes, to 4096. However, it failed to yield markedly better results, achieving only loss: 0.1145 - acc: 0.9554 at the third epoch. In addition, accuracy actually deteriorated on the 2nd epoch.
#
# Iter8.
# Tried adding an additional dense layer of 1024 nodes to the end. However, it also failed to yield better results, achieving loss: 0.1151 - acc: 0.9557 - val_loss: 0.1203 - val_acc: 0.9545 after four epochs. With loss: 0.1639 - acc: 0.9382 - val_loss: 0.1285 - val_acc: 0.9513 after one epoch.
#
# Iter9.
# Tried incorporating RGB statistics, which claimed loss: 0.1370 - acc: 0.9486 - val_loss: 0.1292 - val_acc: 0.9509 after one epoch. But the test set analysis looked terrible; it's probably overfitting and failing to generalize.
#
# Actually, running it a second time but scaling across the entire statistics dataset then breaking down into training/valid/test sets, rather than scaling based on the training set and applying it to the valid/test sets, resulted in loss: 0.1370 - acc: 0.9486 - val_loss: 0.1292 - val_acc: 0.9509 with test set error statistics that looked much better. Potentially I made a mistake in applying the preprocessing on the test set, since the validation accuracy was also good. Either way it's looking promising after doing so well after a single epoch. Will try training for 5 epochs on the entire dataset.
#
# After 5 epochs on the entire dataset achieved loss: 0.0987 - acc: 0.9619 with consistent incremental improvement from each epoch. Will try training for longer. Model predictions on the submission set resulted in an almost 1% improvement, 0.89886
#
# After another additional 5 epochs it achieved an improvement of loss: 0.0810 - acc: 0.9682. However, when applied to the submission set, the score dipped slightly to 0.89420, suggesting that it's overfitting.
#
# Iter10.
# Testing using RGB histogram instead of aggregate statistics. 80% data and one epoch resulted in: loss: 0.1635 - acc: 0.9370 - val_loss: 0.1290 - val_acc: 0.9509. After five epochs resulted in: loss: 0.1109 - acc: 0.9569 - val_loss: 0.1207 - val_acc: 0.9546 with very little improvement from epoch one.
#
# Iter11.
# Using previous Xception and RGB-histogram setup, but increasing the initial dense layer to 2048 (matching the original literature), adding dropout, and adding an additional dense layer to the end. Performance capped out around loss: 0.1345 - acc: 0.9504 - val_loss: 0.1386 - val_acc: 0.9518
#
# Review Iter.
# Pulling back and reviewing
# Try converting the image into RGB histograms and merging that with the final dense layer
#
# Potential additions: edge and line analysis can be combined with RGB statistics.
#
# Canny edge analysis and count how many 1s are there.
#
# Line edge analysis and count how many 1s are there.
#
# Corner analysis and count how many 1s are there.
#
# Modify RGB statistics to Purple, Blue, Green, Yellow, Red, Brown, White, Black?
#
# Check misclassification statistics
#
# Utilize an ensemble algorithm: maybe a random forest for color + edge statistics, and a separate model like a CNN trained specifically to look for specific labels like blow_down. This image-feature algorithm may potentially use artificially generated data.
model = load_model(folderpath + 'final_model.h5')
# model = setupModel()

##### SUBMISSION RUN #####
# Making Final Predictions using all training data
model.fit([X_img, X_stats], y, batch_size=128, epochs=3)
model.save(folderpath + 'final_model.h5')
# model = load_model(folderpath+'final_model.h5')
# +
# print('Making submission predictions')
rerun = True
if rerun:
    # Recompute submission-set features from the raw JPEGs and cache them.
    submission_ImgRaw = get_raw(test, test_path)
#     submission_ImgEdge = getEdges(test, test_path)
#     submission_ImgLine = getLines(test, test_path)
#     submission_ImgCorner = getCorners(test, test_path)
    submission_ImgStats = extract_features(test, test_path)
    data_dic = {'submissionPickleImgRaw': submission_ImgRaw,
#                 'submissionPickleImgEdge': submission_ImgEdge,
#                 'submissionPickleImgLine': submission_ImgLine,
#                 'submissionPickleImgCorner': submission_ImgCorner,
                'submissionPickleImgStats': submission_ImgStats}
    for key in data_dic:
        save_array(folderpath + key, data_dic[key])
else:
    submission_ImgRaw = load_array('submissionPickleImgRaw')
#     submission_ImgEdge = load_array('submissionPickleImgEdge')
#     submission_ImgLine = load_array('submissionPickleImgLine')
#     submission_ImgCorner = load_array('submissionPickleImgCorner')
    submission_ImgStats = load_array('submissionPickleImgStats')
# -
# Fix: close the pickle handle (the original pickle.load(open(...)) leaked it).
with open(folderpath + 'scaler', 'rb') as f:
    scaler = pickle.load(f)


def batchSet(dataset, batches):
    """Split `dataset` into at most `batches` consecutive chunks.

    Fix: the original used floor division for the chunk size, which
    (a) raised ValueError (range step 0) whenever len(dataset) < batches
    and (b) could emit batches+1 chunks. Ceiling division with a floor of
    1 avoids both; the concatenation of the chunks is unchanged.
    """
    stepSize = max(1, -(-len(dataset) // batches))  # ceil division
    return [dataset[idx:idx + stepSize]
            for idx in range(0, len(dataset), stepSize)]


submission_subsetRGBArr = batchSet(submission_ImgRaw, 10)
submission_subsetStatsArr = batchSet(submission_ImgStats, 10)
# +
# Predict in chunks so the full submission set never sits in GPU memory at once.
submission_predictions = []
for idx in range(0, len(submission_subsetRGBArr)):
    subSetRGB = xceptionPreprocess(submission_subsetRGBArr[idx])
    subSetStats = scaler.transform(submission_subsetStatsArr[idx])
    submission_subSetPredictions = model.predict([subSetRGB, subSetStats],
                                                 batch_size=64, verbose=1)
    submission_predictions.append(submission_subSetPredictions)
# -
submission_predictionsCombined = np.vstack(tuple(submission_predictions))
# Cutoffs frozen from an earlier findBestCutoff run, capped at 0.25 below.
thresholdArr = [0.14000000000000001, 0.67000000000000004, 0.92000000000000004,
                0.23000000000000001, 0.070000000000000007, 0.23000000000000001,
                0.44, 0.23000000000000001, 0.47999999999999998,
                0.29999999999999999, 0.23000000000000001, 0.98999999999999999,
                0.32000000000000001, 0.38, 0.98999999999999999,
                0.23999999999999999, 0.98999999999999999]
maxThresholdArr = [i if i < 0.25 else 0.25 for i in thresholdArr]
print(maxThresholdArr)
submission_predictions_thresholded = customThresholdEval(
    submission_predictionsCombined, maxThresholdArr)
# +
# Rows are already 0/1 ints, so `> 0.2` simply selects the positive labels.
predictionLabels = [' '.join(labels[row > 0.2])
                    for row in submission_predictions_thresholded]
subm = pd.DataFrame()
subm['image_name'] = test.image_name.values
subm['tags'] = predictionLabels
subm.to_csv(folderpath + 'submission.csv', index=False)
# -
.ipynb_checkpoints/Keras9-checkpoint.ipynb