code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- jupytext notebook: "Logratios for Average Compositions" ---
# %matplotlib inline

# +
import numpy as np
import pandas as pd
from pyrolite.comp.codata import ilr, inverse_ilr, close
from pyrolite.util.synthetic import random_cov_matrix
import matplotlib.pyplot as plt
from pyrolite.plot import pyroplot

np.random.seed(82)
# -


def random_compositional_trend(m1, m2, c1, c2, resolution=20, size=1000):
    """
    Generate a compositional trend between two compositions with
    independent variances.
    """
    # Interpolate the two endpoint means in ilr (logratio) space.
    endpoints = np.vstack(
        [ilr(close(m1)).reshape(1, -1), ilr(close(m2)).reshape(1, -1)]
    )
    means = np.apply_along_axis(lambda v: np.linspace(*v, resolution), 0, endpoints)
    # Interpolate the covariance matrices between c1 and c2.
    cov_endpoints = np.vstack([c1.reshape(1, -1), c2.reshape(1, -1)])
    covs = np.apply_along_axis(lambda v: np.linspace(*v, resolution), 0, cov_endpoints)
    covs = covs.reshape(covs.shape[0], *c1.shape)
    # Draw samples around each intermediate mean, then stack and map the
    # whole trend back to compositional space.
    samples = np.vstack(
        [
            np.random.multivariate_normal(mu.flatten(), covs[ix], size=size // resolution)
            for ix, mu in enumerate(means)
        ]
    )
    return inverse_ilr(samples)


# First we create an array of compositions which represent a trend.
# +
m1, m2 = np.array([[0.3, 0.1, 2.1]]), np.array([[0.5, 2.5, 0.05]])
c1, c2 = (
    random_cov_matrix(2, sigmas=[0.15, 0.05]),
    random_cov_matrix(2, sigmas=[0.05, 0.2]),
)
trend = pd.DataFrame(
    random_compositional_trend(m1, m2, c1, c2, resolution=100, size=5000),
    columns=["A", "B", "C"],
)
# -

# Visualise the compositional trend with a density plot.
ax = trend.pyroplot.density(mode="density", vmin=0.1, bins=100)
plt.show()

# Where the column-wise arithmetic mean falls:
geomean = trend.mean(axis=0).to_frame().T
ax = geomean.pyroplot.scatter(ax=ax, marker="o", color="r", zorder=2, label="GeoMean")
plt.show()

# Where the logratio mean (mean taken in ilr space) falls:
ilrmean = pd.DataFrame(
    inverse_ilr(np.nanmean(ilr(trend.values), axis=0)[np.newaxis, :])
)
ax = ilrmean.pyroplot.scatter(ax=ax, marker="D", color="k", label="LogMean")
plt.show()
docs/source/examples/comp/logratios.ipynb
# --- jupytext notebook: "Corpus Validation" ---
# Clean and valid data is essential for successful machine learning. The
# `validation` module provides different methods for validating a corpus
# on specific properties.

import audiomate
from audiomate.corpus import assets
from audiomate.corpus import io
from audiomate.corpus import validation

# Clear the data if it already exists from a previous run.
import shutil
shutil.rmtree('output/fsd', ignore_errors=True)

# ## Data
# Download the Free-spoken-digit corpus and load it.
# +
corpus_path = 'output/fsd'
io.FreeSpokenDigitDownloader().download(corpus_path)
corpus = audiomate.Corpus.load(corpus_path, reader='free-spoken-digits')
# -

# ## Perform validation and print result
# Either perform a single validation task ...
# +
val = validation.UtteranceTranscriptionRatioValidator(
    max_characters_per_second=6,
    label_list_idx=assets.LL_WORD_TRANSCRIPT,
)
result = val.validate(corpus)
print(result.get_report())
# -

# ... or combine multiple validation tasks to run in one go.
# +
val = validation.CombinedValidator(validators=[
    validation.UtteranceTranscriptionRatioValidator(
        max_characters_per_second=6,
        label_list_idx=assets.LL_WORD_TRANSCRIPT,
    ),
    validation.LabelCountValidator(
        min_number_of_labels=1,
        label_list_idx=assets.LL_WORD_TRANSCRIPT,
    ),
])
result = val.validate(corpus)
print(result.get_report())
# -
examples/validation.ipynb
# --- jupytext notebook: CNN activation-map visualisation (AIND) ---
# In this notebook, we visualize four activation maps in a CNN layer.

# ### 1. Import the Image
# +
import cv2
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'images/udacity_sdc.png'

# load color image (OpenCV reads as BGR)
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# resize to 30% of the original size.
# FIX: scipy.misc.imresize was removed in SciPy >= 1.3; cv2.resize with
# fx/fy scale factors is the drop-in replacement.
small_img = cv2.resize(gray_img, (0, 0), fx=0.3, fy=0.3)
# rescale entries to lie in [0, 1]
small_img = small_img.astype("float32") / 255

# plot image
plt.imshow(small_img, cmap='gray')
plt.show()
# -

# ### 2. Specify the Filters
# +
# TODO: Feel free to modify the numbers here, to try out another filter!
# Please don't change the size of the array ~ :D
filter_vals = np.array([[-1, -1, 1, 1],
                        [-1, -1, 1, 1],
                        [-1, -1, 1, 1],
                        [-1, -1, 1, 1]])

### do not modify the code below this line ###

# define four filters: the base filter, its negation, and their transposes
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = [filter_1, filter_2, filter_3, filter_4]

# visualize all filters with each cell value annotated
fig = plt.figure(figsize=(10, 5))
for i in range(4):
    ax = fig.add_subplot(1, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(filters[i], cmap='gray')
    ax.set_title('Filter %s' % str(i + 1))
    width, height = filters[i].shape
    for x in range(width):
        for y in range(height):
            ax.annotate(str(filters[i][x][y]), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if filters[i][x][y] < 0 else 'black')
# -

# ### 3. Visualize the Activation Maps for Each Filter
# +
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D

# plot image
plt.imshow(small_img, cmap='gray')

# define a neural network with a single convolutional layer with one filter
model = Sequential()
model.add(Convolution2D(1, (4, 4), activation='relu',
                        input_shape=(small_img.shape[0], small_img.shape[1], 1)))


def apply_filter(img, index, filter_list, ax):
    """Load filter_list[index] into the conv layer and draw the resulting
    activation map on *ax*.

    BUG FIX: the original body indexed ``filter_list[i]`` with the loop
    variable ``i`` leaked from the caller's scope instead of using the
    ``index`` parameter, so the function only worked by accident.
    """
    # set the weights of the filter in the convolutional layer
    model.layers[0].set_weights(
        [np.reshape(filter_list[index], (4, 4, 1, 1)), np.array([0])]
    )
    # plot the corresponding activation map
    ax.imshow(
        np.squeeze(model.predict(np.reshape(img, (1, img.shape[0], img.shape[1], 1)))),
        cmap='gray',
    )


# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
    ax = fig.add_subplot(1, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(filters[i], cmap='gray')
    ax.set_title('Filter %s' % str(i + 1))

# visualize all activation maps
fig = plt.figure(figsize=(20, 20))
for i in range(4):
    ax = fig.add_subplot(1, 4, i + 1, xticks=[], yticks=[])
    apply_filter(small_img, i, filters, ax)
    ax.set_title('Activation Map for Filter %s' % str(i + 1))
# -
conv-visualization/conv_visualization.ipynb
# --- jupytext notebook: bqplot histogram examples ---
import numpy as np
import bqplot.pyplot as plt

# ## Basic Histogram
np.random.seed(0)
x_data = np.random.randn(100)

figure = plt.figure(padding_y=0)
histogram = plt.hist(x_data)
figure

histogram.count  # number of elements per bin

# Changing the number of bins.
histogram.bins = 20

# ## Properties of Histogram
# Normalizing the count.
figure = plt.figure(padding_y=0)
histogram = plt.hist(x_data, normalized=True)
figure

# Changing the color.
histogram.colors = ["orangered"]

# Stroke and opacity update.
histogram.stroke = "orange"
histogram.opacities = [0.5] * histogram.bins

# ## Read-only properties of Histogram
figure = plt.figure(padding_y=0)
histogram = plt.hist(x_data, normalized=True)
figure

# `count` is the number of elements in each interval.
histogram.count

# `midpoints` are the mid points of each interval.
histogram.midpoints
examples/Marks/Pyplot/Hist.ipynb
# --- jupytext notebook: logistic regression from scratch ---
import numpy as np
from sklearn.datasets import make_blobs


# +
class LogisticRegression:
    """Binary logistic regression trained with batch gradient descent.

    BUG FIX: ``cost`` and ``gradient`` previously read the module-level
    globals ``X`` and ``y`` rather than the data passed to ``run``, so the
    class silently broke when used with any other variable names.  The
    training data is now stored on the instance when ``run`` is called;
    all public method signatures are unchanged.
    """

    def __init__(self, X):
        self.lr = 0.01        # learning rate
        self.epochs = 1000    # number of gradient-descent iterations
        self.m, self.n = X.shape
        self.weights = np.zeros((self.n, 1))
        self.bias = 0

    def sigmoid(self, z):
        """Elementwise logistic function 1 / (1 + e^-z)."""
        return 1 / (1 + np.exp(-z))

    def cost(self, y_predict):
        """Binary cross-entropy of predictions against the stored labels."""
        y = self._y
        return (-1 / self.m
                * np.sum(y * np.log(y_predict) + (1 - y) * np.log(1 - y_predict)))

    def gradient(self, y_predict):
        """Return (dw, db): gradients of the cost w.r.t. weights and bias."""
        dw = 1 / self.m * np.dot(self._X.T, (y_predict - self._y))
        db = 1 / self.m * np.sum(y_predict - self._y)
        return dw, db

    def run(self, X, y):
        """Fit on (X, y) and return the learned (weights, bias)."""
        # Keep references so cost/gradient no longer depend on globals.
        self._X, self._y = X, y
        for epoch in range(self.epochs + 1):
            y_predict = self.sigmoid(np.dot(X, self.weights) + self.bias)
            cost = self.cost(y_predict)
            dw, db = self.gradient(y_predict)
            self.weights -= self.lr * dw
            self.bias -= self.lr * db
            # With epochs=1000 this prints at iteration 0 and 1000 only.
            if epoch % 1000 == 0:
                print(f'Cost after iteration {epoch} : {cost}')
        return self.weights, self.bias

    def predict(self, X):
        """Return boolean class predictions (probability > 0.5)."""
        y_predict = self.sigmoid(np.dot(X, self.weights) + self.bias)
        y_predict_labels = y_predict > 0.5
        return y_predict_labels


if __name__ == "__main__":
    np.random.seed(1)
    X, y = make_blobs(n_samples=1000, centers=2)
    y = y[:, np.newaxis]  # column vector so broadcasting matches predictions
    logreg = LogisticRegression(X)
    w, b = logreg.run(X, y)
    y_predict = logreg.predict(X)
    print(f'Accuracy : {np.sum(y == y_predict) / X.shape[0]}')
# -
Logistic_Regression/Day_2-Logistic_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib matplotlib.use("nbagg") import math import matplotlib.animation as anm import matplotlib.patches as patches import matplotlib.pyplot as plt import numpy as np # - class World: def __init__(self, debug=False): self.objects = [] self.debug = debug def append(self, obj): self.objects.append(obj) def draw(self): fig = plt.figure(figsize=(4, 4)) ax = fig.add_subplot(111) ax.set_aspect("equal") ax.set_xlim(-5, 5) ax.set_ylim(-5, 5) ax.set_xlabel("X", fontsize=10) ax.set_ylabel("Y", fontsize=10) elems = [] time_increment = 0.1 if self.debug: for i in range(1000): self.one_step(i, elems, ax) else: self.ani = anm.FuncAnimation( fig, self.one_step, fargs=(elems, ax), frames=100, interval=1000, repeat=False, ) plt.show() def one_step( self, i, elems, ax ): ### fig:one_step1 (27-29行目) fig:one_step2 (27-31行目) while elems: elems.pop().remove() elems.append( ax.text(-4.4, 4.5, "t = " + str(i), fontsize=10) ) # 座標ベタ書きが気になるなら変数に for obj in self.objects: # 追加 obj.draw(ax, elems) # 追加 class IdealRobot: def __init__(self, pose, color="black"): self.pose = pose self.r = 0.2 self.color = color def draw(self, ax, elems): ### fig:append_elements (7-13行目) x, y, theta = self.pose xn = x + self.r * math.cos(theta) yn = y + self.r * math.sin(theta) elems += ax.plot([x, xn], [y, yn], color=self.color) # elems += を追加 c = patches.Circle( xy=(x, y), radius=self.r, fill=False, color=self.color ) # c = を追加 elems.append(ax.add_patch(c)) # elem.appendで包む # + world = World(debug=True) robot1 = IdealRobot(np.array([2, 3, math.pi / 6]).T) # ロボットのインスタンス生成(色を省略) robot2 = IdealRobot(np.array([-2, -1, math.pi / 5 * 6]).T, "red") # ロボットのインスタンス生成(色を指定) world.append(robot1) # ロボットを登録 world.append(robot2) world.draw() # -
section_robot/ideal_robot4.ipynb
# --- jupytext notebook ---
# # Linear Regression with Spark ML (grid search + cross-validation)
# NOTE(review): the original title said "Gradient Boosting Regressor",
# but the pipeline below trains a LinearRegression model.
#
# ### Authors:
# - <NAME>
# - <NAME>

# +
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.feature import VectorAssembler
import requests
# https://www.silect.is/blog/2019/4/2/random-forest-in-spark-ml
from pyspark.sql import SparkSession
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import GBTRegressor
from pyspark.ml.tuning import CrossValidator
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import ParamGridBuilder
import numpy as np
import matplotlib.pyplot as plt
# -

spark_session = SparkSession\
    .builder\
    .appName("Spark Regression")\
    .getOrCreate()

# ### Load data in csv format
dataset = spark_session\
    .read\
    .format("csv")\
    .option("header", "true")\
    .load("data/data_file.csv",inferSchema = True)

dataset.printSchema()
dataset.show()

# ### Split data into train and test sets
# 70% for training and 30% for test; `randomSplit` does the partitioning.
(trainingData, testData) = dataset.randomSplit([0.7, 0.3])
print(trainingData.select("label").show(10))

# ### Training
# Spark models need a single vector column of features, so we create a
# `VectorAssembler` over every non-label column.
# +
feature_list = []
for col in dataset.columns:
    if col == 'label':
        continue
    else:
        feature_list.append(col)
assembler = VectorAssembler(inputCols=feature_list, outputCol="features")
assembler
# -

# Create the model, indicating the features and target (label) columns.
#lr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
lr = LinearRegression(featuresCol = 'features', labelCol = 'label',)

# Put the vector assembler and the model into a simple, two-stage ML
# pipeline: assemble the feature vector first, then train the model.
pipeline = Pipeline(stages=[assembler, lr])

# Build a `ParamGridBuilder` for grid-search-based model selection over
# `maxIter`, `regParam` and `elasticNetParam`.
paramGrid = ParamGridBuilder() \
    .addGrid(lr.maxIter, [int(x) for x in np.linspace(start = 5, stop = 30, num = 6)]) \
    .addGrid(lr.regParam, [float(x) for x in np.linspace(start = 0.1, stop = 0.9, num = 4)]) \
    .addGrid(lr.elasticNetParam, [float(x) for x in np.linspace(start = 0.01, stop = 1.0, num = 6)]) \
    .build()
#.addGrid(rf.maxBins, [int(x) for x in np.linspace(start = 32, stop = 64, num = 3)]) \

# Cross-validation needs the model (pipeline), the param grid and an
# evaluator; since this is a regression problem we use RMSE as the metric.
# +
#evaluator
evaluator = RegressionEvaluator(labelCol="label", predictionCol="prediction", metricName="rmse")

#crossvalidation
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=evaluator,
                          numFolds=3)
# -

# Train using cross-validation fit.
cvModel = crossval.fit(trainingData)

# ### Evaluation
# Get predictions from the held-out test data.
predictions = cvModel.transform(testData)
print(predictions.select("prediction", "label", "features").show(10))
print(trainingData.select("label").show(10))

# Evaluate the predictions and plot real price against predicted price;
# a diagonal would be the perfect result.
# +
rmse = evaluator.evaluate(predictions)

rfPred = cvModel.transform(dataset)
rfResult = rfPred.toPandas()

plt.plot(rfResult.label, rfResult.prediction, 'bo')
plt.xlabel('Close price')
plt.ylabel('Prediction')
plt.suptitle("Model Performance RMSE: %f" % rmse)
plt.show()

print("RMSE:",rmse)
# -

# #### Best Model
# The best model comes from the fitted pipeline: stage 0 is the
# VectorAssembler, stage 1 the regression model itself.
bestPipeline = cvModel.bestModel
bestModel = bestPipeline.stages[1]

bestModel.getRegParam()
bestModel.getMaxIter()
bestModel.getElasticNetParam()

# #### Summarize the model
# Print the coefficients and intercept for linear regression.
print("Coefficients: %s" % str(bestModel.coefficients))
print("Intercept: %s" % str(bestModel.intercept))

# Summarize the model over the training set and print out some metrics.
trainingSummary = bestModel.summary
print("numIterations: %d" % trainingSummary.totalIterations)
print("objectiveHistory: %s" % str(trainingSummary.objectiveHistory))
trainingSummary.residuals.show()
print("RMSE: %f" % trainingSummary.rootMeanSquaredError)
print("r2: %f" % trainingSummary.r2)
notebooks/Classification/linear-regresion/LinearRegression-csv.ipynb
# --- jupytext notebook ---
# # Training a CNN on CIFAR10
# This notebook follows the official PyTorch CNN tutorial:
# https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html

import torch
import torchvision
import torchvision.transforms as transforms

# Use the GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
device

# ### 1. Load and inspect the data
# +
# Normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

batch_size = 4
data_path = '../data/cifar10'

trainset = torchvision.datasets.CIFAR10(data_path, train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True)

testset = torchvision.datasets.CIFAR10(data_path, train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
# -

# ### 2. Define a Convolutional Neural Network
# +
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    # Four 3x3 conv layers (with 2x2 max-pooling after conv2 and conv4)
    # followed by three fully connected layers; 3x32x32 input, 10 class
    # scores out.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 8, 3)
        self.conv2 = nn.Conv2d(8, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(16, 32, 3)
        self.conv4 = nn.Conv2d(32, 64, 3)
        # after the conv/pool stack the feature map is 64 x 5 x 5
        self.fc1 = nn.Linear(64 * 5 * 5, 4096)
        self.fc2 = nn.Linear(4096, 1000)
        self.fc3 = nn.Linear(1000, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.pool(F.relu(self.conv4(x)))
        x = torch.flatten(x, 1)  # flatten all dims except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# -

# ### 4. Train the network
# +
import torch.optim as optim
import time

net = Net()
net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

start_time = time.time()
for epoch in range(2):

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        inputs = inputs.to(device)  # needed when using gpu
        labels = labels.to(device)  # needed when using gpu

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
            running_loss = 0.0

print('Finished Training')
print(f'Time: {time.time() - start_time:.1f}s')
# -

# ### 5. Test the network on the test data
# +
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images = images.to(device)  # needed when using gpu
        labels = labels.to(device)  # needed when using gpu
        # calculate outputs by running images through the network
        outputs = net(images)
        # the class with the highest energy is what we choose as prediction
        predicted = torch.argmax(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Accuracy of the network on the 10000 test images: {100 * correct // total} %')
# -
solutions/5_CNN_CIFAR10-Solution.ipynb
# --- jupytext notebook: Pareto chart of help-desk call reasons ---
# + colab (Colab badge / output metadata omitted)
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

# Load the help-desk data (expects `reason` and `frequency` columns).
df = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/HelpDesk.csv")
print(df)

# Sort by descending frequency and compute the cumulative percentage
# used for the Pareto curve.
df.index = df['reason']
df = df.sort_values(by='frequency', ascending=False)
df["cumulativePercentage"] = df["frequency"].cumsum() / df["frequency"].sum() * 100
df["cumulativePercentage"] = df["cumulativePercentage"].apply(lambda x: round(x, 2))
print(df)

fig, ax = plt.subplots()
ax.bar(df.index, df["frequency"], color="C0")
# FIX: pin the tick locations before replacing the labels.  Calling
# set_xticklabels without set_xticks emits a warning and can mislabel the
# bars if the locator later changes the tick positions.
ax.set_xticks(range(len(df.index)))
ax.set_xticklabels(df['reason'], rotation=45)

# Cumulative-percentage line on a secondary y axis.
ax2 = ax.twinx()
ax2.plot(df.index, df["cumulativePercentage"], color="C1", marker="D", ms=7)
ax2.yaxis.set_major_formatter(PercentFormatter())

# Color the tick labels to match each series.
ax.tick_params(axis="y", colors="C0")
ax2.tick_params(axis="y", colors="C1")
plt.show()
lab2.ipynb
# --- jupytext notebook: MLP on the HAR dataset (PyTorch) ---
# load libraries
# FIX: `from torch import np` was removed from PyTorch years ago; import
# NumPy directly instead.
import numpy as np
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

# +
# Three-layer MLP: 561 input features -> 7 activity classes.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(561, 200)
        self.fc2 = nn.Linear(200, 100)
        self.fc3 = nn.Linear(100, 7)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.005)
# -

# +
# load data and labels
xtrain = np.genfromtxt('X_train.txt')
xtest = np.genfromtxt('X_test.txt')
ytrain = np.genfromtxt('Y_train.txt')
ytest = np.genfromtxt('Y_test.txt')
# -

# +
# Convert data to tensors
xtrain1 = torch.from_numpy(xtrain).float()
ytrain1 = torch.from_numpy(ytrain).long()
xtest1 = torch.from_numpy(xtest).float()
ytest1 = torch.from_numpy(ytest).long()

# Create data iterators
import torch.utils.data as data_utils
train = data_utils.TensorDataset(xtrain1, ytrain1)
train_loader = data_utils.DataLoader(train, batch_size=50, shuffle=True)
test = data_utils.TensorDataset(xtest1, ytest1)
test_loader = data_utils.DataLoader(test, batch_size=1, shuffle=False)
# -

# Training over data
epochs = 50
error_log = np.zeros([epochs, 1])
for epoch in range(epochs):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels = data

        # wrap them in Variable (kept for compatibility; a no-op on
        # modern PyTorch)
        inputs, labels = Variable(inputs), Variable(labels)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        # FIX: loss.data[0] was removed in PyTorch 0.4; loss.item() is the
        # supported replacement.
        running_loss += loss.item()
        if i % 50 == 49:  # print every 50 mini-batches
            print('[%d, %5d] loss: %.5f' % (epoch + 1, i + 1, running_loss / 50))
        if i % 100 == 99:
            # NOTE(review): running_loss accumulates 100 batches here but
            # is divided by 50 -- preserved from the original.
            error_log[epoch] = running_loss / 50
            running_loss = 0.0
print('Finished Training')

# +
# Calculate accuracy
correct1 = 0
total1 = 0
for data in train_loader:
    features, labels = data
    outputs = net(Variable(features))
    _, predicted = torch.max(outputs.data, 1)
    total1 += labels.size(0)
    # FIX: .item() turns the 0-dim count tensor into a Python int so the
    # percentage below is a plain number, not a tensor.
    correct1 += (predicted == labels).sum().item()
print('Accuracy of the network on the train data: %d %%' % (100 * correct1 / total1))

correct = 0
total = 0
for data in test_loader:
    features, labels = data
    outputs = net(Variable(features))
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test data: %d %%' % (100 * correct / total))

# Visualize training loss over epochs
plt.plot(error_log)
plt.show()
# -
MLP_test_HAR_pyTorch.ipynb
# --- jupytext notebook: set up an xView2 debug run ---
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))

# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# -

from PIL import Image
import numpy as np
from tqdm import tqdm
import os


def mkdir(path):
    """Create *path* (with parents) if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)


# Directory layout for the current debug/release version.
USERDIR = '/home/catskills/Desktop'
DATADIR = f'{USERDIR}/dataxv2'
PROJECT = 'xview2-catskills'
CODEDIR = f'{USERDIR}/{PROJECT}'
VERSION = 'v_catskills_0.2.1'
MODEL_DIR = f'/home/catskills/Desktop/dataxv2/release/{VERSION}'
LOCALIZATION_MODEL = f'{MODEL_DIR}/localization.hdf5'
DAMAGE_MODEL = f'{MODEL_DIR}/classification.hdf5'
MEAN_MODEL = f'{CODEDIR}/weights/mean.npy'
VDIR = f'{DATADIR}/{VERSION}'
TESTDIR = f'{VDIR}/images'
INFER_DIR = f'{VDIR}/labels'
POLYDIR = f'{VDIR}/damage_input'
SUBMIT_DIR = f'{VDIR}/submit'
OUTPUT_CSV = f'{VDIR}/output.csv'
DAMAGE_JSON = f'{VDIR}/damage.json'
COMBINED_JSON = f'{VDIR}/combined_json'
DEBUGDIR = f"{DATADIR}/debug"

# Create the debug tree.
mkdir(DEBUGDIR)
for subdir in ['combined_json', 'damage_input', 'images', 'labels', 'prediction', 'gt_labels']:
    mkdir(f"{DEBUGDIR}/{subdir}")

TRAIN_DIR = f'{DATADIR}/train'

from glob import glob

# Training filenames look like:
#   guatemala-volcano_00000000_post_disaster.png
#   guatemala-volcano_00000000_post_disaster.json

# Collect the unique "<disaster>_<id>" prefixes of all training images.
fns = glob(f"{DATADIR}/train/images/*.png")
roots = sorted({'_'.join(fn.split('/')[-1].split('_')[0:2]) for fn in fns})

from shutil import copyfile
from tqdm import tqdm

fns[0]

# Copy each pre/post image and label into the debug tree under a
# normalized test_<mode>_<index> name.
for idx, root in tqdm(enumerate(roots)):
    for mode in ('pre', 'post'):
        for src_dir, ext, dst_dir in [('images', 'png', 'images'),
                                      ('labels', 'json', 'gt_labels')]:
            src = f"{DATADIR}/train/{src_dir}/{root}_{mode}_disaster.{ext}"
            dst = f"{DEBUGDIR}/{dst_dir}/test_{mode}_{idx:05d}.{ext}"
            copyfile(src, dst)
notebooks/Set up debug run.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="eAdPeqtCve3K" import matplotlib.pyplot as plt import seaborn as sns from tensorflow.keras.layers import Dense,RepeatVector, LSTM, Dropout from tensorflow.keras.layers import Flatten, Conv1D, MaxPooling1D from tensorflow.keras.layers import Bidirectional, Dropout from tensorflow.keras.models import Sequential from tensorflow.keras.utils import plot_model import pandas as pd import numpy as np # + id="_sSYzbinve3M" df = pd.read_csv("https://raw.githubusercontent.com/benvictoria21/pandas/master/dataset/testset.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 215} id="EpItKqH5ve3M" outputId="0655e9ac-1728-4743-bcf3-c786395da43d" df.head() # + colab={"base_uri": "https://localhost:8080/"} id="rkpXzdPTve3N" outputId="29999c99-1ec4-46ab-b74c-a0760fe5987c" df[' _conds'].value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 720} id="pYv7DzFIve3N" outputId="0440d566-78d4-449f-c8f2-6043b8c3e71c" plt.figure(figsize=(15,10)) df[' _conds'].value_counts().head(15).plot(kind='bar') plt.title('15 most common weathers in Delhi') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 654} id="-A5KpPvrve3O" outputId="56eaff39-78dd-42e2-eedc-59e2870bc567" plt.figure(figsize=(15, 10)) plt.title("Common wind direction in delhi") df[' _wdire'].value_counts().plot(kind="bar") plt.plot() # + colab={"base_uri": "https://localhost:8080/", "height": 675} id="MSAu92cdve3O" outputId="ac0646e3-3d01-43e8-a76d-621c8ddef874" plt.figure(figsize=(15, 10)) sns.distplot(df[' _tempm'],bins=[i for i in range(0,61,5)], kde=False) plt.title("Distribution of Temperatures") plt.grid() plt.show() # + id="SQ24Ms-Bve3P" 
df['datetime_utc'] = pd.to_datetime(df['datetime_utc']) # + colab={"base_uri": "https://localhost:8080/"} id="X1thmpLive3P" outputId="3558eb01-17cf-44df-807b-acfb88c804c2" df['datetime_utc'] # + id="yRcAU5yrve3Q" # imputing the missing value in temperatre feature with mean. df[' _tempm'].fillna(df[' _tempm'].mean(), inplace=True) # + colab={"base_uri": "https://localhost:8080/"} id="9Og-kebwve3Q" outputId="1847dfb8-14dd-4c7d-8e42-f6e4f597abbb" df[' _tempm'].isna().sum() # filled all missing values with mean() # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="DHyrlsXRve3Q" outputId="d6409d38-9fc9-4378-e7fa-ce660f0ac1c9" str(df['datetime_utc'][0]) # + id="akvMx38Gve3R" # a function to extract year part from the whole date def get_year(x): return x[0:4] # + id="pMGfguk7ve3R" # a function to extract month part from the whole date def get_month(x): return x[5:7] # + id="GxZMs9fuve3R" # making two new features year and month df['year'] = df['datetime_utc'].apply(lambda x: get_year(str(x))) df['month'] = df['datetime_utc'].apply(lambda x: get_month(str(x))) # + colab={"base_uri": "https://localhost:8080/"} id="fB0Lb1_hve3R" outputId="37da608f-015d-4541-e615-a895eaa8bc32" df['year'] # + id="JIXweDPXve3S" temp_year = pd.crosstab(df['year'], df['month'], values=df[' _tempm'], aggfunc='mean') # + colab={"base_uri": "https://localhost:8080/", "height": 621} id="dne-hDhTve3S" outputId="54a0745b-54ee-4b6e-de6b-684aec1664f5" plt.figure(figsize=(15, 10)) sns.heatmap(temp_year, cmap='coolwarm', annot=True) plt.title("Average Tempearture in Delhi from 1996 to 2017") plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="PKJXQEcDve3S" outputId="6a31ca0b-b608-4efb-ffc5-3dadd8a66a5e" df[' _hum'].isna().sum() # + id="_V8WgB3eve3S" # imputing missing values in _hum feature with mean df[' _hum'].fillna(df[' _hum'].mean(), inplace=True) # + id="YlqQt0wzve3T" humidity_year = pd.crosstab(df['year'], df['month'], values=df[' _hum'], aggfunc='mean') # + 
colab={"base_uri": "https://localhost:8080/", "height": 621} id="i1tsuascve3T" outputId="a2f78fe5-9b7c-41ef-cf30-8cc1eaa47f1e" plt.figure(figsize=(15, 10)) sns.heatmap(humidity_year, cmap='coolwarm', annot=True) plt.title("Average Humidity in Delhi from 1996 to 2017") plt.show() # + id="GI2q5siEve3T" # taking only temperature feature as values and datetime feature as index in the dataframe for time series forecasting of temperature data = pd.DataFrame(list(df[' _tempm']), index=df['datetime_utc'], columns=['temp']) # + colab={"base_uri": "https://localhost:8080/", "height": 431} id="TWqE5HYYve3T" outputId="e8d7e1c9-2d90-4ecc-b1a8-ce40b4c257b8" data # + id="CSsB-Rcwve3T" # resampling data with date frequency for time series forecasting data = data.resample('D').mean() # + colab={"base_uri": "https://localhost:8080/"} id="u0UeTLVTve3U" outputId="43133d72-c82e-4ccb-ffe7-07559337e186" data.temp.isna().sum() # + id="K1v_3z8sve3U" data.fillna(data['temp'].mean(), inplace=True) # + colab={"base_uri": "https://localhost:8080/"} id="ekLICY0_ve3U" outputId="c81143f9-a950-4640-d241-5d6d193acbfe" data.temp.isna().sum() # + colab={"base_uri": "https://localhost:8080/"} id="muSmwnzvve3U" outputId="20435ee8-f44e-4792-ff1a-6e3ac2838ac0" data.shape # + colab={"base_uri": "https://localhost:8080/", "height": 431} id="G2DhQoPOve3V" outputId="ae9e4d49-7e51-4584-ded2-4c6d96f257db" data # + colab={"base_uri": "https://localhost:8080/", "height": 325} id="Fwjxo58fve3V" outputId="06262177-d23d-4c6a-d0af-c7ea73fee7c0" plt.figure(figsize=(25, 7)) plt.plot(data, linewidth=.5) plt.grid() plt.title("Time Series (Years vs Temp.)") plt.show() # + id="9FXxSwY_ve3V" # Scaling data to get rid of outliers from sklearn.preprocessing import MinMaxScaler scalar = MinMaxScaler(feature_range=(-1,1)) data_scaled = scalar.fit_transform(data) # + colab={"base_uri": "https://localhost:8080/"} id="c6UwTfvLve3V" outputId="0b5644c5-05ab-4505-a527-c5b89be72406" data_scaled # + colab={"base_uri": 
"https://localhost:8080/"} id="I1kEcDdlve3V" outputId="94381633-006d-4c6a-e25b-835071e75ed6" data_scaled.shape # + id="n3oLrzXFve3W" steps = 30 inp = [] out = [] for i in range(len(data_scaled)- (steps)): inp.append(data_scaled[i:i+steps]) out.append(data_scaled[i+steps]) # + id="goR4zBdXve3W" inp=np.asanyarray(inp) out=np.asanyarray(out) # + id="V4DPxkRfve3W" x_train = inp[:7300,:,:] x_test = inp[7300:,:,:] y_train = out[:7300] y_test= out[7300:] # + colab={"base_uri": "https://localhost:8080/"} id="4DccZCwEve3W" outputId="0c36d5ae-20d3-43a3-a964-db6b6a213d34" inp.shape # + colab={"base_uri": "https://localhost:8080/"} id="KgQJzGPave3W" outputId="c83a7014-5aa3-45f5-e53e-258dc58abcd8" x_train.shape # + colab={"base_uri": "https://localhost:8080/"} id="aX5SlqNJve3X" outputId="6976c4cd-aed0-460d-9b73-fa3e2e385de6" x_test.shape # + colab={"base_uri": "https://localhost:8080/"} id="8eaYT6Zlve3X" outputId="24b97435-cba5-4b80-88f1-259d18018982" from keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping early_stop = EarlyStopping(monitor = "loss", mode = "min", patience = 7) model = Sequential() model.add(Conv1D(filters=256, kernel_size=2, activation='relu', input_shape=(30,1))) model.add(Conv1D(filters=128, kernel_size=2, activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(RepeatVector(30)) model.add(LSTM(units=100, return_sequences=True, activation='relu')) model.add(Dropout(0.2)) model.add(LSTM(units=100, return_sequences=True, activation='relu')) model.add(Dropout(0.2)) model.add(LSTM(units=100, return_sequences=True, activation='relu')) model.add(LSTM(units=100, return_sequences=True, activation='relu')) model.add(Bidirectional(LSTM(128, activation='relu'))) model.add(Dense(100, activation='relu')) model.add(Dense(1)) model.compile(loss='mse', optimizer='adam') # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="bkFoxCcYve3X" outputId="6ac2ccc7-0c8f-4344-a551-266c0353de72" plot_model(model, 
to_file='model.png') # + colab={"base_uri": "https://localhost:8080/"} id="vdsLmYhWve3X" outputId="72686699-106c-4333-83c3-e851455dc2b9" history = model.fit(x_train,y_train,epochs=100, verbose=1, callbacks = [early_stop] ) # + id="YM14h2Icve3Y" model.save("./regressor.hdf5") # + id="1cgR99G7ve3Y" predict = model.predict(x_test) # + id="hT86UImVve3Y" predict = scalar.inverse_transform(predict) # + id="bWI83a39ve3Y" Ytesting = scalar.inverse_transform(y_test) # + id="8omamUr2ve3Y" plt.figure(figsize=(20,9)) plt.plot(Ytesting , 'blue', linewidth=5) plt.plot(predict,'r' , linewidth=4) plt.legend(('Test','Predicted')) plt.show() # + id="Z6mZ5Ffnve3Y" from sklearn.metrics import mean_squared_error mean_squared_error(Ytesting, predict)
time_series_forecasting_of_temperature_cnn_lstms.ipynb
# # Functions for finding target users to test
# Scrapes Twitter search/hashtag result pages (via a free anonymous proxy)
# and collects the authors of the returned tweets into `targetUsers`.

import pandas as pd
import requests
from IPython.display import HTML, display
from bs4 import BeautifulSoup
from bs4 import Comment
import json
import html
import re
import urllib.parse
from threading import Thread
from tqdm import tqdm
import pickle

# Configure proxy and headers: scrape the free-proxy list and keep "elite"
# proxies, preferring HTTPS-capable ones.
url_proxy = 'https://free-proxy-list.net/anonymous-proxy.html'
response_pxy = requests.get(url_proxy)
pxySoup = BeautifulSoup(response_pxy.text, 'html.parser')
rowsPxy = []
headPxy = []
for tablePart in pxySoup.find_all(attrs={'id': 'proxylisttable'})[0].find_all(True, recursive=False):
    if tablePart.name == 'thead':
        for row in tablePart.find_all(True, recursive=False):
            headPxy = [tr.text for tr in row]
    if tablePart.name == 'tbody':
        for row in tablePart.find_all(True, recursive=False):
            rowsPxy.append([tr.text for tr in row])

proxiesDf = pd.DataFrame(data=rowsPxy, columns=headPxy)
proxiesDf = proxiesDf[(proxiesDf['Anonymity'] == 'elite proxy')].sort_values(by='Https', ascending=False)

headersChrome = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
}

# Route plain-HTTP requests through the best proxy found above.
proxies = {
    "http": proxiesDf.loc[:, 'IP Address'].values[0]
}

# targetUsers = pd.DataFrame(columns=['username'])
# Resume from a previously saved dataset (index = user id, column = username).
targetUsers = pd.read_pickle('../datasets/targetUsersDataset2.pkl').head(1)
print(len(targetUsers))
targetUsers.head()

headersMore = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Host': 'twitter.com',
    'Accept-Language': 'es-AR,es;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate, br',
    'X-Twitter-Active-User': 'yes',
    'X-Requested-With': 'XMLHttpRequest',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}


def getNewPage(term, searchType, maxPosition):
    """Fetch the next page of Twitter search results.

    Returns the decoded JSON payload: a dict whose 'min_position' key is the
    cursor for the following page and whose 'items_html' key holds the
    rendered tweets.
    """
    if searchType != 'q':
        term = '#' + term
    pageResponse = requests.get(
        'https://twitter.com/i/search/timeline',
        params={
            'vertical': 'news',
            'q': term,
            'max_position': maxPosition,
            'include_entities': 1,
            'include_available_features': 1,
            'reset_error_state': False
        },
        proxies=proxies,
        headers=headersMore
    )
    # BUGFIX: the original returned here and then carried a second,
    # unreachable return that decoded the payload again; decode once.
    return pageResponse.json()


def getUserTweetData(tweet):
    """Record the author (username, user id) of one tweet node in targetUsers."""
    try:
        username = tweet.findAll(attrs={'class': 'username'})[0].text[1:]
        userId = tweet.findAll('div', attrs={'class': 'js-stream-tweet'})[0].attrs['data-user-id']
        targetUsers.loc[userId] = [username]
        return [username, userId]
    except (AttributeError, IndexError, KeyError):
        # BUGFIX: the original had a bare `except` followed by a return of
        # [username, userId] *outside* the try, which raised NameError
        # whenever parsing failed; report the bad node and return None.
        print('ERROR---------------------')
        print(tweet)
        return None


def searchTerm(term, searchType='q', maxPages=15):
    """Scrape up to maxPages of results for `term` ('q' = plain search,
    anything else = hashtag search), feeding every tweet to getUserTweetData."""
    pageResponse = False
    if searchType == 'q':
        pageResponse = requests.get('https://twitter.com/search?q=' + term, headers=headersChrome, proxies=proxies)
    else:
        pageResponse = requests.get('https://twitter.com/hashtag/' + term, headers=headersChrome, proxies=proxies)
    twData = BeautifulSoup(pageResponse.text, 'html.parser')
    initialStream = twData.findAll(attrs={'id': 'timeline'})[0].findAll(attrs={'class': 'stream-container'})[0]
    dataMax = initialStream.attrs['data-max-position']
    dataMin = initialStream.attrs['data-min-position']
    streamItems = initialStream.findAll(attrs={'id': 'stream-items-id'})[0]
    tweetList = streamItems.findAll('li', attrs={'data-item-type': "tweet"}, recursive=False)
    for tweet in tweetList:
        getUserTweetData(tweet)
    # Page through the remaining results using the cursor of each response.
    nextKey = dataMax
    for pageNum in tqdm(range(maxPages)):
        response = getNewPage(term, searchType, nextKey)
        nextKey = response['min_position']
        htmlResponse = BeautifulSoup(response['items_html'], 'html.parser')
        if len(htmlResponse.findAll('li', attrs={'data-item-type': "tweet"}, recursive=False)) == 0:
            print(response)
            print('NO MORE', htmlResponse)
            break
        for tweet in htmlResponse.findAll('li', attrs={'data-item-type': "tweet"}, recursive=False):
            getUserTweetData(tweet)

# Example runs:
# searchTerm('cfk', '#', 15)
# searchTerm('cambiemos', '#', 5)
# searchTerm('MacriVendepatria', '#', 5)
# searchTerm('JuntosPorElCambio', '#', 5)
# searchTerm('carrio', 'q', 100)

import threading

lock = threading.RLock()


def process_id(id):
    """process a single ID"""
    # NOTE(review): dead code — never called here, and `url_t` is not defined
    # anywhere in this notebook; leftover from the snippet this threading
    # scaffold was copied from.
    # fetch the data
    r = requests.get(url_t % id)
    # parse the JSON reply
    data = r.json()
    # and update some data with PUT
    requests.put(url_t % id, data=data)
    return data


def process_range(id_range, store=None):
    """Run searchTerm for each term in id_range, storing the results in a dict."""
    if store is None:
        store = {}
    for id in id_range:
        store[id] = searchTerm(id, 'q', 600)
    return store


def threaded_process_range(nthreads, id_range):
    """Split id_range round-robin over nthreads worker threads."""
    store = {}
    threads = []
    # create the threads
    for i in range(nthreads):
        ids = id_range[i::nthreads]
        with lock:
            t = Thread(target=process_range, args=(ids, store))
            threads.append(t)
    # start the threads, then wait for them all to finish
    [t.start() for t in threads]
    [t.join() for t in threads]
    print('Done')
    print(len(targetUsers))
    display(targetUsers.head())
    return 'Done'


termsToSearch = [
    '%23HayEsperanza',
    '%23FernandezFernandez2019',
    '%23ElCircoDeMariuYMau',
    '%23AlbertoPresidente',
    'TOD ☀️S',
    '%23EsConTodos',
    '%23HayOtroCamino',
    '%23MacriTeQuedaPoco',
    '%23AltaSuciedadPRO'
]
threaded_process_range(8, termsToSearch)

# filename = 'targetUsersDataset.pkl'
# with open(filename, 'wb') as twUsersTarget:
#     pickle.dump(targetUsers, twUsersTarget)

# termsToSearch = [
#     'macri FMI',
#     'sinceramente falso',
#     'ella miente',
#     'massa',
#     'cristina pobreza'
# ]
# threaded_process_range(8, termsToSearch)
# filename = 'targetUsersDataset.pkl'
# with open(filename, 'wb') as twUsersTarget:
#     pickle.dump(targetUsers, twUsersTarget)

len(targetUsers)
targetUsers

# Persist the collected users.
import pickle
filename = '../datasets/targetUsersDataset_k.pkl'
with open(filename, 'wb') as twUsersTarget:
    pickle.dump(targetUsers, twUsersTarget)
twitterScraper/Find target users-K.ipynb
# -*- coding: utf-8 -*-
# # Complex Numbers
#
# A complex number is a number in the form `a + b * i` where `a` and `b` are
# real and `i` satisfies `i^2 = -1`. (Exercism exercise; Julia Base's own
# implementation lives in base/complex.jl.)
#
# Bonus A: implement `exp(::ComplexNumber)`.
# Bonus B: implement `jm` analogous to `im` so that `1 + 1jm == ComplexNumber(1, 1)`.

# + # submit
import Base: real, imag, reim, conj, abs, +, -, *, /, exp, convert, promote_rule, isfinite

# A complex number with real part `re` and imaginary part `im`, both stored
# in a common real type T.
struct ComplexNumber{T<:Real} <: Number
    re::T
    im::T
end

ComplexNumber(re::Real, im::Real) = ComplexNumber(promote(re, im)...)
ComplexNumber(re::Real) = ComplexNumber(re, zero(re))

# Conversion and promotion hook into Julia's generic arithmetic, so mixed
# Real/ComplexNumber expressions (e.g. 1 + 1jm) work automatically.
convert(::Type{ComplexNumber{T}}, x::Real) where {T<:Real} = ComplexNumber{T}(x, 0)
convert(::Type{ComplexNumber{T}}, z::ComplexNumber) where {T<:Real} = ComplexNumber{T}(real(z), imag(z))
promote_rule(::Type{ComplexNumber{T}}, ::Type{S}) where {T<:Real, S<:Real} = ComplexNumber{promote_type(T, S)}
promote_rule(::Type{ComplexNumber{T}}, ::Type{ComplexNumber{S}}) where {T<:Real, S<:Real} = ComplexNumber{promote_type(T, S)}

real(z::ComplexNumber) = z.re
imag(z::ComplexNumber) = z.im
reim(z::ComplexNumber) = z.re, z.im
conj(z::ComplexNumber) = ComplexNumber(real(z), -imag(z))
abs(z::ComplexNumber) = hypot(real(z), imag(z))
isfinite(z::ComplexNumber) = isfinite(real(z)) && isfinite(imag(z))

+(u::ComplexNumber, v::ComplexNumber) = ComplexNumber(real(u) + real(v), imag(u) + imag(v))
-(u::ComplexNumber, v::ComplexNumber) = ComplexNumber(real(u) - real(v), imag(u) - imag(v))
*(u::ComplexNumber, v::ComplexNumber) = ComplexNumber(real(u) * real(v) - imag(u) * imag(v),
                                                      real(u) * imag(v) + imag(u) * real(v))
/(u::ComplexNumber, v::ComplexNumber) = ComplexNumber((real(u) * real(v) + imag(u) * imag(v)) / (real(v)^2 + imag(v)^2),
                                                      (imag(u) * real(v) - real(u) * imag(v)) / (real(v)^2 + imag(v)^2))
-(z::ComplexNumber) = ComplexNumber(-real(z), -imag(z))
# -

# BUGFIX: a stray cell here redefined `reim(z::ComplexNumber) = nothing`,
# overwriting the correct method above and breaking exp() below (which
# destructures reim's tuple). The sabotaging redefinition has been removed.

# + # submit
# Bonus A: complex exponential, exp(a + bi) = e^a * (cos b + i sin b).
function exp(z::ComplexNumber)
    z_re, z_im = reim(z)
    e_re = exp(z_re)
    ComplexNumber(e_re * cos(z_im), e_re * sin(z_im))
end
# -

# + # submit
# Bonus B: `jm` analogous to `im`, so that 1 + 1jm == ComplexNumber(1, 1).
const jm = ComplexNumber(0, 1)
# -

# +
using Test

# include("complex-numbers.jl")

@test ComplexNumber <: Number
@test ComplexNumber(0, 1)^2 == ComplexNumber(-1, 0)

@testset "Arithmetic" begin
    @testset "Addition" begin
        @test ComplexNumber(1, 0) + ComplexNumber(2, 0) == ComplexNumber(3, 0)
        @test ComplexNumber(0, 1) + ComplexNumber(0, 2) == ComplexNumber(0, 3)
        @test ComplexNumber(1, 2) + ComplexNumber(3, 4) == ComplexNumber(4, 6)
    end
    @testset "Subtraction" begin
        @test ComplexNumber(1, 0) - ComplexNumber(2, 0) == ComplexNumber(-1, 0)
        @test ComplexNumber(0, 1) - ComplexNumber(0, 2) == ComplexNumber(0, -1)
        @test ComplexNumber(1, 2) - ComplexNumber(3, 4) == ComplexNumber(-2, -2)
    end
    @testset "Multiplication" begin
        @test ComplexNumber(1, 0) * ComplexNumber(2, 0) == ComplexNumber(2, 0)
        @test ComplexNumber(0, 1) * ComplexNumber(0, 2) == ComplexNumber(-2, 0)
        @test ComplexNumber(1, 2) * ComplexNumber(3, 4) == ComplexNumber(-5, 10)
    end
    @testset "Division" begin
        @test ComplexNumber(1, 0) / ComplexNumber(2, 0) == ComplexNumber(0.5, 0)
        @test ComplexNumber(0, 1) / ComplexNumber(0, 2) == ComplexNumber(0.5, 0)
        @test ComplexNumber(1, 2) / ComplexNumber(3, 4) == ComplexNumber(0.44, 0.08)
    end
end

@testset "Absolute value" begin
    @test abs(ComplexNumber(5, 0)) == 5
    @test abs(ComplexNumber(-5, 0)) == 5
    @test abs(ComplexNumber(0, 5)) == 5
    @test abs(ComplexNumber(0, -5)) == 5
    @test abs(ComplexNumber(3, 4)) == 5
end

@testset "Complex conjugate" begin
    @test conj(ComplexNumber(5, 0)) == ComplexNumber(5, 0)
    @test conj(ComplexNumber(0, 5)) == ComplexNumber(0, -5)
    @test conj(ComplexNumber(1, 1)) == ComplexNumber(1, -1)
end

@testset "Real part" begin
    @test real(ComplexNumber(1, 0)) == 1
    @test real(ComplexNumber(0, 1)) == 0
    @test real(ComplexNumber(1, 2)) == 1
end

@testset "Imaginary part" begin
    @test imag(ComplexNumber(1, 0)) == 0
    @test imag(ComplexNumber(0, 1)) == 1
    @test imag(ComplexNumber(1, 2)) == 2
end

# Bonus A
@testset "Complex exponential" begin
    @test_skip exp(ComplexNumber(0, π)) ≈ ComplexNumber(-1, 0)
    @test_skip exp(ComplexNumber(0, 0)) == ComplexNumber(1, 0)
    @test_skip exp(ComplexNumber(1, 0)) ≈ ComplexNumber(e, 0)
end

# Bonus B
@testset "Syntax sugar jm" begin
    @test_skip ComplexNumber(0, 1) == jm
    @test_skip ComplexNumber(1, 0) == 1 + 0jm
    @test_skip ComplexNumber(1, 1) == 1 + 1jm
    @test_skip ComplexNumber(-1, 0) == jm^2
end
# -

using Exercism
Exercism.create_submission("testnb")
test/example-exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Computer Vision Работал в следующих областях: # Object Detection (YOLOv3) - множественные лица в помещении # Object Segmentation (Mask R-CNN,) # Object Detection/Face Feature Extraction - получение landmarks JS YOLO # OCR (YOLOv3, CRAFT, keras_ocr): # Models: VGG-16, ResNet-50. # Datasets: Bosphorus, FaceWarehouse3D, # Tools: sklearn, Caffe, Keras, keras_ocr. # распознавание ценников с телефона. # Face Classification 3D: # Models: VGG-16, ResNet-50. # Datasets: Bosphorus, FaceWarehouse3D, # Tools: sklearn, Caffe, Keras. # Использовались
interviews.ipynb
# # Random Forest Regression
# NOTE: Random Forest is not great on this single-feature dataset, so the
# outcome here is worse than SVR / polynomial / multiple linear regression.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# ### Importing the dataset: position level (feature) vs salary (target).
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values

# ### Train a 10-tree forest on the whole (tiny) dataset.
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=10, random_state = 0)
regressor.fit(X, y)

# ### Predict the salary for position level 6.5.
regressor.predict([[6.5]])

# ### Visualise the fit at higher resolution (0.1-wide level grid).
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='grey')
plt.title('Random Forest Regression')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# HTML marquee cell (jupytext keeps its content as a comment; text verbatim):
# + language="html"
# <marquee style='width: 90%; color: orange; font-size:150%;'><b>Random Forest is performing poor because of less dimentions of the dataset😒 however it can be used for higher dimentional datasets😊</b></marquee>
08. RandomForest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simple ODE solvers # This notebook explores the Euler and predictor-corrector methods for solving ODEs # ## Euler's method for solving ODEs # In this section we look at the Euler method for solving ODEs. For reasons discussed below this method is rarely used but it is simple to understand. import numpy as np import matplotlib.pyplot as plt # The below commands make the font and image size bigger plt.rcParams.update({'font.size': 22}) plt.rcParams["figure.figsize"] = (15,10) # The ODE we want to solve is $$\frac{dy}{dx} = -2x - y$$ with initial conditions $x_0 = 0, y_0 = -1$. We have chosen this test case as it has a simlpe analytic solution we can compare against $y(x) = -3e^{-x} -2x +2$ def dydx(x,y): return -2*x - y def yExact(x): return - 3*np.exp(-x) - 2*x + 2 def EulerMethod(dydx, yExact, dx, x0, y0, imax, printSteps=False, plotSteps=False, uColor='red'): i = 0 xi = x0 yi = y0 # Create arrays to store the steps in steps = np.zeros((imax+1,2)) steps[0,0] = x0 steps[0,1] = y0 # Create a loop to iteratively solve the ODE step-by-step while i < imax: # The next three lines implement the Euler method fi = dydx(xi, yi) xi += dx yi = yi + dx*fi i += 1 if(printSteps): diffi = np.abs(yi - yExact(xi)) Ei = np.abs(100*diffi/yExact(xi)) print("%d %.2f %.5f %.4f %.2f" % (i, xi, yi, diffi, Ei)) # Store the steps for plotting steps[i, 0] = xi steps[i, 1] = yi if(plotSteps): plt.scatter(steps[:,0], steps[:,1], color=uColor, linewidth=10) return [xi, yi] # + x = np.linspace(0, 0.5, 100) y = yExact(x) plt.xlabel('x') plt.ylabel('y') plt.grid(True) plt.plot(x,y) EulerMethod(dydx, yExact, 0.025, 0, -1, 20, True, True, 'red') plt.legend(['exact solution', 'Euler method steps']); # - # ### Convergence of the Euler method # We can check the convergence rate of 
our code by varying $\Delta x$ and $n$ and checking the accuracy of the algorithm at a point $f(x*)$ # + x = np.linspace(0, 0.5, 100) y = yExact(x) plt.xlabel('x') plt.ylabel('y') plt.grid(True) plt.plot(x,y) EulerMethod(dydx, yExact, 0.1, 0, -1, 5, False, True, 'red') EulerMethod(dydx, yExact, 0.05, 0, -1, 10, False, True, 'orange') EulerMethod(dydx, yExact, 0.025, 0, -1, 20, False, True, 'green') plt.legend(['exact solution', 'Lowest resolution', 'Middle resolution', 'Highest resolution' ]); # + nmax = 10 n = 1 diff = np.zeros(nmax) plt.grid(True) plt.yscale('log') plt.xlabel('n') plt.ylabel('|y_i - y(0.5)|') while n < nmax: res = EulerMethod(dydx, yExact, 0.1/2**n, 0, -1, 5*2**n) diff[n-1] = np.abs(res[1] - yExact(0.5)) n += 1 # Compute and plot reference curves for the convergence rate ns = np.linspace(1, nmax) deltax = (0.1/2**ns) firstOrder = deltax**1 plt.plot(ns, firstOrder) plt.ylim([5e-5, 5e-2]) plt.scatter(np.arange(1,nmax+1), diff, color='red', linewidth=10); plt.legend(['First-order convergence reference', 'Convergence of Euler method']); # - # ### Stability of the Euler method # The Euler method can be unstable, i.e., for some functions will not converge on the correct answer. One example is shown below. 
def dxdt(t, x): a = 1 return a*x def xExact(t): a = 1 return np.exp(a*t) # + t = np.linspace(0,0.5,100) x = xExact(t) plt.grid(True) plt.xlabel('x') plt.ylabel('y') plt.plot(t,x) print(EulerMethod(dxdt, xExact, 0.1, 0, 1, 5, False, True)) plt.legend(['exact solution', 'Euler method steps']); # - # If we now perform the error analysis similar to how we did above, we see that the in this case the Euler method does not converge (i.e., the error does not decrease as we decrease the step size) # + nmax = 10 n = 1 plt.grid(True) plt.yscale('log') plt.xlabel('n') plt.ylabel('y_i - y(0.5)') while n < nmax: res = EulerMethod(dxdt, xExact, 0.1/2**n, 0, -1, 5*2**n) diff = np.abs(res[1] - yExact(0.5)) plt.scatter(n, diff, color='red', linewidth=10) n += 1 # - # The poor convergence properties of the Euler method mean that is never used to solve ODEs in practice. It is mostly a pedagogical tool. # ## Predictor-corrector method for solving ODEs # # Let's look at a more stable method, that will also turn out to converge quicker. The predictor-corrector method first takes an Euler step and then applies a corrector step based on the average of the derivative at the current step and the new step. 
def PredictorCorrector(dydx, dx, x0, y0, imax, printSteps=False, plotSteps=False): i = 0 xi = x0 yi = y0 steps = np.zeros((imax+1,2)) steps[0,0] = x0 steps[0,1] = y0 while i < imax: # The next three lines implement the Euler method # as the predictor step fi = dydx(xi, yi) xi1 = xi + dx ytildei = yi + dx*fi # We then apply the corrector step which # use the average of the slope at x_i and x_{i+1} # to compute y_i fi1 = dydx(xi1, ytildei) xi = xi1 yi = yi + dx/2 * (fi + fi1) i += 1 # Store the steps for plotting steps[i, 0] = xi steps[i, 1] = yi if(plotSteps): plt.scatter(steps[:,0], steps[:,1], color='orange', linewidth=10) return [xi, yi] # + x = np.linspace(0,0.5,100) y = yExact(x) plt.grid(True) plt.plot(x,y) EulerMethod(dydx, yExact, 0.1, 0, -1, 5, False, True) PredictorCorrector(dydx, 0.1, 0, -1, 5, False, True) plt.legend(['exact solution', 'Euler method steps', 'Predictor-corrector steps']); # - # With the same number of steps it certainly looks like the Predictor-Corrector method is more accurate than the Euler method. 
Notice that this increase in accuracy comes at a cost, which is the RHS needs to be evaluated twice # ### Convergence of the methods # Let's now look at the convergence of the Predictor-corrector method vs the Euler method # + nmax = 12 diffEuler = np.zeros(nmax) diffPC = np.zeros(nmax) n = 1 while n < nmax: deltax = 0.1/2**n nsteps = 5*2**n resEuler = EulerMethod(dydx, yExact, deltax, 0, -1, nsteps) resPC = PredictorCorrector(dydx, deltax, 0, -1, nsteps) diffEuler[n] = np.abs(resEuler[1] - yExact(0.5)) diffPC[n] = np.abs(resPC[1] - yExact(0.5)) n += 1 plt.grid(True) plt.yscale('log') plt.xlabel('n') plt.ylabel('y_i - y(0.5)') plt.ylim([1e-10, 1]) # Compute and plot reference curves for the convergence rate x = np.linspace(1, nmax) deltax = (0.1/2**x) firstOrder = deltax**1 secondOrder = deltax**2 plt.plot(x, firstOrder) plt.plot(x, secondOrder) plt.scatter(np.arange(1,nmax+1), diffEuler, color='red', linewidth=10) plt.scatter(np.arange(1,nmax+1), diffPC, color='orange', linewidth=10) plt.legend(['First-order convergence reference', 'Second-order convergence reference', 'Convergence of Euler method', 'Convergence of predictor-corrector']); # - # We thus see that, as expected, the predictor-corrector method is **second-order** convergent, whereas the Euler method is **first-order** convergent
OrdinaryDifferentialEquations/EulerAndPredictor-CorrectorMethods.ipynb
# # Fully Convolutional Networks (FCN)
# :label:`sec_fcn`
#
# An FCN maps image pixels to per-pixel class predictions (semantic
# segmentation): a CNN backbone extracts features, a 1x1 convolution maps
# the channels to the number of classes, and a transposed convolution
# (:numref:`sec_transposed_conv`) upsamples back to the input resolution
# :cite:`Long.Shelhamer.Darrell.2015`.

# %matplotlib inline
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

# ## Constructing the model
#
# Use an ImageNet-pretrained ResNet-18 as the feature extractor; its last
# layers (global average pooling + fully connected) are not needed here.
pretrained_net = torchvision.models.resnet18(pretrained=True)
list(pretrained_net.children())[-3:]

# Copy every pretrained layer except the final global pooling and FC layers.
net = nn.Sequential(*list(pretrained_net.children())[:-2])

# For a 320x480 input, the backbone reduces height and width by a factor of
# 32 (down to 10x15).
X = torch.rand(size=(1, 3, 320, 480))
net(X).shape

# A 1x1 conv maps the 512 feature channels to the 21 Pascal VOC2012 classes;
# a transposed conv with stride 32, kernel 64 and padding 16 then upsamples
# the feature map 32x, back to the input height and width (with stride s,
# padding s/2 and kernel 2s, a transposed conv magnifies by a factor of s).
num_classes = 21
net.add_module('final_conv', nn.Conv2d(512, num_classes, kernel_size=1))
net.add_module('transpose_conv', nn.ConvTranspose2d(num_classes, num_classes,
                                    kernel_size=64, padding=16, stride=32))

# ## Initialising the transposed convolution
#
# Bilinear interpolation is a common upsampling method and is often used to
# initialise transposed-convolution layers. `bilinear_kernel` expresses it
# as a transposed-convolution weight tensor.
def bilinear_kernel(in_channels, out_channels, kernel_size):
    # Build a (in_channels, out_channels, k, k) weight tensor whose spatial
    # filter implements bilinear interpolation; only the "diagonal"
    # (channel i -> channel i) filters are non-zero.
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = (torch.arange(kernel_size).reshape(-1, 1),
          torch.arange(kernel_size).reshape(1, -1))
    filt = (1 - torch.abs(og[0] - center) / factor) * \
           (1 - torch.abs(og[1] - center) / factor)
    weight = torch.zeros((in_channels, out_channels,
                          kernel_size, kernel_size))
    weight[range(in_channels), range(out_channels), :, :] = filt
    return weight

# Demo: a transposed conv that doubles height and width, initialised with
# the bilinear kernel.
conv_trans = nn.ConvTranspose2d(3, 3, kernel_size=4, padding=1, stride=2,
                                bias=False)
conv_trans.weight.data.copy_(bilinear_kernel(3, 3, 4));

# Read an image, upsample it to `Y`, and move the channel dimension last so
# it can be displayed.
img = torchvision.transforms.ToTensor()(d2l.Image.open('../img/catdog.jpg'))
X = img.unsqueeze(0)
Y = conv_trans(X)
out_img = Y[0].permute(1, 2, 0).detach()

# The transposed conv doubled the image's height and width. Apart from the
# coordinate scale, the bilinearly upsampled image looks no different from
# the original image printed in :numref:`sec_bbox`.

d2l.set_figsize()
print('input image shape:', img.permute(1, 2, 0).shape)
d2l.plt.imshow(img.permute(1, 2, 0));
print('output image shape:', out_img.shape)
d2l.plt.imshow(out_img);

# In the FCN, initialise the transposed conv with bilinear upsampling; the
# 1x1 conv keeps its (Xavier-style) default initialisation.
W = bilinear_kernel(num_classes, num_classes, 64)
net.transpose_conv.weight.data.copy_(W);

# ## Reading the dataset
#
# Semantic-segmentation dataset from :numref:`sec_semantic_segmentation`,
# randomly cropped to 320x480 (both divisible by 32).
batch_size, crop_size = 32, (320, 480)
train_iter, test_iter = d2l.load_data_voc(batch_size, crop_size)

# ## Training
#
# Cross-entropy over the channel (class) dimension, averaged over both
# spatial dimensions, so every pixel contributes to the loss; accuracy is
# likewise computed per pixel.
def loss(inputs, targets):
    return F.cross_entropy(inputs, targets, reduction='none').mean(1).mean(1)

num_epochs, lr, wd, devices = 5, 0.001, 1e-3, d2l.try_all_gpus()
trainer = torch.optim.SGD(net.parameters(), lr=lr, weight_decay=wd)
d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices)

# ## Prediction
#
# Normalise the input image and add a batch dimension, then take the argmax
# over the class channel for every pixel.
def predict(img):
    X = test_iter.dataset.normalize_image(img).unsqueeze(0)
    pred = net(X.to(devices[0])).argmax(dim=1)
    return pred.reshape(pred.shape[1], pred.shape[2])

# Map each predicted class index back to its VOC annotation colour for
# visualisation.
def label2image(pred):
    colormap = torch.tensor(d2l.VOC_COLORMAP, device=devices[0])
    X = pred.long()
    return colormap[X, :]

# Test images vary in size; since the model uses stride-32 transposed
# convolutions, inputs whose height/width are not multiples of 32 would
# produce misaligned outputs (one could cover the image with overlapping
# multiple-of-32 rectangles and average the overlapping predictions). For
# simplicity, read a few larger test images and predict on a 320x480 crop
# from the top-left corner, then show crop, prediction, and ground-truth
# label side by side.
voc_dir = d2l.download_extract('voc2012', 'VOCdevkit/VOC2012')
test_images, test_labels = d2l.read_voc_images(voc_dir, False)
n, imgs = 4, []
for i in range(n):
    crop_rect = (0, 0, 320, 480)
    X = torchvision.transforms.functional.crop(test_images[i], *crop_rect)
    pred = label2image(predict(X))
    imgs += [X.permute(1,2,0), pred.cpu(),
             torchvision.transforms.functional.crop(
                 test_labels[i], *crop_rect).permute(1,2,0)]
d2l.show_images(imgs[::3] + imgs[1::3] + imgs[2::3], 3, n, scale=2);

# ## Summary
#
# * An FCN extracts image features with a CNN, maps the channels to class
#   counts with a 1x1 conv, and upsamples to the input size with a
#   transposed conv.
# * The transposed conv can be initialised as bilinear upsampling.
#
# ## Exercises
#
# 1. What changes if the transposed conv is Xavier-initialised instead?
# 1. Can hyper-parameter tuning improve the accuracy further?
# 1. Predict the classes of all pixels in the test images.
# 1. The original FCN paper :cite:`Long.Shelhamer.Darrell.2015` also uses
#    outputs of some intermediate CNN layers — try implementing that idea.
#
# [Discussions](https://discuss.d2l.ai/t/3297)
#
d2l/chapter_computer-vision/fcn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cv101 # language: python # name: cv101 # --- # ## Image Rotation # Importing libraries # + # System libraries import os import sys sys.path.append(os.path.join("..")) # OpenCV and NumPy for image processing import cv2 import numpy as np # Display utilities from utils.imutils import jimshow from matplotlib import pyplot as plt # Pathlib from pathlib import Path # - # __Load Image__ image = cv2.imread(os.path.join("..", "data", "img", "trex.png")) image.shape jimshow(image, "Original Image") # __Defining the center of the image__ # When doing rotation we need to tell openCV where the center of the image is, in order to rotate the image around the center. Hence, we need to define the center of the image. # We are only interested in the height and width of the image when defining the center of the image height = image.shape[0] width = image.shape[1] # We want the center to be a tuple center = (width//2, height//2) # we use // in order to get an integer instead of a float. We could also have used int() print(center) # __Create rotation matrix__ # We want create a rotation matrix using OpenCV # We use a function from OpenCV to create the rotation matrix M = cv2.getRotationMatrix2D(center = center, angle = 45, scale = 1) # Theta/angle = the degree angle we want to rotate through. What we set this to defines how much we rotate the image. # Scale = the scaling factor. This determines how much we zoom in or out (scale) print(M) # M is just a rotation matrix. 
# We use the cv2.warpAffine() to rotate the image
rotated = cv2.warpAffine(image, M, (width, height))

jimshow(rotated, "Rotated image")

# __Create a function that rotates an image__

# Our solution:
def img_rotate(image = image, theta = 0, scale = 1.0):
    """Rotate ``image`` by ``theta`` degrees around its center.

    Parameters
    ----------
    image : numpy.ndarray
        Image to rotate (defaults to the image loaded above).
    theta : float
        Rotation angle in degrees.
    scale : float
        Isotropic zoom factor. Defaults to 1.0 (no zoom). The previous
        default of 0 was a bug: a zero scale collapses the rotation
        matrix to zeros, so the default call returned an all-black image.
    """
    # Define center point of image
    height, width = image.shape[:2]
    center = (width//2, height//2)
    # Define rotation matrix
    M = cv2.getRotationMatrix2D(center, theta, scale)
    # Rotated image
    rotated_image = cv2.warpAffine(image, M, (width, height))
    return rotated_image

jimshow(img_rotate(image, 45, 1.0), "Rotated")

# Ross' solution:
#
# ![Sk%C3%A6rmbillede%202021-02-11%20kl.%2016.03.19.png](attachment:Sk%C3%A6rmbillede%202021-02-11%20kl.%2016.03.19.png)

# ## Splitting Channels

# We can use OpenCV to split an image into channels (blue, green, red arrays)

# We create a tuple in which we have each channel/array
(blue, green, red) = cv2.split(image)

# Now we can print the arrays individually
print(blue)
print(red)
print(green)

# In order to view the channels individually we need to define a function that allows us to do it

# This function can be found in imutils
import matplotlib as mpl
from utils.imutils import jimshow_channel

# Now we can view the channels individually
jimshow_channel(red, "Red")
jimshow_channel(blue, "Blue")
jimshow_channel(green, "Green")

# Here we can see the intensity of each color

# The lighter the grayscale the more intense the color is.

# __Define empty NumPy Array__

# We want an empty array in order to be able to display the color channels individually.

# Creating an array comprising only 0s - an image with nothing in it
# We still want the same shape as the original image but only consisting of 0s
empty_array = np.zeros(image.shape[:2], dtype = "uint8")
# uint means unsigned integers, which means that we are not allowing negative integers

# We use dtype = "uint8" to overwrite the default which is float - we do not want floats, but we want integers.
And since we do not want negative values we use unassigned integers which ignores negative values # __Displaying red__ # We use the merge() function to merge the red array with the empty array red_image = cv2.merge([empty_array, empty_array,red]) jimshow(red_image) # We are only displaying the red in the image. This is what happens when we only display the red. # __Displaying green__ green_image = cv2.merge([empty_array, green, empty_array]) jimshow(green_image) # __Displaying blue__ blue_image = cv2.merge([blue, empty_array, empty_array]) jimshow(blue_image) # When we separate an image into color channels, we see that certain part of the image become more pronounced depending on which channels is displayed. # # __Why is this interesting?__ # # Because we often find that separating an image into separate channels, we are able to detect images much easier. This is nice for face recognition, hand writing recognition etc. Hence, this has practical implications. # ## Histograms # Making grayscale histograms. By separating the channels, we can make the distirbution of different values from within those channels. This allows us to make the grayscale histogram. We create this using the __matplotlib__ library, which is the default for most plotting in Python. # __Using matplotlib__ # # Creating a figure (any kind of visualization) # Creating a figure with default parameters plt.figure() # We can use the hist() to add a histogram to the figure # # # The hist() function takes three paramters: # 1. array # 2. number of bins # 3. a range of possible values # # plt.hist(array, number_of_bins [range_of_possible_values]) # We can use the flatten() function to flatten the image and reduce the number of dimensions. When we flatten an image, we take each array (each channel) and flatten them into a single dimension, i.e. a list. Then we merge the channels into one long one-dimensional array. Now we have an array of many values that each can go from 0-255. 
This value represent the intensity of the color. Hence, we have a grayscale image. # Hence, when we flatten an image we create a one-dimensional array of color intensities. # Creating the array (the greyscale image) by flattening the image: grey_image = image.flatten() # Adding the array (grayscale) to the histogram: plt.hist(grey_image, 256, [0,256]) plt.title("Greyscale histogram") # adding plot title plt.xlabel("Bin number: color intensity") # adding x-axis title plt.ylabel("Number of pixels") # adding y-axis title plt.show() # What we see is the distirbution of pixels in terms of color intensity. Hence, we have a grayscale histogram of the image. We can see that most pixels are in the high intensity range, which means that the image contains a lot of white color. # ## Studying Multiple Images # Take what we have done so far and creating a function def plot_greyscale(input_image, image_name): # Initialize figure plt.figure() # Plot greyscale histogram plt.hist(input_image.flatten(), 256, [0,256]) # Give plot title plt.title(f"{image_name}") # Create xlabel plt.xlabel("Bin number: color intensity") # Create ylabel plt.ylabel("Number of pixels") # Show plot plt.show() # Making sure the function works plot_greyscale(image) # ## Task # 1. Iterate over the files in data/img # 2. If it’s a png file # 3. 
Do plot_greyscale # # # Our solution: # Defining path to images data_path = os.path.join("..", "data", "img") # Creating loop for img in Path(data_path).glob("*.png"): image = cv2.imread(str(img)) plot_greyscale(image) # Ross' solution: # + # Import pathlib from pathlib import Path # Point to an image directory image_dir = os.path.join("..", "data", "img") # Iterate over each file for image in Path(image_dir).glob("*.png"): # We need to specify that the directory path is a string image_path = str(image) # read image image = cv2.imread(image_path) # Get image_name from image_path image_name = os.path.split(image_path)[1] # # taking the second part of the path which is the image name and not the path to it #plot greyscale plot_greyscale(image, image_name) # - # If you want to look for either png or jpg # This can be implemented into the loop above if this is something you want to add for image in os.listdir(image_dir): if image.endswith(".png"): print(image) elif image.endswith(".jpg"): print("this is a jpg") else: print("not a png or a jpg") # ## Experiment with splitting up images
notebooks/session2_ImageProcessing_au617836.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import emat import os emat.require_version("0.5.0a7") database_path = os.path.expanduser("~/EMAT-VE/ve2-rspm-2020-10-17.db") db = emat.SQLiteDB(database_path, readonly=True) db.info() scope = db.read_scope("VERSPM") # # Core Model Results # ## Feature Scores from emat.analysis import feature_scores feature_scores( scope=scope, design='ulhs', db=db, ) # ## Scatter Plot Matrix from emat.analysis import display_experiments figs = display_experiments(scope, 'ulhs', db=db) # ## Interactive Visualizer from emat.analysis import Visualizer viz = Visualizer( data='ulhs', scope=scope, reference_point='ref', db=db, ) measures_1 = [ 'DVMTPerCapita', 'AirPollutionEm', 'FuelUse', 'UrbanTransitTrips', 'UrbanHvyTrk_TotalDelay', 'UrbanHhCO2e', 'UrbanComSvcCO2e', 'UrbanWalkTrips', 'UrbanVehOperatingCost', 'UrbanHhCO2eReduction', ] # ### One Dimension Selectors viz.complete(measures=measures_1) # ### Scatter Plot Matrix viz.splom( rows=measures_1, cols=['Income', 'FuelCost', 'ValueOfTime', 'LandUse'], ) # # Metamodel mm = emat.create_metamodel(scope, design_name='ulhs', db=db, include_measures=measures_1) # ## Cross Validation mm.cross_val_scores() mm_design = mm.design_experiments(n_samples=(140, 120), sampler='ulhs', jointly=False) mm_design mm_results = mm.run_experiments(mm_design) # ## Interactive Visualizer viz1 = Visualizer(data=mm_results, scope=scope, db=db, reference_point='ref') # ### One Dimensional Selectors viz1.complete(measures=measures_1) viz1.two_way(x='FuelUse',y='Transit') viz1.new_box('Breathable', upper_bounds={'AirPollutionEm':850_000}) # ### PRIM prim_all = viz1.prim(target="Breathable") prim_all.tradeoff_selector() prim_levers = viz1.prim(target="Breathable", data='levers') prim_levers.tradeoff_selector() # ## Policy Contrast import importlib 
# Reload the contrast module so any local edits to emat are picked up without
# restarting the kernel, then pull its public names into the namespace.
import emat.analysis.contrast
importlib.reload(emat.analysis.contrast)
from emat.analysis.contrast import *

# Build an A/B policy-contrast viewer on the metamodel.
# NOTE(review): `background=250` is presumably the number of background
# samples/experiments drawn for the contrast — confirm against the emat docs.
ab = AB_Viewer(mm, background=250)
# Display the interactive widget (rendered as the notebook cell output).
ab.interface
# Render contrast figures for the first six performance measures.
ab.get_figures(measures_1[:6])
verspm-interactive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # importing Dependencies import pandas as pd # Reading/importing csv file # Printing csv file hop = "Resources/purchase_data.csv" hop_df = pd.read_csv(hop) hop_df.head() # + # Counting unique values in the SN column # Printing results into a table using pd.Dataframe total_players = len(pd.unique(hop_df['SN'])) pd.DataFrame({"Total Players": [total_players]}) # + # Counting unique values in the Item ID column # Calculating the average price using the mean function and rounding it to two decimals using the round function # Calculating the number of purchases using the length function # Calculating the sum of the Price column using the sum function # Printing the results into a table (rounded) and printing the total revenue using f-strings unique_items = len(pd.unique(hop_df['Item ID'])) average_price= round(hop_df['Price'].mean(), 2) purchases_number = len(hop_df['Price']) total_revenue = hop_df['Price'].sum() pd.DataFrame({"Number of Unique Items": [unique_items], "Average Price": ["$"'{:,.2f}'.format(average_price)], "Number of Purchases": purchases_number, "Total Revenue": ["$"'{:,.2f}'.format(total_revenue)]}) # + # Removing all duplicates from the SN column using the drop duplicates function # Calculating the total count of players by Gender using the value_counts function # converting Series to DataFrame (table1) # Renaming column "Total Count" using the rename function unique_players_df = hop_df.drop_duplicates(subset='SN') total_gender_value_count = unique_players_df['Gender'].value_counts() table1_df = total_gender_value_count.to_frame() table1_df.rename(columns = {'Gender':'Total Count'}, inplace = True) # Rewriting the total count of players by Gender (Calculated above) # Calculating the total numbers of players using the len function # 
Calculating the percentage of players by Gender # Rounding the percentage to 2 decimal places using the round function # Formatting the percentage of players (integers) to strings to add the percentage symbol at the end of values # Converting Series to DataFrame (table2) # Renaming column "Percentage of Players" using the rename function total_gender_value_count = unique_players_df['Gender'].value_counts() total_gender_count = len(unique_players_df['Gender']) total_percentage_count = (total_gender_value_count / total_gender_count) * 100 rounded_percentage = total_percentage_count.round(decimals=2) rounded_percentage = rounded_percentage.astype(str)+'%' table2_df = rounded_percentage.to_frame() table2_df.rename(columns = {'Gender':'Percentage of Players'}, inplace = True) # Merging DataFrames using the merge function # Printing the new merged DataFrame gender_demographics_df = pd.merge(table1_df, table2_df, left_index=True, right_index=True) gender_demographics_df.head() # + # Grouping data by Gender using the groupby function # Calculating the purchase count by gender using the count function # Converting series to dataframe (table3) # Renaming column "Purchase Count" using the rename function gender_group_df = hop_df.groupby('Gender') purchase_count = gender_group_df['Gender'].count() table3_df = purchase_count.to_frame() table3_df.rename(columns = {'Gender':'Purchase count'}, inplace=True) # Grouping data by Gender using the groupby function # Calculating the Average Purchase price by gender using the mean function # Converting series to dataframe (table4) # Renaming column "Average Purchase price" # Rounding values to 2 decimal places using the round function # Formatting the average purchase price by converting integers to strings and adding the $ sign before each value gender_group_df = hop_df.groupby('Gender') average = gender_group_df['Price'].mean() table4_df = average.to_frame() table4_df.rename(columns = {'Price':'Average Purchase Price'}, inplace = True) 
table4_df['Average Purchase Price'] = table4_df['Average Purchase Price'].round(2) table4_df['Average Purchase Price'] = '$' + table4_df['Average Purchase Price'].astype(str) # Grouping data by Gender using the groupby function # Calculating the Total Purchase Value by gender using the sum function # Converting series to DataFrame (table5) # Renaming column "Total Purchase Value" using the rename function # Rounding values to 2 decimal places using the round function # Formatting the total purchase value by converting integers to strings and adding the $ sign before each value gender_group_df = hop_df.groupby('Gender') total_purchase = (gender_group_df['Price'].sum()) table5_df = total_purchase.to_frame() table5_df.rename(columns = {'Price': 'Total Purchase Value'}, inplace = True) table5_df['Total Purchase Value'] = table5_df['Total Purchase Value'].round(2) table5_df['Total Purchase Value'] ='$' +table5_df['Total Purchase Value'].astype(str) # Grouping data by Gender using the groupby function # Calculating the Average Total Purchase per person by dividing the total purchase by the total gender value count # Converting series to DataFrame (table6) # Renaming column "Average Total Purchase per person" # Rounding the values to 2 decimal places using the round function # Formatting the avg total purchase per person by converting integers to strings and adding the $ sign before each value gender_group_df = hop_df.groupby('Gender') avg_total = total_purchase / total_gender_value_count table6_df = avg_total.to_frame() table6_df.rename(columns = {0 : 'Avg Total Purchase per Person'}, inplace = True) table6_df['Avg Total Purchase per Person'] = table6_df['Avg Total Purchase per Person'].round(2) table6_df['Avg Total Purchase per Person'] ='$' +table6_df['Avg Total Purchase per Person'].astype(str) # Merging DataFrames using the merge function # Printing the new merged DataFrame combined1_df = pd.merge(table3_df, table4_df, left_index=True, right_index=True) combined2_df 
= pd.merge(combined1_df, table5_df, left_index=True, right_index=True) combined3_df = pd.merge(combined2_df, table6_df, left_index=True, right_index=True) combined3_df.head() # + # Creating bins in which to place age values # Creating labels for these bins(age labels) # Slicing data and putting into bins using the pd.cut function bins = [0, 9, 14, 19, 24, 29, 34, 39, 50] age_labels = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+" ] pd.cut(unique_players_df["Age"], bins, labels=age_labels).head() # Placing the data series into a new column inside of the DataFrame called "Age Ranges" hop_df["Age Ranges"] = pd.cut(hop_df["Age"], bins, labels=age_labels) # Returning the DataFrame with duplicate rows from column SN removed # Grouping data by Age Ranges # Calculating the total numbers of players by age ranges using the count function # Converting series into DataFrame (total_count) # Renaming column "Total Count" using the rename function unique_age_df = hop_df.drop_duplicates(subset='SN') group_age = unique_age_df.groupby('Age Ranges') total_count = group_age['Age'].count() total_count_df = total_count.to_frame() total_count_df.rename(columns= {'Age': 'Total Count'}, inplace = True) # Calculating the percentage of players by age ranges # Converting series to DataFrame (percentage_players) # Renaming column "Percentage of Players" using the rename function # Rounding values to 2 decimal places # Formatting the percentage of players (integers) to strings to add the percentage symbol at the end of values percentage_players = (total_count / total_players) * 100 percentage_players_df = percentage_players.to_frame() percentage_players_df.rename(columns = {'Age': 'Percentage of Players'}, inplace = True) percentage_players_df['Percentage of Players'] = percentage_players_df['Percentage of Players'].round(2) percentage_players_df['Percentage of Players'] = percentage_players_df['Percentage of Players'].astype(str) + '%' # Merging the two DataFrames using 
the pd.merge function # Removing index name # Printing results age_demographics_df = pd.merge(total_count_df, percentage_players_df, left_index=True, right_index=True) age_demographics_df.index.name = None age_demographics_df.head(8) # + # Grouping data by Age Ranges using the groupby function # Calculating the Purchase count of players by age ranges (pc_age) using the count function # Converting series to DataFrame # Renaming column "Purchase Count" using the rename function pa_age = hop_df.groupby('Age Ranges') pc_age = pa_age['Price'].count() pc_age_df = pc_age.to_frame() pc_age_df.rename(columns = {'Price': 'Purchase Count'}, inplace = True) # Grouping data by Age Ranges using the groupby function # Calculating the average purchase price by age ranges (avg_pc) using the mean function # Converting series to DataFrame # Renaming column "Average Purchase Price" using the rename function # Rounding values to 2 decimal places using the round function # Formatting the average purchase price by converting integers to strings and adding the $ sign before each value pa_age = hop_df.groupby('Age Ranges') avg_pc = pa_age['Price'].mean() avg_pc_df = avg_pc.to_frame() avg_pc_df.rename(columns = {'Price': 'Average Purchase Price'}, inplace = True) avg_pc_df['Average Purchase Price'] = avg_pc_df['Average Purchase Price'].round(2) avg_pc_df['Average Purchase Price'] ='$' +avg_pc_df['Average Purchase Price'].astype(str) # Grouping data by Age Ranges using the groupby function # Calculating the total purchase value by age ranges (tpv) using the sum function # Converting series to DataFrame # Renaming column to "Total Purchase Value" using the rename function # Rounding values to 2 decimal places using the round function # Formatting the total purchase value by converting integers to strings and adding the $ sign before each value pa_age = hop_df.groupby('Age Ranges') tpv = pa_age['Price'].sum() tpv_df = tpv.to_frame() tpv_df.rename(columns = {'Price': 'Total Purchase Value'}, 
inplace = True) tpv_df['Total Purchase Value'] = tpv_df['Total Purchase Value'].round(2) tpv_df['Total Purchase Value'] ='$' +tpv_df['Total Purchase Value'].astype(str) # Grouping data by Age Ranges using the groupby function # Calculating the total players by each age ranges (total_players_age) using the value counts function # Calculating the average total purchase per person by age ranges # Converting series to DataFrame # Renaming column "Avg Total Purchase per Person" using the rename function # Rounding values to 2 decimal places using the round function # Formatting the avg total purchase per person by converting integers to strings and adding the $ sign before each value pa_age_df = hop_df.groupby('Age Ranges') unique_age_df = hop_df.drop_duplicates(subset='SN') group_age = unique_age_df.groupby('Age Ranges') total_players_age = unique_age_df['Age Ranges'].value_counts() avg_tp_age = tpv / total_players_age avg_tp_age_df = avg_tp_age.to_frame() avg_tp_age_df.rename(columns = {0 : 'Avg Total Purchase per Person'}, inplace = True) avg_tp_age_df['Avg Total Purchase per Person'] = avg_tp_age_df['Avg Total Purchase per Person'].round(2) avg_tp_age_df['Avg Total Purchase per Person'] ='$' +avg_tp_age_df['Avg Total Purchase per Person'].astype(str) # Merging the above DataFrames using the merge functionpa_age1_df = pd.merge(pc_age_df, avg_pc_df, left_index=True, right_index=True) pa_age1_df = pd.merge(pc_age_df, avg_pc_df, left_index=True, right_index=True) pa_age2_df = pd.merge(pa_age1_df, tpv_df, left_index=True, right_index=True) pa_age3_df = pd.merge(pa_age2_df, avg_tp_age_df, left_index=True, right_index=True) pa_age3_df.head(8) # + # Grouping data by SN using the groupby function ("ts" for top spenders) ts = hop_df.groupby("SN") # Calculating the purchase count (pc) using the count function # Converting series to DataFrames and renaming columns # Renaming column to "Purchase Count" pc = ts['Price'].count() pc_df = pc.to_frame() pc_df.rename(columns= 
{'Price': 'Purchase Count'}, inplace = True) # Calculating the average purchase price (app) using the mean function # Converting series to DataFrames and renaming columns # Renaming column to "Average Purchase Price" # Rounding values to 2 decimal places using the round function app = ts['Price'].mean() app_df = app.to_frame() app_df.rename(columns= {'Price': 'Average Purchase Price'}, inplace = True) app_df['Average Purchase Price'] = app_df['Average Purchase Price'].round(2) # Calculating the Total Purchase Value (tpv) using the sum function # Converting series to DataFrames and renaming columns # Renaming column to "Total Purchase Value" # Rounding values to 2 decimal places using the round function tpv = ts['Price'].sum() tpv_df = tpv.to_frame() tpv_df.rename(columns= {'Price': 'Total Purchase Value'}, inplace = True) tpv_df['Total Purchase Value'] = tpv_df['Total Purchase Value'].round(2) # Merging the above DataFrames merge1_df = pd.merge(pc_df, app_df, left_index=True, right_index=True) merge2_df = pd.merge(merge1_df, tpv_df, left_index=True, right_index=True) # Sorting the total purchase value column in descending order sorted_merge2_df = merge2_df.sort_values(by='Total Purchase Value', ascending=False).head() # Formatting the avg purchase price and the total purchase value by converting integers to strings and adding the $ sign before each value (in the sorted DataFrame) sorted_merge2_df['Average Purchase Price'] ='$' +sorted_merge2_df['Average Purchase Price'].astype(str) sorted_merge2_df['Total Purchase Value'] ='$' +sorted_merge2_df['Total Purchase Value'].astype(str) sorted_merge2_df.head() # + # Grouping data by Item ID and Item Name using the groupby function ("mpi" for Most Popular Items) # Calculating the purchase count of most popular items (pc2) using the count function # Converting Series to DataFrame # Renaming column "Purchase Count" using the rename function mpi = hop_df.groupby(["Item ID", "Item Name"]) pc2 = mpi['Price'].count() pc2_df = 
pc2.to_frame() pc2_df.rename(columns= {'Price': 'Purchase Count'}, inplace = True) # Calculating the Item price of the most popular items (ip) using the mean function # Converting Series to DataFrame # Renaming column "Item Price" using the rename function # Rounding values to 2 decimal places using the round function ip = mpi['Price'].mean() ip_df = ip.to_frame() ip_df.rename(columns= {'Price': 'Item Price'}, inplace = True) ip_df['Item Price'] = ip_df['Item Price'].round(2) # Calculating the total purchase value of the most popular items (tpv2) using the sum function # Converting Series to DataFrame # Renaming column "Total Purchase Value" using the rename function # Rounding values to 2 decimal places using the round function tpv2 = mpi['Price'].sum() tpv2_df = tpv2.to_frame() tpv2_df.rename(columns= {'Price': 'Total Purchase Value'}, inplace = True) tpv2_df['Total Purchase Value'] = tpv2_df['Total Purchase Value'].round(2) # Merging above DataFrames merge_mpi1_df = pd.merge(pc2_df, ip_df, left_index=True, right_index=True) merge_mpi2_df = pd.merge(merge_mpi1_df, tpv2_df, left_index=True, right_index=True) # Sorting the Purchase Count column in descending order sorted_merge_mpi2_df = merge_mpi2_df.sort_values(by='Purchase Count', ascending=False).head(5) # Formatting the item price and the total purchase value by converting integers to strings and adding the $ sign before each value (in the sorted DataFrame) sorted_merge_mpi2_df['Item Price'] ='$' +sorted_merge_mpi2_df['Item Price'].astype(str) sorted_merge_mpi2_df['Total Purchase Value'] ='$' +sorted_merge_mpi2_df['Total Purchase Value'].astype(str) sorted_merge_mpi2_df.head() # + # Sorting the above table by total purchase value in descending order sorted_merge_mpi3_df = merge_mpi2_df.sort_values(by='Total Purchase Value', ascending=False).head(5) # Formatting the item price and the total purchase value by converting integers to strings and adding the $ sign before each value (in the sorted DataFrame) 
sorted_merge_mpi3_df['Item Price'] ='$' +sorted_merge_mpi3_df['Item Price'].astype(str) sorted_merge_mpi3_df['Total Purchase Value'] ='$' +sorted_merge_mpi3_df['Total Purchase Value'].astype(str) sorted_merge_mpi3_df.head()
HeroesOfPymoli_HW.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Numerical integration of Ordinary Differential Equations # This notebook serves as a quick refresher on ordinary differential equations. If you are familiar with the topic: feel free to skim this notebook. # # We will first consider the decay of tritium as an example: # # $$ # \mathrm{^3H \overset{\lambda}\rightarrow\ ^3He + e^- + \bar{\nu_e}} # $$ # # We will not concern ourselves with the products, instead we will only take interest in the number density of $\mathrm{^3H}$ as function of time, let's call it $y(t)$. The rate of change of $y(t)$ is proportional to itself and the decay constant ($\lambda$): # # $$ # \frac{dy(t)}{dt} = -\lambda y(t) # $$ # # you probably know the solution to this class of differential equations (either from experience or by guessing an appropriate ansatz). SymPy can of course also solve this equation: import sympy as sym sym.init_printing() t, l = sym.symbols('t lambda') y = sym.Function('y')(t) dydt = y.diff(t) expr = sym.Eq(dydt, -l*y) expr sym.dsolve(expr) # Now, pretend for a while that this function lacked an analytic solution. We could then integrate this equation *numerically* from an initial state for a predetermined amount of time by discretizing the time into a seriers of small steps. # ### Explicit methods # For each step taken we would update $y$ by multiplying the derivative with the step size (assuming that the derivate is approximately constant on the scale of the step-size), formally this method is known as "forward Euler": # # $$ # y_{n+1} = y_n + y'(t_n)\cdot \Delta h # $$ # # this is known as an *explicit* method, i.e. the derivative at the current time step is used to calculate the next step *forward*. 
# # For demonstration purposes only, we implement this in Python: import numpy as np def euler_fw(rhs, y0, tout, params): y0 = np.atleast_1d(np.asarray(y0, dtype=np.float64)) dydt = np.empty_like(y0) yout = np.zeros((len(tout), len(y0))) yout[0] = y0 t_old = tout[0] for i, t in enumerate(tout[1:], 1): dydt[:] = rhs(yout[i-1], t, *params) h = t - t_old yout[i] = yout[i-1] + dydt*h t_old = t return yout # applying this function on our model problem: def rhs(y, t, decay_constant): return -decay_constant*y # the rate does not depend on time ("t") tout = np.linspace(0, 2e9, 100) y0 = 3 params = (1.78e-9,) # 1 parameter, decay constant of tritium yout = euler_fw(rhs, y0, tout, params) # and plotting the solution & the numerical error using matplotlib: # + import matplotlib.pyplot as plt # %matplotlib inline def my_plot(tout, yout, params, xlbl='time / a.u.', ylabel=None, analytic=None): fig, axes = plt.subplots(1, 2 if analytic else 1, figsize=(14, 4)) axes = np.atleast_1d(axes) for i in range(yout.shape[1]): axes[0].plot(tout, yout[:, i], label='y%d' % i) if ylabel: axes[0].set_ylabel(ylabel) for ax in axes: ax.set_xlabel(xlbl) if analytic: axes[0].plot(tout, analytic(tout, yout, params), '--') axes[1].plot(tout, yout[:, 0] - yout[0]*np.exp(-params[0]*(tout-tout[0]))) if ylabel: axes[1].set_ylabel('Error in ' + ylabel) # - def analytic(tout, yout, params): return yout[0, 0]*np.exp(-params[0]*tout) my_plot(tout, yout, params, analytic=analytic, ylabel='number density / a.u.') # We see that 100 points gave us almost plotting accuracy. # # Unfortunately, Euler forward is not practical for most real world problems. Usually we want a higher order formula (the error in Euler forward scales only as $n^{-1}$), and we want to use an adaptive step size (larger steps when the function is smooth). 
So we use the well tested LSODA algorithm (provided in scipy as ``odeint``): from scipy.integrate import odeint yout, info = odeint(rhs, y0, tout, params, full_output=True) my_plot(tout, yout, params, analytic=analytic) print("Number of function evaluations: %d" % info['nfe'][-1]) # We can see that ``odeint`` was able to achieve a much higher precision using fewer number of function evaluations. # ### Implicit methods # For a large class of problems we need to base the step not on the derivative at the current time point, but rather at the next one (giving rise to an implicit expression). The simplest implicit stepper is "backward euler": # # $$ # y_{n+1} = y_n + y'(t_{n+1})\cdot \Delta h # $$ # # Problems requiring this type of steppers are known as "stiff". We will not go into the details of this (LSODA actually uses something more refined and switches between explicit and implicit steppers). # # In the upcoming notebooks we will use ``odeint`` to solve systems of ODEs (and not only linear equations as in this notebook). The emphasis is not on the numerical methods, but rather on how we, from symbolic expressions, can generate fast functions for the solver. # # ### Systems of differential equations # In order to show how we would formulate a system of differential equations we will here briefly look at the [van der Pol osciallator](https://en.wikipedia.org/wiki/Van_der_Pol_oscillator). 
It is a second order differential equation: # # $$ # {d^2y_0 \over dx^2}-\mu(1-y_0^2){dy_0 \over dx}+y_0= 0 # $$ # # One way to reduce the order of our second order differential equation is to formulate it as a system of first order ODEs, using: # # $$ y_1 = \dot y_0 $$ # # which gives us: # # $$ # \begin{cases} # \dot y_0 = y_1 \\ # \dot y_1 = \mu(1-y_0^2) y_1-y_0 # \end{cases} # $$ # # Let's call the function for this system of ordinary differential equations ``vdp``: def vdp(y, t, mu): return [ y[1], mu*(1-y[0]**2)*y[1] - y[0] ] # using "Euler forward": tout = np.linspace(0, 200, 1024) y_init, params = [1, 0], (17,) y_euler = euler_fw(vdp, y_init, tout, params) # never mind the warnings emitted here... my_plot(tout, y_euler, params) # That does not look like an oscillator. (we see that Euler forward has deviated to values with enormous magnitude), here the advanced treatment by the ``odeint`` solver is far superior: y_odeint, info = odeint(vdp, y_init, tout, params, full_output=True) print("Number of function evaluations: %d, number of Jacobian evaluations: %d" % (info['nfe'][-1], info['nje'][-1])) my_plot(tout, y_odeint, params) # We see that LSODA has evaluated the Jacobian. But we never gave it an explicit representation of it―so how could it? # # It estimated the Jacobian matrix by using finite differences. Let's see if we can do better if we provide a function to calculate the (analytic) Jacobian. # ## Exercise: manually write a function evaluating a Jacobian # First we need to know what signature ``odeint`` expects, we look at the documentation by using the ``help`` command: (or using ``?`` in IPython) help(odeint) # just skip to "Dfun" # so the signature needs to be: ``(state-vector, time, parameters) -> matrix`` # %load_ext scipy2017codegen.exercise # Use either the * ``%exercise`` * or * ``%load`` * magic to get the exercise / solution respecitvely (*i.e.* delete the whole contents of the cell except for the uncommented magic command). 
Replace **???** with the correct expression. # # Remember that our system is defined as: # $$ # \begin{cases} # \dot y_0 = y_1 \\ # \dot y_1 = \mu(1-y_0^2) y_1-y_0 # \end{cases} # $$ # %exercise exercise_jac_func.py J_func(y_init, tout[0], params[0]) y_odeint, info = odeint(vdp, y_init, tout, params, full_output=True, Dfun=J_func) my_plot(tout, y_odeint, params) print("Number of function evaluations: %d, number of Jacobian evaluations: %d" % (info['nfe'][-1], info['nje'][-1])) # So this time the integration needed to evaluate both the ODE system function and its Jacobian fewer times than when using finite difference approximations. The reason for this is that the more accurate the Jacobian is, the better is the convergence in the iterative (Newton's) method solving the implicit system of equations. # # For larger systems of ODEs the importance of providing a (correct) analytic Jacobian can be much bigger. # ### SymPy to the rescue # Instead of writing the jacobian function by hand we could have used SymPy's ``lambdify`` which we will introduce next. Here is a sneak peak on how it could be achieved: y = y0, y1 = sym.symbols('y0 y1') mu = sym.symbols('mu') J = sym.Matrix(vdp(y, None, mu)).jacobian(y) J_func = sym.lambdify((y, t, mu), J) J
notebooks/20-ordinary-differential-equations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # # 📝 Exercise M3.02 # # The goal is to find the best set of hyperparameters which maximize the # generalization performance on a training set. # # Here again with limit the size of the training set to make computation # run faster. Feel free to increase the `train_size` value if your computer # is powerful enough. # + vscode={"languageId": "python"} import numpy as np import pandas as pd adult_census = pd.read_csv("../datasets/adult-census.csv") target_name = "class" target = adult_census[target_name] data = adult_census.drop(columns=[target_name, "education-num"]) from sklearn.model_selection import train_test_split data_train, data_test, target_train, target_test = train_test_split( data, target, train_size=0.2, random_state=42) # - # In this exercise, we will progressively define the classification pipeline # and later tune its hyperparameters. # # Our pipeline should: # * preprocess the categorical columns using a `OneHotEncoder` and use a # `StandardScaler` to normalize the numerical data. # * use a `LogisticRegression` as a predictive model. # # Start by defining the columns and the preprocessing pipelines to be applied # on each group of columns. 
# + vscode={"languageId": "python"} from sklearn.compose import make_column_selector as selector # solution categorical_columns_selector = selector(dtype_include=object) categorical_columns = categorical_columns_selector(data) numerical_columns_selector = selector(dtype_exclude=object) numerical_columns = numerical_columns_selector(data) # + vscode={"languageId": "python"} from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler # solution categorical_processor = OneHotEncoder(handle_unknown="ignore") numerical_processor = StandardScaler() # - # Subsequently, create a `ColumnTransformer` to redirect the specific columns # a preprocessing pipeline. # + vscode={"languageId": "python"} from sklearn.compose import ColumnTransformer # solution preprocessor = ColumnTransformer( [('cat_preprocessor', categorical_processor, categorical_columns), ('num_preprocessor', numerical_processor, numerical_columns)] ) # - # Assemble the final pipeline by combining the above preprocessor # with a logistic regression classifier. Force the maximum number of # iterations to `10_000` to ensure that the model will converge. # + vscode={"languageId": "python"} from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression # solution model = make_pipeline(preprocessor, LogisticRegression(max_iter=10_000)) # - # Use `RandomizedSearchCV` with `n_iter=20` to find the best set of # hyperparameters by tuning the following parameters of the `model`: # # - the parameter `C` of the `LogisticRegression` with values ranging from # 0.001 to 10. You can use a log-uniform distribution # (i.e. `scipy.stats.loguniform`); # - the parameter `with_mean` of the `StandardScaler` with possible values # `True` or `False`; # - the parameter `with_std` of the `StandardScaler` with possible values # `True` or `False`. # # Once the computation has completed, print the best combination of parameters # stored in the `best_params_` attribute. 
# + vscode={"languageId": "python"} from sklearn.model_selection import RandomizedSearchCV from scipy.stats import loguniform # solution param_distributions = { "logisticregression__C": loguniform(0.001, 10), "columntransformer__num_preprocessor__with_mean": [True, False], "columntransformer__num_preprocessor__with_std": [True, False], } model_random_search = RandomizedSearchCV( model, param_distributions=param_distributions, n_iter=20, error_score=np.nan, n_jobs=2, verbose=1, random_state=1) model_random_search.fit(data_train, target_train) model_random_search.best_params_ # + [markdown] tags=["solution"] # # So the best hyperparameters give a model where the features are scaled but # not centered and the final model is regularized. # # Getting the best parameter combinations is the main outcome of the # hyper-parameter optimization procedure. However it is also interesting to # assess the sensitivity of the best models to the choice of those parameters. # The following code, not required to answer the quiz question shows how to # conduct such an interactive analysis for this this pipeline using a parallel # coordinate plot using the `plotly` library. # # We could use `cv_results = model_random_search.cv_results_` to make a # parallel coordinate plot as we did in the previous notebook (you are more # than welcome to try!). Instead we are going to load the results obtained from # a similar search with many more iterations (1,000 instead of 20). # + tags=["solution"] vscode={"languageId": "python"} cv_results = pd.read_csv( "../figures/randomized_search_results_logistic_regression.csv") # + [markdown] tags=["solution"] # To simplify the axis of the plot, we will rename the column of the dataframe # and only select the mean test score and the value of the hyperparameters. 
# + tags=["solution"] vscode={"languageId": "python"} column_name_mapping = { "param_logisticregression__C": "C", "param_columntransformer__num_preprocessor__with_mean": "centering", "param_columntransformer__num_preprocessor__with_std": "scaling", "mean_test_score": "mean test accuracy", } cv_results = cv_results.rename(columns=column_name_mapping) cv_results = cv_results[column_name_mapping.values()].sort_values( "mean test accuracy", ascending=False) # + [markdown] tags=["solution"] # In addition, the parallel coordinate plot from `plotly` expects all data to # be numeric. Thus, we convert the boolean indicator informing whether or not # the data were centered or scaled into an integer, where True is mapped to 1 # and False is mapped to 0. # # We also take the logarithm of the `C` values to span the data on a broader # range for a better visualization. # + tags=["solution"] vscode={"languageId": "python"} column_scaler = ["centering", "scaling"] cv_results[column_scaler] = cv_results[column_scaler].astype(np.int64) cv_results['log C'] = np.log10(cv_results['C']) # + tags=["solution"] vscode={"languageId": "python"} import plotly.express as px fig = px.parallel_coordinates( cv_results, color="mean test accuracy", dimensions=["log C", "centering", "scaling", "mean test accuracy"], color_continuous_scale=px.colors.diverging.Tealrose, ) fig.show() # + [markdown] tags=["solution"] # We recall that it is possible to select a range of results by clicking and # holding on any axis of the parallel coordinate plot. You can then slide # (move) the range selection and cross two selections to see the intersections. # # Selecting the best performing models (i.e. above an accuracy of ~0.845), we # observe the following pattern: # # - scaling the data is important. All the best performing models are scaling # the data; # - centering the data does not have a strong impact. 
Both approaches, # centering and not centering, can lead to good models; # - using some regularization is fine but using too much is a problem. Recall # that a smaller value of C means a stronger regularization. In particular # no pipeline with C lower than 0.001 can be found among the best # models.
notebooks/M3 Hyperparamter Tuning - C2 Automated Tuning - S5 ex M3.02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd


def compute_speed_and_error(distance, time, dtime):
    """Return ``(speed, speed_error)`` for `distance` covered in `time` ± `dtime`.

    With ``c = distance / (time**2 - dtime**2)`` the speed estimate is
    ``c * time`` and the propagated timing uncertainty is ``c * dtime``.
    """
    c = distance / (time * time - dtime * dtime)
    return c * time, c * dtime


# Rows are collected as plain dicts and turned into a DataFrame once at the
# end: ``DataFrame.append`` was deprecated in pandas 1.4 and removed in 2.0,
# and appending row-by-row was quadratic anyway.
COLUMNS = ["Unit Type", "Speed", "Speed Tol", "Status",
           "Smoke Duration", "Health %", "Health Color"]
records = []


def add_unit(unit_type, status, *, speed=None, speed_tol=None,
             smoke_duration=None, health_pct=None, health_color=None):
    """Append one observation row; omitted fields end up as NaN, as before."""
    records.append({
        "Unit Type": unit_type,
        "Speed": speed,
        "Speed Tol": speed_tol,
        "Status": status,
        "Smoke Duration": smoke_duration,
        "Health %": health_pct,
        "Health Color": health_color,
    })


pd.options.display.float_format = '{:.2f}'.format

# ### Harkonnen

# +
speed_d, speed_error_d = compute_speed_and_error(8, 26, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 17, 1)
add_unit("Devastator", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Devastator", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=26, health_pct=40, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 7, 1)
speed_i, speed_error_i = compute_speed_and_error(7, 6, 1)
add_unit("Quad", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
# NOTE(review): this row was recorded with status "Damaged" (not "Smoking")
# in the original data — preserved as-is.
add_unit("Quad", "Damaged", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=25, health_pct=15, health_color="Red")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 7, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 5, 1)
add_unit("Trike", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Trike", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=36, health_pct=20, health_color="Red")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 15, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 14, 1)
add_unit("Siege Tank", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Siege Tank", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=26, health_pct=30, health_color="Red")

# +
speed_d, speed_error_d = compute_speed_and_error(9, 13, 1)
speed_i, speed_error_i = compute_speed_and_error(9, 9, 1)
add_unit("Combat Tank H", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Combat Tank H", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=16, health_pct=30, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 9, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 7, 1)
add_unit("Missile Tank", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Missile Tank", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=25, health_pct=25, health_color="Red")
# -

# ### Ordos

# +
# Smoke-duration-only observations (no speed measurement was taken).
add_unit("Raider", "Smoking", smoke_duration=40, health_pct=45, health_color="Yellow")
add_unit("Raider", "Smoking", smoke_duration=36, health_pct=45, health_color="Yellow")
add_unit("Raider", "Smoking", smoke_duration=34, health_pct=45, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 8, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 6, 1)
add_unit("Combat Tank O", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Combat Tank O", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=36, health_pct=30, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 12, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 10, 1)
add_unit("Deviator", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Deviator", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=21, health_pct=30, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 5, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 3, 1)
add_unit("Stealth Raider", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Stealth Raider", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=21, health_pct=50, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 7, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 6, 1)
add_unit("Quad", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Quad", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=31, health_pct=30, health_color="Red")

# +
speed_d, speed_error_d = compute_speed_and_error(7, 16, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 13, 1)
add_unit("Siege Tank", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
add_unit("Siege Tank", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         smoke_duration=37, health_pct=50, health_color="Yellow")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 15, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 13, 1)
add_unit("Harvester", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
# No smoke-duration measurement was recorded for this observation.
add_unit("Harvester", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         health_pct=30, health_color="Red")

# +
speed_d, speed_error_d = compute_speed_and_error(8, 5, 1)
speed_i, speed_error_i = compute_speed_and_error(8, 3, 1)
add_unit("Raider", "Intact", speed=speed_i, speed_tol=speed_error_i,
         health_pct=100, health_color="Green")
# No smoke-duration measurement was recorded for this observation.
add_unit("Raider", "Smoking", speed=speed_d, speed_tol=speed_error_d,
         health_pct=45, health_color="Yellow")
# -

# Build the frame once, preserving the original column order.
df = pd.DataFrame(records, columns=COLUMNS)
df
df.to_csv("Damaged Vehicle.csv")
Damaged Vehicles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- import urllib.request from zipfile import ZipFile import pandas as pd import os # + import datetime x = datetime.datetime.now() etl_date = x.strftime("%Y%m%d_%H%M%S") print(etl_date) # %set_env etl_date={etl_date} # + import simplejson with open('/opt/ml/metadata/resource-metadata.json') as fh: metadata = simplejson.loads(fh.read()) accountid = metadata['ResourceArn'].split(':')[4] # %set_env accountid={accountid} # %set_env bucket_name=lab-{accountid} # - # ## Change the Value of the file_name below! # + from pyarrow.parquet import ParquetDataset import s3fs folder_name= "s3://" + os.getenv('bucket_name') + "/data/analytics/best_movies/" s3 = s3fs.S3FileSystem() paths = [path for path in s3.ls(folder_name) if path.endswith(".parquet.snappy")] dataset = ParquetDataset(paths, filesystem=s3) table = dataset.read() movies_df = table.to_pandas() print("movies_df has %s lines" % movies_df.shape[0]) movies_df["flag"] = 1 movies_df[0:10] # - movies_df.to_parquet( "s3://" + os.getenv('bucket_name') + "/data/analytics/best_movies/best_movies_" + etl_date + ".parquet.snappy") # ## Now, run you crawler again!
LAB04.2_Changing_Schema.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## DIO | Cognizant Cloud Data Engineer

# #### estudo iniciado em 1 de set. de 2021 17:59h

# ## Fundamentos de ETL com Python

# ###### Extract: os dados são extraídos de diferentes fontes.

# ###### Transform: propagados para a área de preparação de dados, onde são transformados e limpos.

# ###### Load: carregados no warehouse.

# ![cda804a712699845f08d8a3de2cffa29.jpg](attachment:cda804a712699845f08d8a3de2cffa29.jpg)

# ## Ferramentas e pacotes para trabalhar com o Python

# ![rqehp7jecwh1cp67vg8e.webp](attachment:rqehp7jecwh1cp67vg8e.webp)

# ![simple-etl-in-python-35-with-bonobo-pyparis-2017-82-320.png](attachment:simple-etl-in-python-35-with-bonobo-pyparis-2017-82-320.png)

# ![luigi.png](attachment:luigi.png)

# ![images.png](attachment:images.png)

# ![simple-etl-in-python-35-with-bonobo-pyparis-2017-82-320.png](attachment:simple-etl-in-python-35-with-bonobo-pyparis-2017-82-320.png)

# ![petl.png](attachment:petl.png)

# ![2560px-Pandas_logo.svg.png](attachment:2560px-Pandas_logo.svg.png)

# ## Análise de dados com Python e Pandas
Python/Fundamentos de ETL com Python/fundamentos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from sklearn.model_selection import train_test_split

# Load the News Aggregator corpus and keep only the five major publishers,
# retaining just the headline and its category label.
df = pd.read_csv('./newsCorpora_re.csv', header=None, sep='\t',
                 names=['ID', 'TITLE', 'URL', 'PUBLISHER', 'CATEGORY',
                        'STORY', 'HOSTNAME', 'TIMESTAMP'])
df = df.loc[df['PUBLISHER'].isin(['Reuters', 'Huffington Post', 'Businessweek',
                                  'Contactmusic.com', 'Daily Mail']),
            ['TITLE', 'CATEGORY']]

# Stratified 80/10/10 train/valid/test split.
train, valid_test = train_test_split(df, test_size=0.2, shuffle=True,
                                     random_state=0, stratify=df["CATEGORY"])
valid, test = train_test_split(valid_test, test_size=0.5, shuffle=True,
                               random_state=0,
                               stratify=valid_test["CATEGORY"])

print(train["CATEGORY"].value_counts())
print(valid["CATEGORY"].value_counts())
# Bug fix: the original printed the validation counts twice and never
# showed the test distribution.
print(test["CATEGORY"].value_counts())

# +
import string
import re


def preprocessing(text):
    """Normalize a headline: strip punctuation, lowercase, collapse digits to 0."""
    table = str.maketrans(string.punctuation, " " * len(string.punctuation))
    text = text.translate(table)  # replace every punctuation char with a space
    text = text.lower()
    text = re.sub('[0-9]+', "0", text)
    return text


df = pd.concat([train, valid, test], axis=0)
df.reset_index(drop=True, inplace=True)
df["TITLE"] = df["TITLE"].map(lambda x: preprocessing(x))
df.head()

# +
from sklearn.feature_extraction.text import TfidfVectorizer

train_valid = df[:len(train) + len(valid)]
test = df[len(train) + len(valid):]

# Fit TF-IDF on train+valid only, so the test set stays unseen.
vec_tfidf = TfidfVectorizer(min_df=10, ngram_range=(1, 2))
X_train_valid = vec_tfidf.fit_transform(train_valid["TITLE"])
X_test = vec_tfidf.transform(test["TITLE"])

# Fix: get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement.
X_train_valid = pd.DataFrame(X_train_valid.toarray(),
                             columns=vec_tfidf.get_feature_names_out())
X_test = pd.DataFrame(X_test.toarray(),
                      columns=vec_tfidf.get_feature_names_out())
X_train = X_train_valid[:len(train)]
X_valid = X_train_valid[len(train):]
X_train.head()
# -

from sklearn.linear_model import LogisticRegression

lg = LogisticRegression(random_state=0, max_iter=10000)
lg.fit(X_train, train["CATEGORY"])

import numpy as np


def score_lg(lg, X):
    """Return [max predicted probability per sample, predicted labels]."""
    return [np.max(lg.predict_proba(X), axis=1), lg.predict(X)]


train_pred = score_lg(lg, X_train)
test_pred = score_lg(lg, X_test)
print(train_pred)

# +
from sklearn.metrics import accuracy_score

train_accuracy = accuracy_score(train["CATEGORY"], train_pred[1])
test_accuracy = accuracy_score(test["CATEGORY"], test_pred[1])
print(f'正解率(学習データ):{train_accuracy:.3f}')
print(f'正解率(テストデータ):{test_accuracy:.3f}')

# +
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

train_cm = confusion_matrix(train["CATEGORY"], train_pred[1])
print(train_cm)
sns.heatmap(train_cm, annot=True, cmap="Blues", fmt=".4g")
plt.show()
# -

test_cm = confusion_matrix(test["CATEGORY"], test_pred[1])
print(test_cm)
sns.heatmap(test_cm, annot=True, fmt=".4g", cmap="Blues")
plt.show()

# +
from sklearn.metrics import precision_score, recall_score, f1_score


def calculate_score(y_true, y_pred):
    """Per-class precision/recall/F1 plus micro and macro averages.

    Bug fix: the original ignored its arguments and always scored the
    global ``test`` predictions; every metric now uses ``y_true``/``y_pred``.
    """
    labels = ["b", "e", "t", "m"]
    precision = precision_score(y_true, y_pred, average=None, labels=labels)
    precision = np.append(precision, precision_score(y_true, y_pred, average="micro"))
    precision = np.append(precision, precision_score(y_true, y_pred, average="macro"))
    recall = recall_score(y_true, y_pred, average=None, labels=labels)
    recall = np.append(recall, recall_score(y_true, y_pred, average="micro"))
    recall = np.append(recall, recall_score(y_true, y_pred, average="macro"))
    f1 = f1_score(y_true, y_pred, average=None, labels=labels)
    f1 = np.append(f1, f1_score(y_true, y_pred, average="micro"))
    f1 = np.append(f1, f1_score(y_true, y_pred, average="macro"))
    scores = pd.DataFrame({"適合率": precision, "再現率": recall, "f1スコア": f1},
                          index=["b", "e", "t", "m", "マイクロ平均", "マクロ平均"])
    return scores
# -

print(calculate_score(test["CATEGORY"], test_pred[1]))

# Show the ten most / least important features per class.
features = X_train.columns.values
index = [i for i in range(1, 11)]
for c, coef in zip(lg.classes_, lg.coef_):
    print(f'【カテゴリ】{c}')
    best10 = pd.DataFrame(features[np.argsort(coef)[::-1][:10]],
                          columns=['重要度上位'], index=index).T
    worst10 = pd.DataFrame(features[np.argsort(coef)[:10]],
                           columns=['重要度下位'], index=index).T
    display(pd.concat([best10, worst10], axis=0))
    print('\n')

# Sweep the regularization strength C and plot accuracy on all three splits.
from tqdm import tqdm

result = []
for C in tqdm(np.logspace(-5, 4, 10, base=10)):
    lg = LogisticRegression(random_state=0, max_iter=10000, C=C)
    lg.fit(X_train, train["CATEGORY"])
    train_pred = score_lg(lg, X_train)
    valid_pred = score_lg(lg, X_valid)
    test_pred = score_lg(lg, X_test)
    train_accuracy = accuracy_score(train["CATEGORY"], train_pred[1])
    valid_accuracy = accuracy_score(valid["CATEGORY"], valid_pred[1])
    test_accuracy = accuracy_score(test["CATEGORY"], test_pred[1])
    result.append([C, train_accuracy, valid_accuracy, test_accuracy])

result = np.array(result).T
plt.plot(result[0], result[1], label="train")
plt.plot(result[0], result[2], label="valid")
plt.plot(result[0], result[3], label="test")
plt.ylim(0, 1.1)
plt.ylabel("Accuracy")
plt.xscale("log")
plt.xlabel("C")
plt.legend()
plt.show()

# +
import optuna


def objective_lg(trial):
    """Optuna objective: maximize validation accuracy of an elastic-net model."""
    # suggest_uniform / suggest_loguniform were deprecated and later removed;
    # suggest_float is the supported, equivalent API.
    l1_ratio = trial.suggest_float("l1_ratio", 0, 1)
    C = trial.suggest_float("C", 1e-4, 1e4, log=True)
    lg = LogisticRegression(random_state=0, max_iter=1000,
                            penalty="elasticnet", solver="saga",
                            l1_ratio=l1_ratio, C=C)
    lg.fit(X_train, train["CATEGORY"])
    valid_pred = score_lg(lg, X_valid)
    valid_accuracy = accuracy_score(valid["CATEGORY"], valid_pred[1])
    return valid_accuracy


# +
study = optuna.create_study(direction="maximize")
study.optimize(objective_lg, timeout=300)

print("Best Trial:")
trial = study.best_trial
print("Value:{:.3f}".format(trial.value))
print("Param")
# Bug fix: dict iteration uses .items(); .item() raised AttributeError.
for key, value in trial.params.items():
    print("{}:{}".format(key, value))
# -
nlp_100knock/ML.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys import logging import numpy as np import scipy as sp import sklearn import statsmodels.api as sm from statsmodels.formula.api import ols # %load_ext autoreload # %autoreload 2 import matplotlib as mpl import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import seaborn as sns sns.set_context("poster") sns.set(rc={'figure.figsize': (16, 9.)}) sns.set_style("whitegrid") import pandas as pd pd.set_option("display.max_rows", 120) pd.set_option("display.max_columns", 120) logging.basicConfig(level=logging.INFO, stream=sys.stdout) # + import torch import torch.nn as nn import torch.nn.functional as F import mlp import snalu # - SEED = 123 np.random.seed(SEED) torch.manual_seed(SEED); # + def create_data(min_val, max_val, n_elts, fun_op, fun_name, single_dim=False): if single_dim: if fun_name == 'sqrt': min_val = 0 x = torch.randint(low=min_val, high=max_val + 1, size=(n_elts, 1)).float() y = fun_op(x).reshape(-1) else: x = torch.randint(low=min_val, high=max_val + 1, size=(n_elts, 2)).float() if fun_name == 'div': x = x[torch.nonzero(x[:, 1]).squeeze()] y = fun_op(x[:, 0], x[:, 1]) return x, y def split_data(data, less, greater, test_percentage=0.2): x, y = data inter = torch.nonzero(((x >= less) & (x <= greater)).all(dim=1)) extra_less = torch.nonzero((x < less).any(dim=1)) extra_greater = torch.nonzero((x > greater).any(dim=1)) extra = torch.cat([extra_less, extra_greater], dim=0) x_extra = torch.index_select(x, 0, extra.squeeze()) y_extra = torch.index_select(y, 0, extra.squeeze()) x_inter = torch.index_select(x, 0, inter.squeeze()) y_inter = torch.index_select(y, 0, inter.squeeze()) cutoff = int((1.0 - test_percentage) * x_inter.shape[0]) x_inter_train = x_inter[:cutoff] x_inter_test = 
x_inter[cutoff:] y_inter_train = y_inter[:cutoff] y_inter_test = y_inter[cutoff:] return (x_inter_train, y_inter_train), (x_inter_test, y_inter_test), (x_extra, y_extra) def train(model, data, n_epochs, optimizer, lr, verbose=False): opt = optimizer(model.parameters(), lr=lr) x, y = data early_break = 0 early_break_max = 70000 for epoch in range(n_epochs): pred = model(x).reshape(-1) mse = F.mse_loss(pred, y) mae = torch.mean(torch.abs(pred - y)) if mse < 0.05 and mae < 0.05: early_break += 1 if early_break >= early_break_max: break else: early_break = 0 opt.zero_grad() mse.backward() opt.step() if verbose and epoch % 50000 == 0: print(f'Epoch: {epoch}: mse={round(mse.item(), 2)}; mae={round(mae.item(), 2)}') def test(model, data): x, y = data pred = model(x).reshape(-1) mse = F.mse_loss(pred, y) mae = torch.mean(torch.abs(pred - y)) return round(mse.item(), 2), round(mae.item(), 2) # + fun_dict = { 'add': lambda x, y: x + y, 'sub': lambda x, y: x - y, 'mul': lambda x, y: x * y, 'div': lambda x, y: x / y, 'sqr': lambda x: torch.pow(x, 2), 'sqrt': lambda x: torch.sqrt(x) } models = { 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid, 'relu6': nn.ReLU6, 'softsign': nn.Softsign, 'selu': nn.SELU, 'elu': nn.ELU, 'relu': nn.ReLU, 'none': None, 'NAC': None, 'NALU': None, 'SNALU': None, } # - N_LAYERS = 2 OUT_DIM = 1 HIDDEN_DIM = 2 N_EPOCHS = int(6e5) OPTIMIZER = torch.optim.Adam LR = 0.0001 DATA_RANGE = (-200, 200) LESS_THAN = -100 GREATER_THAN = 100 N_ELTS = 1000 # + interpolation_logs = {} extrapolation_logs = {} for fun_name, fun_op in fun_dict.items(): if fun_name in ['sqr', 'sqrt']: single_dim = True in_dim = 1 else: single_dim = False in_dim = 2 data = create_data(*DATA_RANGE, N_ELTS, fun_op, fun_name, single_dim) data_train, data_test, data_extra = split_data(data, less=LESS_THAN, greater=GREATER_THAN) interpolation_logs[fun_name] = {} extrapolation_logs[fun_name] = {} for model_name, act in models.items(): if model_name == 'NAC': model = snalu.StackedNAC(N_LAYERS, in_dim, 
OUT_DIM, HIDDEN_DIM) elif model_name == 'NALU': model = snalu.StackedNALU(N_LAYERS, in_dim, OUT_DIM, HIDDEN_DIM) elif model_name == 'SNALU': model = snalu.StackedSNALU(N_LAYERS, in_dim, OUT_DIM, HIDDEN_DIM) else: model = mlp.MLP(N_LAYERS, in_dim, OUT_DIM, HIDDEN_DIM, act) train(model, data_train, N_EPOCHS, OPTIMIZER, LR) _, mae_inter = test(model, data_test) _, mae_extra = test(model, data_extra) interpolation_logs[fun_name][model_name] = mae_inter extrapolation_logs[fun_name][model_name] = mae_extra print(f'{fun_name.ljust(10)}: {model_name.ljust(10)}: mae inter: {mae_inter}, mae extra: {mae_extra}') del model # - # ## Just for debugging data = create_data(*DATA_RANGE, N_ELTS, fun_dict['mul'], 'mul', single_dim=False) data_train, data_test, data_extra = split_data(data, less=LESS_THAN, greater=GREATER_THAN) model = snalu.StackedSNALU(N_LAYERS, in_dim=2, out_dim=OUT_DIM, hidden_dim=HIDDEN_DIM) # + #train(model, data_train, N_EPOCHS, OPTIMIZER, lr=0.0001, verbose=True) #print(test(model, data_test)) #print(test(model, data_extra)) # - # ## Plot results # + def autolabel(rects, ax): for rect in rects: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2., 0.9 * height, str(height), ha='center', va='bottom') idx = 1 n_rows = len(interpolation_logs.keys()) figure = plt.figure(figsize=(20, 40)) for fun_name in interpolation_logs.keys(): ax1 = figure.add_subplot(n_rows, 2, idx) items = list(interpolation_logs[fun_name].keys()) y_pos = np.arange(len(items)) mae = list(interpolation_logs[fun_name].values()) rect1 = ax1.bar(y_pos, mae, align='center', alpha=0.5) ax1.set_xticks(np.arange(len(items))) ax1.set_xticklabels(items) ax1.set_ylabel('mae') ax1.set_title(f'{fun_name} (interpolation)') autolabel(rect1, ax1) ax2 = figure.add_subplot(n_rows, 2, idx + 1) items = list(extrapolation_logs[fun_name].keys()) y_pos = np.arange(len(items)) mae = list(extrapolation_logs[fun_name].values()) rect2 = ax2.bar(y_pos, mae, align='center', alpha=0.5) 
ax2.set_xticks(np.arange(len(items))) ax2.set_xticklabels(items) ax2.set_ylabel('mae') ax2.set_title(f'{fun_name} (extrapolation)') autolabel(rect2, ax2) idx += 2 #plt.show() plt.savefig('images/results.png') # -
train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alec2005/2021_22-Ratza-Intro-CS-Sem-2/blob/main/ALEC_BARNES_Copy_of_python_basics_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="2cOCCaF6Q4ys" # ### Key Terms # # Complete the following questions to solidify you're understanding of the key concepts. Feel free to use the lecture notebook or any other resources to help you, but do not copy and paste answers from the internet instead use your own words to present your understanding. # # ---- # + [markdown] id="e_bDYmbjRHNc" # What is a general use programming language? # + [markdown] id="ROigjjFsRHKB" # To use a basic string of code for the computer to understand and use # + [markdown] id="X4j6a8y4RHGG" # Who invented Python and when? # + [markdown] id="B5WWUhgXRHDt" # <NAME> 1991 # + [markdown] id="RHkgvf39RHBm" # What is the difference between front-end and back-end? # + [markdown] id="cl7LFD3pRG_H" # Front end you directly interact with and backend taeks it in and processes it # + [markdown] id="oTkkpwhVRG80" # What is a GUI? # + [markdown] id="aoVSrA67RG6D" # A front end component its the graphics user interface # + [markdown] id="nHjQwFW1RG4C" # What is an API? # + [markdown] id="0A6LKX_5RG1x" # A back end component the application program interface they define the interactions between programs and services # + [markdown] id="RWzJmW61RGzR" # What is Open Source Software? # + [markdown] id="J8Lcpl46RGwu" # Code designed to be accsesed to the public # + [markdown] id="gob_Rgp8RGqT" # What is a development environment? 
# + [markdown] id="BCPeV2UCRGZm" # Where a programer or devolper does their work # + [markdown] id="lKduQMuWSYoC" # What is meant by local and remote in the context of computers? # + [markdown] id="GcJMiJjFSfO2" # Local is on your pc and remote is accsessed through the internent # + [markdown] id="cb6Coc48Sgev" # What is an operating system? # + [markdown] id="586kJGbvSlw5" # An operating system is what the computer usses to run usually breaks down the kernal and shell # + [markdown] id="7tRiStzGSltk" # What is a kernel? # + [markdown] id="jp5g1ZgWSlq6" # Gives the os controll over parts of the computer # + [markdown] id="WdOAakQJSloH" # What is a shell? # + [markdown] id="Gpaic8JSSlle" # It exposes the opperating system # + [markdown] id="hn5QICK0Slio" # What shell do we have access to in Colab notebooks? # + [markdown] id="vYRb0JJXSlfx" # Bash # + [markdown] id="dmgmzh-ISldB" # What is an interpreter? # + [markdown] id="Y79bP4GLS3bI" # Converts the code into something be able to be interpreted by the machiene # + [markdown] id="A2q30j02S3G3" # What is a value in programming? # + [markdown] id="ilES-rT6S9vm" # You set a variable to have a value # + [markdown] id="KI34llQnS--5" # What is an expression in programming? # + [markdown] id="VHXYGhIcTBw9" # The lines of code that are written # + [markdown] id="pmKFicLMTCcD" # What is syntax? # + [markdown] id="sqCxeE2TTKvy" # The programs version of grammer # + [markdown] id="B8m5jqm6TLHE" # What do we call the process of discovering and resolving errors? # + [markdown] id="0TozR78oTRK4" # debugging # + [markdown] id="OJR_RDQpTRyR" # ### Code # + [markdown] id="zPvODBfiTWCP" # Let's revisit some of the things we practiced in the lecture. In the code cell below print your name to the console without first declaring it as a variable. 
# + id="mZb-v_UwTO7B" colab={"base_uri": "https://localhost:8080/"} outputId="2dc6679a-eeed-4c3a-d300-4007e582e3c3" print("Alec") # + [markdown] id="sZPksnwpTnTD" # Now declare your first name and last name as separate variables and combine them in the print statement. # + id="oqmZRhYLTztw" colab={"base_uri": "https://localhost:8080/"} outputId="d3c4c9b2-1e67-4203-a240-465fab<PASSWORD>" last_name = ("Barnes") first_name = ("Alec") print(f"{first_name} {last_name}") # + [markdown] id="cNe3K4WZT2_0" # In the cell below run the "Zen of Python" easter egg. # + id="FSkN7Q52UKyU" colab={"base_uri": "https://localhost:8080/"} outputId="c906af9d-a45f-4441-e630-4bb2f33290ba" import this # + [markdown] id="2ADI5kQAUMLI" # ### Explore # + [markdown] id="vchHFmicUOid" # This portion of the assignment contains things we didn't explicitly cover in the lecture, instead encouraging you to explore and experiment on your own to discover some of the different operators and expressions in Python. For each expression first describe what you expect to happen before running the code cell. # # Documentation for Python's numeric operators can be found [here](https://docs.python.org/3.10/library/stdtypes.html#numeric-types-int-float-complex) # + [markdown] id="_lTiBbJMU28S" # #### `5 + 2 * 2` # # What do you expect to happen? # + [markdown] id="P6-diOTwU_ir" # Two times two plus five # + id="ALTC2aYRUNRe" colab={"base_uri": "https://localhost:8080/"} outputId="d9f3284e-68ce-4d2f-c3aa-e8dfcede4b6c" 5 + 2 * 2 # + [markdown] id="zSMDH8osVEEN" # #### `2 / 3` # # What do you expect to happen? # + [markdown] id="MJ_mZbouVI9_" # 2 divided by 3 # + id="FYMUlyCEVHD1" colab={"base_uri": "https://localhost:8080/"} outputId="6d59632a-7507-49c4-c14b-b47beab5c26d" 2 / 3 # + [markdown] id="c8LQrbNQVIID" # #### `2.5 * 10` # # What do you expect to happen? 
# + [markdown] id="J1Ts7WLEVPU-" # 2.5 times 10 # + id="IuE7GclzVOpO" colab={"base_uri": "https://localhost:8080/"} outputId="472d28c7-b3b9-45a6-bc55-755e6991f593" 2.5 * 10 # + [markdown] id="YfKfY31nVSfy" # #### `a` # # What do you expect to happen? # + [markdown] id="Y_FQACtgVVL8" # An error to occur # + id="pukzPvzXVUgM" colab={"base_uri": "https://localhost:8080/", "height": 165} outputId="6362c7a2-75d9-436b-af88-f2d475706e4b" a # + [markdown] id="x_G2qoXLVhVj" # #### `'a'` # # # What do you expect to happen? # + [markdown] id="IjTY1xn_VoB4" # Print out an a # + id="e0PEkzRHVjVo" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="fee38134-98ed-4f32-a04f-b8f851a0e448" 'a' # + [markdown] id="2kCnvuRvVprG" # #### `521 // 5` # # What do you expect to happen? # + [markdown] id="QWOocovcV3i6" # To print out a remainder # + id="n9QgKjHxV7oX" colab={"base_uri": "https://localhost:8080/"} outputId="35f8a0c7-b5c7-40db-d836-ab80331b65d8" 521 // 5 # + id="7euWDIcY2yjr"
ALEC_BARNES_Copy_of_python_basics_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sage # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.2 # language: sage # name: sagemath # --- T = simplicial_complexes.Torus(); T X = SimplicialComplex([[0,0,0], [0,0,1], [0,1,0], [0,0,1]]) X.homology(reduced=False) T.homology(reduced=False) C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}) C.homology() d1=matrix(ZZ,[[-1,1,0,0,0],[1,0,-1,-1,0],[0,-1,1,0,-1],[0,0,0,1,1]]); d1 d2=matrix(ZZ,[[1,0],[1,0],[1,1],[0,-1],[0,1]]); d2 d1*d2 d2.right_kernel() cmplx = ChainComplex({1:d1,2:d2},degree=-1); cmplx.homology() cmplx = ChainComplex({1:matrix(ZZ,[0]),2:matrix(ZZ,[1,1])},degree=-1); cmplx.homology() matrix(ZZ,[-1,1]).right_kernel() d2=matrix(ZZ,[[-1,-1,0,0],[1,0,-1,0],[-1,0,0,1],[0,1,1,0],[0,0,-1,1],[0,-1,0,-1]]); d1 d1=matrix(ZZ,[[1,0,-1,0,0,-1],[-1,-1,0,-1,0,0],[0,0,0,1,1,1],[0,1,1,0,-1,0]]); d2 d3=matrix(ZZ,[[1],[-1],[1],[1]]); d3 d1*d2 d2*d3 cmplx = ChainComplex({1:d1,2:d2},degree=-1); cmplx.homology() X = SimplicialComplex([[0,1], [1,2], [2,3], [3,0]]) X.homology(reduced=False) X = SimplicialComplex([[0],[1],[2],[3],[4],[5],[6]]) X
ex_homology_computations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Iuzpb0CwjNa3" # # *Necessary Imports* # + id="u6O0UvRx0zcu" import numpy as np import pandas as pd import keras from keras.applications.vgg19 import VGG19 from keras.models import Model from keras.layers import Dense,Dropout,Flatten from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() from tensorflow.keras.utils import to_categorical from keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint import matplotlib.pyplot as plt # %matplotlib inline import os from tqdm import tqdm from sklearn import preprocessing #from sklearn_model_selection import train_test_split import cv2 # + [markdown] id="y5eF89VyjS9m" # # **Loading the .npy files** # *The csv were converted to npy in the CNN task and thus not repeated here* # + colab={"base_uri": "https://localhost:8080/"} id="okrxFFJI2UFR" outputId="37777e2e-0d23-4926-c7ee-cb8480182980" # %cd /content/drive/MyDrive/URC/Fashion_MNIST/Test testX = np.load('fashion-mnist_test_X.npy') testY = np.load('fashion-mnist_test_Y.npy') # %cd /content/drive/MyDrive/URC/Fashion_MNIST/Validation validationX = np.load('validation20k_X.npy') validationY = np.load('validation20k_Y.npy') # %cd /content/drive/MyDrive/URC/Fashion_MNIST/Train trainX = np.load('fashion-mnist_train40k_X.npy') trainY = np.load('fashion-mnist_train40k_Y.npy') # + [markdown] id="mQ3IIouzjubS" # ## Preprocessing Functions # # # * ***reshape_image()*** : # # ~*VGG19 is only applicable to RGB images thus we concatenate the grayscale image 3 times along the 4th dimension.* # # ~*VGG19 needs a minimum dimension of 32x32 thus we pad the input by 2 units each* # * ***preprocessY()***: # # ~*converting the labels and Ydata in a form comprehendible by the network* # # # + 
id="QVaPz6yH2ZFW" def reshape_image (data, size): data = np.array(data) print(data.shape) data = data.reshape(size, 28, 28) data = np.repeat(data[..., np.newaxis], 3, -1) # concatenating 3 times in 4th dimension data = np.pad(data,((0,0),(2,2),(2,2),(0,0)),mode = 'edge') return data # + colab={"base_uri": "https://localhost:8080/"} id="g3xkT_KI2bSU" outputId="1c2e84ec-918a-4dd0-d316-c887608f65b1" #trainX = reshape_image(trainX, trainX.shape[0]) trainX = reshape_image(trainX, trainX.shape[0]) testX = reshape_image(testX, testX.shape[0]) validationX = reshape_image(validationX, validationX.shape[0]) # + colab={"base_uri": "https://localhost:8080/"} id="EL2Rv_fk-Jz9" outputId="731c7703-ab37-4c09-a59d-38b395431e6b" validationX.shape # + id="xvvb4P5D2dQv" def preprocessY (lst): integer_encoded = label_encoder.fit_transform(np.array(lst)) trainY = to_categorical(integer_encoded) #trainY = to_categorical(lst) return trainY # + id="AemTI43f2fHu" trainY = preprocessY(trainY) testY = preprocessY(testY) validationY = preprocessY(validationY) # + colab={"base_uri": "https://localhost:8080/"} id="Nw2iwTBi2fA9" outputId="a99f4a86-4ea5-4297-95c4-0373fe61fea2" trainX.shape # + [markdown] id="ZuIOedKJlIfI" # # ***Importing the Pretrained Model*** # # # + colab={"base_uri": "https://localhost:8080/"} id="PmQyxbru3x3f" outputId="380121ea-1ffc-4d12-c1a9-38068b07281d" from keras.applications.vgg19 import VGG19 from keras.applications.vgg19 import preprocess_input base_model = VGG19(weights = 'imagenet' ,include_top = False, input_shape =(32,32,3), classes = 10) #reason for include_top - False as to train a new output layer according to our needs #pretrained weights are being used of 'imagenet' x = base_model.output #adding the output layer according to our classes required - 10 x = Flatten()(x) predictions = Dense(10,activation='softmax')(x) model = Model(inputs = base_model.input, outputs = predictions) #onl training the Dense Layer for layer in base_model.layers: layer.trainable = 
False #Compiling the model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) callbacks_list = [keras.callbacks.EarlyStopping(monitor='val_acc', patience=3, verbose=1)] model.summary() # + id="OZCMHjJ_9A3O" #Augmenting the dataset so as to diversify it gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3, height_shift_range=0.08, zoom_range=0.08) # + [markdown] id="4AnmfDKZl2xg" # **Checkpoint Basics** # + id="vHEkjvDARTiR" checkpoint_path = "/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/VGG19/cp-{epoch:04d}.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) # Create a callback that saves the model's weights cp_callback = ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1) # + [markdown] id="Cj374dp9l9_K" # Tweaking the batch_size parameters # + id="qGP1QkKsTLzh" input_shape=(32, 32, 3) #batch_size =128 #after 21 epochs #batch_size = 64 batch_size = 1024 #batch_size = 4096 # + [markdown] id="epkaGJBtmKRb" # Loading weights in case of a crash # + colab={"base_uri": "https://localhost:8080/"} id="r43FJesOmJMK" outputId="ccc74ad3-6dc9-426a-82be-bbd141444b0d" model.load_weights('/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/VGG19/cp-0056.ckpt')#.ckpt file of weights you want to load # + [markdown] id="DYkpFrN0mI34" # # + [markdown] id="_v1wJt54mDD9" # The training cell # + colab={"base_uri": "https://localhost:8080/"} id="Vsvue9-ZQ9QL" outputId="e0218c26-fc86-49d8-f889-558e6b2ff885" #training cell history=model.fit_generator(gen.flow(trainX, trainY, batch_size), steps_per_epoch=trainY.shape[0]//batch_size, epochs=70, initial_epoch = 56, validation_data=gen.flow(validationX, validationY, batch_size), validation_steps=validationY.shape[0]//batch_size, callbacks=[cp_callback] ) # + [markdown] id="NZfj7zJ0mTNq" # Saving the model for further evaluation # + id="VDGCAZc5SZrV" 
model.save('/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/VGG19/65iterationstrial5.h5') # + [markdown] id="z8KzaJPcmZGJ" # Further Evaluation # + id="wFdAV6CMScqL" from tensorflow.keras.models import load_model # + id="HnowwlO6SgFe" # loading the model #model = load_model('/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/CNN/Model/model_bs_64_f_128each25epoch.h5')#88 #model = load_model('/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/CNN/Model/rough2.h5')#90 model = load_model('/content/drive/MyDrive/URC/Fashion_MNIST/Checkpoints/VGG19/65iterationstrial5.h5') # + colab={"base_uri": "https://localhost:8080/", "height": 545} id="s45QA8PYSh6Y" outputId="3e7a7a4d-ec61-4a4c-e8cc-8c34bed12652" #plotting the graphs #getting all the keys history.history.keys() his = history accuracy = his.history['accuracy'] val_accuracy = his.history['val_accuracy'] loss = his.history['loss'] val_loss = his.history['val_loss'] epochs = range(len(accuracy)) #Training and Validation Accuracy vs Epoch plt.plot(epochs, accuracy, 'r', label='Training accuracy') plt.plot(epochs,val_accuracy,'b',label = 'Validation accuracy') plt.title('Training and Validation Accuracy') plt.legend() plt.figure() #Training Loss and Validation Loss vs Epoch plt.plot(epochs, loss, 'r', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="b2uulJTISmuH" outputId="54af8e77-b45f-4a18-abd3-09cd1fc9d051" #Testing Loss and Accuracy score = model.evaluate(testX, testY, verbose = 0) print('Test Loss:' , score[0]) print('Test accuracy', score[1])
Fashion_MNIST/VGG19_Transfer_Learning/VGG19_Fashion_MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] _cell_guid="be4faf86-56fa-bd74-403d-b86fb9e98c5c" _uuid="f85b4f66be8f0037b3dd2f3a209f955894219605" # ## Full run through of raw images to classification with Convolutional Neural Network ## # # In this tutorial, we're going to be running through taking raw images that have been labeled for us already, and then feeding them through a convolutional neural network for classification. # # The images are either of dog(s) or cat(s). # # Once you have downloaded and extracted the data from https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/data, you're ready to begin. # + [markdown] _cell_guid="1e6df50d-d318-7398-304e-b0ae37f60e17" _uuid="2fe9978ca24a746095a5f6b1f5e4c654ccb9a7dc" # ## Part 1 - Preprocessing ## # # <iframe width="560" height="315" src="https://www.youtube.com/embed/gT4F3HGYXf4" frameborder="0" allowfullscreen></iframe> # # We've got the data, but we can't exactly just stuff raw images right through our convolutional neural network. First, we need all of the images to be the same size, and then we also will probably want to just grayscale them. Also, the labels of "cat" and "dog" are not useful, we want them to be one-hot arrays. # # Interestingly, we may be approaching a time when our data might not need to be all the same size. Looking into TensorFlow's research blog: https://research.googleblog.com/2017/02/announcing-tensorflow-fold-deep.html # # "TensorFlow Fold makes it easy to implement deep-learning models that operate over data of varying size and structure." # # Fascinating...but, for now, we'll do it the old fashioned way. # # **Package Requirements** # numpy (pip install numpy) # tqdm (pip install tqdm) # # I will be using the GPU version of TensorFlow along with tflearn. 
# # To install the CPU version of TensorFlow, just do pip install tensorflow # To install the GPU version of TensorFlow, you need to get alllll the dependencies and such. # # **TensorFlow Installation tutorials:** # # https://pythonprogramming.net/how-to-cuda-gpu-tensorflow-deep-learning-tutorial/ # # TensorFlow on Windows: https://www.youtube.com/watch?v=r7-WPbx8VuY # # **Using TensorFlow and concept tutorials:** # # Introduction to deep learning with neural networks: https://pythonprogramming.net/neural-networks-machine-learning-tutorial # # Introduction to TensorFlow: https://pythonprogramming.net/tensorflow-introduction-machine-learning-tutorial/ # # Intro to Convolutional Neural Networks: https://pythonprogramming.net/convolutional-neural-network-cnn-machine-learning-tutorial/ # # Convolutional Neural Network in TensorFlow tutorial: https://pythonprogramming.net/cnn-tensorflow-convolutional-nerual-network-machine-learning-tutorial/ # # Finally, I will be making use of https://pythonprogramming.net/tflearn-machine-learning-tutorial/. Once you have TensorFlow installed, do pip install tflearn. # # First, we'll get our imports and constants for preprocessing: # # + _cell_guid="d5afc7e8-ef5a-c672-4f51-c469ed97be86" _uuid="fb3d36f2730ec2c91f3bc8058e52aca7c8286f73" import cv2 # working with, mainly resizing, images import numpy as np # dealing with arrays import os # dealing with directories from random import shuffle # mixing up or currently ordered data that might lead our network astray in training. from tqdm import tqdm # a nice pretty percentage bar for tasks. 
# Thanks to viewer <NAME> for this suggestion

# Dataset locations and hyperparameters shared by the functions below.
TRAIN_DIR = '../input/train'
TEST_DIR = '../input/test'
IMG_SIZE = 50   # every image is resized to IMG_SIZE x IMG_SIZE grayscale
LR = 1e-3       # learning rate
MODEL_NAME = 'dogsvscats-{}-{}.model'.format(LR, '2conv-basic') # just so we remember which saved model is which, sizes must match

# + [markdown] _cell_guid="da647f1a-7011-1555-6013-3e0e0126b246" _uuid="5f20656f079bf3f3a15bb62bfa9c4a1eeafb90a0"
# Now, our first order of business is to convert the images and labels to array information that we can pass through our network. To do this, we'll need a helper function to convert the image name to an array.
#
# Our images are labeled like "cat.1" or "dog.3" and so on, so we can just split out the dog/cat, and then convert to an array like so:

# + _cell_guid="d1c5fbd5-94a3-1c60-427d-883601dbd461" _uuid="c8c64c84f56ffd44f56dab603b1d6ed9e65c800c"
def label_img(img):
    """Map a filename like "cat.1.jpg" to a one-hot [cat, dog] label.

    NOTE(review): implicitly returns None for any other prefix — confirm
    every file in TRAIN_DIR is named "cat.*" or "dog.*".
    """
    # filenames are "<class>.<number>.<ext>", so the class is third from the end
    word_label = img.split('.')[-3]
    # conversion to one-hot array [cat,dog]
    #                            [much cat, no dog]
    if word_label == 'cat': return [1,0]
    #                             [no cat, very doggo]
    elif word_label == 'dog': return [0,1]

# + [markdown] _cell_guid="0e1eafd3-99b2-7c7a-0164-31b3efa1e39a" _uuid="0b1bba8565a96fe1dec0deb1b2081bb60fff60cc"
# Now, we can build another function to fully process the training images and their labels into arrays:

# + _cell_guid="246e8504-fc0a-00ac-34d2-ea6b13af1732" _uuid="418205e2d7778991dd09c08d1d831779031fdc8d"
def create_train_data():
    """Read, grayscale, and resize every training image; shuffle, save to
    'train_data.npy', and return the [image_array, label_array] pairs."""
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR,img)
        # `img` is rebound from filename to pixel array from here on
        img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))
        training_data.append([np.array(img),np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data

# + [markdown] _cell_guid="e4935379-6d26-818a-b4a2-d92b3a3ee2a3" _uuid="880f3fedd9a401ab9aac4a6d2a42c3f075486ce1"
# The tqdm module was introduced to me by one of my viewers, it's a really nice, pretty, way to measure where you are in a process, rather than printing things out at intervals...etc, it gives a progress bar. Super neat.
#
# Anyway, the above function converts the data for us into array data of the image and its label.
#
# When we've gone through all of the images, we shuffle them, then save. Shuffle modifies a variable in place, so there's no need to re-define it here.
#
# With this function, we will both save, and return the array data. This way, if we just change the neural network's structure, and not something with the images, like image size..etc..then we can just load the array file and save some processing time. While we're here, we might as well also make a function to process the testing data. This is the *actual* competition test data, NOT the data that we'll use to check the accuracy of our algorithm as we test. This data has no label.

# + _cell_guid="277478aa-2ffa-eb36-f0ec-2fe04743124a" _uuid="e2669d0b780c483f78c3713860f9c601aebdd6e0"
def process_test_data():
    """Read, grayscale, and resize the unlabeled competition test images;
    shuffle, save to 'test_data.npy', and return [image_array, id] pairs."""
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR,img)
        # test files carry no class label; keep the numeric id for submission
        img_num = img.split('.')[0]
        img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data

# + [markdown] _cell_guid="5e3beebd-676c-3884-75df-77d42b255ad0" _uuid="8e012762544e54fab7ccaa05746a335c3a2d540b"
# Now, we can run the training:

# + _cell_guid="62b30187-0d69-13e9-5829-4b05962e5e79" _uuid="e28068797700f8a309392e3d7975219753893e11"
train_data = create_train_data()
# If you have already created the dataset:
#train_data = np.load('train_data.npy')

# + [markdown] _cell_guid="53cacc97-6c0e-e5ca-dc63-5eae37ca03d5" _uuid="13b2f21a7f2549ca62b2558d83c17d96ad3ac2bc"
# ## Convolutional Neural Network ##
#
# <iframe width="560" height="315" src="https://www.youtube.com/embed/Ge65ukmJTzQ" frameborder="0" allowfullscreen></iframe>
#
# Next, we're ready to define our neural network:

# +
_cell_guid="6dfcdcae-fc1b-06de-875b-3b853f95ca55" _uuid="28720126c838135de3aece151743b5b7b0c761aa" import tflearn from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.estimator import regression convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input') convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = fully_connected(convnet, 1024, activation='relu') convnet = dropout(convnet, 0.8) convnet = fully_connected(convnet, 2, activation='softmax') convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets') model = tflearn.DNN(convnet, tensorboard_dir='log') # + [markdown] _cell_guid="aed07464-bd4a-1321-c772-2a3cb252a7e4" _uuid="4350e693ed085e7f677906da19a3735d21fe5967" # What we have here is a nice, 2 layered convolutional neural network, with a fully connected layer, and then the output layer. It's been debated whether or not a fully connected layer is of any use. I'll leave it in anyway. # # This exact convnet was good enough for recognizing hand 28x28 written digits. Let's see how it does with cats and dogs at 50x50 resolution. # # <iframe width="560" height="315" src="https://www.youtube.com/embed/ViO56ASqeks" frameborder="0" allowfullscreen></iframe> # + [markdown] _cell_guid="e418f3b2-fb65-ab5a-3862-2ca2ab438bf4" _uuid="d182f3c8143e926b404d33708285cc84ee920656" # Now, it wont always be the case that you're training the network fresh every time. Maybe first you just want to see how 3 epochs trains, but then, after 3, maybe you're done, or maybe you want to see about 5 epochs. 
We want to be saving our model after every session, and reloading it if we have a saved version, so I will add this: # + _cell_guid="c6949aec-7b7a-297e-43d0-9a868d6c9b60" _uuid="22a34fc9d20be6e24486b37d736a3c0766e5af97" if os.path.exists('{}.meta'.format(MODEL_NAME)): model.load(MODEL_NAME) print('model loaded!') # + [markdown] _cell_guid="42c74821-c028-3e04-f04f-398f0531ef86" _uuid="94419947d089d3ba8906a094a75dea8aa358114c" # Now, let's split out training and testing data: # + _cell_guid="2770fddb-4083-8a0a-7743-9753620cb681" _uuid="6417fbd4fa89127b936c5e37bf30435c4337e6a9" train = train_data[:-500] test = train_data[-500:] # + [markdown] _cell_guid="5fab0abb-7783-cfb9-75c4-6d2ad8e2dcf2" _uuid="b6f6c9c986f0c0e85f2d41ed90f746f01f5a1c52" # Now, the training data and testing data are both labeled datasets. The training data is what we'll fit the neural network with, and the test data is what we're going to use to validate the results. The test data will be "out of sample," meaning the testing data will only be used to test the accuracy of the network, not to train it. # # We also have "test" images that we downloaded. THOSE images are not labeled at all, and those are what we'll submit to Kaggle for the competition. # # Next, we're going to create our data arrays. For some reason, typical numpy logic like: # # array[:,0] and array[:,1] did NOT work for me here. 
Not sure what I'm doing wrong, so I do this instead to separate my features and labels: # + _cell_guid="bd7c2022-a8e5-6a7f-e8e8-244d97b1c141" _uuid="4a948dba13e95c494e1d3dab2be7d0baca75ce5d" X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1) Y = [i[1] for i in train] test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1) test_y = [i[1] for i in test] # + [markdown] _cell_guid="e7e1dd3a-2681-9838-cd6b-fc565094f2d9" _uuid="0f5d2227bec8a36305cd73b15ab66e258206c769" # Now we fit for 2 epochs: # + _cell_guid="7d5325d3-2a96-53fd-952f-d353660abde8" _uuid="f5dd802aa7c856af4a2f568bdacec03b614c5773" model.fit({'input': X}, {'targets': Y}, n_epoch=2, validation_set=({'input': test_x}, {'targets': test_y}), snapshot_step=50000, show_metric=True, run_id=MODEL_NAME) # + [markdown] _cell_guid="b92bc0c4-c153-69c2-0b17-b810906d0d7b" _uuid="09fc78ef6cd837451963714833911467b4d1d788" # Hmm... it doesn't look like we've gotten anywhere at all. # + [markdown] _cell_guid="4ebe7b41-88d9-f5a5-f3b1-336bf0469fa6" _uuid="75ba3386c9b4d4ea35fbedd3b2b3c40b2d64b1a4" # We could keep trying, but, if you haven't made accuracy progress in the first 3 epochs, you're probably not going to at all, unless it's due to overfitment...at least in my experience. # # So... now what? 
# + [markdown] _cell_guid="64495318-5893-0917-7e6e-f214384ce6e7" _uuid="90910ef7b018b9bb1e929234680d12dba7508698" # ## Size Matters ## # # We're gonna need a bigger network # # First, we need to reset the graph instance, since we're doing this in a continuous environment: # + _cell_guid="5485d475-bc57-ba04-939b-dcb5eeb21fbc" _uuid="3951071696ac8faf9484a683e7ec434d3b246388" import tensorflow as tf tf.reset_default_graph() # + _cell_guid="13516279-c02c-9139-72ff-ba3e489c3c08" _uuid="9572949c008e86382a9a335a5d7b61e0dbf139a6" convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input') convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = fully_connected(convnet, 1024, activation='relu') convnet = dropout(convnet, 0.8) convnet = fully_connected(convnet, 2, activation='softmax') convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets') model = tflearn.DNN(convnet, tensorboard_dir='log') if os.path.exists('{}.meta'.format(MODEL_NAME)): model.load(MODEL_NAME) print('model loaded!') train = train_data[:-500] test = train_data[-500:] X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1) Y = [i[1] for i in train] test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1) test_y = [i[1] for i in test] model.fit({'input': X}, {'targets': Y}, n_epoch=4, validation_set=({'input': test_x}, {'targets': test_y}), snapshot_step=500, show_metric=True, run_id=MODEL_NAME) # + [markdown] 
_cell_guid="993c22d5-29dd-e022-44ea-0b114acb2505" _uuid="1608f084ea2935df7998799fee9579d72d3bf425" # WELL WELL WELL... Looks like we've got a winner. With neural networks, size matters a ton. We went from having apparently un-trainable data to having obviously trainable data, and this was only 3 epochs. # # If you are happy with the model, go ahead and save it: # + _cell_guid="ccd6d567-ab2a-15ce-29e2-62074df9c0c6" _uuid="d3cbb202a86fd2b9ed82c22974c8f3fa2b050091" model.save(MODEL_NAME) # + [markdown] _cell_guid="dd467f72-9f39-9d99-c0b9-db8fa020528e" _uuid="6533d2a96d32aadbbe3c705fc96a783d5f7c2570" # Now we can reload the model, and continue training (we don't NEED to reload the model here since this is continuous and the model is still in memory, but if you were running this as a program you would) # + [markdown] _cell_guid="4c591c48-2985-f138-5315-7054e99b147c" _uuid="db933a12b1e5567cb7dfb92d4dd316e63d994d4d" # ## You can be too big## # # Bigger is not always better, there does get to be a limit, at least from my experience. A bigger network figures things out better, and quicker, but tends to also overfit the training data. You can use dropout (sets randomly a certain % of nodes to not take part in the network for more robusts networks) to rectify this slightly, but there does seem to be a limit. # # Okay, now what? Let's see how we've done! 
# + [markdown] _cell_guid="0b7137d8-6ba2-d36a-e151-923cb54cb8b8" _uuid="9c6cc40bb88af590ff554f1fc7e9a887d7b3e95a" # ## Visually inspecting our network against unlabeled data ## # # <iframe width="560" height="315" src="https://www.youtube.com/embed/27FPv1VHSsQ" frameborder="0" allowfullscreen></iframe> # + _cell_guid="32f6bded-827a-f928-b567-cb2c1180ec7d" _uuid="070481d7646c9a7c3695d3729c83ad92b0eb5a6e" import matplotlib.pyplot as plt # if you need to create the data: test_data = process_test_data() # if you already have some saved: #test_data = np.load('test_data.npy') fig=plt.figure() for num,data in enumerate(test_data[:12]): # cat: [1,0] # dog: [0,1] img_num = data[1] img_data = data[0] y = fig.add_subplot(3,4,num+1) orig = img_data data = img_data.reshape(IMG_SIZE,IMG_SIZE,1) #model_out = model.predict([data])[0] model_out = model.predict([data])[0] if np.argmax(model_out) == 1: str_label='Dog' else: str_label='Cat' y.imshow(orig,cmap='gray') plt.title(str_label) y.axes.get_xaxis().set_visible(False) y.axes.get_yaxis().set_visible(False) plt.show() # + [markdown] _cell_guid="13e156c7-277d-abe1-fb52-7f2dd228245d" _uuid="33727f1a29dee92a611e7bf39e4ac7748fabfbe9" # Alright, so we made a couple mistakes, but not too bad actually! # # If you're happy with it, let's compete! # + _cell_guid="52d2f5e2-c47e-460d-5b92-dbe0cfd42864" _uuid="b651d4744947aa6bd4f5290d4551d908eaa1d6b5" with open('submission_file.csv','w') as f: f.write('id,label\n') with open('submission_file.csv','a') as f: for data in tqdm(test_data): img_num = data[1] img_data = data[0] orig = img_data data = img_data.reshape(IMG_SIZE,IMG_SIZE,1) model_out = model.predict([data])[0] f.write('{},{}\n'.format(img_num,model_out[1])) # + [markdown] _cell_guid="3a087c2c-b01e-0c0a-04a6-38796c2e5882" _uuid="a8fc396a51328d96d9ab5bf0ac13d4fd6281749e" # Heading to Kaggle > Competitions > Dogs vs. Cats Redux: Kernels Edition... Let's submit! 
# # This got me ~700th place with a logloss of 0.55508 when trained out to 10 epochs # + _cell_guid="30df960f-ba10-e18b-358a-3d5a2e994d83" _uuid="33d385d1162563eb30915a5d1ccc8bf777a9ed1c"
Notebooks/dog_cat_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from data import * # # data augmentation # # In deep learning tasks, a lot of data is need to train DNN model, when the dataset is not big enough, data augmentation should be applied. # # keras.preprocessing.image.ImageDataGenerator is a data generator, which can feed the DNN with data like : (data,label), it can also do data augmentation at the same time. # # It is very convenient for us to use keras.preprocessing.image.ImageDataGenerator to do data augmentation by implement image rotation, shift, rescale and so on... see [keras documentation](https://keras.io/preprocessing/image/) for detail. # # For image segmentation tasks, the image and mask must be transformed **together!!** # ## define your data generator # # If you want to visualize your data augmentation result, set save_to_dir = your path # + #if you don't want to do data augmentation, set data_gen_args as an empty dict. #data_gen_args = dict() data_gen_args = dict(rotation_range=0.2, width_shift_range=0.05, height_shift_range=0.05, shear_range=0.05, zoom_range=0.05, horizontal_flip=True, fill_mode='nearest') myGenerator = trainGenerator(20,'data/membrane/train','image','label',data_gen_args,save_to_dir = "data/membrane/train/aug") # - # ## visualize your data augmentation result #you will see 60 transformed images and their masks in data/membrane/train/aug num_batch = 3 for i,batch in enumerate(myGenerator): if(i >= num_batch): break # ## create .npy data # # If your computer has enough memory, you can create npy files containing all your images and masks, and feed your DNN with them. image_arr,mask_arr = geneTrainNpy("data/membrane/train/aug/","data/membrane/train/aug/") #np.save("data/image_arr.npy",image_arr) #np.save("data/mask_arr.npy",mask_arr)
dataPrepare.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Graphs # # This notebook is similar to the second part of the __Analysis notebook__. Its sole purpose is to create the same graphs as before, but in a format that can be used for the data story's moving network. # + #imports import json import itertools import collections import numpy as np import pandas as pd import networkx as nx import matplotlib.pyplot as plt from operator import itemgetter from community import community_louvain from networkx.readwrite import json_graph from networkx.algorithms.community.centrality import girvan_newman # %matplotlib inline import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') # - # We only use degree 1, as larger degrees resulted in unmanagably large graphs. But distance 1 is quite good enough for our purposes. # + #Global variables DEGREE = 1 DEGREE_FILE = '../generated/map/degree_' + str(DEGREE) +'/' def get_graph_elem_file(elem_type, leak): return DEGREE_FILE + leak + '_' + elem_type + '.csv' # - # ### Create the graphs # # Note that as some of the graphs have still very many nodes, we can choose to filter these down to only those connected to more than one node (unless a node is a match -- we still want to know who is not connected to anyone). # # Note the special attention drawn to the "bahamas" leak, which has a slightly different schema. 
# +
def remove_loners(full_df, clusters_only, cluster_size=2):
    '''Removes nodes with fewer than 2 connections who are not matches.

    full_df is an edge table (START_ID/END_ID plus per-endpoint name/match columns).
    When clusters_only is True an edge is dropped only if BOTH endpoints are
    under-connected; otherwise an edge is dropped when a non-match endpoint is
    under-connected (matches are always kept so isolated matches stay visible).'''
    # Degree of every node: count its appearances as either endpoint.
    # pd.concat replaces Series.append, which was removed in pandas 2.x.
    node_counts = pd.concat([full_df['START_ID'], full_df['END_ID']]).value_counts()

    ids_to_remove = []
    for index, row in full_df.iterrows():
        start_id = row['START_ID']
        end_id = row['END_ID']
        if clusters_only:
            if ((node_counts[start_id] < cluster_size) and (node_counts[end_id] < cluster_size)):
                ids_to_remove.append(index)
        else:
            if ((row['match_x'] == False and node_counts[start_id] < cluster_size) or
                    (row['match_y'] == False and node_counts[end_id] < cluster_size)):
                ids_to_remove.append(index)

    return full_df.drop(full_df.index[ids_to_remove]).reset_index(drop = True)

#The moving network needs node ids starting at 0, so we reset them
def zero_id_nodes(full_df):
    '''Sets the indexes of the nodes to zero (needed for the graph).

    Rewrites START_ID/END_ID in place with consecutive integers assigned in order
    of first appearance, and returns the mutated frame.'''
    id_map = {}
    curr_id = 0
    for index, row in full_df.iterrows():
        start_id = row['START_ID']
        end_id = row['END_ID']
        if start_id not in id_map:
            id_map[start_id] = curr_id
            curr_id += 1
        if end_id not in id_map:
            id_map[end_id] = curr_id
            curr_id += 1
        full_df.at[index, 'START_ID'] = id_map[start_id]
        full_df.at[index, 'END_ID'] = id_map[end_id]
    return full_df

def extract_nodes(full_df):
    '''Collects the unique (id, name, match) node records from both edge endpoints.'''
    # rename() without inplace avoids mutating a slice of full_df
    # (the original in-place rename triggered SettingWithCopyWarning).
    start_nodes = full_df[['START_ID', 'name_x', 'match_x']].rename(
        columns={'START_ID': 'id', 'name_x': 'name', 'match_x': 'match'})
    end_nodes = full_df[['END_ID', 'name_y', 'match_y']].rename(
        columns={'END_ID': 'id', 'name_y': 'name', 'match_y': 'match'})
    nodes = pd.concat([start_nodes, end_nodes]).drop_duplicates(['id', 'name', 'match'])
    return nodes

def graph_leak_matches(leak, dense = False, clusters_only = False, cluster_size = 2):
    '''Creates graphs of matches found in a given leak.

    Builds a directed graph of matched entities, annotates every node with its
    name and match flag, and dumps it as node-link JSON under ../results/graphs/.
    When dense is True, weakly connected nodes are filtered out first.'''
    # The bahamas dump uses different column positions for the node id and name.
    bahamas = (leak == 'bahamas')
    node_id = '0'
    name_index = '1'
    if bahamas:
        node_id = '4'
        name_index = '7'

    #Load the data
    nodes = pd.read_csv(get_graph_elem_file('nodes', leak), index_col = 0)\
        .reset_index(drop = True)[[node_id, name_index, 'Match']]
    nodes.rename(columns={node_id:'node_id', name_index:'name', 'Match':'match'}, inplace = True)
    edges = pd.read_csv(get_graph_elem_file('edges', leak), index_col = 0).reset_index(drop = True)[['START_ID', 'END_ID']]

    #Format the data as a set of edges with information about the nodes
    full_df = pd.merge(nodes, edges, left_on = 'node_id', right_on = 'START_ID')
    full_df = pd.merge(full_df, nodes, left_on = 'END_ID', right_on = 'node_id').drop(['node_id_x', 'node_id_y'], axis = 1)

    #filter too large graphs if told so
    if dense:
        print('Size before removing loners: ' + str(len(full_df)))
        full_df = remove_loners(full_df, clusters_only, cluster_size)
        print('Size after removing loners: ' + str(len(full_df)))

    full_df = zero_id_nodes(full_df)
    graph_nodes = extract_nodes(full_df)

    #create the graph
    graph = nx.DiGraph()
    for i in range(0,len(full_df)):
        graph.add_edge(int(full_df.loc[i,"START_ID"]), int(full_df.loc[i,'END_ID']))
    for n in graph:
        # graph.nodes replaces the graph.node attribute that networkx 2.4 removed.
        graph.nodes[n]['name'] = graph_nodes.loc[graph_nodes['id'] == n, 'name'].to_string(index=False)
        graph.nodes[n]['match'] = graph_nodes.loc[graph_nodes['id'] == n, 'match'].bool()

    #export as .json
    d = json_graph.node_link_data(graph)
    name = leak
    if dense:
        name = leak + "_small"
    json.dump(d, open('../results/graphs/graph_'+ name +'.json','w'))

    # Return the graph: the original ended with a bare `graph` expression, which is
    # a no-op inside a function (it only echoes in a notebook cell at top level).
    return graph
# -

graph_leak_matches('panama')

graph_leak_matches('paradise')

graph_leak_matches('paradise', True)

graph_leak_matches('offshore')

graph_leak_matches('offshore', True)

graph_leak_matches('bahamas')
src/Graphing_network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 12.850 - Assignment 2 | <NAME> # # ##Modeling Diffusion over Time # This program models diffusion in a 1D field with a time-varying forcing, using a no-slip bottom boundary condition, free-slip surface boundary condition, a Crank-Nicolson time iteration scheme, and an SOR iterative solving scheme. The SOR scheme used in Assignment 1 (and written below) is unable to solve matrix equations in which the right-hand side is negative; therefore, as stated below, in the interest of time, I have used the scipy sparse-matrix linear solver. # + # Import clusters from scipy import sparse #Allows me to create sparse matrices (i.e. not store all of the zeros in the 'A' matrix) import scipy.sparse.linalg as spla #To solve each time iteration, as my SOR code (below) grows exponentially when attempting to solve with negative values on the right-hand side. from numpy import * #To make matrices and do matrix manipulation import matplotlib.pyplot as plt #for plotting purposes # %matplotlib inline # Define Successive Over-Relaxation scheme def sor(A,x,B,tolerance,w): '''This iteration scheme relaxes Gauss-Seidel estimates toward the converging value INPUTS: A = Relation Matrix x = initial estimated field B = boundary conditions tolerance = allowable difference between iterations w = SOR factor - usually 1.5, but seems to work best with 1.85 OUTPUT: T = final estimated field''' # Initialize scheme resid = x[:,0].dot(100.) 
m = 0 T=x[:,0] while mean(divide(resid,abs(B).max())*100)>tolerance: T=append(T,T[:,0].dot(0.),axis=1) for i in range(0,size(A,1)): #Calculate individual estimates cleaned_list1 = [ x for x in range(0,size(A,1)) if x < i ]#Previous Solutions cleaned_list2 = [ x for x in range(0,size(A,1)) if x > i ]#Future Solutions #Use estimates of T for next timestep T_hat=(B[i]-(A[i,cleaned_list1]*T[cleaned_list1,m+1])-(A[i,cleaned_list2]*T[cleaned_list2,m]))/A[i,i] T[i,m+1]=(w*T_hat)+((1-w)*T[i,m]) #Adjust based on relaxation factor resid=abs((A*T[:,m])-B) #print(str(mean(divide(resid,abs(B).max())*100))+'%') print('.'), m=m+1 print('') return T[:,m-1] # - # ##Motivation and Discretizing # # This problem was motivated as an extension of the probelm I posed in Assignment 1; after solving for the velocity profile forced by a given barotropic tide (parameterized as a fixed surface slope), I proposed modeling the velocity profile under a time-dependent tide (taken to be a sinusoidal M2 tide, with a tidal period of 12.44 hours). In keeping with the earlier problem, I utilize a no-slip bottom boundary condition (taking $u_0$ equal to zero) and a free-slip surface boundary condition (where the flux to the surface is equal to zero). # # I begin with the Navier-Stokes equation for $u$: # $$\frac{\partial u}{\partial t} + u \cdot \nabla u = -\frac{1}{\rho}\frac{\partial p}{\partial x} + \frac{\partial}{\partial z}\left[K \frac{\partial u}{\partial z}\right] + F$$ # # where $u$ is the horizontal velocity, $K$ is an eddy diffusivity, and $\frac{\partial p}{\partial x}$ is the barotropic pressure gradient induced by one phase of the tide, similar to Assigment 1. I can now neglect advection and external forcings (as the forcing for the assignment will be contained within the pressure gradient). In addition, I assume hydrostatic conditions so that $\frac{\partial p}{\partial x} = \rho g \frac{\partial h}{\partial x}$. 
Our initial equation has now become: # $$\frac{\partial u}{\partial t} = -g\frac{\partial h}{\partial x} + \frac{\partial}{\partial z}\left[K \frac{\partial u}{\partial z}\right]$$ # # in which $\frac{\partial h}{\partial x}$ is prescribed. As I wish for the barotropic pressure gradient to vary in time (to simulate the M2 tide), I assume that $h = \eta\cos\left(2\pi\frac{time}{T_{tidal}}\right)$, where $T_{tidal}$ is the period of our tide (12.44 hours). Substituting for $h$ and rewriting: # $$\frac{\partial u}{\partial t} + g\cos\left(2\pi\frac{time}{T_{tidal}}\right)\frac{\partial \eta}{\partial x} = \frac{\partial}{\partial z}\left[K \frac{\partial u}{\partial z}\right]$$ # # Similar to Assignment 1, I am still prescribing the slope, but here it sets a maximum slope amplitude for the oscillating pressure gradient. In addition to the time-dependent forcing, the new acceleration term on the left-hand side requires a recalculation of the nondimensional parameter. After adjusting the parameters to reflect nondimensional quantities (shown with asterisks), I reach the following equation: # $$\frac{\partial u^{\ast}}{\partial t^{\ast}}+\frac{gT\eta H^{2}}{L\kappa U^{2}}\cos\left(2\pi\frac{time}{T_{tidal}}\right)\frac{\partial \eta^{\ast}}{\partial x^{\ast}} = \frac{\partial}{\partial z^{\ast}}\left[K^{\ast} \frac{\partial u^{\ast}}{\partial z^{\ast}}\right]$$ # # By simplifying the nondimensional quantity into a Reynolds number ($Re=\frac{HU}{\kappa}$), a Froude number ($Fr=\frac{U}{\sqrt{gH}}$), and taking $U = \frac{L}{T}$, I am able to reach the final equation: # $$\frac{\partial u^{\ast}}{\partial t^{\ast}}+\frac{\eta}{U^{2}}\frac{Re}{Fr^{2}}\cos\left(2\pi\frac{time}{T_{tidal}}\right)\frac{\partial h^{\ast}}{\partial x^{\ast}} = \frac{\partial}{\partial z^{\ast}}\left[K^{\ast} \frac{\partial u^{\ast}}{\partial z^{\ast}}\right]$$ # ###Crank-Nicolson Stability Criterion # # Having reached our nondimensional number, it is important to check on the stability of the 
problem. I begin by discretizing the equation using the Crank-Nicolson scheme, whereby $$u_{k}^{n+1} = u_{k}^{n} + \Delta t\left[ \frac{1}{2}\left(\frac{F_{k}^{n+1} - F_{k-1}^{n+1}}{\Delta z} + f_{k}^{n+1} \right) + \frac{1}{2}\left(\frac{F_{k}^{n} - F_{k-1}^{n}}{\Delta z} + f_{k}^{n} \right)\right] ; where~ F_{k}^{n} = \nu_{k}^{n}\left(\frac{u_{k+1}^{n} - u_{k}^{n}}{\Delta z} \right)$$ # # I take $u(x,t)_{j}^{n} = \sum u(t)^{n}e^{ikj \Delta x}$, decomposing by fourier series, and substitute into the discrete Crank-Nicolson scheme (assuming momentarily that $\Delta z_{f} = \Delta z_{c}$ and we have negligible forcing) to reach the following: # $$u(t)^{n+1} \left[e^{ik(j) \Delta x} - \frac{\Delta t}{2}\frac{\nu_{k}}{\Delta z^{2}} \left(e^{ik(j+1) \Delta x} - 2 e^{ik(j) \Delta x} + e^{ik(j-1) \Delta x} \right) \right] = u(t)^{n} \left[e^{ik(j) \Delta x} + \frac{\Delta t}{2}\frac{\nu_{k}}{\Delta z^{2}} \left(e^{ik(j+1) \Delta x} - 2 e^{ik(j) \Delta x} + e^{ik(j-1) \Delta x} \right) \right]$$ # # Dividing both sides by $e^{ikj \Delta x}$, and taking $e^{ik \Delta x} + e^{-ik \Delta x} = 2\cos(k \Delta x)$, the above equation simplifies to # $$\frac{u(t)^{n+1}}{u(t)^{n}} = \frac{\left[1+\frac{\Delta t}{2}\frac{\nu_{k}}{\Delta z^{2}}\left(2\cos (k \Delta x) -2 \right) \right]}{\left[1-\frac{\Delta t}{2}\frac{\nu_{k}}{\Delta z^{2}}\left(2\cos (k \Delta x) -2 \right) \right]}$$ # # Taking $\cos(k \Delta x) = 1 - 2\sin^{2}(k \Delta x)$ and rearranging, we find that the Crank-Nicolson scheme is stable under all conditions, as $\sin^{2}$ is always positive and the numerator is always smaller than the denominator: # $$\left|\frac{u(t)^{n+1}}{u(t)^{n}}\right| = \left|\frac{\left[1-2\Delta t\frac{\nu_{k}}{\Delta z^{2}}\sin^{2} (k \Delta x) \right]}{\left[1+2\Delta t\frac{\nu_{k}}{\Delta z^{2}}\sin^{2} (k \Delta x) \right]}\right| < 1$$ # # ### Discretizing & Boundary Conditions # # Building on the discretizing scheme presented above for time-stepping, I solve the discrete 
equation to isolate 'n+1' terms to the left-hand side and 'n' terms to the right-hand side: # $$\frac{2}{\Delta t}u_{k}^{n+1} - \left[ \frac{\nu_{k}^{n+1}}{\Delta z_{c}}\left(\frac{u_{k+1}^{n+1} - u_{k}^{n+1}}{\Delta z_{f}} \right) - \frac{\nu_{k-1}^{n+1}}{\Delta z_{c}}\left(\frac{u_{k}^{n+1} - u_{k-1}^{n+1}}{\Delta z_{f}} \right) + f_{k}^{n+1}\right] = \frac{2}{\Delta t}u_{k}^{n} + \left[ \frac{\nu_{k}^{n}}{\Delta z_{c}}\left(\frac{u_{k+1}^{n} - u_{k}^{n}}{\Delta z_{f}} \right) - \frac{\nu_{k-1}^{n}}{\Delta z_{c}}\left(\frac{u_{k}^{n} - u_{k-1}^{n}}{\Delta z_{f}} \right) + f_{k}^{n}\right]$$ # # I can further simplify the problem into our unknown terms and known values, isolating each $u$: # $$\left[\frac{-\nu_{k}^{n+1}}{\Delta z_{f} \Delta z_{c}}\right]u_{k+1}^{n+1} + \left[\frac{2}{\Delta t} + \frac{\nu_{k}^{n+1}}{\Delta z_{f} \Delta z_{c}} + \frac{\nu_{k-1}^{n+1}}{\Delta z_{f} \Delta z_{c}}\right]u_{k}^{n+1} + \left[\frac{-\nu_{k-1}^{n+1}}{\Delta z_{f} \Delta z_{c}}\right]u_{k-1}^{n+1} = \left[\frac{\nu_{k}^{n+1}}{\Delta z_{f} \Delta z_{c}}\right]u_{k+1}^{n} + \left[\frac{2}{\Delta t} - \frac{\nu_{k}^{n}}{\Delta z_{f} \Delta z_{c}} - \frac{\nu_{k-1}^{n}}{\Delta z_{f} \Delta z_{c}}\right]u_{k}^{n} + \left[\frac{\nu_{k-1}^{n}}{\Delta z_{f} \Delta z_{c}}\right]u_{k-1}^{n} + \left[f_{k}^{n+1}+f_{k}^{n}\right]$$ # # I chose to implement the discrete equation as is, in which $[A][u]^{n+1} = [B][u]^{n} + f^{n+1}+f^{n}$, where the forcings are identical for all grid cells. In the code below, I solve for the matrices $[A]$ and $[B]$ at the same time, calculate the right-hand side of our equation, then use the right-hand side in combination with $[A]$ to solve for $[u]^{n+1}$. 
# # Similar to Assignment 1, the bottom boundary condition is implemented by setting $u_{-1}=0$ (which appears in the lack of a $u_{k-1}$ term when solving for $u_{0}$) and the surface boundary condition is implemented by setting the flux above the grid cell ($F_{k}$) equal to zero for the surface cell. These boundary conditions adjust the values within the matrices $[A]$ and $[B]$, but do not affect the matrix $[u]^{n}$ as the parameters are set equal to zero. As a result, the boundary condition matrix when solving for the initial conditions is simply composed of the barotropic pressure gradient. # # In order to show these values, the code below has been adjusted to show matrix $[A]$ and the right-hand side of the equation when solving for $[u]^{n+1}$ in the first time iteration. # def cranknicolson(T,T_del,Timesteps,n,eta,u0,U,Fr,Re,K): '''This iteration scheme moves a Gauss-Seidel estimate forward at a given time interval using a Crank-Nicolson scheme INPUTS: T = Period of forcing fluctuations T_del = timestep Timesteps = number of timesteps to move forward n = number of depth bins eta = amplitude of sinusoidal forcing u0 = bottom boundary condition U = mean barotropic velocity Fr = Froude number of flow Re = Reynolds number of flow K = diffusivity matrix OUTPUT: Time = time field Depth = depth field (grid center) Q = final estimated field over time''' #K=ones(n+1) #K=array(range(1,n+2)) #K=array(range(n+2,1,-1)) K=K/float(max(K)) #Nondimensionalize K # Define distances at Face (F) and Center (C) = ensure nondimensionalized Z_f=range(-n,0,1);Z_f=[x / float(n) for x in Z_f];Z_f=append(append([-1-(1/float(n))],Z_f),[0]);Z_f=Z_f[0:size(Z_f)-1]+(diff(Z_f))/2.0; Z_c=range(-n,0,1);Z_c=[x / float(n) for x in Z_c];Z_c=append(append([-1-(1/float(n))],Z_c),[0]); #Begin stepping forward in time for time in range(0,Timesteps): #Solve for initial state using boundary conditions #Construct A matrix - dependent on time iteration scheme if time == 0: #Solving for initial 
conditions print(time), # Construct 'A' Matrix A=zeros((n,n)) for item in range(1,n+1): #Start from bed and work to surface if item>1: A[item-1,item-2]=-(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1])) ) A[item-1,item-1]=+( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + (K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item]))) ) if item == n: #Sets free-slip boundary condition at the surface A[item-1,item-1]=+( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) if item != n: A[item-1,item]=-(K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item])) ) # Construct Boundary Condition Matrix = using non-dimensional parameter b=ones(size(A,1))*(Re/(Fr*Fr))*((eta/(U*U))*cos(2*pi*(float(time)/T))) b[0]=b[0] + (u0* (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) #Because u0 is zero, this line does nothing. # Define + Apply guess + boundary conditions x=matrix(b[:]).T b=matrix(b).T # Solve Problem using hard-wired iterative scheme #T = jacobi(A,x,b,0.05) #T = gaussseidel(A,x,b,0.05) Q = sor(A,x,b,0.05,1.85) #Iterate forward in time using the Crank-Nicolson scheme else: print(', '+str(time)), Q=append(Q,Q[:,0].dot(0.),axis=1) #increase size to match time dimension # Construct 'A' Matrix A=zeros((3,n)) # For solving for 'n+1' solution B=zeros((3,n)) # For using 'n' solution for item in range(1,n+1): #Start from bed and work to surface #j-1 if item>1: A[0,item-2]=-(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1])) ) B[0,item-2]=+(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1])) ) #j A[1,item-1]=+(2/T_del)+( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + (K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item]))) ) B[1,item-1]=+(2/T_del)-( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) + (K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item]))) ) if item == n: #Sets free-slip boundary condition at the surface A[1,item-1]=+(2/T_del)+( 
(K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) B[1,item-1]=+(2/T_del)-( (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) #j+1 if item != n: A[2,item]=-(K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item])) ) B[2,item]=+(K[item]/((Z_f[item]-Z_f[item-1])*(Z_c[item+1]-Z_c[item])) ) A = sparse.spdiags(A,array([-1,0,1]),n,n) B = sparse.spdiags(B,array([-1,0,1]),n,n) RHS = B.dot(Q[:,time-1]) RHS[0] = RHS[0] + (u0* (K[item-1]/((Z_f[item]-Z_f[item-1])*(Z_c[item]-Z_c[item-1]))) ) #Because u0 is zero, this line does nothing. RHS = RHS + (Re/(Fr*Fr))*((eta/(U*U))*cos(2*pi*((float(time-1)*T_del)/T))) + (Re/(Fr*Fr))*((eta/(U*U))*cos(2*pi*((float(time-1)*T_del)/T))) #To show the matrix A and boundary conditions for the first time-step if time == 1: print(A) print(RHS) Q[:,time] = matrix(spla.spsolve( sparse.csr_matrix(A.toarray()),RHS)).T #Q[:,time] = sor(matrix(A.toarray()),RHS,RHS,0.05,1.85) #There is an issue with my SOR code in which it does not like to solve equations with negative values on the right-hand side. Time = matrix(range(0,Timesteps))*T_del Depth = Z_c return [Time,Depth,Q] # ##Running the Program # # Having reduced the equation to a discrete form of nondimensional terms (with the key dimensionless constant $\frac{Re}{Fr^{2}}\frac{\eta}{U^{2}}$ ), I was able to proceed to solving for the time-varying solution. # # While first writing the code, I ran into numerous bugs in the process of writing the proper discretized equations (confusing signs for the '$\eta+1$' and '$\eta$' matrices), and discovered that my SOR code only works properly when the right-hand side of the equation (the boundary conditions) is positive and when the left-hand side of the equation is an array (as opposed to a sparse matrix). 
# # As the solution using my personal SOR code grows logarithmically with time (due to the issues mentioned above), and in the interest of time, I have made use of the scipy sparse-matrix linear solver, as I have implimented sparse matrices in this assignment to allow for efficient computation of high-resolution problems. One added benefit to this is the drastic reduction in computation time caused by solving using sparse matrices (in which the zeros with matrix $[A]$ are not stored). The parameters for the initial run are specified below. # + ## Begin Program eta = 0.001 #m of sea level anomaly U=1 #mean velocity H=1.0 #depth of the water column K=1e-2 Fr = U/sqrt(9.81*H) #Froude number from given conditions Re = (H*U)/K #Reynolds number from a specified maximum diffusivity # Set number of cells = similar to resolution, as cells are evenly placed between -1 and 0. n = 25 # Set bottom boundary condition u0 = 0 # Create K matrix K=ones(n+1)*K print('The dimensionless constant = '+str((Re/(Fr*Fr))*((eta/(U*U))))) #The dimensionless constant # - ## Run the Crank-Nicolson scheme, having initialized the fields [Time,Depth,Q]=cranknicolson(12.44,0.5,40,n,eta,u0,U,Fr,Re,K) # ##Checking the Forcing # To check that the sinusoidal pressure gradient forcing is inducing a reasonable time-dependent signal in the velocity, I'm plotting the nondimensional surface velocity ($u^{\ast}$) against time. It looks correct, as I'm inducing a pressure gradient dependent on $\cos\left(\frac{time}{T_{tidal}}\right)$, and the surface velocity follows the trend of the forcing (being at the free-slip surface condition). 
# Plot of Surface Velocity plt.style.use('fivethirtyeight') plt.plot(Time.T,Q[n-1,:].T)#,linewidth=2,marker='o') plt.title('Surface Velocity') plt.ylabel(r'$u^{\ast}$', fontsize=20) plt.xlabel(r'$Time$', fontsize=20) #plt.savefig('SurfaceVel.pdf', format='pdf', dpi=1200) # ##The velocity field over time # Having run the semi-implicit Crank-Nicolson scheme forward in time through one tidal cycle, I checked to ensure that the scheme was producing reasonable velocity profiles. # # In the figure below, I've shown the velocity profile changing over one half of the tidal cycle, smoothly switching directions as the forced pressure gradient changes sign. The result appears to be accurate; as the diffusivity is constant throughout the water column, each grid cell adjusts to the barotropic pressure gradient without retaining information from the previous timestep. In essence, the forward steps through time in this case could have been modelled independently and joined into a single matrix after the calculations. # + #Plot of changes over time plt.figure() plt.style.use('fivethirtyeight') for int in range(0,16): #lines=plt.plot(append(Q[:,int],Q[19,int]),(matrix([range(0,20)]).T/20.0)) lines=plt.plot(-append(append(Q[0,0]*0,Q[:,int*1]),Q[size(Q,0)-1,int*1]),matrix(Depth).T) #Append boundary conditions plt.text(-Q[size(Q,0)-1,int*1]-0.025, 0.03, (Time[0,int]*1), fontproperties='serif', fontsize=12) #plt.setp(lines, linewidth=2.0) plt.title('$u$ over Time (hours)') plt.ylim([Depth.min(),0.1]);#plt.xlim([-5e6,5e6]) plt.ylabel(r'$z^{\ast}$', fontsize=20) plt.xlabel(r'$u^{\ast}$', fontsize=20) plt.grid('on') #plt.legend(['0','20','40','60','80','100','120']) #plt.xscale('log') plt.show() # - # ##Variations on the Problem - Linear Diffusivity # # To vary the conditions going into the problem, I first decided to adjust the diffusivity field to more accurately portray a decrease in mixing as you move down through the water column. 
The figure immediately below reflects the new linear diffusivity profile. # + # Create K matrix K=ones(n+1)*(10**-2) new=list(arange(1,n+1)) for l in new: K[l]=(K[l]*float(l)) #New varying diffusivity, with higher values closer to the surface # Plot diffusivity profile plt.plot(append(K,K[n]),Depth) plt.title('Linear Diffusivity Profile') plt.ylim([Depth.min(),0.1]) plt.ylabel(r'$z^{\ast}$', fontsize=20) plt.xlabel(r'$K$', fontsize=20) plt.show() ## Re-Run the Crank-Nicolson scheme, having initialized the fields [Time,Depth,Q]=cranknicolson(12.44,0.5,40,n,eta,u0,U,Fr,Re,K) # - # The plot below illustrates the effects of the linear diffusivity profile; the most grabbing adjustment is the asymmetry of the solution due to the initial condition. I'm not sure why the initial profile is provides an extreme starting point for the time iteration scheme, but plotting over longer periods of time (i.e. a full tidal cycle) reveals that the stable surface velocity maxima are between -1.5 and 1.5 $u^{\ast}$. Focussing on the smaller deviations, you can barely see the effects of the linear diffusivity near -1.8 $z^{\ast}$, in which the grid cells closer to the bathymetry adjust to the forcing at a slightly faster rate than the cells near the surface. 
#Plot of changes over time plt.figure() plt.style.use('fivethirtyeight') for int in range(0,20): lines=plt.plot(-append(append(Q[0,0]*0,Q[:,int*1]),Q[size(Q,0)-1,int*1]),matrix(Depth).T) #Append boundary conditions plt.text(-Q[size(Q,0)-1,int*1]-0.05, 0.03, (Time[0,int]*1), fontproperties='serif', fontsize=12) #plt.setp(lines, linewidth=2.0) plt.title('$u$ over Time (hours)') plt.ylim([Depth.min(),0.1]);#plt.xlim([-5e6,5e6]) plt.ylabel(r'$z^{\ast}$', fontsize=20) plt.xlabel(r'$u^{\ast}$', fontsize=20) plt.grid('on') # ##Variations on the Problem - Higher temporal resolution # # Having seen the flow respond more realistically to the barotropic forcing with the linear diffusivity profile, I was still interested to see how well the process could be resolved temporally. To this end, the following code includes the linear diffusivity profile used above in conjunction with a time-step of 0.1 instead of 0.5 hours. ## Re-Run the Crank-Nicolson scheme, having initialized the fields [Time,Depth,Q]=cranknicolson(12.44,0.1,180,n,eta,u0,U,Fr,Re,K) # As shown in the figure below, the code is able to resolve temporal variability quite well, including the adjustment of the flow from the very strong negative velocities to slightly weaker positive ones. The realistic problem could be further improved with a logarithmic diffusivity, but it is nice to see that the velocity profile is responding as it ought to with a linear $\nu$ profile. # # It appears that even without a pressure gradient (approximately hour 6 of our cosine-based M2 tide), there is a depth-uniform nonzero velocity profile. This appears to be an indication of another bug in the code, but it could be a reaction of the system to the over-estimated initial conditions, relaxing quickly to the opposite sign. However, the model with larger timesteps accurately placed the 6-hour velocity profile about zero, so perhaps it is an issue with time-stepping. 
#Plot of changes over time plt.figure() plt.style.use('fivethirtyeight') for int in range(53,65): lines=plt.plot(-append(append(Q[0,0]*0,Q[:,int*1]),Q[size(Q,0)-1,int*1]),matrix(Depth).T) #Append boundary conditions plt.text(-Q[size(Q,0)-1,int*1]-0.05, 0.03, (Time[0,int]*1), fontproperties='serif', fontsize=12) #plt.setp(lines, linewidth=2.0) plt.title('$u$ over Time (hours)') plt.ylim([Depth.min(),0.1]);#plt.xlim([-5e6,5e6]) plt.ylabel(r'$z^{\ast}$', fontsize=20) plt.xlabel(r'$u^{\ast}$', fontsize=20) plt.grid('on') # To briefly check the output of the model, I am showing the surface velocity pattern in the figure below. It's quite clear that the initial conditions are an over-estimate of the velocities produced at the peak of the tide, but I'm not sure what has caused this error to pop up, as the initial conditions are independent of the time-step; I'll be coming back to this in the next few days to find the root of the issue, as I do not have the time to do so now. # Plot of Surface Velocity plt.style.use('fivethirtyeight') plt.plot(Time.T,Q[n-1,:].T)#,linewidth=2,marker='o') plt.title('Surface Velocity') plt.ylabel(r'$u^{\ast}$', fontsize=20) plt.xlabel(r'$Time$', fontsize=20) #plt.savefig('SurfaceVel.pdf', format='pdf', dpi=1200) # ##Lessons from the Problem # # I have learned quite a bit this week, from going through the iteration process to troubleshooting almost all of the code to find out that the primary bug was in the previous week's SOR code. My primary objective of learning to implement the Crank-Nicolson scheme was realized, after understanding how important signs are when constructing matrices. In addition, I was surprised to find how much time was gained by implementing sparse matrices into the solving method rather than storing all numbers within matrices $[A]$ and $[B]$. 
# # Having increased the complexity of my original problem posed in Assignment 1, it is quite clear that these simple models can be used to diagnose effects of single parameters, and increasing the temporal and spatial resolution of the problem can allow you to pinpoint effects that would easily be missed in field studies. These models are great for forming an intuition of processes; however, as they become increasingly complex, it is increasingly important to write the models efficiently and clearly. In the coming week(s), I plan to revisit this problem to check over my code for bugs related to the time-step issue noted in the section above, and to correct the SOR code to run (correctly) with sparse matrices. #
Corlett_Assign2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Loading dependencies import numpy import sklearn import sklearn.metrics.pairwise from sklearn.metrics.pairwise import pairwise_distances import string import collections from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.cluster import KMeans from sklearn.metrics import adjusted_rand_score from nltk import word_tokenize from nltk.stem import PorterStemmer from nltk.corpus import stopwords from pprint import pprint import nltk nltk.download('stopwords') nltk.download('punkt') # # Data cleaning # The first step of nearly any machine learning analysis project is data cleaning. This is done in order to allow a larger variety of models to work with a predictable input, such that exceptions (in this case special characters such as quotation marks, '[comma]' and others) will not cause any disturbance in the model. The following code loads the data, 'cleans' it, and afterwards sets the entire cleaned data in an array. Comments are added in the code for interpretability. NLTK is also used for cleaning. 
# ### Cleaning

def process_text(text, stem=True):
    """ Tokenize text and stem words removing punctuation """
    # str.translate needs a translation table, not a plain string: the original
    # `text.translate(string.punctuation)` removed nothing. Build a table that
    # deletes every punctuation character instead.
    text = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(text)

    if stem:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(t) for t in tokens]

    return tokens

# +
## Set an empty list variable
descriptions = []

with open('descriptions.txt', encoding = "utf8") as f:
    for line in f:
        text = line.lower()  ## Lowercase all characters
        text = text.replace("[comma]"," ")  ## Replace [commas] with empty space
        for ch in text:
            ## The cleaning operation happens here: everything outside [0-9a-z] is blanked
            if ch < "0" or (ch < "a" and ch > "9") or ch > "z":
                text = text.replace(ch," ")
        text = ' '.join(text.split())  ## Remove double spacing from sentences
        descriptions.append(text)

dataSet = numpy.array(descriptions)

print('After running first results, the following sentence was found : ')
print()
print(dataSet[496])
print()
print()
print('Although this sentence is not meaningful, it will remain in the dataset in order to have consistent results.')
# -

# ### Representation
#
# Since the input vector now is 'clean', different representations can be made, which in
# turn can then be 'trained' to obtain accuracy measures of classification. Firstly,
# countVectorizer by scikitLearn (which counts all the instances of words) will run on our
# cleaned dataset. Afterwards TfIdf will run, in order to have the Term frequency, inverse
# document frequency (which will essentially put less importance on non-informative words
# such as: 'the', 'and', 'a'). Scikit-learn provides a neat function to do this in a
# single function, namely TfidfVectorizer.

# +
vectorizer = TfidfVectorizer(stop_words='english')
TfIdf_dataSet = vectorizer.fit_transform(dataSet)

print("What our Tf-Idf looks like: ")
print()
print(TfIdf_dataSet[0:1])

# NOTE(review): _validate_vocabulary is private sklearn API and its result is unused
# downstream — confirm this call is still needed.
vectorVocab = vectorizer._validate_vocabulary()
# -

# The data is now cleaned and neatly fit into a sparse array.
# Some basic information about the cleaned array will be provided in the following code.

print('The size of our data set: ', dataSet.size)
print('The dimension of our dataset are: ', dataSet.shape)
print('\n')
print('-- 0th element of our dataSet --', '\n', dataSet[0])
print('\n')
print('-- 1st element of our dataSet --', '\n', dataSet[1])

# # Distance metrics

# ## Cosine similarity
#
# Now we can safely compute the distance between each document. After sorting, the most
# similar top 5 documents will be provided. The first vector in the matrix represents the
# 'base' sentence. The vectors following are the sentences most similar to that 'base'
# sentence. This should be read per row. For example, the second element of the first row
# is most similar to the first element of the first row.

## Make use of SKlearn cosine similarity
cosineSimilarity = sklearn.metrics.pairwise.cosine_similarity(TfIdf_dataSet)
print(cosineSimilarity)

## Adjust the cosineSimilarity matrix accordingly to sort and get results
# Setting the diagonal to 1.1 (larger than any cosine value) guarantees each sentence
# sorts first in its own row, so column 0 of the sorted result is the sentence itself.
numpy.fill_diagonal(cosineSimilarity,1.1)
# Negating before argsort yields a descending (most-similar-first) ordering per row.
cosineSimilaritySorted = numpy.argsort((-1*(cosineSimilarity)),axis=1)
# NOTE(review): with self in column 0 this keeps self + only 4 neighbours, whereas the
# euclidean version below keeps 6 columns (self + 5) — confirm which width is intended.
top5similar = (cosineSimilaritySorted[:,0:5])
print()
print(top5similar)

# ### Interpretation of the cosine similarity
#
# Following the cosine metric, the first sentence in our dataSet is closest to the 1455th
# sentence in our data set. Let's see what they both look like:

print('Sentence 1 in the dataSet: ')
print(dataSet[0])
print()
print('Sentence 1455 in the dataSet: ')
print(dataSet[1454])

# ## Euclidean distance

euclid = pairwise_distances(TfIdf_dataSet, metric='euclidean')
# Ascending sort: distance 0 (the sentence itself) always lands in column 0.
euclidSorted = numpy.argsort(euclid, axis=1)
top5SimilarEuclidean = euclidSorted[:,0:6]  # self + 5 nearest neighbours
print(top5SimilarEuclidean)

# Interpretation of euclidean distance, which is similar to our cosine similarity.
print('Sentence 1 in the dataSet: ')
print(dataSet[0])
print()
print('Sentence 1455 in the dataSet: ')
print(dataSet[1454])

# # KMeans clustering
#
# Besides finding similar documents by cosine similarity, an implementation of KMeans clustering is done in the following code. This is more meaningful, since it is known that there are 5 sentences that are equal to each other, therefore making the number of clusters to 296. Also, it allows for topic extraction, which can be interpreted as the most important words for each cluster.

# ## Cleaning the dataset

# +
sentences = []

with open('descriptions.txt', encoding = "utf8") as f:
    for line in f:
        text = line.lower() ## Lowercase all characters
        text = text.replace("[comma]"," ") ## Replace [commas] with empty space
        ## Keep only digits and lowercase letters; every other character
        ## becomes a space (single O(n) pass instead of a per-character
        ## str.replace() loop).
        text = ''.join(ch if '0' <= ch <= '9' or 'a' <= ch <= 'z' else ' ' for ch in text)
        text = ' '.join(text.split()) ## Remove double spacing from sentences
        sentences.append(text)

#sentences = sentences[0:100]
nclusters = int(len(sentences)/5)
print(len(sentences), nclusters)
# -

def word_tokenizer(text):
    """Tokenize and Porter-stem *text*, dropping English stopwords."""
    tokens = word_tokenize(text)
    stemmer = PorterStemmer()
    # Hoist the stopword list out of the comprehension and use a set:
    # the original rebuilt stopwords.words('english') (a list) for every
    # single token, turning each membership test into an O(n) scan.
    stop = set(stopwords.words('english'))
    tokens = [stemmer.stem(t) for t in tokens if t not in stop]
    return tokens

# We transform the dataset to work with KMeans

def cluster_sentences(sentences, nb_of_clusters):
    """Cluster *sentences* into *nb_of_clusters* KMeans clusters.

    Returns
    -------
    (dict, ndarray)
        A mapping of cluster label -> list of sentence indices, and the
        per-sentence label array.
    """
    tfidf_vectorizer = TfidfVectorizer(tokenizer=word_tokenizer,
                                       stop_words=stopwords.words('english'),
                                       max_df=0.9,
                                       min_df=0.1,
                                       lowercase=True)
    # builds a tf-idf matrix for the sentences
    tfidf_matrix = tfidf_vectorizer.fit_transform(sentences)
    kmeans = KMeans(n_clusters=nb_of_clusters)
    # Bug fix: the original called kmeans.fit() and then fit_predict(),
    # fitting the model twice and throwing the first (expensive) fit
    # away.  A single fit_predict() fits once and returns the labels,
    # keeping kmeans.labels_ consistent with the returned array.
    kmeans_array = kmeans.fit_predict(tfidf_matrix)
    clusters = collections.defaultdict(list)
    for i, label in enumerate(kmeans.labels_):
        clusters[label].append(i)
    return dict(clusters), kmeans_array

# We train the KMeans in the next lines of code. 
clusters, kmeans_predict = cluster_sentences(sentences, nclusters)

# +
# Build a cluster-label -> row-indices map in one O(n) pass, then read each
# row's neighbours from it.  The previous version rescanned the whole
# prediction array for every row (O(n^2)) and created an unused `jl` list
# on every inner iteration.
members = collections.defaultdict(list)
for j in range(1, len(kmeans_predict)):
    members[kmeans_predict[j]].append(j)

output = []
for i in range(len(kmeans_predict)):
    # Row i first, followed (in ascending order, starting from index 1 as
    # in the original scan) by every other row in the same cluster.
    output.append([i] + [j for j in members[kmeans_predict[i]] if j != i])

# +
# Normalise every group to exactly 5 entries: truncate long groups, pad
# short ones by repeating an existing member in place.
for idx, line in enumerate(output):
    if len(line) > 5:
        # Bug fix: use the enumerate index instead of output.index(line),
        # which returns the position of the FIRST equal list and would
        # truncate the wrong row whenever two groups are identical.
        output[idx] = line[0:5]
    if len(line) > 1 and len(line) < 5:
        line += [line[1]]*(5-len(line))
    if len(line) == 1:
        line += [line[0]]*4

for line in output[0:11]:
    print(line)
# -

# ## Interpretation of the KMeans results

print('The first sentence:')
print(dataSet[0])
print()
print('The 159th sentence:')
print(dataSet[158])

# It might be necessary to run the KMeans two or three times to get similar results.
Group Assignment_OLD.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### 2강 독립사건과 확률
#
# Independent
#
# $P(A|B) = P(A),\quad P(B|A) = P(B)$
#
# $P(A\cap B) = P(A)P(B)$

# ### combinatorial analysis : 순열 조합 수, 경우의 수
#
# 1. permutation(순열) : 서로 다른 n개를 1열로 나열 (line arrangement of n different objects)
#
# $\frac{n!}{(n-r)!}$
#
# 2. group permutation (중복이 있는 것의 순열)
#
# $\frac{n!}{n_{1}!n_{2}!...n_{k}!}$
#
# 3. circular permutation
#
# $\frac{n!}{n}$ 여기에 몇개마다 다른 위치 되는지 곱해주면 된다.
#
# ### combination: 조합
#
# select r objects out of n, 순서 상관 없음
#
# n개 중 r개를 뽑아내겠다.
#
# $_{n}C_{r} = \frac{_{n}P_{r}}{r!} = \frac{n!}{(n-r)!r!}$

# ### binomial theorem : 이항 정리
#
# $(a+b)^{n} = A_{0}a^{n} + A_{1}a^{n-1}b^{1} + A_{2}a^{n-2}b^{2} + ... + A_{n}b^{n}$
#
# $B(n, P)$ 반복실행시 이항 정리 이용해서 $\binom{n}{k} p^{k}(1-p)^{n-k}$
#
# ### Stirling's formula
#
# n! 이게 커지면 계산하기 어렵다. 그래서 approximation 사용
#
# $n! \approx \sqrt{2\pi n} \left(\frac{n}{e}\right)^{n}$
#
#
# ## Reliability : 고장나지 않고 잘 동작하는지의 기간
# duration of useful functioning of system
#
# R(t) : probability that a system will be functioning at time t
#
# $R(t) = R_{1}(t)R_{2}(t)\cdots R_{n}(t)$
Hyelan/Probability_and_Statistics/02_independent_events_and_probability.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kkorhone/Infinite_Borehole_Field/blob/main/budapest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="VzzulnGkJbzf" # # Pygfunction test # # This code attempts to find how much energy can be annually extracted from the ground using a 2-by-5 borehole field. # + [markdown] id="D0wzBJpgJN8v" # **First, the pygfunction library needs to be installed.** # + colab={"base_uri": "https://localhost:8080/", "height": 731} id="_25t8ChDHZ0-" outputId="14bd14cf-1745-4c75-a0ac-10c6072218c4" pip install pygfunction # + id="tffzeZaLHvnj" colab={"base_uri": "https://localhost:8080/", "height": 502} outputId="3aeac4bf-9810-4f99-da8f-f1b088b3e5fb" import matplotlib.pyplot as plt import scipy.interpolate import scipy.optimize import scipy.signal import pygfunction import numpy as np def main(NB): monthly_fraction = np.ones(12) / 12 T_surface = 5.8 # [degC] q_geothermal = 42.9e-3 # [W/m^2] k_rock = 2.71 # [W/(m*K)] Cp_rock = 728.0 # [J/(kg*K)] rho_rock = 2731.0 # [kg/m^3] R_borehole = 0.085 # [K/(W/m)] borehole_length = 200.0 # [m] borehole_radius = 0.115 / 2 # [m] num_years = 25 # [1] spf = 3.0 # [1] T_target = -1.0 # [degC] a_rock = k_rock / (rho_rock * Cp_rock) # [m^2/s] t_max = num_years * 365 * 24 * 3600.0 # [s] delta_t = 730 * 3600.0 # [s] borehole_geometry = (NB, NB) borehole_spacing = (20, 20) T_initial = T_surface + q_geothermal / k_rock * (0.5 * borehole_length) ts = borehole_length**2 / (9.0 * a_rock) borehole_field = pygfunction.boreholes.rectangle_field(N_1=borehole_geometry[0], N_2=borehole_geometry[1], B_1=borehole_spacing[0], B_2=borehole_spacing[1], H=borehole_length, D=0, 
r_b=borehole_radius) total_borehole_length = borehole_geometry[0] * borehole_geometry[1] * borehole_length t = pygfunction.utilities.time_geometric(delta_t, t_max, 50) g = pygfunction.gfunction.uniform_temperature(borehole_field, t, a_rock, nSegments=1, disp=False) ti = np.arange(delta_t, t_max+delta_t, delta_t) gi = scipy.interpolate.interp1d(t, g)(ti) #plt.figure() #plt.plot(np.log(t/ts), g, "b.") #plt.plot(np.log(ti/ts), gi, "r-") #plt.xlabel("ln(t/ts)") #plt.ylabel("g-function") def evaluate_mean_fluid_temperatures(annual_heat_load): monthly_heat_load = annual_heat_load * monthly_fraction heat_rate = np.ravel(np.tile(monthly_heat_load*1_000_000/730.0, (1, num_years))) specific_heat_rate = heat_rate / total_borehole_length delta_q = np.hstack((-specific_heat_rate[0], np.diff(-specific_heat_rate))) T_wall = T_initial + scipy.signal.fftconvolve(delta_q, gi/(2.0*np.pi*k_rock), mode="full")[:len(ti)] T_fluid = T_wall - R_borehole * specific_heat_rate return T_fluid def cost_function(annual_heat_load): T_fluid = evaluate_mean_fluid_temperatures(annual_heat_load) return np.abs(np.min(T_fluid) - T_target) annual_heat_load = scipy.optimize.fminbound(cost_function, 1, 100000, xtol=0.001) T_fluid = evaluate_mean_fluid_temperatures(annual_heat_load) #plt.figure() #plt.plot(ti/(365*24*3600), T_fluid) #plt.axhline(T_target, ls="--", color="k") #plt.xlabel("Year") #plt.ylabel(u"Mean fluid temperature [\xb0C]") #plt.title(f"annual_heat_load = {spf/(spf-1)*annual_heat_load:.3f} MWh") print(borehole_geometry[0]*borehole_geometry[1], spf/(spf-1)*annual_heat_load) if __name__ == "__main__": for NB in [41, 42, 43, 44, 45, 46, 47, 48, 49, 51, 52, 53, 54, 55, 56, 57, 58, 59]: main(NB)
infinite_field.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: nlp_gpu # language: python # name: nlp_gpu # --- # *Copyright (c) Microsoft Corporation. All rights reserved.* # # *Licensed under the MIT License.* # # # Text Classification of MultiNLI Sentences using BERT # # Before You Start # # > **Tip**: If you want to run through the notebook quickly, you can set the **`QUICK_RUN`** flag in the cell below to **`True`** to run the notebook on a small subset of the data and a smaller number of epochs. # # The table below provides some reference running time on different machine configurations. # # |QUICK_RUN|Machine Configurations|Running time| # |:---------|:----------------------|:------------| # |True|4 **CPU**s, 14GB memory| ~ 15 minutes| # |False|4 **CPU**s, 14GB memory| ~19.5 hours| # |True|1 NVIDIA Tesla K80 GPUs, 12GB GPU memory| ~ 3 minutes | # |False|1 NVIDIA Tesla K80 GPUs, 12GB GPU memory| ~ 1.5 hours| # # If you run into CUDA out-of-memory error or the jupyter kernel dies constantly, try reducing the `BATCH_SIZE` and `MAX_LEN`, but note that model performance will be compromised. ## Set QUICK_RUN = True to run the notebook on a small subset of data and a smaller number of epochs. 
QUICK_RUN = False # + import sys sys.path.append("../../") import os import json import pandas as pd import numpy as np import scrapbook as sb from sklearn.metrics import classification_report, accuracy_score from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split import torch import torch.nn as nn from utils_nlp.dataset.multinli import load_pandas_df from utils_nlp.models.bert.sequence_classification import BERTSequenceClassifier from utils_nlp.models.bert.common import Language, Tokenizer from utils_nlp.common.timer import Timer # - # ## Introduction # In this notebook, we fine-tune and evaluate a pretrained [BERT](https://arxiv.org/abs/1810.04805) model on a subset of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) dataset. # # We use a [sequence classifier](../../utils_nlp/models/bert/sequence_classification.py) that wraps [Hugging Face's PyTorch implementation](https://github.com/huggingface/pytorch-pretrained-BERT) of Google's [BERT](https://github.com/google-research/bert). # + tags=["parameters"] TRAIN_DATA_FRACTION = 1 TEST_DATA_FRACTION = 1 NUM_EPOCHS = 1 if QUICK_RUN: TRAIN_DATA_FRACTION = 0.01 TEST_DATA_FRACTION = 0.01 NUM_EPOCHS = 1 if torch.cuda.is_available(): BATCH_SIZE = 32 else: BATCH_SIZE = 8 DATA_FOLDER = "./temp" BERT_CACHE_DIR = "./temp" LANGUAGE = Language.ENGLISH TO_LOWER = True MAX_LEN = 150 BATCH_SIZE_PRED = 512 TRAIN_SIZE = 0.6 LABEL_COL = "genre" TEXT_COL = "sentence1" # - # ## Read Dataset # We start by loading a subset of the data. The following function also downloads and extracts the files, if they don't exist in the data folder. # # The MultiNLI dataset is mainly used for natural language inference (NLI) tasks, where the inputs are sentence pairs and the labels are entailment indicators. The sentence pairs are also classified into *genres* that allow for more coverage and better evaluation of NLI models. 
# # For our classification task, we use the first sentence only as the text input, and the corresponding genre as the label. We select the examples corresponding to one of the entailment labels (*neutral* in this case) to avoid duplicate rows, as the sentences are not unique, whereas the sentence pairs are. df = load_pandas_df(DATA_FOLDER, "train") df = df[df["gold_label"]=="neutral"] # get unique sentences df[[LABEL_COL, TEXT_COL]].head() # The examples in the dataset are grouped into 5 genres: df[LABEL_COL].value_counts() # We split the data for training and testing, and encode the class labels: # split df_train, df_test = train_test_split(df, train_size = TRAIN_SIZE, random_state=0) df_train = df_train.sample(frac=TRAIN_DATA_FRACTION).reset_index(drop=True) df_test = df_test.sample(frac=TEST_DATA_FRACTION).reset_index(drop=True) # + # encode labels label_encoder = LabelEncoder() labels_train = label_encoder.fit_transform(df_train[LABEL_COL]) labels_test = label_encoder.transform(df_test[LABEL_COL]) num_labels = len(np.unique(labels_train)) # - print("Number of unique labels: {}".format(num_labels)) print("Number of training examples: {}".format(df_train.shape[0])) print("Number of testing examples: {}".format(df_test.shape[0])) # ## Tokenize and Preprocess # Before training, we tokenize the text documents and convert them to lists of tokens. The following steps instantiate a BERT tokenizer given the language, and tokenize the text of the training and testing sets. 
# + tokenizer = Tokenizer(LANGUAGE, to_lower=TO_LOWER, cache_dir=BERT_CACHE_DIR) tokens_train = tokenizer.tokenize(list(df_train[TEXT_COL])) tokens_test = tokenizer.tokenize(list(df_test[TEXT_COL])) # - # In addition, we perform the following preprocessing steps in the cell below: # - Convert the tokens into token indices corresponding to the BERT tokenizer's vocabulary # - Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence # - Pad or truncate the token lists to the specified max length # - Return mask lists that indicate paddings' positions # - Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification) # # *See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.* tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens( tokens_train, MAX_LEN ) tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens( tokens_test, MAX_LEN ) # ## Create Model # Next, we create a sequence classifier that loads a pre-trained BERT model, given the language and number of labels. classifier = BERTSequenceClassifier( language=LANGUAGE, num_labels=num_labels, cache_dir=BERT_CACHE_DIR ) # ## Train # We train the classifier using the training examples. This involves fine-tuning the BERT Transformer and learning a linear classification layer on top of that: with Timer() as t: classifier.fit( token_ids=tokens_train, input_mask=mask_train, labels=labels_train, num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=True, ) print("[Training time: {:.3f} hrs]".format(t.interval / 3600)) # ## Score # We score the test set using the trained classifier: preds = classifier.predict(token_ids=tokens_test, input_mask=mask_test, batch_size=BATCH_SIZE_PRED) # ## Evaluate Results # Finally, we compute the accuracy, precision, recall, and F1 metrics of the evaluation on the test set. 
report = classification_report(labels_test, preds, target_names=label_encoder.classes_, output_dict=True) accuracy = accuracy_score(labels_test, preds ) print("accuracy: {}".format(accuracy)) print(json.dumps(report, indent=4, sort_keys=True)) # for testing sb.glue("accuracy", accuracy) sb.glue("precision", report["macro avg"]["precision"]) sb.glue("recall", report["macro avg"]["recall"]) sb.glue("f1", report["macro avg"]["f1-score"])
examples/text_classification/tc_mnli_bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WHY # ## ...Jupyter? # # Jupyter clients are some of the most powerful pieces of technology users can run in their web browsers. By developing cross-client capabilities, either as kernel-level extensions, widgets, media types, or other confections, you are helping to advance fields of inquiry you might not even know exist. # ## ...Acceptance Tests? # # Unit tests and strongly-typed languages are superb for rapid, confident iteration on even large codebases. But users will be installing **Your Code** next to an unknown number of **Other People's Code**, and then write **Their Code**. If`^H^H` When it breaks, they might not be able to tell that it's the subtle interaction between these things. Testing **All the Code** together, _as your user will use it_, gives you greater confidence in your ability to ship. # ## ...JupyterLibrary? # # Powered by [Robot Framework](https://robotframework.org), `JupyterLibrary` allows you to: # # - write tests in concise, plain language # - and extend this language to meet your needs # - run in multiple browsers (even at the same time) # - run on multiple operating systems # - view rich reports of your test results # - but also compare your reports over time with machine-readable formats # - generate screenshots to augment your documentation
docs/WHY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import this # **1.1 Hello World!** # Starting with hello world! print("hello world!") # + # Best template for python program. By having this conditional statement at the bottom that calls main, # it actually forces the interpreter to read the entire script before it executes any of the code. This # allows a more procedural style of programming, and it's because Python requires that a function is # defined before it's called. # Import plateform for printing the python version details import platform def main(): message() def message(): print("Python versio is {}".format(platform.python_version())) version = platform.python_version(); print(f"Python versio is { version }") # f function if (__name__ == '__main__'): main() # - # **1.2 Blocks, Scope and condition** # + z = 100 def main(): x = 10 y = 20 print("z inside function = {}".format(z)) if x == y: print("x == y where x is {} and y is {}.".format(x,y)) elif x < y: print("x < y where x is {} and y is {}.".format(x,y)) else: print("x > y where x is {} and y is {}.".format(x,y)) # print('this is not indented. 
uncomment this line to see error.') # ternary statement r = "x is less than y" if x < y else "x is greater or equal to the y" print(r) # No switch in python bcoz there is no need if (__name__ == '__main__'): main() # - # **1.3 Loops** # + # while loop chars = ['a', 'e', 'i', 'o', 'u'] n = 0 while(n < 5): print('char at index {} is {}'.format(n,chars[n])) n += 1 # for loop for char in chars: print(char, end = ' ') # - # Simple example of fibonacci series a,b = 0,1 while b < 100000: print(b, end = ' ', flush = True) a, b = b, a+b # **1.4 Functions** # + # The function is defined with the D-E-F, def keyword that defines a function, then we have the name # of the function, and it will always have parenthesis, even if it does not take any arguments. def test(n = 1): print('This is a test function and value of n is {}'.format(n)) return n*2 test() x = test(4) print(x) # + # Prime checker function def isPrime(x): if x <= 1: return False for n in range(2, x): if x % n == 0: return False else: return True print('13 is prime = {}'.format(isPrime(13))) print('6 is prime = {}'.format(isPrime(6))) print() # end line # List of prime numbers def listPrime(endLimit = 100): for number in range (1,endLimit): if isPrime(number): print(number, end = ' ', flush = True) listPrime(200) # - # **1.5 Objects** # + # defination of a class class Duck: sound = 'quack quack...' walking = 'walks like a duck.' # define two function quack and walk. The first argument for a method inside a class is self def quack(self): print(self.sound) def walk(self): print(self.walking) def main(): donald = Duck() donald.quack() donald.walk() if __name__ == '__main__': main();
1. Basic Overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Visualisation # # In this notebook, we will visualise the data we have combined: # - LF Data # - RF Data # - Wrist Data # - Waist Data # # We can select the Subject Id and the Activity that we are interested in. # + # # !pip install ggplot # - from ggplot import * import pandas as pd # + ####### Select Subject Id ####### # Key in a number from 1 to 20. # 1 to 11: indoors # 12 to 20: outdoors SUBJECT_ID = '4' ###### Select Activity ###### # Key in one of the options below: ## For indoors: # treadmill_walk # treadmill_walknrun # treadmill_slope_walk # indoor_walk # indoor_walknrun ## For outdoors: # outdoor_walk # outdoor_walknrun ACTIVITY = 'indoor_walknrun' ###### Select Segment ###### start = 1 end = 500 # + # Read in the data DATA_PATH = './Combined Data_csv format/' df = pd.read_csv(DATA_PATH + 'Sub_'+ SUBJECT_ID + '.csv', header = 0) # - # df = df.drop(df.columns[0], axis=1) df.tail() # + # Subset out the data by activity of interest k1=df[df[ACTIVITY]==1] k1.head() # - k1.index k1.reset_index(level=0, inplace=True) ggplot(aes(x='index'), data=k1[start:end]) +\ geom_line(aes(y='accX_LF'), color='blue') +\ geom_line(aes(y='accY_LF'), color='red') +\ geom_line(aes(y='accZ_LF'), color='green') # + # multiply the HS and TO by 30 (15) and -30 (15) # this is to make it more obvious in the visualisation k1['LF_HS_mult']= k1.LF_HS.apply(lambda x: (x*30)) k1['RF_HS_mult']= k1.RF_HS.apply(lambda x: (x*30)) k1['LF_TO_mult']= k1.LF_TO.apply(lambda x: (x*-30)) k1['RF_TO_mult']= k1.RF_TO.apply(lambda x: (x*-30)) k1['LF_HS_mult2']= k1.LF_HS.apply(lambda x: (x*15)) k1['RF_HS_mult2']= k1.RF_HS.apply(lambda x: (x*15)) k1['LF_TO_mult2']= k1.LF_TO.apply(lambda x: (x*-15)) k1['RF_TO_mult2']= k1.RF_TO.apply(lambda x: (x*-15)) # - #Left Foot 
ggplot(aes(x='index'), data=k1[start:end]) +\ geom_line(aes(y='accX_LF'), color='blue') +\ geom_line(aes(y='accY_LF'), color='red') +\ geom_line(aes(y='accZ_LF'), color='green') +\ geom_point(aes(y='LF_HS_mult'), color='steelblue', size=100) +\ geom_point(aes(y='LF_TO_mult'), color='blue', size=100) +\ geom_point(aes(y='RF_HS_mult'), color='hotpink', size=100) +\ geom_point(aes(y='RF_TO_mult'), color='pink', size=100) +\ scale_y_continuous(limits=(-40,40)) +\ ggtitle('Left Foot Acceleration') +\ xlab('index') +\ ylab('Acceleration') #Right Foot ggplot(aes(x='index'), data=k1[start:end]) +\ geom_line(aes(y='accX_RF'), color='blue') +\ geom_line(aes(y='accY_RF'), color='red') +\ geom_line(aes(y='accZ_RF'), color='green') +\ geom_point(aes(y='LF_HS_mult'), color='steelblue', size=100) +\ geom_point(aes(y='LF_TO_mult'), color='slateblue', size=100) +\ geom_point(aes(y='RF_HS_mult'), color='hotpink', size=100) +\ geom_point(aes(y='RF_TO_mult'), color='pink', size=100) +\ scale_y_continuous(limits=(-40,40)) +\ ggtitle('Right Foot Acceleration') +\ xlab('index') +\ ylab('Acceleration') #Waist ggplot(aes(x='index'), data=k1[start:end]) +\ geom_line(aes(y='accX_Waist'), color='blue') +\ geom_line(aes(y='accY_Waist'), color='red') +\ geom_line(aes(y='accZ_Waist'), color='green') +\ geom_point(aes(y='LF_HS_mult2'), color='steelblue', size=100) +\ geom_point(aes(y='LF_TO_mult2'), color='slateblue', size=100) +\ geom_point(aes(y='RF_HS_mult2'), color='hotpink', size=100) +\ geom_point(aes(y='RF_TO_mult2'), color='pink', size=100) +\ scale_y_continuous(limits=(-20,20)) +\ ggtitle('Waist Foot Acceleration') +\ xlab('index') +\ ylab('Acceleration') #Wrist ggplot(aes(x='index'), data=k1[start:end]) +\ geom_line(aes(y='accX_Wrist'), color='blue') +\ geom_line(aes(y='accY_Wrist'), color='red') +\ geom_line(aes(y='accZ_Wrist'), color='green') +\ geom_point(aes(y='LF_HS_mult2'), color='steelblue', size=100) +\ geom_point(aes(y='LF_TO_mult2'), color='slateblue', size=100) +\ 
geom_point(aes(y='RF_HS_mult2'), color='hotpink', size=100) +\ geom_point(aes(y='RF_TO_mult2'), color='pink', size=100) +\ scale_y_continuous(limits=(-20,20)) +\ ggtitle('Wrist Foot Acceleration') +\ xlab('index') +\ ylab('Acceleration')
Data Visualisation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Wikipedia Article Scraper
#
# Scrape related Wikipedia article

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 12.578974, "end_time": "2020-11-14T04:51:23.303975", "exception": false, "start_time": "2020-11-14T04:51:10.725001", "status": "completed"} tags=[]
# ! pip install wikipedia -q

# + papermill={"duration": 2.24878, "end_time": "2020-11-14T04:51:25.622015", "exception": false, "start_time": "2020-11-14T04:51:23.373235", "status": "completed"} tags=[]
import spacy

nlp = spacy.load('en')

# + papermill={"duration": 0.360435, "end_time": "2020-11-14T04:51:25.990633", "exception": false, "start_time": "2020-11-14T04:51:25.630198", "status": "completed"} tags=[]
import wikipedia
import pandas as pd
import re
from joblib import Parallel, delayed
from tqdm.notebook import tqdm
import json

# + papermill={"duration": 0.024356, "end_time": "2020-11-14T04:51:23.336957", "exception": false, "start_time": "2020-11-14T04:51:23.312601", "status": "completed"} tags=[]
# Use a context manager so the JSON file handle is closed deterministically
# instead of being leaked by json.load(open(...)).
with open('../input/sdu-shared/diction.json') as f:
    DICT = json.load(f)

# Invert the acronym -> [expansions] dictionary into expansion -> acronym.
expansion2acronym = {}
for k, v in DICT.items():
    for w in v:
        expansion2acronym[w] = k

# + papermill={"duration": 0.022293, "end_time": "2020-11-14T04:51:26.021557", "exception": false, "start_time": "2020-11-14T04:51:25.999264", "status": "completed"} tags=[]
def extract(term):
    """Return the Wikipedia article text for *term*.

    Falls back to Wikipedia's spelling suggestion when the direct page
    lookup fails, and returns '' when that fails too.
    """
    try:
        summary = wikipedia.page(term).content
    except Exception:
        # Narrowed from a bare `except:`: a bare clause also swallows
        # KeyboardInterrupt/SystemExit, which would make the parallel
        # workers impossible to interrupt.
        try:
            suggest_term = wikipedia.suggest(term)
            return wikipedia.page(suggest_term).content
        except Exception:
            return ''
    return summary

# + papermill={"duration": 1082.688115, "end_time": "2020-11-14T05:09:28.719810", "exception": false, "start_time": "2020-11-14T04:51:26.031695", "status": "completed"} tags=[]
# Fetch every expansion's article in parallel across all CPU cores (-1).
data = Parallel(-1)(delayed(extract)(word) for word in tqdm(expansion2acronym.keys()))

# + papermill={"duration": 0.232314, "end_time": "2020-11-14T05:09:28.961969", "exception": false, "start_time": "2020-11-14T05:09:28.729655", "status": "completed"} tags=[]
with open('wiki_article.txt', 'w') as f:
    for article in data:
        f.write(article + '\n')
notebooks/sdu-wiki-article.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.2.0 # language: julia # name: julia-1.2 # --- using Pkg pkg"add Images QuartzImageIO TestImages Unitful AxisArrays" pkg"add Colors ImageInTerminal" using Images using TestImages using Unitful, AxisArrays img = testimage("m") img = rand(4,4) img = Array(reshape(range(0,stop=1,length=10^4),100,100)) img_c = img[51:70,21:70] img_v = @view img[16:35,41:90]; fill!(img_c,1) fill!(img_v,1) img # + using Unitful, AxisArrays using Unitful:mm,s img = AxisArray(rand(256, 256, 6, 50), (:x, :y, :z, :time), (0.4mm, 0.4mm, 1mm, 2s)) # - display.((Gray(0.0),Gray(0.8))) display([RGB(1.0,0.0,0.0),RGB(0.0,1.0,0.0),RGB(0.0,0.0,1.0)]) img_gray = rand(Gray,2,2) img_rgb = rand(RGB,2,2) RGB.(img_gray) Gray.(img_rgb) img_CHW = channelview(img_rgb) img_HWC = permutedims(img_CHW,(2,3,1)) img_rgb = colorview(RGB,img_CHW) img_rgb = rand(RGB,3,3) cv=channelview(img_rgb) fill!(cv,0.5) img_rgb img_rgb = rand(RGB,3,3) #CHW -> HWC # permutedims makes copy img_hwc=permutedims(channelview(img_rgb),(2,3,1)) fill!(img_hwc,0.5) img_rgb #CHW -> HWC # permutedims makes copy img_hwc=permuteddimsview(channelview(img_rgb),(2,3,1)) fill!(img_hwc,0.5) img_rgb arr = rand(4,4) gray=Gray.(arr) fill!(arr,0.5) img_num = Float64.(gray) gray arr = rand(4,4) gray=colorview(Gray,arr) fill!(arr,0.5) gray arr = rand(4,4) gray=colorview(Gray,arr) gray_view = channelview(gray) fill!(gray_view,0.5) arr img_n0f8 = rand(N0f8, 2, 2) pkg"add Colors ImageInTerminal"
imageProcessing/notebooks/images_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pytorch] # language: python # name: conda-env-pytorch-py # --- # --- # # Module Dependency # + # %matplotlib inline import os import time import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm, tqdm_notebook from torch.autograd import Variable import pandas as pd from skimage import io, transform, color, morphology import numpy as np import matplotlib.pyplot as plt from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils, models plt.rcParams['figure.figsize'] = (10.0, 8.0) # - # --- # # Define Constant tqdm.monitor_interval = 0 USE_GPU = torch.cuda.is_available() TRAIN_DATA_PATH = '/media/dennis/HDD/AerialImageDataset/train/512/train_data/' TRAIN_LABEL_PATH = '/media/dennis/HDD/AerialImageDataset/train/512/train_label/' VAL_DATA_PATH = '/media/dennis/HDD/AerialImageDataset/train/512/val_data/' VAL_LABEL_PATH = '/media/dennis/HDD/AerialImageDataset/train/512/val_label/' # --- # # Load data and data preprocessing """ Get file names of training and testing data """ train = (os.listdir(TRAIN_DATA_PATH), os.listdir(TRAIN_LABEL_PATH)) val = (os.listdir(VAL_DATA_PATH), os.listdir(VAL_LABEL_PATH)) print(len(train[0]), len(train[1])) print(len(val[0]), len(val[1])) """ Create dataset object to load aerial data """ class AerialImageDataset(Dataset): """ It is used to load the aerial image dataset """ def __init__(self, data_dir, label_dir, data_list, label_list, transform=None): self.data_dir = data_dir self.label_dir = label_dir self.datas = data_list self.labels = label_list self.transform = transform def __len__(self): return len(self.datas) def __getitem__(self, idx): img_name = self.data_dir + str(self.datas[idx]) lbl_name = self.label_dir + str(self.labels[idx]) img = io.imread(img_name) 
lbl = io.imread(lbl_name) if self.transform: img = self.transform(img) lbl = transforms.ToPILImage()(np.expand_dims(lbl, axis=2)) # lbl = transforms.Resize((512, 512))(lbl) lbl = transforms.ToTensor()(lbl) lbl[lbl >= 0.5] = 1 lbl[lbl < 0.5] = 0 return (img, lbl) """ Build training dataset containing data and label, both of them are in torch.Tensor data type. """ train_dataset = AerialImageDataset( data_dir = TRAIN_DATA_PATH, label_dir = TRAIN_LABEL_PATH, data_list = train[0], label_list = train[1], transform = transforms.Compose([ transforms.ToPILImage(), # transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) ) # fig, axs = plt.subplots(2) # axs[0].imshow(train_dataset[30][0]) # axs[1].imshow(train_dataset[30][1]) print(train_dataset[17099][0].size()) print(train_dataset[17099][1].size()) """ Build Val dataset containing data and label, both of them are in torch.Tensor data type. """ val_dataset = AerialImageDataset( data_dir = VAL_DATA_PATH, label_dir = VAL_LABEL_PATH, data_list = val[0], label_list = val[1], transform = transforms.Compose([ transforms.ToPILImage(), # transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) ) print(val_dataset[899][0].size()) print(val_dataset[899][1].size()) """ Build dataloader for both training and val data, the size of training data will be [4, 3, 500, 500] the size of val data will be [4, 1, 500, 500] """ train_dataloader = DataLoader(train_dataset, 4, shuffle=True) val_dataloader = DataLoader(val_dataset, 4) # --- # # Build Network from model_test import PSPNet, PSPModule, PSPUpsample # + slideshow={"slide_type": "-"} densenet121 = models.densenet121(pretrained=True) print(densenet121) # + slideshow={"slide_type": "fragment"} print(list(densenet121.features.children())[:7]) # + slideshow={"slide_type": "-"} def train_model(model, criterion, optimizer, num_epochs=8): since = time.time() 
best_model_wts = model.state_dict() best_acc = 0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) model.train(True) running_loss = 0.0 running_acc = 0.0 # Iterate over data. for i, data in tqdm_notebook(enumerate(train_dataloader)): if(i == 2000): break # get the inputs inputs, labels = data[0], data[1] # wrap them in Variable if USE_GPU: inputs = Variable(inputs.cuda()) labels = Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() # forward outputs = model(inputs) predict = outputs.data > 0.5 loss = criterion(outputs, labels) loss.backward() optimizer.step() # statistics running_loss += loss.data[0] running_acc += (torch.sum(predict.int() == labels.data.int())/(4*512*512)) epoch_loss = running_loss / 2000 epoch_acc = running_acc / 2000 print('{} Loss: {:.4f}, Acc: {:.4f}'.format('train', epoch_loss, epoch_acc)) # if epoch_acc > best_acc: # best_acc = epoch_acc # best_model_wts = model.state_dict() model.eval() running_loss = 0.0 running_acc = 0.0 # Iterate over data. 
for i, data in enumerate(val_dataloader): # get the inputs inputs, labels = data[0], data[1] # wrap them in Variable if USE_GPU: inputs = Variable(inputs.cuda(), volatile=True) labels = Variable(labels.cuda(), volatile=True) else: inputs, labels = Variable(inputs, volatile=True), Variable(labels, volatile=True) # zero the parameter gradients optimizer.zero_grad() # forward outputs = model(inputs) predict = outputs.data > 0.5 loss = criterion(outputs, labels) # statistics running_loss += loss.data[0] running_acc += torch.sum(predict.int() == labels.data.int())/(4*512*512) epoch_loss = running_loss / len(val_dataloader) epoch_acc = running_acc / len(val_dataloader) if epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = model.state_dict() torch.save(model, './temp.pt') print('{} Loss: {:.4f}, Acc: {:.4f}'.format('val', epoch_loss, epoch_acc)) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) # load best model weights model.load_state_dict(best_model_wts) return model, best_acc # - # --- # # Training # + slideshow={"slide_type": "-"} model = PSPNet(densenet121) if USE_GPU: model.cuda() print(model) # - criterion = nn.BCELoss() optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters())) # optimizer = optim.Adam(model.parameters()) # optimizer = optim.RMSprop(model.parameters()) model, acc = train_model(model, criterion, optimizer, 20) torch.save(model, './model-0108-test-{:.4f}.pt'.format(acc)) # --- # # Test Model # + # from model_old import PSPNet, PSPModule, PSPUpsample # + # model = PSPNet(densenet161) # model = torch.load('./model-1226-0.9504.pt') # - test = val_dataset[10] print(test[0].size(), test[1].size()) inputs, labels = test inputs = inputs.unsqueeze(0) labels = labels.unsqueeze(0) if USE_GPU: inputs = Variable(inputs.cuda()) labels = Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) print(inputs.size(), 
labels.size()) model.eval() outputs = model(inputs) print(outputs) outputs = transforms.ToPILImage()(outputs.data.cpu()[0]) outputs = np.array(outputs) labels = transforms.ToPILImage()(labels.data.cpu()[0]) labels = np.array(labels) print(outputs.shape) print(labels.shape) labels fig, axs = plt.subplots(1,2) axs[0].imshow(outputs > int(255*0.3), cmap='gray') axs[1].imshow(labels, cmap='gray') val_dataset.datas[1] val_dataset.labels[2] val[0].index('6.tif')
Final/.ipynb_checkpoints/pspnet-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 单量子比特门 # # *版权所有 (c) 2021 百度量子计算研究所,保留所有权利。* # ## 内容概要 # 本教程介绍如何产生高保真度的单量子比特门的脉冲。本教程的大纲如下: # - 背景介绍 # - 准备工作 # - 构造哈密顿量 # - 生成 Hadamard 门的优化脉冲 # - 生成 X 门的优化脉冲 # - 生成 Z 门的优化脉冲 # - 生成任意单量子比特门的优化脉冲 # - 总结 # ## 背景介绍 # 我们将对量子比特的操作称为**量子门**,而在超导量子比特中,量子门操作往往通过在量子比特上施加外部微波脉冲驱动和用于调控量子比特的磁通,从而实现量子态的演化。单量子比特门可以表示为酉算符 $U$,数学上可以用一个 $2\times2$ 的酉矩阵来表示。单量子比特门操作也可以用布洛赫球 (Bloch Sphere) 上态矢量 $|\psi\rangle$ 的演化来表示,其中 $|\psi\rangle=\cos(\theta/2)|0\rangle+e^{i\phi}\sin(\theta/2)|1\rangle$ 表示两个量子态 $|0\rangle$ 和 $|1\rangle$ 的叠加态。通过进行单量子比特门操作,我们可以实现不同量子态在布洛赫球面上的转化 \[1\]。 # ![bloch sphere](figures/sphere.png) # 我们总结了一些典型的单量子比特门及其算符和矩阵形式,如下表所示: # | 量子门 | 布洛赫球上的操作 | 算符 | 矩阵形式 | # |----------|:-------------:|:-:|:-:| # | $X$ | 绕 $x$ 轴旋转 $\pi$ 的角度 | $\hat{\sigma}_x$ | $\left(\begin{array}{cc} 0 & 1\\1 & 0\end{array}\right)$ | # | $Y$ | 绕 $y$ 轴旋转 $\pi$ 的角度 | $\hat{\sigma}_y$ |$\left(\begin{array}{cc} 0 & -i\\i & 0\end{array}\right)$ | # | $Z$ | 绕 $z$ 轴旋转 $\pi$ 的角度 | $\hat{\sigma}_z$ |$\left(\begin{array}{cc} 1 & 0\\0 & -1\end{array}\right)$ | # | $S$ | 绕 $z$ 轴旋转 $\pi \over 2$ 的角度 | |$\left(\begin{array}{cc} 1 & 0\\0 & e^{i\frac{\pi}{2}}\end{array}\right)$ | # | $T$ | 绕 $z$ 轴旋转 $\pi \over 4$ 的角度 | |$\left(\begin{array}{cc} 1 & 0\\0 & e^{i\frac{\pi}{4}}\end{array}\right)$ | # | $H$ | 先绕 $x$ 轴旋转 $\pi$ 的角度,再绕 $z$ 轴旋转 $\pi \over 2$ 的角度 | |$\frac{1}{\sqrt{2}}\left(\begin{array}{cc} 1 & 1\\1 & -1\end{array}\right)$ | # **量脉中的单量子门** # # # 在量脉中,对单个量子比特的任意操作都是通过以下等式实现的(包含了一个全局相位项,这里没有 $R_x$ 分量): # $$ # U(\theta, \phi, \lambda) = e^{i(\phi/2+\lambda/2)} R_z(\phi) R_y(\theta) R_z(\lambda) = # \begin{bmatrix} # \cos(\theta/2) & - e^{i\lambda} \sin(\theta/2) \\ # e^{i\phi} \sin(\theta/2) & e^{i(\phi + \lambda)} \cos(\theta/2) # \end{bmatrix} , # $$ # # 其中, $e^{i(\phi/2+\lambda/2)}$ 
是全局相位。 # # # 有关其他门的更多信息,请参阅我们的[API文档](https://quanlse.baidu.com/api/)。 # 现在我们简单介绍一下单量子比特门物理实现的原理。在超导量子比特上,有两种实现单量子比特门的方法: # # - **微波控制**通过向超导量子比特施加微波脉冲信号进行调控(X,Y 通道控制)。 # # - **磁通调控**通过施加局部磁场改变量子比特的共振频率(Z 通道控制)。 # # 下图描绘了超导量子比特的 X/Y/Z 通道: # # ![X/Y/Z controls for single superconducting qubit](figures/hardware_qubit_control.png) # # **微波脉冲的实现** # # 实验上,为了实现对超导量子比特的微波调控(X,Y 通道调控),需要先由本机振荡器(Local Oscillator)产⽣一个高频的微波信号,频率记为 $\omega_{\rm LO}$,然后再⽤低频的信号进行调制。通常,低频信号由任意波发生器(Arbitrary Wave Generator)产生,频率记为 $\omega_{\rm AWG}$。最后我们可以得到频率 $\omega_{d}=\omega_{\rm LO}\pm\omega_{\rm AWG}$ 的高斯型或正切等函数型包络的脉冲。需要指出的是,此处以及后面提及的频率均指的是角频率。 # # **磁通脉冲的实现** # # 实现磁通调控(Z 通道调控),常见的做法是将单个约瑟夫森结替换为超导量子干涉仪 (Superconducting Quantum Interference Device),即一个由两个约瑟夫森结构成的闭环。通过向垂直于闭环的方向施加外部磁场,可以实现对量子比特频率的调控。 # ## 准备工作 # # 成功安装量脉后,您可以按照本教程运行下面的量脉程序。在运行此教程前,您需要从量脉(Quanlse)和其它常用的 Python 库导入以下包: # + # Import numpy and math from numpy import round from math import pi # Import the Hamiltonian module from Quanlse.QHamiltonian import QHamiltonian as QHam # Import simulator interface on Quanlse Cloud Service from Quanlse.remoteOptimizer import remoteOptimize1Qubit # Import related packages from Quanlse.Utils.Functions import project from Quanlse.QOperator import duff from Quanlse.QOperation import FixedGate # - # ## 构造哈密顿量 # # 现在,我们将展示如何使用量脉实现单量子比特门。在这个教程中,我们将模拟一个由一个三能级 transmon 组成的系统。在理想情况下,超导量子比特中失谐性带来的能级差允许调整驱动频率对能级跃迁进行选择性驱动。然而,由于 transmon 量子比特的失谐性较弱,再加上驱动场的频率带宽有限,从而计算空间中的量子态可能会被激发至更高的能级,从而产生能级泄露。在我们的模型中,我们将超导量子比特视为一个简化的三能级系统,从而考虑了泄露到 $| 2\rangle$ 能级的情况。在旋转坐标系(Rotating Frame)中,描述该三能级量子系统的哈密顿量可以写为 \[2\]: # # $$ # \hat{H}=\alpha_q\lvert2\rangle\langle 2\lvert+\frac{\Omega^x(t)}{2}\left[ \hat{a}^\dagger + \hat{a} \right] + \frac{\Omega^y(t)}{2} i \left[\hat{a}^\dagger - \hat{a}\right]+\Omega^z(t)\hat{a}^{\dagger}\hat{a}, # $$ # # 其中 $\alpha_q$ 是失谐频率; $\Omega^x(t)$ 是 X 通道的驱动脉冲的强度;$\Omega^y(t)$ 是 Y 通道的驱动脉冲强度;$\Omega^z(t)$ 是 Z 通道的磁通脉冲强度。这里,$\hat{a}^\dagger = |1\rangle\langle 0| + \sqrt{2}|2\rangle\langle 1|$ 和 
$\hat{a} = |0\rangle\langle 1| + \sqrt{2}|1\rangle\langle 2|$ 分别是产生和湮灭算符。 # 量脉可以用来于实现任意的单量子比特门。量脉支持各种波形的定义,这里我们以高斯脉冲为例。高斯脉冲函数的形式如下: # $$ # A^{x}(t)=A^{x} e^{-((t-\tau^{x})/2\sigma^{x})^2}, # $$ # # $$ # A^{y}(t)=A^{y} e^{-((t-\tau^{y})/2\sigma^{y})^2} . # $$ # 上面的等式中, $A^{x}, A^{y}, \tau^{x}, \tau^{y}, \sigma^{x}, \sigma^{y}$ 是待优化的参数。与微波控制不同,磁通量的输入采用方波的形式,$A^{z}(t) = A^{z}$ ,其中 $A^{z}$ 是待优化的参数。 # # 现在,我们需要用量脉来构造上面的哈密顿量。在量脉中,所有关于哈密顿量的信息都存储在一个哈密顿量对象中。首先,我们定义构建哈密顿量所需的一些基本参数:采样周期、系统中的量子比特数、以及要考虑的系统能级: # + # Sampling period dt = 0.2 # Number of qubits qubits = 1 # System energy level level = 3 # - # 然后,我们定义量子比特的失谐频率: # Define anharmonicity qubitArgs = { "qubit_anharm": - 0.33 * (2 * pi), # Anharmonicity of the qubit } # 最后,我们使用 `QHam()` 实例化一个哈密顿量对象,并同时传入我们上面定义的参数。然后我们通过 `addDrift()` 方法加入比特的失谐项。该方法参数包括失谐项算符,量子比特索引编号和失谐项的强度。 # + # Create the Hamiltonian. ham = QHam(qubits, level, dt) # Add the drift term(s). ham.addDrift(duff, 0, coef=qubitArgs["qubit_anharm"] / 2) # - # ## 生成 Hadamard 门的优化脉冲 # # 创建了系统哈密顿量之后,我们可以使用 `remoteOptimize1Qubit()` 方法生成并优化脉冲(这里我们先以 Hadamard 门为例)。该方法参数包括一个哈密顿量对象,目标比特门,最大脉冲数量,以及目标保真度。在本地设备上进行优化通常需要很长时间,但是,我们提供的云服务可以显著加快这一过程。在使用量脉云服务之前,用户需要从 http://quantum-hub.baidu.com 获取一个 token,并使用以下命令将任务提交到量脉的服务器上。对于这个例子,我们可以通过以下代码实现: # + # Import Define class and set the token # Please visit http://quantum-hub.baidu.com from Quanlse import Define Define.hubToken = '' # Run the optimization gateJob, infidelity = remoteOptimize1Qubit(ham, FixedGate.H.getMatrix(), depth=4, targetInfid=0.0001) # - # 在本教程中,我们用这个公式来定义量子门的失真度: ${\rm infid} = 1 - \frac{1}{d}\left|{\rm Tr}[U^\dagger_{\rm goal}P(U)]\right|$,其中 $U{\rm goal}$ 是单量子比特门的目标演化的酉矩阵; $d$ 是 $U{\rm goal}$ 的维度;$U$ 是实际演化的酉矩阵。这里,$P(U)$ 是投影到计算空间的演化算符。 # # 在这个例子中,我们选择只在 X 和 Y 通道上产生脉冲。我们可以看到,我们生成了保真度较高的脉冲,我们鼓励用户尝试改变这些参数以获得最佳结果。 # # `plot()` 方法允许我们将生成的脉冲可视化。我们还可以通过 `simulate()`和 `project()` 方法得到表示系统演化的矩阵: # + # Print infidelity and the waveforms print(f"minimum infidelity: {infidelity}") gateJob.plot(dark='True') # Print 
the evolution process. result = ham.simulate(job=gateJob) projectedEvolution = project(result.result[0]["unitary"], qubits, level, 2) print("Projected evolution:\n", round(projectedEvolution, 2)) # - # `plot()` 的方法参数包括一个可选的 bool 参数 `dark`(该参数为 `True` 时启用暗色模式)。此外,用户可以使用 `color` 参数为脉冲指定颜色(如果脉冲数多于颜色数,颜色将重复)。(其它参数请参见 API ) # # 下面是优化 X 门、Z 门和任意单量子门脉冲的演示,使用的是我们在上文中已经定义的系统哈密顿量。 # # ## 生成 X 门的优化脉冲 # # 以下代码演示如何生成并优化一个 X 门的脉冲,该过程类似上述对 Hadamard 门的优化过程。 # + # Run the optimization gateJob, infidelity = remoteOptimize1Qubit(ham, FixedGate.Z.getMatrix(), depth=4, targetInfid=0.0001) # Print infidelity and the waveforms print(f"minimum infidelity: {infidelity}") gateJob.plot(dark='True') # Print the evolution process. result = ham.simulate(job=gateJob) projectedEvolution = project(result.result[0]["unitary"], qubits, level, 2) print("Projected evolution:\n", round(projectedEvolution, 2)) # - # ## 生成 Z 门的优化脉冲 # # 下面的代码生成并优化一个 Z 门的脉冲,同样地,该过程与上文所介绍的优化过程相似。 # + # Run the optimization gateJob, infidelity = remoteOptimize1Qubit(ham, FixedGate.Z.getMatrix(), depth=4, targetInfid=0.0001) # Print infidelity and the waveforms print(f"minimum infidelity: {infidelity}") gateJob.plot(dark='True') # Print the evolution process. result = ham.simulate(job=gateJob) projectedEvolution = project(result.result[0]["unitary"], qubits, level, 2) print("Projected evolution:\n", round(projectedEvolution, 2)) # - # ## 生成任意单量子比特门的优化脉冲 # # 以下代码生成并优化任意单量子门 `U(θ=-1.231,φ=1.231,lamda=-1.231)`的脉冲。这里,用户需要从 `RotationGate` 导入 `U`。 # + from Quanlse.QOperation.RotationGate import U # Define a U3 gate aGate = U(theta=-1.231, phi=1.231, lamda=-1.231) # Run the optimization gateJob, infidelity = remoteOptimize1Qubit(ham, aGate.getMatrix(), depth=4, targetInfid=0.0001) # Print infidelity and the waveforms print(f"minimum infidelity: {infidelity}") gateJob.plot(dark='True') # Print the evolution process. 
result = ham.simulate(job=gateJob) projectedEvolution = project(result.result[0]["unitary"], qubits, level, 2) print("Projected evolution:\n", round(projectedEvolution, 2)) # - # # ## 总结 # # # 本教程介绍了使用量脉为任何单量子比特门生成和优化脉冲的完整过程。用户可以点击这个链接 [tutorial-single-qubit-gate.ipynb](https://github.com/baidu/Quanlse/blob/main/Tutorial/CN/tutorial-single-qubit-cn.ipynb) 跳转到此 Jupyter Notebook 文档相应的 GitHub 页面来获得相关代码以运行程序。我们鼓励用户尝试不同于本教程的参数值以获得最佳结果。 # ## 参考文献 # # \[1\] [<NAME>., and <NAME>. Quantum Computation and Quantum Information: 10th Anniversary Edition. Cambridge University Press, 2010.](https://doi.org/10.1017/CBO9780511976667) # # \[2\] [<NAME>., et al. "An introduction into optimal control for quantum technologies." *arXiv preprint arXiv:2003.10132* (2020).](https://arxiv.org/abs/2003.10132)
Tutorial/CN/tutorial-single-qubit-cn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Symbolic System # Create a symbolic three-state system: import markoviandynamics as md sym_system = md.SymbolicDiscreteSystem(3) # Get the symbolic equilibrium distribution: sym_system.equilibrium() # Create a symbolic three-state system with potential energy barriers: sym_system = md.SymbolicDiscreteSystemArrhenius(3) # It's the same object as the previous one, only with additional symbolic barriers: sym_system.B_ij # We can assing values to the free parameters in the equilibrium distribution: sym_system.equilibrium(energies=[0, 0.1, 1]) sym_system.equilibrium(energies=[0, 0.1, 1], temperature=1.5) # and create multiple equilibrium points by assigning temperature sequence: import numpy as np temperature_range = np.linspace(0.01, 10, 300) # + equilibrium_line = sym_system.equilibrium([0, 0.1, 1], temperature_range) equilibrium_line.shape # - # # Symbolic rate matrix # Create a symbolic rate matrix with Arrhenius process transitions: # + sym_rate_matrix = md.SymbolicRateMatrixArrhenius(3) sym_rate_matrix # - # Energies and barriers can be substituted at once: # + energies = [0, 0.1, 1] barriers = [[0, 0.11, 1.1], [0.11, 0, 10], [1.1, 10, 0]] sym_rate_matrix.subs_symbols(energies, barriers) # - sym_rate_matrix.subs_symbols(energies, barriers, temperature=2.5) # A symbolic rate matrix can be also lambdified (transform to lambda function): rate_matrix_lambdified = sym_rate_matrix.lambdify() # The parameters of this function are the free symbols in the rate matrix: rate_matrix_lambdified.__code__.co_varnames # They are positioned in ascending order. First the temperature, then the energies and the barriers. Sequence of rate matrices can be created by calling this function with a sequence for each parameter. 
# # Dynamics # We start by computing an initial probability distribution by assigning the energies and temperature: # + p_initial = sym_system.equilibrium(energies, 0.5) p_initial # - # ## Trajectory - evolve by a fixed rate matrix # Compute the rate matrix by substituting free symbols: # + rate_matrix = md.rate_matrix_arrhenius(energies, barriers, 1.2) rate_matrix # - # Create trajectory of probability distributions in time: # + import numpy as np # Create time sequence t_range = np.linspace(0, 5, 100) trajectory = md.evolve(p_initial, rate_matrix, t_range) trajectory.shape # + import matplotlib.pyplot as plt # %matplotlib inline for i in [0, 1, 2]: plt.plot(t_range, trajectory[i,0,:], label='$p_{}(t)$'.format(i + 1)) plt.xlabel('$t$') plt.legend() # - # ## Trajectory - evolve by a time-dependent rate matrix # Create a temperature sequence in time: temperature_time = 1.4 + np.sin(4. * t_range) # Create a rate matrix as a function of the temperature sequence: # + # Array of stacked rate matrices that corresponds to ``temperature_time`` rate_matrix_time = md.rate_matrix_arrhenius(energies, barriers, temperature_time) rate_matrix_time.shape # + crazy_trajectory = md.evolve(p_initial, rate_matrix_time, t_range) crazy_trajectory.shape # - for i in [0, 1, 2]: plt.plot(t_range, crazy_trajectory[i,0,:], label='$p_{}(t)$'.format(i + 1)) plt.xlabel('$t$') plt.legend() # # Diagonalize the rate matrix # Calculate the eigenvalues, left and right eigenvectors: # + U, eigenvalues, V = md.eigensystem(rate_matrix) U.shape, eigenvalues.shape, V.shape # - # The eigenvalues are in descending order (the eigenvectors are ordered accordingly): eigenvalues # We can also compute the eigensystem for multiple rate matrices at once (or evolution of a rate matrix, i.e., `rate_matrix_time`): # + U, eigenvalues, V = md.eigensystem(rate_matrix_time) U.shape, eigenvalues.shape, V.shape # - # # Decompose to rate matrix eigenvectors # A probability distribution, in general, can be decomposed to the 
right eigenvectors of the rate matrix: # # $$\left|p\right\rangle = a_1\left|v_1\right\rangle + a_2\left|v_2\right\rangle + a_3\left|v_3\right\rangle$$ # # where $a_i$ is the coefficient of the i'th right eigenvector $\left|v_i\right\rangle$. A rate matrix that satisfies detailed balance has its first eigenvector as the equilibrium distribution $\left|\pi\right\rangle$. Therefore, *markovian-dynamics* normalizes $a_1$ to $1$ and decompose a probability distribution to # # $$\left|p\right\rangle = \left|\pi\right\rangle + a_2\left|v_2\right\rangle + a_3\left|v_3\right\rangle$$ # Decompose ``p_initial``: md.decompose(p_initial, rate_matrix) # We can decompose also multiple points and/or by multiple rate matrices. For example, decompose multiple points: # + first_decomposition = md.decompose(equilibrium_line, rate_matrix) first_decomposition.shape # - for i in [0, 1, 2]: plt.plot(temperature_range, first_decomposition[i,:], label='$a_{}(T)$'.format(i + 1)) plt.xlabel('$T$') plt.legend() # or decompose a trajectory: # + second_decomposition = md.decompose(trajectory, rate_matrix) second_decomposition.shape # - for i in [0, 1, 2]: plt.plot(t_range, second_decomposition[i,0,:], label='$a_{}(t)$'.format(i + 1)) plt.xlabel('$t$') plt.legend() # Decompose single point using multiple rate matrices: # + third_decomposition = md.decompose(p_initial, rate_matrix_time) third_decomposition.shape # - for i in [0, 1, 2]: plt.plot(t_range, third_decomposition[i,0,:], label='$a_{}(t)$'.format(i + 1)) plt.legend() # Decompose, for every time $t$, the corresponding point $\left|p(t)\right\rangle$ using the temporal rate matrix $R(t)$ # + fourth_decomposition = md.decompose(trajectory, rate_matrix_time) fourth_decomposition.shape # - for i in [0, 1, 2]: plt.plot(t_range, fourth_decomposition[i,0,:], label='$a_{}(t)$'.format(i + 1)) plt.legend() # # Plotting the 2D probability simplex for three-state system # The probability space of a three-state system is a three dimensional space. 
However, the normalization constraint $\sum_{i}p_i=1$ together with $0 < p_i \le 1$, form a 2D triangular plane in which all of the possible probability points reside. # We'll start by importing the plotting module: # + import markoviandynamics.plotting.plotting2d as plt2d # Use latex rendering plt2d.latex() # - # Plot the probability plane: plt2d.figure(figsize=(7, 5.5)) plt2d.equilibrium_line(equilibrium_line) plt2d.legend() # We can plot many objects on the probability plane, such as trajectories, points, and eigenvectors of the rate matrix: # Final equilibrium point p_final = sym_system.equilibrium(energies, 1.2) # + plt2d.figure(focus=True, figsize=(7, 5.5)) plt2d.equilibrium_line(equilibrium_line) # Plot trajectory plt2d.plot(trajectory, c='r', label=r'$\left|p(t)\right>$') # Initial & final points plt2d.point(p_initial, c='k', label=r'$\left|p_0\right>$') plt2d.point(p_final, c='r', label=r'$\left|\pi\right>$') # Eigenvectors plt2d.eigenvectors(md.eigensystem(rate_matrix), kwargs_arrow={'zorder': 1}) plt2d.legend() # - # Plot multiple trajectories at once: # + # Create temperature sequence temperature_range = np.logspace(np.log10(0.01), np.log10(10), 50) # Create the equilibrium line points equilibrium_line = sym_system.equilibrium(energies, temperature_range) # Create a trajectory for every point on ``equilibrium_line`` equilibrium_line_trajectory = md.evolve(equilibrium_line, rate_matrix, t_range) # - plt2d.figure(focus=True, figsize=(7, 5)) plt2d.equilibrium_line(equilibrium_line) plt2d.plot(equilibrium_line_trajectory, c='g', alpha=0.2) plt2d.point(p_final, c='r', label=r'$\left|\pi\right>$') plt2d.legend() # Create a trajectory for every point on ``equilibrium_line`` equilibrium_line_crazy_trajectory = md.evolve(equilibrium_line, rate_matrix_time, t_range) plt2d.figure(focus=True, figsize=(7, 5)) plt2d.equilibrium_line(equilibrium_line) plt2d.plot(equilibrium_line_crazy_trajectory, c='r', alpha=0.1) plt2d.text(p_final, r'Text $\alpha$', delta_x=0.05) 
plt2d.legend()
notebooks/usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Complete Simulation Chain for Visibility Calculation

# ## Read in Config

import vipy.simulation.utils as ut
import vipy.layouts.layouts as layouts
import astropy.constants as const
from astropy import units as un
import time as t  # NOTE: module alias; the variable ``time`` below is unrelated
import numpy as np

# %config Completer.use_jedi = False
# %load_ext autoreload
# %autoreload 2

# +
# rc = ut.read_config('/net/nfshome/home/sfroese/vipy/config/default.toml')
rc = ut.read_config("../config/default.toml")
array_layout = layouts.get_array_layout('eht')
src_crd = rc['src_coord']
# Wavelengths of the lower/upper channel edge: the config stores
# "center:bandwidth" in MHz, so lambda = c / (nu_center -/+ bandwidth/2).
wave1 = const.c/((float(rc['channel'].split(':')[0])-float(rc['channel'].split(':')[1])/2)*10**6/un.second)/un.meter
wave2 = const.c/((float(rc['channel'].split(':')[0])+float(rc['channel'].split(':')[1])/2)*10**6/un.second)/un.meter
# -

# ## Generate (l,m)-plane / FOV

import vipy.simulation.scan as scan
import matplotlib.pyplot as plt

# fov_size is given in arcseconds; convert to radians for the grids.
rd = scan.rd_grid(rc['fov_size']*np.pi/(3600*180),256, src_crd)
lm = scan.lm_grid(rd, src_crd)

plt.imshow(np.rad2deg(rd[:,:,1]))
plt.colorbar()

print(rc['fov_size']*np.pi/(3600*180))
print(np.max(lm[:,:,0]))

# ## Calculate Start and Stop times for every measurement

time = ut.calc_time_steps(rc)

# ## Calculate Baselines for one measurement

# First scan only: 31 time steps.
baselines = scan.get_baselines(src_crd, time[:31], array_layout)

# ## Calculate Visibilities

import numpy as np
import torch
from astropy.io import fits
import matplotlib.pyplot as plt

# +
# Sky model from FITS; the image fills the first two polarization slots.
hdul = fits.open('celestial-03-05.fits')
img = hdul[0].data.astype(np.float32)
img = torch.tensor(img)
I = torch.zeros((img.shape[0],img.shape[1],4), dtype=torch.cdouble)
I[...,0] = img
I[...,1] = img
# -

# %%time
start = t.time()
torch.set_num_threads(8)
# Corrupted responses at the two channel-edge wavelengths for scan 1.
X1 = scan.corrupted(lm, baselines, wave1, time[0:31], src_crd, array_layout, I, rd)
X2 = scan.corrupted(lm, baselines, wave2,
                    time[0:31], src_crd, array_layout, I, rd)

# Inspect one sample (real part) of the first response.
plt.imshow(X1[:,:,5,0,0].real)
plt.colorbar()
plt.show()

from PIL import Image

# Alternative test image, normalized to unit total flux.
img = np.asarray(Image.open('150.jpg'))
img = img/np.sum(img)
print(np.sum(img))
plt.imshow(img)
plt.colorbar()

img = torch.tensor(img)
I = torch.zeros((img.shape[0],img.shape[1],4), dtype=torch.cdouble)
I[...,0] = img
I[...,1] = img

# ## Integration

# Combine the two channel-edge responses into visibilities.
vis = scan.integrate(X1,X2)

# ## All scan loop

from tqdm import tqdm

torch.set_num_threads(48)

# +
from dataclasses import dataclass

# stokes, IFs?
# Flat container for all simulated visibilities; every field is a
# sequence with one entry per (baseline, integration) sample.
@dataclass
class Visibilities:
    I: [complex]      # presumably Stokes I — see get_values(); TODO confirm
    Q: [complex]
    U: [complex]
    V: [complex]
    num: [int]        # running visibility number
    scan: [int]       # 1-based scan index
    base_num: [int]   # baseline identifier
    u: [float]        # uvw coordinates
    v: [float]
    w: [float]
    date: [float]     # Julian date of the sample
    _date: [float]    # secondary date column (kept zero by the caller)

    def __getitem__(self, i):
        # Return sample i as a single Vis record.
        baseline = Vis(
            self.I[i],
            self.Q[i],
            self.U[i],
            self.V[i],
            self.num[i],
            self.scan[i],
            self.base_num[i],
            self.u[i],
            self.v[i],
            self.w[i],
            self.date[i],
            self._date[i],
        )
        return baseline

    def get_values(self):
        # The four Stokes components stacked into one (4, N) array.
        return np.array([self.I, self.Q, self.U, self.V])

    def add(self, visibilities):
        # Append another Visibilities object field-by-field.
        self.I = np.concatenate([self.I, visibilities.I])
        self.Q = np.concatenate([self.Q, visibilities.Q])
        self.U = np.concatenate([self.U, visibilities.U])
        self.V = np.concatenate([self.V, visibilities.V])
        self.num = np.concatenate([self.num, visibilities.num])
        self.scan = np.concatenate([self.scan, visibilities.scan])
        self.base_num = np.concatenate([self.base_num, visibilities.base_num])
        self.u = np.concatenate([self.u, visibilities.u])
        self.v = np.concatenate([self.v, visibilities.v])
        self.w = np.concatenate([self.w, visibilities.w])
        self.date = np.concatenate([self.date, visibilities.date])
        self._date = np.concatenate([self._date, visibilities._date])


# A single visibility sample (scalar counterpart of Visibilities).
@dataclass
class Vis:
    I: complex
    Q: complex
    U: complex
    V: complex
    num: int
    scan: int
    base_num: int
    u: float
    v: float
    w: float
    date: float
    _date: float

# +
hdul = fits.open('celestial-03-05.fits')
img = hdul[0].data.astype(np.float32)
img = torch.tensor(img)
I = torch.zeros((img.shape[0],img.shape[1],4), dtype=torch.cdouble)
I[...,0] = img
I[...,1] = img
# -

from PIL import Image

# Load the test image and normalize it to unit total flux.
img = np.asarray(Image.open('150.jpg'))
img = img/np.sum(img)
print(np.sum(img))
plt.imshow(img)
plt.colorbar()

# Sky model: the image fills the first two polarization slots.
img = torch.tensor(img)
I = torch.zeros((img.shape[0],img.shape[1],4), dtype=torch.cdouble)
I[...,0] = img
I[...,1] = img

from vipy.simulation.scan import get_valid_baselines
import astropy.units as un
from astropy.time import Time

# Hoisted out of the scan loop (the original re-imported per iteration);
# used only for the memory-footprint bookkeeping below.
import os
import psutil

visibilities = Visibilities([], [], [], [], [], [], [], [], [], [], [], [])
vis_num = np.zeros(1)
memory = np.array([])

# i in total number of scans
for i in tqdm(range(72)):
    # Time steps belonging to scan i (31 steps per scan).
    t = time[i*31:(i+1)*31]
    baselines = scan.get_baselines(src_crd, t, array_layout)

    # A visibility is kept only when both integration endpoints are valid.
    valid = baselines.valid.reshape(-1, 28)
    mask = np.array(valid[:-1]).astype(bool) & np.array(valid[1:]).astype(bool)
    u = baselines.u.reshape(-1, 28)
    v = baselines.v.reshape(-1, 28)
    w = baselines.w.reshape(-1, 28)
    base_valid = np.arange(len(baselines.u)).reshape(-1, 28)[:-1][mask]
    u_valid = u[:-1][mask]
    v_valid = v[:-1][mask]
    w_valid = w[:-1][mask]
    # Timestamp of each sample: midpoint of the correlator integration.
    date = np.repeat((t[:-1]+rc['corr_int_time']*un.second/2).jd.reshape(-1, 1), 28, axis=1)[mask]
    _date = np.zeros(len(u_valid))

    # Fixed: pass the per-scan time slice ``t`` (the baselines above were
    # computed from it), not the full ``time`` array.
    X1 = scan.uncorrupted(lm, baselines, wave1, t, src_crd, array_layout, I)
    if X1.shape[0] == 1:
        # Scan produced no usable data; just record the memory footprint.
        memory = np.append(memory, psutil.Process(os.getpid()).memory_info().rss / 1024 ** 2)
        continue
    # Fixed: the second channel edge must be sampled at wave2 — the
    # original passed wave1 twice, so wave2 (computed in the config cell)
    # was never used and the bandwidth integration collapsed to a single
    # frequency; compare the single-scan corrupted() example above.
    X2 = scan.uncorrupted(lm, baselines, wave2, t, src_crd, array_layout, I)

    # Consecutive visibility numbers across all scans.
    vis_num = np.arange(X1.shape[2]//2) + 1 + vis_num.max()

    int_values = scan.integrate(X1, X2)
    int_values = int_values.reshape(-1,4)
    vis = Visibilities(
        int_values[:, 0],
        int_values[:, 1],
        int_values[:, 2],
        int_values[:, 3],
        vis_num,
        np.repeat(i+1, len(vis_num)),
        # Renamed the comprehension variable (was ``i``, shadowing the scan index).
        np.array([baselines[b].baselineNum() for b in base_valid]),
        u_valid,
        v_valid,
        w_valid,
        date,
        _date,
    )
    visibilities.add(vis)
    memory = np.append(memory, psutil.Process(os.getpid()).memory_info().rss / 1024 ** 2)

# Memory footprint per scan.
plt.plot(np.arange(0,72), memory, 'x')
plt.show()

import vipy.simulation.utils as ut
import vipy.fits.writer as writer

# Write the simulated visibilities out as a FITS file.
conf = ut.read_config("../config/default.toml")
hdu_list = writer.create_hdu_list(visibilities, conf)
hdu_list.writeto("test150_default.fits", overwrite=True)

hdu_list[0].data
examples/visibility.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# importing the required libraries
import pandas as pd
import numpy as np

# Visualisation libraries
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
import folium
from folium import plugins

# Manipulating the default plot size
plt.rcParams['figure.figsize'] = 10, 12

# Disable warnings
import warnings
warnings.filterwarnings('ignore')
# -

# Reading the datasets
df= pd.read_csv('COVID19INDIA.csv')
df_india = df.copy()
df

# Coordinates of India States and Union Territories
India_coord = pd.read_excel('Indian Coordinates.xlsx')

df['Total cases'] = df['Total Confirmed cases*']
total_cases = df['Total Confirmed cases*'].sum()
print('Total number of confirmed COVID 2019 cases across India till date (22nd March, 2020):', total_cases)

total_cases

df.style.background_gradient(cmap='Reds')

# +
#Total Active is the Total cases - (Number of death + Cured)
df['Total Active'] = df['Total cases'] - (df['Deaths**'] + df['Cured/Discharged/Migrated'])
total_active = df['Total Active'].sum()
print('Total number of active COVID 2019 cases across India:', total_active)
Tot_Cases = df.groupby('Name of State / UT')['Total Active'].sum().sort_values(ascending=False).to_frame()
Tot_Cases.style.background_gradient(cmap='Reds')
# -

# +
# Learn how to use folium to create a zoomable map
df_full = pd.merge(India_coord,df,on='Name of State / UT')
# Renamed from ``map``: that name shadows the builtin map().
india_map = folium.Map(location=[20,70], zoom_start=4,tiles='Stamenterrain')

# One circle per state, radius proportional to the case count.
for lat, lon, value, name in zip(df_full['Latitude'], df_full['Longitude'], df_full['Total cases'], df_full['Name of State / UT']):
    folium.CircleMarker([lat, lon], radius=value*0.003, popup = ('<strong>State</strong>: ' + str(name).capitalize() + '<br>''<strong>Total Cases</strong>: ' + str(value) + '<br>'),color='red',fill_color='red',fill_opacity=0.1).add_to(india_map)
india_map
# -

# +
#Learn how to use Seaborn for visualization
f, ax = plt.subplots(figsize=(12, 8))

# .copy() so the in-place sort below acts on an independent frame instead
# of a slice of df_full (avoids pandas' SettingWithCopyWarning).
data = df_full[['Name of State / UT','Total cases','Cured/Discharged/Migrated','Deaths**']].copy()
data.sort_values('Total cases',ascending=False,inplace=True)

sns.set_color_codes("pastel")
sns.barplot(x="Total cases", y="Name of State / UT", data=data,label="Total", color="r")

sns.set_color_codes("muted")
sns.barplot(x="Cured/Discharged/Migrated", y="Name of State / UT", data=data, label="Cured/Discharged/Migrated", color="g")

# Add a legend and informative axis label
ax.legend(ncol=2, loc="lower right", frameon=True)
ax.set(xlim=(0, 55000), ylabel="",xlabel="Cases")
sns.despine(left=True, bottom=True)
# -

# +
# Switch to the global time-series datasets.
df = pd.read_csv('covid_19_clean_complete2.csv',parse_dates=['Date'])
df.rename(columns={'ObservationDate':'Date', 'Country/Region':'Country'}, inplace=True)

df_confirmed = pd.read_csv("time_series_covid19_confirmed_global.csv")
df_recovered = pd.read_csv("time_series_covid19_recovered_global.csv")
df_deaths = pd.read_csv("time_series_covid19_deaths_global(1).csv")

df_confirmed.rename(columns={'Country/Region':'Country'}, inplace=True)
df_recovered.rename(columns={'Country/Region':'Country'}, inplace=True)
df_deaths.rename(columns={'Country/Region':'Country'}, inplace=True)
# -

df_deaths.head()

df_confirmed.head()

df2 = df.groupby(["Date", "Country", "Province/State"])[['Date', 'Province/State', 'Country', 'Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
df2.head()

# Check for India's data
india_cases=df.query('Country=="India"').groupby("Date")[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
india_cases

# +
sns.lineplot(x='Date',y='Confirmed',data=india_cases)
plt.grid(True)
plt.show()
# -

china=df[(df["Country"]=="China")]
china

sns.lineplot(x='Date',y='Confirmed',data=(china))
plt.grid(True)
plt.show()
india_china=df.loc[(df["Country"]=="China")|(df["Country"].isin(["India"]))] sns.set(rc={'figure.figsize':(50,20)}) sns.barplot(x="Country",y="Confirmed",data=india_china,hue="Date") plt.show() from fbprophet import Prophet confirmed = india_cases.groupby('Date').sum()['Confirmed'].reset_index() deaths = india_cases.groupby('Date').sum()['Deaths'].reset_index() recovered = india_cases.groupby('Date').sum()['Recovered'].reset_index() confirmed.columns = ['ds','y'] #confirmed['ds'] = confirmed['ds'].dt.date confirmed['ds'] = pd.to_datetime(confirmed['ds']) confirmed.tail() m = Prophet(interval_width=0.95) m.fit(confirmed) future = m.make_future_dataframe(periods=16) future.tail() #predicting the future with date, and upper and lower limit of y value forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() confirmed_forecast_plot = m.plot(forecast) confirmed_forecast_plot =m.plot_components(forecast) deaths.columns = ['ds','y'] deaths['ds'] = pd.to_datetime(deaths['ds']) m = Prophet(interval_width=0.95) m.fit(deaths) future = m.make_future_dataframe(periods=16) future.tail() forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() deaths_forecast_plot = m.plot(forecast) deaths_forecast_plot = m.plot_components(forecast)
India_Covid19-Predictor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import division, print_function import os import torch import pandas import numpy as np from torch.utils.data import DataLoader,Dataset from torchvision import utils, transforms from skimage import io, transform import matplotlib.pyplot as plt import warnings #ignore warnings warnings.filterwarnings("ignore") plt.ion() #interactive mode on # - # The dataset being used is the face pose detection dataset, which annotates the data using 68 landmark points. The dataset has a csv file that contains the annotation for the images. # + # Import CSV file landmarks_csv = pandas.read_csv("data/faces/face_landmarks.csv") # Extracting info from the CSV file n = 65 img_name = landmarks_csv.iloc[n,0] landmarks = landmarks_csv.iloc[n,1:].as_matrix() landmarks = landmarks.astype('float').reshape(-1,2) # Print a few of the datasets for having a look at # the dataset print('Image name: {}'.format(img_name)) print('Landmarks shape: {}'.format(landmarks.shape)) print('First 4 Landmarks: {}'.format(landmarks[:4])) # - # Now that we have seen the landmark values let's plot a function to display the landmarks on an image # + def plot_landmarks(image, landmarks): plt.imshow(image) plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, c='r', marker='.') plt.pause(0.01) plt.figure() plot_landmarks(io.imread(os.path.join('data/faces/',img_name)),landmarks) plt.show() # - # To use customa datasets we need to use the <b>(torch.utils.data.Dataset) Dataset</b> class provided. 
# It is an abstract class and hence the custom class should inherit it and override the
# <b>__len__</b> method and the
# <b>__getitem__</b> method

# The __getitem__ method is used to provide the ith sample from the dataset

class FaceLandmarkDataset(Dataset):
    """Face-landmarks dataset: pairs each image on disk with its (x, y) landmarks."""

    # We will read the file here
    def __init__(self,csv_file, root_dir, transform=None):
        """
        Args:
            csv_file : string : path to csv file
            root_dir : string : root directory which contains all the images
            transform : callable, optional : Optional transform to be applied to the images
        """
        # Annotation table: column 0 is the image file name, the remaining
        # columns are the flattened landmark coordinates.
        self.landmarks_frame = pandas.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # One CSV row corresponds to one sample.
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        """
        Args:
            idx (integer): the ith sample
        """
        image_name = os.path.join(self.root_dir,self.landmarks_frame.iloc[idx, 0])
        image = io.imread(image_name)
        # Flat x0, y0, x1, y1, ... values -> (num_landmarks, 2) float array.
        landmarks = np.array([self.landmarks_frame.iloc[idx, 1:]])
        landmarks = landmarks.astype("float").reshape(-1, 2)
        sample = {"image":image,"landmarks":landmarks}

        if self.transform:
            sample = self.transform(sample)

        return sample

# +
face_dataset = FaceLandmarkDataset(csv_file='data/faces/face_landmarks.csv',
                                   root_dir='data/faces/')

fig = plt.figure()

# Display the first four samples with their landmarks overlaid.
for i in range(len(face_dataset)):
    sample = face_dataset[i]

    print(i, sample['image'].shape, sample['landmarks'].shape)

    ax = plt.subplot(1, 4, i + 1)
    plt.tight_layout()
    ax.set_title('Sample #{}'.format(i))
    ax.axis('off')
    plot_landmarks(**sample)

    if i == 3:
        plt.show()
        break
# -

# Now that we have the dataset , we can move on to preprocessing the data. We use the transforms class for this.

# We will be using callable classes of the transformations we need so that the parameters do not need to be passed again and again. For better description refer the <a href="https://pytorch.org/tutorials/beginner/data_loading_tutorial.html">tutorial</a> from PyTorch.
#
# To implement callable classes we just need to implement the __call__ method and if required __init__ method of the class.
# # Here we will be using Rescale, RandomCrop and ToTensor transformations.
#
# __** NOTE **__<br>
# In PyTorch the default style for image Tensors is <span>n_channels * Height * Width</span> as opposed to the Tensorflow default of <span>Height * Width * n_channels</span>. But all the images in the real world have the tensorflow default format and hence we need to do that change in the ToTensor class that we will implement.

# +
# Implementing the Rescale class
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (int or tuple): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        # BUG FIX: was `samplep['image']` -- a typo that raised NameError.
        image, landmarks = sample['image'], sample['landmarks']

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # Scale the smaller edge to output_size, keeping aspect ratio.
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                # BUG FIX: was `slef.output_size` -- a typo that raised NameError.
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size

        # skimage.transform.resize expects an integer output shape.
        new_h, new_w = int(new_h), int(new_w)
        image = transform.resize(image, (new_h, new_w))

        # h and w are swapped for landmarks because for images,
        # x and y axes are axis 1 and 0 respectively
        landmarks = landmarks * [new_w / w, new_h / h]

        return {"image": image, "landmarks": landmarks}


# Implementing Random Crop
class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        images, landmarks = sample['image'], sample['landmarks']

        # BUG FIX: was `images.shape[:,2]` -- `shape` is a plain tuple and
        # cannot be sliced with a 2-D index; we want its first two entries.
        h, w = images.shape[:2]
        new_h, new_w = self.output_size

        # BUG FIX: was `np.random.randn(0, h-new_h)`, which draws *Gaussian*
        # floats and treats its arguments as array dimensions; randint draws
        # the intended random integer offset in [0, h - new_h).
        top = np.random.randint(0, h - new_h)
        left = np.random.randint(0, w - new_w)

        images = images[top:top + new_h, left:left + new_w]
        # Shift landmark coordinates into the cropped frame.
        landmarks = landmarks - [left, top]

        sample = {"image": images, "landmarks": landmarks}
        return sample


# Implementing To Tensor
class ToTensor(object):
    """Convert the ndarrays in a sample into torch tensors."""

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']

        # Need to transpose
        # Numpy image : H x W x C
        # Torch image : C x H x W
        image = image.transpose((2, 0, 1))
        sample = {"image": torch.from_numpy(image),
                  "landmarks": torch.from_numpy(landmarks)}
        # BUG FIX: the transformed sample was built but never returned, so
        # downstream transforms and the DataLoader received None.
        return sample
# -

# #### Iterating through the dataset

transformed_dataset = FaceLandmarkDataset(csv_file='data/faces/face_landmarks.csv',
                                          root_dir='data/faces/',
                                          transform=transforms.Compose([Rescale(256),
                                                                        RandomCrop(224),
                                                                        ToTensor()]))

# Here the transformed dataset is stored. Now the next step is to iterate through it. This can be done using a for loop. <br>
# <code>for i in range(len(transformed_dataset)):
#        sample = transformed_dataset[i]
#        print(i, sample['image'].size(), sample['landmarks'].size())
#        if i == 3:
#            break
# </code>
#
# But using this we loose out on
# <ul>
#     <li> Batching the data </li>
#     <li> Shuffling the data </li>
#     <li> Multiprocessing / Use GPU </li>
# </ul>
#
# So it's better to use the <code>torch.utils.data.DataLoader</code> class as it does all this job.

trainloader = DataLoader(transformed_dataset, batch_size=4, shuffle=True, num_workers=2)

# Rest is the same as when we implemented the classifier.

# # END!!!!!!!!!!!!!!!
Data Loading and Preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nielsneerhoff/malware_challenge/blob/master/main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="qLuVTUuLIe7b" colab_type="text" # # Setup # + [markdown] id="-YzJAcPtKXoP" colab_type="text" # ### Import the required packages. # + id="Oc8YQ84IKNuK" colab_type="code" outputId="35679b45-b469-4a6a-c6da-1d2da7caf191" colab={"base_uri": "https://localhost:8080/", "height": 51} import os import seaborn as sns import pandas as pd import numpy as np import matplotlib.pyplot as plt from google.colab import drive drive.mount('/content/drive', force_remount = True) # Force remount to overwrite current mount. DATA_ROOT = '/content/drive/My\ Drive/' TAR_FILE = DATA_ROOT + 'data.tar' # Downloaded from Brightspace. print(f'Assuming .tar file is at {TAR_FILE}') # + [markdown] id="cTYQN4Mh_Ymw" colab_type="text" # ### Unpacking the code and setting up the Conda environment # Throws an error if the cloned files are still in Colab memory (will be deleted every 24 hours or so). # + id="R6W_Ti-68OL1" colab_type="code" outputId="72445a2f-ddc4-41b2-cec2-dca8713a763b" colab={"base_uri": "https://localhost:8080/", "height": 34} # Unpacking code from competition. # !git clone https://github.com/ALFA-group/malware_challenge.git # Downloading Conda prerequisites. # !wget -c https://repo.continuum.io/archive/Anaconda2-2019.03-Linux-x86_64.sh # !chmod +x Anaconda2-2019.03-Linux-x86_64.sh # !bash ./Anaconda2-2019.03-Linux-x86_64.sh -b -f -p /usr/local # Create Conda env. 
# !cd malware_challenge && conda env create -f ./helper_files/linux_environment.yml # + [markdown] id="ajBRNvHJ_fe3" colab_type="text" # ### Upload the data and unpack # # + id="ZvL7i-2u2oZS" colab_type="code" outputId="977f34a2-ada6-40ec-fe33-2e02bf747ce2" colab={"base_uri": "https://localhost:8080/", "height": 34} untar_cmd = f'tar xf {TAR_FILE}' os.system(untar_cmd) # + id="m9Xxx-fLpw5v" colab_type="code" colab={} # + [markdown] id="lr9GZpwFIPKy" colab_type="text" # # Familiarization task - 1 A4 # + [markdown] id="7Qjxr84m_opg" colab_type="text" # Edit the parameters.ini file and point to the newly unpacked data files, then run the experiments. # The path to malicous/benign datasets do not require " around it. # + [markdown] id="NSZPZDRZJOTC" colab_type="text" # **Analyse the obtained files and create a visualization that shows the differences between benign, malicious, and adversarial examples.** # + id="iy_JOr8fK2S4" colab_type="code" outputId="9b95c9b1-bcb0-438b-972e-0e7dc65337ee" colab={"base_uri": "https://localhost:8080/", "height": 430} bening_data = np.random.rand(10, 12) # Sample data, replace with BENIGN. malicious_data = np.random.rand(10, 12) # Sample data, replace with MALICIOUS. fig, axs = plt.subplots(1, 2, sharex = True, figsize = (20, 7)) sns.heatmap(bening_data, ax = axs[0]) axs[0].set_title('Benign data') sns.heatmap(malicious_data, ax = axs[1]) _ = axs[1].set_title('Malicious data') # + [markdown] id="oXa5EnmDJTTO" colab_type="text" # **Are the modifications in an empty part of the input space, or do they really look like benign examples?** # # EXPLAIN # + [markdown] id="8KcZlcKBJagI" colab_type="text" # **After 50 epochs.** # + id="-Dovj6VnLvIw" colab_type="code" outputId="ed29d584-0e8f-479a-e30f-fa14f2d269fd" colab={"base_uri": "https://localhost:8080/", "height": 430} bening_data = np.random.rand(10, 12) # Sample data, replace with BENIGN. malicious_data = np.random.rand(10, 12) # Sample data, replace with MALICIOUS. 
fig, axs = plt.subplots(1, 2, sharex = True, figsize = (20, 7)) sns.heatmap(bening_data, ax = axs[0]) axs[0].set_title('Benign data') sns.heatmap(malicious_data, ax = axs[1]) _ = axs[1].set_title('Malicious data') # + [markdown] id="unY93VLJJjMM" colab_type="text" # **Do you notice any differences? Is the model more robust to adversarial modification? Explain any differences you observe and justify your conclusions.** # # EXPLAIN # + [markdown] id="hLmEDEcsSB1z" colab_type="text" # Run the framework.py file # + id="cdjsKlExkgRG" colab_type="code" outputId="6bb25f37-b880-49f2-82c7-118048d310f6" colab={"base_uri": "https://localhost:8080/", "height": 252} # !source activate nn_mal && cd malware_challenge && python ./framework.py # + [markdown] id="bqXhM5PaKiKc" colab_type="text" # # Inner maximizer task – 1 A4 # + [markdown] id="-rO2t7hIMDFN" colab_type="text" # *Q: You are asked to build your own own method for generating adversarial examples. Things to consider is when to clip your tensor, when to round, when to enforce the or constraint, how many rounds to run it, how to use the gradient information. See the topk paper for inspiration. It is essentially a black-box optimization function with gradient as search heuristic.* # + [markdown] id="ti3Ey2FmMNK8" colab_type="text" # **Compare your method on run-time and f1-score against the baseline methods.** # + [markdown] id="tLwrTwg-MSgc" colab_type="text" # *Q: Modify the parameters.ini file to attack=True and defend=False to generate modifications using your inner maximizer using the provided malicious from the attack folder. Modify your inner maximizer to perform attacks, i.e., by running more iterations, using random restarts, etc.* # + [markdown] id="o8tZGY5yMffV" colab_type="text" # **Compare the performance of your attack on run-time and evasion rate against the baseline methods.** # # *Q: Note that the comparisons require to learn models using the baseline methods. 
Save the best model you learned, and best modification of the training data you found.* # + id="RkXy9Z9YMg2Z" colab_type="code" colab={}
main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CheckSum for nHealth

import random
import csv


def generateIDs(prefix, site_prefix, upper_limit, repeats):
    """Build raw participant IDs.

    Produces IDs of the form '<prefix><site_prefix>-NNNN' for
    N = 1 .. upper_limit - 1, each repeated `repeats` times consecutively.
    """
    ids = []
    for i in range(1, upper_limit):
        # str.zfill(4) gives the same 4-digit zero padding as the original
        # if/elif ladder in a single call (identical output for i <= 9999).
        pid = str(prefix + site_prefix + '-' + str(i).zfill(4))
        for r in range(0, repeats):
            ids.append(pid)
    return (ids)


input_ids = generateIDs('P', 'K', 100, 5)
print(input_ids[0])


### Takes in the ID with characters
# Converts the characters to ASCII int
# Returns the digits which are then used to generate the checksum via Luhn algorithm
# we use the UPPERCASE ascii as it is always 2 digits
def convertIDToDigits(string):
    """Convert an ID such as 'PK-0001' into a pure digit string ('80750001')."""
    # strip out any spaces or dashes
    s = string.replace('-', '')
    digits = s[2:]
    chars = s[:2]
    # reverse the list so when we prepend the numbers we preserve the order
    chars = list(reversed(chars))
    for c in chars:
        num = ord(c.upper())
        digits = str(num) + digits
    return (digits)


def convertIDToVarChar(string):
    """Re-assemble a digit string (with check digit) back to 'XX-dddd-d' form."""
    # take the first four digits as the two 2-digit ASCII character codes
    digits = string[4:]
    d1 = digits[:4]
    d2 = digits[4:]
    chars = string[:4]
    c1 = chr(int(chars[:2]))
    c2 = chr(int(chars[2:4]))
    return c1 + c2 + '-' + d1 + '-' + d2


# +
# from https://github.com/mmcloughlin/luhn/blob/master/luhn.py
def checksum(string):
    """
    Compute the Luhn checksum for the provided string of digits. Note
    this assumes the check digit is in place.
    """
    digits = list(map(int, string))
    odd_sum = sum(digits[-1::-2])
    even_sum = sum([sum(divmod(2 * d, 10)) for d in digits[-2::-2]])
    # BUG FIX: removed the unused `l_sum = (odd_sum + even_sum) * 9`
    # computation -- dead code that never affected the result.
    return (odd_sum + even_sum) % 10


def verify(string):
    """
    Check if the provided string of digits satisfies the Luhn checksum.

    >>> verify('356938035643809')
    True
    >>> verify('534618613411236')
    False
    """
    # Non-digit IDs are first converted to their digit representation.
    if not string.isdigit():
        string = convertIDToDigits(string)
    return (checksum(string) == 0)


def generate(string):
    """
    Generate the Luhn check digit to append to the provided string.

    >>> generate('35693803564380')
    9
    >>> generate('53461861341123')
    4
    """
    cksum = checksum(string + '0')
    return (10 - cksum) % 10


def append(string):
    """
    Append Luhn check digit to the end of the provided string.

    >>> append('53461861341123')
    '534618613411234'
    """
    return string + str(generate(string))
# -

computed_ids = []
participant_ids = []

# Convert each raw ID to digits, append its Luhn check digit, and convert
# back to the human-readable 'XX-dddd-d' format.
for i in input_ids:
    i = convertIDToDigits(i)
    new_id = convertIDToVarChar(append(i))
    computed_ids.append(new_id)

# BUG FIX: computed_ids are *already* in the 'XX-dddd-d' format; passing them
# through convertIDToVarChar() a second time raised ValueError (int('PK') on
# the letter prefix). The final participant IDs are simply a copy.
participant_ids = list(computed_ids)
print(participant_ids)

# +
# check LUHN
#for id in computed_ids:
#    print("id:", id, verify(id))
# -

# Keep (raw, computed, verified) triples for inspection.
output = []
for x in range(len(input_ids)):
    raw = input_ids[x]
    computed = computed_ids[x]
    verified = verify(computed)
    output.append((raw, computed, verified))

print(computed_ids)

# Write the final labels to CSV; `with` guarantees the file is closed even
# if a write fails (the original opened the handle before entering `with`).
with open('participant_labels.csv', 'w') as label_file:
    fields = ['participant_ID', 'ID_label']
    writer = csv.DictWriter(label_file, fieldnames=fields)
    writer.writeheader()
    for pid in computed_ids:
        writer.writerow({'participant_ID' : pid, 'ID_label': pid})
Modified Luhn for Generating IDs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Creating And Cleaning Features: Combine Existing Features Into New Feature # ### Read In Data # + # Read in data import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline titanic = pd.read_csv('../../../data/titanic_cabin_ind.csv') titanic.head() # - # ### Combine `SibSp` & `Parch` Into New `Family_cnt` Feature # Generate categorical plots for ordinal features for col in ['SibSp', 'Parch']: sns.catplot(x=col, y='Survived', data=titanic, kind='point', aspect=2, ) plt.ylim(0, 1) # Create a new family count feature titanic['Family_cnt'] = titanic['SibSp'] + titanic['Parch'] sns.catplot(x='Family_cnt', y='Survived', data=titanic, kind='point', aspect=2, ) plt.ylim(0, 1) # Create new CSV with updated data titanic.to_csv('../../../data/titanic_family_cnt.csv', index=False)
ml_feature/04_Clean_Features/04_06/End/04_06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ralsouza/python_fundamentos/blob/master/src/07_Exploratory_Analysis/02_Explotatory_Analysis_Exercise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="xZmcgxr617F0" colab_type="text" # ## Exercício: Análise Exploratória de Dados com Python # # Neste exercício, você vai realizar uma análise exploratória em um dos mais famosos datasets para Machine Learning, o dataset iris com informações sobre 3 tipos de plantas. # # Esse dataset é comumente usado em problemas de Machine Learning de classificação, quando nosso objetivo é prever a classe dos dados. **No caso deste dataset, prever a categoria de uma planta a partir de medidas da planta (sepal e petal).** # # # Em cada célula, você encontra a tarefa a ser realizada. Faça todo o exercício e depois compare com a solução proposta. 
# Dataset (already bundled with Scikit-Learn): https://archive.ics.uci.edu/ml/datasets/iris

# +
# Imports
import time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import stats  # BUG FIX: freedman_diaconis() below uses stats.iqr
from sklearn.datasets import load_iris
# %matplotlib inline

fontsize = 14
ticklabelsize = 14
# -

# +
# Load the iris dataset and wrap the feature matrix in a pandas DataFrame.
iris = load_iris()

df = pd.DataFrame(iris.data, columns=iris.feature_names)
print(len(df))
# -

# http://www.jtrive.com/determining-histogram-bin-width-using-the-freedman-diaconis-rule.html
def freedman_diaconis(data, returnas="width"):
    """
    Use Freedman Diaconis rule to compute optimal histogram bin width.
    ``returnas`` can be one of "width" or "bins", indicating whether
    the bin width or number of bins should be returned respectively.

    Parameters
    ----------
    data: np.ndarray
        One-dimensional array.

    returnas: {"width", "bins"}
        If "width", return the estimated width for each histogram bin.
        If "bins", return the number of bins suggested by rule.
    """
    # BUG FIX: `stats` was never imported anywhere in this notebook, so this
    # function raised NameError; `from scipy import stats` was added to the
    # imports cell above.
    # np.float64 replaces np.float_, which was removed in NumPy 2.0.
    data = np.asarray(data, dtype=np.float64)
    # scale=1.0 is the modern equivalent of the removed scale="raw" option.
    IQR = stats.iqr(data, rng=(25, 75), scale=1.0, nan_policy="omit")
    N = data.size
    bw = (2 * IQR) / np.power(N, 1/3)

    if returnas == "width":
        result = bw
    else:
        datmin, datmax = data.min(), data.max()
        datrng = datmax - datmin
        result = int((datrng / bw) + 1)
    return(result)


# Check the shape
print(df.shape)

# Show first rows
df.head()

# # 1.
Extração e Transformação de Dados # + id="DxuYJm1RzcXb" colab_type="code" outputId="66a8a3f9-b8f3-4192-819d-900e69566e55" colab={"base_uri": "https://localhost:8080/", "height": 34} # Imprima os nomes das variáveis target (o que queremos prever), # 3 possíveis categorias de plantas: setosa, versicolor ou virginica. # Print target names iris.target_names # + id="F4iXSkhsB6ij" colab_type="code" outputId="e0e16088-06ec-4af0-93bd-a3e4a65edd57" colab={"base_uri": "https://localhost:8080/", "height": 136} # Imprima os valores numéricos da variável target (o que queremos prever), # 3 possíveis categorias de plantas: 0, 1 ou 2 # Print target values iris.target # + id="4EaPUog1CACy" colab_type="code" colab={} # Adicione ao dataset uma nova coluna com os nomes das espécies, pois é isso que vamos tentar prever (variável target) # pandas.Categorical.from_codes: Make a Categorical type from codes and categories or dtype. # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Categorical.from_codes.html df['species'] = pd.Categorical.from_codes(iris.target,iris.target_names) # + id="8tG1nXM2F9iA" colab_type="code" outputId="6464981e-b2bd-425f-d70d-7f421afbc0c8" colab={"base_uri": "https://localhost:8080/", "height": 204} df.head() # + id="FjhYkrQwCDaC" colab_type="code" outputId="62afdbdc-7711-4340-ad13-297601001606" colab={"base_uri": "https://localhost:8080/", "height": 258} # Inclua no dataset uma coluna com os valores numéricos da variável target df['target'] = iris.target df.head() # + id="E0PQAF9CECH5" colab_type="code" outputId="b8c7d2ad-9d9f-4f25-e83f-d5e34038c4b7" colab={"base_uri": "https://localhost:8080/", "height": 35} # Extraia as features (atributos) do dataset e imprima features_iris = iris.feature_names print(features_iris) # + id="jA5AhCelECs-" colab_type="code" outputId="02ada63a-9742-4099-caa9-283484fb334b" colab={"base_uri": "https://localhost:8080/", "height": 173} # Calcule a média de cada feature para as 3 classes df.groupby('species').mean() 
# + [markdown] id="0jhhvDp_Eb9m" colab_type="text" # # Exploração de Dados # + id="0kU3BukdEfMb" colab_type="code" outputId="f63b275a-4e1d-4915-d2a5-563cc16ea20e" colab={"base_uri": "https://localhost:8080/", "height": 235} # Imprima uma Transposta do dataset (transforme linhas e colunas e colunas em linhas) df.head(10).T # + id="8qTIumnyEjOT" colab_type="code" outputId="a5f34092-f15d-40f5-8302-e8ee1002ccd3" colab={"base_uri": "https://localhost:8080/", "height": 233} # Utilize a função Info do dataset para obter um resumo sobre o dataset print(df.info()) # + id="R-43NTVuElzz" colab_type="code" outputId="76e9e5c9-6a8a-4614-8fe0-8052ad8e7098" colab={"base_uri": "https://localhost:8080/", "height": 215} # Faça um resumo estatístico do dataset print(df.describe()) # + id="8SfXjsp5EnxK" colab_type="code" outputId="bcff79c7-4658-4b5b-f7d6-a5e8d66ac4cb" colab={"base_uri": "https://localhost:8080/", "height": 143} # Verifique se existem valores nulos no dataset # https://chartio.com/resources/tutorials/how-to-check-if-any-value-is-nan-in-a-pandas-dataframe/ print(df.isnull().sum(axis=0)) # + id="Hcmr0qBKErTI" colab_type="code" outputId="6d27b884-d432-4639-89d3-57fbcf1d05c0" colab={"base_uri": "https://localhost:8080/", "height": 665} # Faça uma contagem de valores de sepal length df['sepal length (cm)'].value_counts() # + [markdown] id="WPS5CBTlEugM" colab_type="text" # # Plot # + id="ytx5HKiaEDrw" colab_type="code" colab={} nbr_bins = freedman_diaconis(data=df['sepal length (cm)'],returnas='bins') # + id="8CUQ19uzE0xB" colab_type="code" outputId="4a32c3c3-d7ea-472c-c33a-b47fa5f6784e" colab={"base_uri": "https://localhost:8080/", "height": 295} # Crie um Histograma de sepal length plt.hist(x=df['sepal length (cm)'],edgecolor='white',bins=nbr_bins) plt.title('Sepal Length (cm)') plt.ylabel('Frequency',fontsize=14) plt.xlabel('Centimeters') plt.show() # + id="NnPT7xvD52RH" colab_type="code" outputId="a53ccb15-dfea-4ea2-f921-edb1cd37e79f" colab={"base_uri": 
"https://localhost:8080/", "height": 295} # Crie um gráfico de dispersão (scatter plot) da variável sepal length versus número da linha, # colorido por marcadores da variável target plt.scatter(x=range(len(df)),y=df['sepal length (cm)'],c=df['target']) plt.title('Sepal Length vs Number of Rows') plt.ylabel('centimeters') plt.xlabel('Number of row') plt.show() # + id="h8GgCyB6E4G6" colab_type="code" outputId="fe32bc0c-7e11-4b15-e737-533571762c11" colab={"base_uri": "https://localhost:8080/", "height": 295} # Crie um Scatter Plot de 2 Features (atributos) plt.scatter(x=df['sepal length (cm)'],y=df['sepal width (cm)'],c=df['target']) plt.title('Sepal Length vs Sepal Width') plt.ylabel('Sepal widtth (cm)') plt.xlabel('Sepal length (cm)') plt.show() # + id="1xhCu0OFVjMb" colab_type="code" outputId="bf2f95cf-6e7a-4a4b-9e9c-e9bce4be4429" colab={"base_uri": "https://localhost:8080/", "height": 315} plt.scatter(x=df['petal length (cm)'],y=df['petal width (cm)'],c=df['target']) plt.title('Pegal Length vs Petal Width') plt.ylabel('Petal width (cm)') plt.xlabel('Pegal Length (cm)') plt.show() # + id="_oYCemlVz_lD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 933} outputId="73c044ac-a4fb-4410-b942-748b44a77d0e" attributes = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)'] pd.plotting.scatter_matrix(df[attributes], figsize=(16, 12)) plt.show() # + id="g4vaUxf6E9EW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4c54eb08-6b3b-4e52-af39-323df065e8eb" # Crie um Histograma de todas as features df.hist(edgecolor='white',grid=False,figsize=(12,12))
src/07_Exploratory_Analysis/02_Explotatory_Analysis_Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:ecpaperenv]
#     language: python
#     name: conda-env-ecpaperenv-py
# ---

# +
import importlib
import xarray as xr
import numpy as np
import sys
import matplotlib.pyplot as plt
from CASutils import mapplot_utils as maps
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
from CASutils import colorbar_utils as cbars
from CASutils import shapefile_utils as shp
from math import nan

# Pick up any local edits to the CASutils helpers without restarting the kernel.
importlib.reload(maps)
importlib.reload(read)
importlib.reload(cbars)
# -

# Output directory for the paper figures.
plotpath="/project/cas/islas/python_plots/snowpaper/FIGURES/"

# ERA5 DJF surface pressure, converted from Pa to hPa; used later to mask
# out high-altitude grid points (ps <= 900 hPa).
era5ps = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/era5_ps/PS_ERA5_1979_2014_DJF.nc")
era5ps = era5ps.ps
era5ps = era5ps/100.
era5ps = era5ps.drop('season')

# +
# Flux/2m-temperature gradient regressions and hot/cold temperature-extreme
# composites for ERA5 and the two model experiments (CLM5 and SNOWD).
datera5 = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/flux_trefht_gradients/gradients_ERA5.nc")
datclm5 = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/flux_trefht_gradients/gradients_Isla_CAM6_CLM5_002.nc")
datsnowd = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/flux_trefht_gradients/gradients_CAM6_CLM5_snowdensity_002.nc")

datera5_comp = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/trefhtptile_composites/pos_neg_extremes/ERA5_minmax_trefhtptilecomposite.nc")
datclm5_comp = xr.open_dataset(
    "/project/cas/islas/python_savs/snowpaper/DATA_SORT/trefhtptile_composites/pos_neg_extremes/Isla_CAM6_CLM5_002_minmax_trefhtptilecomposite.nc")
datsnowd_comp = xr.open_dataset(
    "/project/cas/islas/python_savs/snowpaper/DATA_SORT/trefhtptile_composites/pos_neg_extremes/CAM6_CLM5_snowdensity_002_minmax_trefhtptilecomposite.nc")
# -

# BUG FIX: removed a stray `print(landfrac)` that appeared here --
# `landfrac` is only defined in a later cell, so running the notebook
# top-to-bottom raised NameError at this point.
['Greenland']) landfrac = xr.open_dataset('/project/cas/islas/cesmle/fx/landfraclens.nc') landfrac = landfrac.LANDFRAC.isel(time=0) path="/project/cas02/islas/CLM5_CLM4/raw/Isla_CAM6_CLM5_002/mon/lnd/" fsno_clm5 = read.read_sfc_cesm(path+"FSNO_f.e21.FHIST.f09_f09.cesm2_cam6_clm5.002.1979-01_2014-12.nc","1979-01","2014-12") fsno_clm5_djf = cal.season_mean(fsno_clm5,"FSNO",season="DJF") path="/project/cas02/islas/CLM5_CLM4/raw/CAM6_CLM5_snowdensity_002/mon/lnd/" fsno_snowd = read.read_sfc_cesm(path+"FSNO_f.e21.FHIST_BGC.f09_f09.cesm2_cam6_clm5_snowdensity.002.1979-01_2014-12.nc","1979-01","2014-12") fsno_snowd_djf = cal.season_mean(fsno_snowd,"FSNO", season="DJF") maskclm5 = np.empty([landfrac.lat.size, landfrac.lon.size]) maskclm5[ (fsno_clm5_djf > 0.5)] = 1 maskclm5inverse = np.where( (maskclm5 == 1) | (landfrac < 0.5), nan, 1) masksnowd = np.empty([landfrac.lat.size, landfrac.lon.size]) masksnowd[ fsno_snowd_djf > 0.5] = 1 psmask = np.zeros([landfrac.lat.size, landfrac.lon.size]) psmask[:,:] = nan psmask[ era5ps <= 900] = 1 # + datera5 = datera5*np.array(landfrac)*maskclm5 datclm5 = datclm5*np.array(landfrac)*maskclm5 datsnowd = datsnowd*np.array(landfrac)*maskclm5 datera5_unmasked = datera5_comp*np.array(landfrac) datclm5_unmasked = datclm5_comp*np.array(landfrac) datsnowd_unmasked = datsnowd_comp*np.array(landfrac) datera5_comp = datera5_comp*np.array(landfrac)*maskclm5 datclm5_comp = datclm5_comp*np.array(landfrac)*maskclm5 datsnowd_comp = datsnowd_comp*np.array(landfrac)*maskclm5 # + maxnet_era5 = -1.*datera5_comp.maxfsns - datera5_comp.maxflns - datera5_comp.maxshflx - datera5_comp.maxlhflx minnet_era5 = -1.*datera5_comp.minfsns - datera5_comp.minflns - datera5_comp.minshflx - datera5_comp.minlhflx maxnet_clm5 = -1.*datclm5_comp.maxfsns + datclm5_comp.maxflns + datclm5_comp.maxshflx + datclm5_comp.maxlhflx minnet_clm5 = -1.*datclm5_comp.minfsns + datclm5_comp.minflns + datclm5_comp.minshflx + datclm5_comp.minlhflx maxnet_snowd = -1.*datsnowd_comp.maxfsns + 
datsnowd_comp.maxflns + datsnowd_comp.maxshflx + datsnowd_comp.maxlhflx minnet_snowd = -1.*datsnowd_comp.minfsns + datsnowd_comp.minflns + datsnowd_comp.minshflx + datsnowd_comp.minlhflx maxnet_era5_unmasked = -1.*datera5_unmasked.maxfsns - datera5_unmasked.maxflns - datera5_unmasked.maxshflx - datera5_unmasked.maxlhflx minnet_era5_unmasked = -1.*datera5_unmasked.minfsns - datera5_unmasked.minflns - datera5_unmasked.minshflx - datera5_unmasked.minlhflx maxnet_clm5_unmasked = -1.*datclm5_unmasked.maxfsns + datclm5_unmasked.maxflns + datclm5_unmasked.maxshflx + datclm5_unmasked.maxlhflx minnet_clm5_unmasked = -1.*datclm5_unmasked.minfsns + datclm5_unmasked.minflns + datclm5_unmasked.minshflx + datclm5_unmasked.minlhflx maxnet_snowd_unmasked = -1.*datsnowd_unmasked.maxfsns + datsnowd_unmasked.maxflns + datsnowd_unmasked.maxshflx + datsnowd_unmasked.maxlhflx minnet_snowd_unmasked = -1.*datsnowd_unmasked.minfsns + datsnowd_unmasked.minflns + datsnowd_unmasked.minshflx + datsnowd_unmasked.minlhflx # + fig = plt.figure(figsize=(16,16)) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datera5_comp.maxt850 - datera5_comp.mint850, datera5_comp.lon, datera5_comp.lat, 1,-25,25,'(a) ERA5 T850$_{hot}-$T850$_{cold}$',0.05, 0.31,0.8,0.95) ax.contourf(datera5_comp.lon, datera5_comp.lat, psmask, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 1,-25,25,'T850 (K)',0.32,0.325, 0.8,0.95, orient='vertical', fsize=10, ticks=[-25,-20,-15,-10,-5,0,5,10,15,20,25]) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, (datclm5_comp.maxt850-datclm5_comp.mint850) - (datera5_comp.maxt850 - datera5_comp.mint850), datera5_comp.lon, datera5_comp.lat,0.5,-4,4,'(b) CLM5$-$ERA5, T850$_{hot}-$T850$_{cold}$',0.4,0.66,0.8,0.95) ax.contourf(datera5_comp.lon, datera5_comp.lat, psmask, level=1, 
colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, (datsnowd_comp.maxt850 - datsnowd_comp.mint850) - (datera5_comp.maxt850 - datera5_comp.mint850), datera5_comp.lon, datera5_comp.lat, 0.5,-4,4,'(c) SNOWD$-$ERA5, T850$_{hot}-$T850$_{cold}$',0.7,0.96,0.8,0.95) ax.contourf(datera5_comp.lon, datera5_comp.lat, psmask, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 0.5,-4,4,'T850 (K)',0.97,0.975,0.8,0.95, orient='vertical', fsize=10, ticks=[-4,-3,-2,-1,0,1,2,3,4]) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, maxnet_era5_unmasked - minnet_era5_unmasked, datera5_comp.lon, datera5_comp.lat, 5,-45,45, '(d) ERA5, F$\\uparrow$$_{hot}-$F$\\uparrow$$_{cold}$', 0.05,0.31,0.6,0.75) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 5,-45,45,'F$\\uparrow$ (Wm$^{-2}$)',0.32,0.325, 0.6, 0.75, ticks=[-40,-30,-20,-10,0,10,20,30,40], orient='vertical', fsize=10) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, (maxnet_clm5_unmasked - minnet_clm5_unmasked) - (maxnet_era5_unmasked-minnet_era5_unmasked), datera5_comp.lon, datera5_comp.lat, 2.5,-30,30,'(e) CLM5$-$ERA5, F$\\uparrow$$_{hot}$$-$F$\\uparrow$$_{cold}$',0.4,0.66,0.6,0.75) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, (maxnet_snowd_unmasked - 
minnet_snowd_unmasked) - (maxnet_era5_unmasked-minnet_era5_unmasked), datera5_comp.lon, datera5_comp.lat, 2.5,-30,30,'(f) SNOWD$-$ERA5, F$\\uparrow$$_{hot}$$-$F$\\uparrow$$_{cold}$',0.7, 0.96, 0.6, 0.75) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax.contourf(maskgreenland.lon, maskgreenland.lat, maskclm5inverse, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 2.5, -30, 30, 'F$\\uparrow$ (Wm$^{-2}$)', 0.97,0.975,0.6,0.75, ticks=[-30,-20,-10,0,10,20,30], orient='vertical', fsize=10) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datera5_unmasked.maxincrement - datera5_unmasked.minincrement, datera5_comp.lon, datera5_comp.lat, 0.1,-2,2,'(g) ERA5, Increment$_{hot}$$-$Increment$_{cold}$', 0.05,0.31,0.4,0.55) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 0.1, -2, 2, 'Increment (K)', 0.32,0.325,0.4,0.55, ticks=[-2,-1,0,1,2], orient='vertical', fsize=10) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datera5_unmasked.maxincrement, datera5_comp.lon, datera5_comp.lat, 0.1,-2,2,'(h) Increment$_{hot}$ ',0.4,0.66,0.4,0.55) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datera5_unmasked.minincrement, datera5_comp.lon, datera5_comp.lat, 0.1,-2,2,'(i) Increment$_{cold}$',0.7,0.96,0.4,0.55) ax.contourf(maskgreenland.lon, maskgreenland.lat, maskgreenland, level=1, colors='lightgray') ax = cbars.plotcolorbar(fig, 0.1, -2, 2, 'Increment (K)',0.97,0.975, 0.4,0.55, ticks=[-2,-1,0,1,2], orient='vertical', fsize=10) fig.savefig(plotpath+'obsfigure.png', bbox_inches='tight', facecolor='white') # + fig = plt.figure(figsize=(16,16)) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datera5.bt850, datera5.lon, datera5.lat, 0.05,-1.2,1.2,'ERA5',0.05,0.32,0.8,0.95) ax.contourf(datera5.lon, datera5.lat, psmask, levels=1, colors='lightgray') ax = 
cbars.plotcolorbar(fig, 0.05,-1.2,1.2,'b of T850=a+bT$_{2m}$', 0.33,0.34,0.8,0.95, orient='vertical', fsize=10, ticks=[-0.9,-0.6,-0.3,0,0.3,0.6,0.9]) ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datclm5.bt850 - datera5.bt850, datera5.lon, datera5.lat, 0.02,-0.2,0.2,' ',0.4,0.67,0.8,0.95) ax.contourf(datera5.lon, datera5.lat, psmask, levels=1, colors='lightgray') #ax = maps.contourmap_bothcontinents_fill_nh_pos(fig, datsnowd.bt850 - datera5.bt850, datera5.lon, datera5.lat, # 0.02,-0.2,0.2,' ',0.65,0.92,0.8,0.95)# #ax.contourf(datera5.lon, datera5.lat, psmask, levels=1, colors='lightgray') # - basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/flux_trefht_gradients/" clm5 = xr.open_dataset(basepath+'gradients_Isla_CAM6_CLM5_002.nc') snowd = xr.open_dataset(basepath+'gradients_CAM6_CLM5_snowdensity_002.nc') era5 = xr.open_dataset(basepath+'gradients_ERA5.nc') clm5 = clm5*np.array(landfrac)*maskclm5 snowd = snowd*np.array(landfrac)*masksnowd era5 = era5*np.array(landfrac)*maskclm5 # + fig = plt.figure(figsize=(16,16)) ax1 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(era5.bsumflux), np.array(era5.lon), np.array(era5.lat),0.02,-3.,3, 'Net upward flux, ERA5',0.05,0.32,0.8,0.95) ax1 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(clm5.bsumflux) - np.array(era5.bsumflux), np.array(era5.lon), np.array(era5.lat), 0.02,-2,2,'CLM5$-$ERA5',0.36,0.64,0.8,0.95) ax1 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(snowd.bsumflux) - np.array(era5.bsumflux), np.array(era5.lon), np.array(era5.lat), 0.02,-2,2,'SNOWD$-$ERA5',0.68,0.95,0.8,0.95) # - print(datera5.bt850.sel(lon=255, lat=60, method='nearest'))
FIGURES/figobs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: rscube
#     language: python
#     name: rscube
# ---

# The point of this notebook is to find a *global* shift within the time series
# to ensure that the data is co-registered should there be sub-pixel shifts.

# +
import rasterio
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from itertools import starmap
from skimage.registration import phase_cross_correlation
import scipy.ndimage as nd
import shutil
from rscube import interpolate_nn, bilinear_interpolate
# -

DATA_DIR_NAME = 'data/asf_data_reprojected'  # was an f-string with no placeholders
DATA_DIR = Path(DATA_DIR_NAME)
DATA_DIR.exists()

OUT_DIR = Path(str(DATA_DIR).replace('_reprojected', '_coregistered'))
OUT_DIR.mkdir(exist_ok=True, parents=True)
OUT_DIR.exists()

hh_paths = sorted(list(DATA_DIR.glob('*/*hh*.tif')))
hv_paths = sorted(list(DATA_DIR.glob('*/*hv*.tif')))
hv_paths

# Grab the rasterio profile from the first HV image; it is reused verbatim
# when writing every co-registered output.
with rasterio.open(hv_paths[0]) as ds:
    profile = ds.profile


def read_arr(path):
    """Read band 1 of a GeoTIFF as a 2D array."""
    with rasterio.open(path) as ds:
        arr = ds.read(1)
    return arr


hv_ts = list(map(read_arr, hv_paths))
hh_ts = list(map(read_arr, hh_paths))


def f(img):
    """Crop to the bottom-right 1000x1000 window and fill nodata by
    nearest-neighbor interpolation (phase correlation cannot handle NaNs)."""
    img_ = img[-1000:, -1000:]
    img_ = interpolate_nn(img_)
    return img_


hv_ts_nn_fill = list(map(f, tqdm(hv_ts)))

reference_img = hv_ts_nn_fill[0]


def get_shift(img):
    """Sub-pixel (y, x) shift of `img` relative to `reference_img`,
    estimated to 1/100 of a pixel."""
    shift, _, _ = phase_cross_correlation(reference_img, img, upsample_factor=100)
    return shift


shifts = list(map(get_shift, tqdm(hv_ts_nn_fill)))
shifts, len(shifts)

# BUG FIX: this flag used to be named `resample`, the same name as the function
# below, so the `def` statement clobbered it and `if resample:` inside the
# function tested the truthiness of the function object itself (always True).
# Renamed so the flag actually controls behavior.
apply_shift = True

# Nodata mask taken from the first HV image; restored after shifting.
mask = np.isnan(hv_ts[0])


def resample(img, shift):
    """Shift `img` by the sub-pixel (y, x) `shift`, restoring the nodata mask.

    When `apply_shift` is False the image is passed through unchanged.
    """
    y_shift, x_shift = shift
    if apply_shift:
        # Fill NaNs first: nd.shift's spline interpolation would smear them.
        img_r = interpolate_nn(img)
        img_r = nd.shift(img_r,
                         (y_shift, x_shift),
                         mode='constant',
                         cval=np.nan)
        img_r[mask] = np.nan
    else:
        img_r = img
    return img_r


hh_ts_coreg = list(starmap(resample, tqdm(zip(hh_ts, shifts), total=len(hh_ts))))
hv_ts_coreg = list(starmap(resample, tqdm(zip(hv_ts, shifts), total=len(hv_ts))))

plt.imshow(hv_ts_coreg[0], vmax=.15)
plt.colorbar()

# # Write


def write_one(img, dest_path):
    """Write `img` as float32 band 1 of `dest_path` using the shared profile."""
    with rasterio.open(dest_path, 'w', **profile) as ds:
        ds.write(img.astype(np.float32), 1)
    return dest_path


(OUT_DIR/'hh').mkdir(exist_ok=True, parents=True)
(OUT_DIR/'hv').mkdir(exist_ok=True, parents=True)

hh_dest_paths = hh_paths.copy()
hv_dest_paths = hv_paths.copy()

hh_dest_paths = [OUT_DIR/'hh'/path.name for path in hh_dest_paths]
hv_dest_paths = [OUT_DIR/'hv'/path.name for path in hv_dest_paths]
hh_dest_paths

len(hh_ts_coreg)

list(starmap(write_one, zip(tqdm(hh_ts_coreg), (hh_dest_paths))))

list(starmap(write_one, zip(tqdm(hv_ts_coreg), (hv_dest_paths))))

# # Copy DEM

# +
dem_path = DATA_DIR/'dem.tif'
shutil.copy(dem_path, OUT_DIR/'dem.tif')
# -
notebooks/change_detection/ALOS1_Borreal_Forest_Quebec/2 - Cross-correlation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:espei-new]
#     language: python
#     name: conda-env-espei-new-py
# ---

from pycalphad import Database, variables as v
from espei.datasets import recursive_glob, load_datasets
from espei.plot import dataplot
from tinydb import where, Query
import matplotlib.pyplot as plt

# +
comps = ['FE', 'MO', 'VA']
conds = {v.N: 1.0, v.P: 101325, v.T: (300, 4000, 20), v.X('MO'): (0, 1, 0.02)}

datasets = load_datasets(recursive_glob('.', '*.json'))
# Tag the MPDS-sourced records (their reference strings contain 'C10').
datasets.update({'reference': 'MPDS'}, Query().reference.search('C10'))

# MPDS structure labels -> CALPHAD phase names used by the database.
# NOTE(review): both CR0.28NI0.72_225_CF4 and MO0.08FE0.92_229_CI2 map to
# FCC_A1, and both MO_229_CI2 and FE_PURE map to BCC_A2, exactly as in the
# original hand-written rename chains. No rename's output is another
# rename's input, so a single-pass lookup is equivalent to the chains.
PHASE_RENAMES = {
    'MO0.38FE0.62_148_HR159': 'R_PHASE',
    'MO0.08FE0.92_229_CI2': 'FCC_A1',
    'MO5.1FE7.9_166_HR39': 'MU_PHASE',
    'L': 'LIQUID',
    'MO0.5FE0.5_136_TP30': 'SIGMA',
    'CR0.28NI0.72_225_CF4': 'FCC_A1',
    'FCC': 'FCC_A1',
    'MOFE2_194_HP12': 'LAVES_PHASE_C14',
    'MO_229_CI2': 'BCC_A2',
    'FE_PURE': 'BCC_A2',
}


def _rename_phases(names):
    """Return `names` with every known MPDS label replaced by its phase name."""
    return [PHASE_RENAMES.get(name, name) for name in names]


for records in datasets:
    old_phases = records['phases']
    records['phases'] = _rename_phases(records['phases'])
    # Persist the rename back into the TinyDB storage (matched on the
    # pre-rename phase list).
    datasets.update({'phases': records['phases']}, where('phases') == old_phases)
    print(records['phases'])
    # The phase labels also appear inside the nested `values` entries.
    # NOTE(review): these in-memory edits are not written back via
    # `datasets.update`, matching the original behavior.
    for i in range(len(records['values'])):
        for j in range(len(records['values'][i])):
            records['values'][i][j] = _rename_phases(records['values'][i][j])

# Collect the unique set of phases present across all datasets,
# preserving first-seen order.
phases_db = []
for phase_list in (r['phases'] for r in datasets):
    for phase in phase_list:
        if phase not in phases_db:
            phases_db.append(phase)
print(phases_db)

dataplot(comps, phases_db, conds, datasets, tielines=False)
plt.ylim(1000, 3000)
# -
MPDS-datasets/2-binary/Fe-Mo/Plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer

# +
# Load the Google Play Store listings.
df = pd.read_csv('googleplaystore.csv')
df
# -

# How many missing values does each column have?
df.isnull().sum()

# +
# Fill the missing entries of column index 2 (presumably 'Rating' in this
# dataset — confirm against the CSV header) with the column mean.
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
column_values = df.iloc[:, 2:3].values
impute.fit(column_values)
df.iloc[:, 2:3] = impute.transform(column_values)
df.head()
# -

# Drop every remaining row that still contains a missing value.
df = df.dropna()

# Verify that no missing values remain.
df.isnull().sum()

df
11. Data Analysis on Google Playstore Dataset/3. Null Values Handling on GooglePlaystore Dataset/Null Values Handling on GooglePlaystore Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from gcpds.utils import loaddb
from matplotlib import pyplot as plt
import numpy as np

# # EEG Filters

from gcpds.utils import filters as flt

db = loaddb.BCI2a('BCI2a_database')
db.load_subject(1)
run, _ = db.get_run(0)

fs = db.metadata['sampling_rate']
# Keep only the first second of data (fs samples).
trial = run[0, 0][:fs]
trial.shape

# There are some predefined filters: `notch60`, `band545`, `band330`, `band245`, `band440`, `delta`, `theta`, `alpha`, `beta`, `band1100`, `mu`, `band150`, `band713`, `band1550` and `band550`

# +
plt.figure(figsize=(15, 11))
t = np.linspace(0, trial.shape[0]/fs, trial.shape[0])

plt.subplot(221)
plt.title('Raw')
plt.plot(t, trial)
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.subplot(222)
plt.title('Beta')
plt.plot(t, flt.beta(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.subplot(223)
plt.title('Mu')
plt.plot(t, flt.mu(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.subplot(224)
plt.title('5-50 Hz')
plt.plot(t, flt.band550(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.show()
# -

# A custom filter can be declared with the methods `GenericButterBand` and `GenericNotch`

notch66 = flt.GenericNotch(f0=66, fs=fs)
band830 = flt.GenericButterBand(f0=8, f1=30, fs=fs)

# +
plt.figure(figsize=(15, 5))

plt.subplot(121)
plt.title('Notch 66 Hz')
plt.plot(t, notch66(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.subplot(122)
# BUG FIX: band830 is an 8-30 Hz band-pass (f0=8, f1=30); the title
# previously said '3-30 Hz'.
plt.title('8-30 Hz')
plt.plot(t, band830(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.show()
# -

# The same filter is automatically recompiled for a different sampling rate.
#
# NOTE(review): the cell below passes the same `fs` as the cell above, so it
# does not actually exercise the recompilation path — rerun it with a
# different sampling rate to demonstrate the feature.

# +
plt.figure(figsize=(15, 5))

plt.subplot(121)
plt.title('Notch 66 Hz')
plt.plot(t, notch66(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.subplot(122)
# BUG FIX: same title correction as above — band830 is 8-30 Hz.
plt.title('8-30 Hz')
plt.plot(t, band830(trial, fs=fs))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')

plt.show()
# -

# ----
# ### References
#
# * [Butterworth digital and analog filter design](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html#scipy.signal.butter)
# * [Design second-order IIR notch digital filter](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirnotch.html#scipy.signal.iirnotch)
# * [Apply a digital filter forward and backward to a signal.](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html#scipy.signal.filtfilt)
#
notebooks/02-filters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # SVM from Scratch

# ## Linear SVM
# ### y = mx + b
# ### y = x + z + b + q

# +
# NumPy for array math.
import numpy as np
# Matplotlib for plotting.
from matplotlib import pyplot as plt
# %matplotlib inline

# Dummy data: each sample is [x value, y value, bias term].
X = np.array([
    [-2, 4, -1],
    [4, 1, -1],
    [1, 6, -1],
    [2, 4, -1],
    [6, 2, -1],
])

# Labels: the first two samples are the negative class (-1),
# the remaining three are the positive class (+1).
y = np.array([-1, -1, 1, 1, 1])

# Scatter the samples in 2D.
for d, sample in enumerate(X):
    if d < 2:
        # negative samples
        plt.scatter(sample[0], sample[1], s=120, marker='_', linewidths=2)
    else:
        # positive samples
        plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)

# A guessed separating line between the two classes, drawn through two points.
plt.plot([-2, 6], [6, 0.5])
# -

# # Hinge loss
# ## Commonly used for max-margin classification:
# ## c(x,y,f(x)) = (1 - y*f(x))+
# ![title](img/hinge.png)

# # Objective Function
# ![title](img/act.png)

# ## What is a regularizer?
# ### The term that balances the loss against margin maximization.

# # How does learning work mathematically?
# ![title](img/opt.png)

# # The weight vector
# ## The weights are a vector that defines the position of the decision boundary.
# ## Misclassification condition
# ![title](img/m.png)
# ### On a misclassification, update the weights with both the loss and the regularizer gradients:
# ![title](img/w.png)
# #### n = learning rate
# #### lambda = regularizer
# #### The learning rate is the step size taken along the error surface:
# #### too high and the search may overshoot the minimum; too low and it takes forever.
# ### The regularizer trades off training error against test error:
# #### too high -> large test error; too low -> large training error.
# ### Regularizer value = 1 / epoch
# ### On a correct classification, update only with the regularizer gradient:
# ![title](img/wy.png)

# +
# Stochastic gradient descent to learn the separating hyperplane
# between both classes.
def svm_sgd_plot(X, Y):
    """Train a linear SVM with SGD on the hinge loss.

    Plots the misclassification history and the learned hyperplane,
    then returns the trained weight vector.
    """
    w = np.zeros(len(X[0]))
    eta = 1              # learning rate
    epochs = 100000
    errors = []

    for epoch in range(1, epochs):
        error = 0
        for i, x in enumerate(X):
            # Inside the margin / misclassified: hinge + regularizer gradient.
            if (Y[i] * np.dot(X[i], w)) < 1:
                w = w + eta * ((X[i] * Y[i]) + (-2 * (1/epoch) * w))
                error = 1
            else:
                # Correctly classified: regularizer gradient only.
                w = w + eta * (-2 * (1/epoch) * w)
        errors.append(error)

    # Misclassification history per epoch.
    plt.plot(errors, '|')
    plt.ylim(0.5, 1.5)
    # FIX: plt.axes() with no arguments creates a brand-new Axes on top of
    # the plot (deprecated usage); use the current Axes instead.
    plt.gca().set_yticklabels([])
    plt.xlabel('Epoch')
    plt.ylabel('Misclassified')
    plt.show()

    for d, sample in enumerate(X):
        # Plot the negative samples
        if d < 2:
            plt.scatter(sample[0], sample[1], s=120, marker='_', linewidths=2)
        # Plot the positive samples
        else:
            plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)

    # Add our test samples
    plt.scatter(2, 2, s=120, marker='_', linewidths=2, color='yellow')
    plt.scatter(4, 3, s=120, marker='+', linewidths=2, color='blue')

    # Print the hyperplane calculated by svm_sgd()
    # (locals renamed from X/Y/U/V so the function's X/Y arguments are
    # not shadowed).
    qa = [w[0], w[1], -w[1], w[0]]
    qb = [w[0], w[1], w[1], -w[0]]
    qx, qy, qu, qv = zip(*np.array([qa, qb]))
    ax = plt.gca()
    ax.quiver(qx, qy, qu, qv, scale=1, color='blue')

    # BUG FIX: the trained weights were never returned, so the
    # `w = svm_sgd_plot(X, y)` call below bound `w` to None.
    return w
# -

w = svm_sgd_plot(X, y)
# The misclassifications decrease over time! Our SVM is learning the
# optimal hyperplane.
SVM from scratch/SVM from scratch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Version 03 -> Pred RUL # !pip install texttable # + # importing required libraries from scipy.io import loadmat import matplotlib.pyplot as plt import numpy as np from pprint import pprint as pp from sklearn.model_selection import train_test_split from sklearn import svm from pprint import pprint from sklearn.linear_model import LinearRegression from texttable import Texttable import math from sklearn.metrics import r2_score # getting the battery data #bs_all = [ # 'B0005', 'B0006', 'B0007', 'B0018', 'B0025', 'B0026', 'B0027', 'B0028', 'B0029', 'B0030', 'B0031', 'B0032', # 'B0042', 'B0043', 'B0044', 'B0045', 'B0046', # 'B0047', 'B0048' #] bs_all = [ 'B0005', 'B0006', 'B0007', 'B0018' ] ds = {} for b in bs_all: ds[b] = loadmat(f'DATA/{b}.mat') types = {} times = {} ambient_temperatures = {} datas = {} for b in bs_all: x = ds[b][b]["cycle"][0][0][0] ambient_temperatures[b] = x['ambient_temperature'] types[b] = x['type'] times[b] = x['time'] datas[b] = x['data'] # clubbing all the compatible batteries together # Batteries are compatible if they were recorded under similar conditions # And their data size match up bs_compt = {} for b in bs_all: sz = 0 for j in range(datas[b].size): if types[b][j] == 'discharge': sz += 1 if bs_compt.get(sz): bs_compt[sz].append(b) else: bs_compt[sz] = [ b ] pp(bs_compt) BSSS = bs_compt # + ## CRITICAL TIME POINTS FOR A CYCLE ## We will only these critical points for furthur training ## TEMPERATURE_MEASURED ## => Time at highest temperature ## VOLTAGE_MEASURED ## => Time at lowest Voltage ## VOLTAGE_LOAD ## => First time it drops below 1 volt after 1500 time def getTemperatureMeasuredCritical(tm, time): high = 0 critical = 0 for i in range(len(tm)): if (tm[i] > high): high = tm[i] critical = time[i] return critical 
def getVoltageMeasuredCritical(vm, time): low = 1e9 critical = 0 for i in range(len(vm)): if (vm[i] < low): low = vm[i] critical = time[i] return critical def getVoltageLoadCritical(vl, time): for i in range(len(vl)): if (time[i] > 1500 and vl[i] < 1): return time[i] return -1 # - # ## MODEL # * Considering 1 Cycle for RUL estimation # # ## Features # * [CP1, CP2, CP3, Capacity] -> RUL # # ## Remaining Useful Life # * n = number of cycles above threshold # * RUL of Battery after (cycle x) = (1 - (x / n)) * 100 ## X: Features ## y: RUL ## x: no. of cycles to merge def merge(X, y, x): XX = [] yy = [] sz = len(X) for i in range(sz - x + 1): curr = [] for j in range(x): for a in X[i + j]: curr.append(a) XX.append(curr) # val = 0 # for j in range(x): # val += y[i + j] # val /= x yy.append(y[i + x - 1]) return XX, yy # + ## Data Structure # Cycles[battery][param][cycle] # Cycles[battery][Capacity][cycle] Cycles = {} params = ['Temperature_measured', 'Voltage_measured', 'Voltage_load', 'Time'] rmses = [] for bs_cmpt in bs_compt: rmses.append([]) # iterate over the merge hyper parameter for xx in range(1, 2): results = Texttable() results.add_row(['Compatible Batteries', 'Cycles', 'MAE', 'RMSE', 'R2 Score' ]) loc = 0 # iterate over all the battery sets for bs_cmpt in bs_compt: # getting data for a given set # y contains RUL after current cycle # model will train for y y = [] bs = bs_compt[bs_cmpt] for b in bs: Cycles[b] = {} for param in params: Cycles[b][param] = [] for j in range(datas[b].size): if types[b][j] == 'discharge': Cycles[b][param].append(datas[b][j][param][0][0][0]) cap = [] for j in range(datas[b].size): if types[b][j] == 'discharge': cap.append(datas[b][j]['Capacity'][0][0][0][0]) Cycles[b]['Capacity'] = np.array(cap) Cycles[b]['count'] = len(Cycles[b][params[0]]) effective_cycle_count = 0 for x in Cycles[b]['Capacity']: if (x < 1.4): break effective_cycle_count += 1 for i in range(len(Cycles[b]['Capacity'])): if (i < effective_cycle_count): y.append((1 - 
((i + 1) / effective_cycle_count)) * 100) else: y.append(0) # preparing data for regression model temperature_measured = [] voltage_measured = [] voltage_load = [] capacity = [] for b in bs: for c in Cycles[b]['Capacity']: capacity.append(c) for i in range(Cycles[b]['count']): temperature_measured.append(getTemperatureMeasuredCritical(Cycles[b]['Temperature_measured'][i], Cycles[b]['Time'][i])) voltage_measured.append(getVoltageMeasuredCritical(Cycles[b]['Voltage_measured'][i], Cycles[b]['Time'][i])) voltage_load.append(getVoltageLoadCritical(Cycles[b]['Voltage_load'][i], Cycles[b]['Time'][i])) # creating the model X = [] for i in range(len(temperature_measured)): X.append(np.array([temperature_measured[i], voltage_measured[i], voltage_load[i], capacity[i]])) # X.append(np.array(capacity)) X = np.array(X) y = np.array(y) # merge cycles X, y = merge(X, y, xx) # creating train test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # fitting the model regressor = LinearRegression() regressor.fit(X_train, y_train) # test y_pred = regressor.predict(X_test) # model evaluation diff = 0 total = 0 rmse = 0 for i in range(len(y_test)): diff += abs(y_test[i] - y_pred[i]) rmse += ((y_test[i] - y_pred[i]) * (y_test[i] - y_pred[i])) total += y_test[i] diff /= len(y_test) total /= len(y_test) rmse = math.sqrt(rmse / len(y_test)) accuracy = ((total - diff) / total) * 100 #accuracy = r2_score(y_test, y_pred) # Adding evaluation to result array to print in a table results.add_row([ str(bs), str(Cycles[bs[0]]['count']), diff, rmse, accuracy ]) rmses[loc].append(rmse) loc += 1 # printing results # print(f'Evaluation: Clubbing Compatible Batteries for cycle param: {xx}\n{results.draw()}') # + # print(rmses) # - for rm in rmses: plt.plot(range(1, len(rm) + 1), rm) plt.ylabel("Error") plt.show() def removeFromGroup(x): loc = 0 y = {} for a in x: for b in x[a]: y[loc] = [ b ] loc += 1 return y # + ## Data Structure # 
Cycles[battery][param][cycle] # Cycles[battery][Capacity][cycle] from sklearn.svm import SVR from sklearn import tree Cycles = {} params = ['Temperature_measured', 'Voltage_measured', 'Voltage_load', 'Time'] # remove batteries from group bs_compt = BSSS bs_compt = removeFromGroup(bs_compt) rmses = [] for bs_cmpt in bs_compt: rmses.append([bs_compt[bs_cmpt][0]]) # iterate over the merge hyper parameter for xx in range(1, 25): results = Texttable() results.add_row(['Compatible Batteries', 'Cycles', 'MAE', 'RMSE', 'R2 Score' ]) loc = 0 # iterate over all the battery sets for bs_cmpt in bs_compt: # getting data for a given set # y contains RUL after current cycle # model will train for y y = [] bs = bs_compt[bs_cmpt] for b in bs: Cycles[b] = {} for param in params: Cycles[b][param] = [] for j in range(datas[b].size): if types[b][j] == 'discharge': Cycles[b][param].append(datas[b][j][param][0][0][0]) cap = [] for j in range(datas[b].size): if types[b][j] == 'discharge': cap.append(datas[b][j]['Capacity'][0][0][0][0]) Cycles[b]['Capacity'] = np.array(cap) Cycles[b]['count'] = len(Cycles[b][params[0]]) effective_cycle_count = 0 for x in Cycles[b]['Capacity']: if (x < 1.4): break effective_cycle_count += 1 for i in range(len(Cycles[b]['Capacity'])): if (i < effective_cycle_count): y.append((1 - ((i + 1) / effective_cycle_count)) * 100) else: y.append(0) # preparing data for regression model temperature_measured = [] voltage_measured = [] voltage_load = [] capacity = [] for b in bs: for c in Cycles[b]['Capacity']: capacity.append(c) for i in range(Cycles[b]['count']): temperature_measured.append(getTemperatureMeasuredCritical(Cycles[b]['Temperature_measured'][i], Cycles[b]['Time'][i])) voltage_measured.append(getVoltageMeasuredCritical(Cycles[b]['Voltage_measured'][i], Cycles[b]['Time'][i])) voltage_load.append(getVoltageLoadCritical(Cycles[b]['Voltage_load'][i], Cycles[b]['Time'][i])) # creating the model X = [] for i in range(len(temperature_measured)): 
X.append(np.array([temperature_measured[i], voltage_measured[i], voltage_load[i], capacity[i]])) # X.append(np.array(capacity)) X = np.array(X) y = np.array(y) # merge cycles X, y = merge(X, y, xx) # creating train test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) ############## ------------------ MODEL ------------------- #################### # fitting the model #regressor = SVR(kernel = 'rbf', C=100, gamma=0.1, epsilon = .1) #regressor = tree.DecisionTreeRegressor( random_state = 0) regressor = LinearRegression() regressor.fit(X_train, y_train) # test y_pred = regressor.predict(X_test) ############# ----------------- MODEL -------------------- ##################### # model evaluation diff = 0 total = 0 rmse = 0 for i in range(len(y_test)): diff += abs(y_test[i] - y_pred[i]) rmse += ((y_test[i] - y_pred[i]) * (y_test[i] - y_pred[i])) total += y_test[i] diff /= len(y_test) total /= len(y_test) rmse = math.sqrt(rmse / len(y_test)) accuracy = ((total - diff) / total) * 100 #accuracy = r2_score(y_test, y_pred) # Adding evaluation to result array to print in a table results.add_row([ str(bs), str(Cycles[bs[0]]['count']), diff, rmse, accuracy ]) rmses[loc].append(rmse) loc += 1 #printing results print(f'Evaluation: Clubbing Compatible Batteries for cycle param: {xx}\n{results.draw()}') # - for rm in rmses: mn = 100000 loc = -1 for i in range(1, len(rm)): if (mn > rm[i]): mn = rm[i] loc = i mn = min(mn, rm[i]) print(f"Minima: {mn}, Merge: {loc}") plt.plot(range(1, len(rm)), rm[1:]) plt.ylabel(rm[0]) plt.show()
.ipynb_checkpoints/Version 4 RUL Yash-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + block_hidden=true # %load_ext rpy2.ipython # %matplotlib inline from fbprophet import Prophet import pandas as pd import logging logging.getLogger('fbprophet').setLevel(logging.ERROR) import warnings warnings.filterwarnings("ignore") # + block_hidden=true language="R" # library(prophet) # - # ### Forecasting Growth # # By default, Prophet uses a linear model for its forecast. When forecasting growth, there is usually some maximum achievable point: total market size, total population size, etc. This is called the carrying capacity, and the forecast should saturate at this point. # # Prophet allows you to make forecasts using a [logistic growth](https://en.wikipedia.org/wiki/Logistic_function) trend model, with a specified carrying capacity. We illustrate this with the log number of page visits to the [R (programming language)](https://en.wikipedia.org/wiki/R_%28programming_language%29) page on Wikipedia: df = pd.read_csv('../examples/example_wp_R.csv') import numpy as np df['y'] = np.log(df['y']) # + language="R" # df <- read.csv('../examples/example_wp_R.csv') # df$y <- log(df$y) # - # We must specify the carrying capacity in a column `cap`. Here we will assume a particular value, but this would usually be set using data or expertise about the market size. df['cap'] = 8.5 # + language="R" # df$cap <- 8.5 # - # The important things to note are that `cap` must be specified for every row in the dataframe, and that it does not have to be constant. If the market size is growing, then `cap` can be an increasing sequence. 
# # We then fit the model as before, except pass in an additional argument to specify logistic growth: # + output_hidden=true m = Prophet(growth='logistic') m.fit(df) # + output_hidden=true language="R" # m <- prophet(df, growth = 'logistic') # - # We make a dataframe for future predictions as before, except we must also specify the capacity in the future. Here we keep capacity constant at the same value as in the history, and forecast 3 years into the future: # + output_hidden=true magic_args="-w 10 -h 6 -u in" language="R" # future <- make_future_dataframe(m, periods = 1826) # future$cap <- 8.5 # fcst <- predict(m, future) # plot(m, fcst); # - future = m.make_future_dataframe(periods=1826) future['cap'] = 8.5 fcst = m.predict(future) m.plot(fcst); # The logistic function has an implicit minimum of 0, and will saturate at 0 the same way that it saturates at the capacity. It is possible to also specify a different saturating minimum. # # ### Saturating Minimum # # The logistic growth model can also handle a saturating minimum, which is specified with a column `floor` in the same way as the `cap` column specifies the maximum: # + output_hidden=true magic_args="-w 10 -h 6 -u in" language="R" # df$y <- 10 - df$y # df$cap <- 6 # df$floor <- 1.5 # future$cap <- 6 # future$floor <- 1.5 # m <- prophet(df, growth = 'logistic') # fcst <- predict(m, future) # plot(m, fcst) # - df['y'] = 10 - df['y'] df['cap'] = 6 df['floor'] = 1.5 future['cap'] = 6 future['floor'] = 1.5 m = Prophet(growth='logistic') m.fit(df) fcst = m.predict(future) m.plot(fcst); # To use a logistic growth trend with a saturating minimum, a maximum capacity must also be specified.
notebooks/saturating_forecasts.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature-wise normalization of .nii images

import os
from src.files.file import create_directory
import nibabel as nib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.keras
from keras.preprocessing.image import ImageDataGenerator

# +
input_dir = '../data/SPM-preprocessed/'
output_dir = '../data/SPM-preprocessed-normalized/'

create_directory(output_dir)
# -

# ### Utility

def get_nii_data_in_directory(directory):
    """Yield (nibabel image, file name) for every .nii file in `directory`."""
    files = os.listdir(directory)
    for file in files:
        if file[-4:] == '.nii':
            nii_data = nib.load(directory + file)
            yield nii_data, file

def get_all_nii_data_in_directory(directory):
    """Materialize the generator above into parallel lists of images and names."""
    generator = get_nii_data_in_directory(directory)
    nii_data = []
    file_names = []
    for i_nii_data, file_name in generator:
        nii_data.append(i_nii_data)
        file_names.append(file_name)
    return nii_data, file_names

def save_normalized_data_to_directory(nii_data, file_name, directory):
    """Write a nibabel image to `directory` under `file_name`."""
    nib.save(nii_data, directory + file_name)

def feature_wise_normalize(images):
    """Normalize each 3D volume feature-wise, treating its z-slices as samples.

    For every volume, the z-slices are stacked as a (dim_z, dim_x, dim_y, 1)
    batch, standardized per-pixel across slices by ImageDataGenerator, then
    reassembled into (len(images), dim_x, dim_y, dim_z).

    NOTE(review): all volumes are assumed to share the shape of images[0] —
    TODO confirm upstream preprocessing guarantees this.
    """
    length = len(images)
    dim_x = images[0].shape[0]
    dim_y = images[0].shape[1]
    dim_z = images[0].shape[2]
    x_transformed = np.zeros((length, dim_x, dim_y, dim_z))
    for image_index in range(length):
        x_train_one = np.zeros((dim_z, dim_x, dim_y, 1))
        x_transformed_one = np.zeros((dim_z, dim_x, dim_y, 1))
        # Re-stack the volume so that each z-slice is one "sample" of the batch.
        for slice_index in range(dim_z):
            x_train_one[slice_index, :, :, 0] = images[image_index][:, :, slice_index]
        # A fresh generator per volume: mean/std statistics are fit on this
        # single volume's slices only (per-volume normalization).
        datagen_featurewise_mean = ImageDataGenerator(featurewise_center = True, featurewise_std_normalization = True)
        datagen_featurewise_mean.fit(x_train_one)
        batches = 0
        count = 0
        # flow() is an infinite iterator; break after one pass over the
        # slices (default batch size is 32, hence the /32 bound).
        for x_batch in datagen_featurewise_mean.flow(x_train_one, shuffle = False):
            for i_inb in range(x_batch.shape[0]):
                # Map standardized values into [0, 1]; presumably assumes
                # the standardized data lies in roughly [-3, 9] — TODO
                # confirm the intended range of this affine rescaling.
                x_transformed_one[count + i_inb, :, :, :] = (x_batch[i_inb] + 3) / 12
            count += x_batch.shape[0]
            batches += 1
            if batches >= len(x_train_one) / 32:
                break
        # Un-stack the normalized slices back into (x, y, z) order.
        for slice_index in range(dim_z):
            x_transformed[image_index, :, :, slice_index] = x_transformed_one[slice_index, :, :, 0]
    return x_transformed

# #### Read, normalize, and save

nii_data, file_names = get_all_nii_data_in_directory(input_dir)

images = []
for i_nii_data in nii_data:
    images.append(i_nii_data.get_fdata())

normalized_images = feature_wise_normalize(images)

# Re-wrap each normalized array with its original affine/header before saving.
for normalized_image, i_nii_data, file_name in zip(normalized_images, nii_data, file_names):
    new_nii_data = nib.Nifti1Image(normalized_image, i_nii_data.affine, i_nii_data.header)
    save_normalized_data_to_directory(new_nii_data, file_name, output_dir)

# # Plot distributions etc

plot_img_nr = 0

# ## Distribution before normalization:

plt.hist(images[plot_img_nr].flatten(), bins = 30)
plt.show()

plt.imshow(images[plot_img_nr][:, 30, :], cmap = 'gray')

# ## Distribution after normalization:

plt.hist(normalized_images[plot_img_nr].flatten(), range = [0, 1], bins = 30)
plt.show()

plt.imshow(normalized_images[plot_img_nr, :, 30, :], cmap = 'gray')
notebooks/01_1 - Data - Normalization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # データマイニング概論 # # Pythonの基礎 # ## 式と数学演算子 # 以下のセルで足し算を計算してみましょう。`1+1`は式で、単純なプログラム命令となっていて、Python処理系はこの命令を評価して足し算の結果を返しています。セルの式は`Ctrl+Enter`もしくは`Shift+Enter`で実行することができます。 # # 式の中の`1`は値、`+`は数学演算子です。 # 1+1を計算 1+1 # 数学演算子には他に以下のような演算子があります。これらの演算子を使って式を入力して実行し、その評価結果を確認してみてください。 # - `**`:累乗 # - `*`:掛け算 # - `/`:割り算 # - `//`:整数の割り算(小数点以下は切り捨て) # - `%`:割り算の余り # - `+`:足し算 # - `-`:引き算 # 2の5乗を計算 2**5 # 通常の数学と同様に、演算子には以下のように優先順序(評価の順序)があります。括弧`()`を使うことで、明示的に評価の順序を変えることもできます。 # `** > *, /, //, % > +, -` # (1+1)(2+3)を計算 (1+1)**(2+3) # 以下の式は間違った命令で`Syntax Error`(文法エラー)となります。エラーの対処については後で学びます。 1+* # # 練習 # 上記の数学演算子を利用して任意の数式を記述し実行してください # *** # *** # ## 型 # 値(例えば先の式の`1`)のタイプをデータ型と呼びます。すべての値はいずれかのデータ型に属しています。以下にPythonの主なデータ型を示します。 # - 整数(int): -1, 0, 1 # - 浮動小数点(float): -1.0, 0.0, 3.14, # - 文字列(str): 'a', '1', 'U Tokyo' # # 浮動小数点型は小数点を含む数値です。文字列型は文字・テキストの値です。文字列は、シングルクオート(`'`)もしくはダブルクオート(`"`)で囲んで記述します。`''`や`""`のように空の文字列も値となります。 # # 演算子の意味は、一緒に使われる値のデータ型によって変化します。整数型や浮動小数点の値に対しては、`+`は値の足し合わせを意味します。一方、文字列の値に対しては、`+`は文字列をつなぐ文字列連結演算子となります。 # 文字列の連結 'Hello' + ' ' + 'World!' # `*`は整数型や浮動小数点の値の掛け算を意味しますが、文字列と整数と併せて用いると文字列の複製演算子となります。以下の式を評価すると、整数で指定した回数だけ文字列を繰り返した文字列になります。 # Helloを5回繰り返し出力 'Hello'*5 # 以下の式はそれぞれ評価されるでしょうか? 'Hello'+1 'Hello'*'World!' 'Hello'*1.0 # # 練習 # 好きな英単語を10回繰り返して表示してください # *** # *** # ## 変数 # 変数は値や式の評価結果を格納するための"箱"の役割を果たします。値や式の評価結果を変数へ保存することで、その結果を再利用することができます。変数に値を保存するには、以下のような代入文を用います。 # ```Pyhon # 変数名 = 値 or 式 # ``` # + # 変数xを初期化 x=1 # 変数yを初期化 y=1 # xとy利用して変数zを初期化 z = x+y z # - # zを上書き z = z+1 z # + # 変数word1を初期化 word1='Hello' # 変数word2を初期化 word2='World' # word1とword2を利用して変数sentenceを初期化 sentence = word1 + ' ' + word2 sentence # - #sentenceを上書き sentence = sentence+'!' 
sentence # コーディングではわかりやすい変数名をつけましょう。 # # 変数の命名規則 # - 空白を含まない # - 文字と数字と下線記号_から構成される # - 数字から始まらない # - 特殊文字($や`など) を含まない # # 練習 # 変数$x$と$y$の値を掛け算した結果を変数$z$に代入して、$z$の値を表示してください # *** # *** # ## `print`関数 # `print()`関数を使うことで、以下のように括弧内の文字列を出力させることできます。この時、Pythonは`print()`関数を呼び出し、'Hello World!'という文字列の値を関数に渡しています。関数については後で学びます。なお、シングルクオートは文字列の開始と終了を表す記号で、文字列の値には含まれないので表示されません。 # + print('Hello World') # 変数greetに文字列を代入 greet='Hello World' print(greet) # - # 整数型や浮動小数点型の値を文字列型の値とともに`print()`関数に渡したいときは、`str()`関数を使って文字列型に変換します。 # 整数型や浮動小数点型の値のprint print('int:'+str(1) + " float:"+str(3.14)) # # 練習 # 2つの好きな文字列をつなげたものを変数に代入して、その変数の値を`print()`関数で表示してください # *** # *** # ## ブール型と比較演算子 # ブール型は`True`または`False`の2種類の値をとります。以下に示す比較演算子は、2つの値を比較して1つのブール型の値を返します。 # - `==` : 等しい # - `!=` : 等しくない # - `>` : より大きい # - `<` : より小さい # - `>=` : 以上 # - `<=` : 以下 # # `>,<,>=,<=`の比較演算子は、両辺が同じ型どうしである必要があります。 1 == 1 1 == 0 1 == 1.0 'Hello' == 'Hello' 'Hello' == 'hello' 1 == '1' x = 1 x > 0 x = -1 x > 0 # ## ブール演算子 # ブール演算子は、ブール型の値を組み合わせる場合に使います。比較演算子のように1つのブール型の値を返します。 # - `and` # - `or` # - `not` # # ### 二項ブール演算子 # `and`と`or`演算子は、常に2つのブール値(もしくは式)をとるので、二項演算子と呼ばれます。`and`演算子は、以下のように、2つのブール値が`True`の時のみに`True`となり、それ以外は`False`となります。 # - `True and True`: `True` # - `True and False`: `False` # - `False and False`: `False` # - `False and False`: `False` # # 一方、`or`演算子は、以下のように、2つのブール値がどちらかが`True`なら`True`となり、両方が`False`なら`False`となります。 # - `True or True`: `True` # - `True or False`: `True` # - `False or True`: `True` # - `False or False`: `False` # ### `not`演算子 # `not`演算子は1つのブール値(もしくは式)をとり、そのブール値を以下のように反転させます。 # - `not True`: `False` # - `not False`: `True` # # ## ブール演算子と比較演算子 # 比較演算子はブール値を返すので、以下のようにブール演算子と組み合わせて使うことができます。 (0<1) and (1<2) x = 1 y = -1 (x>0) and (y>0) x=1 y=-1 (x ==1) or (y==1) # 複数の演算子がある時は、算術演算子と比較演算子を評価した後に、まず`not`演算子を評価し、次に`and`演算子、最後に`or`演算子を評価します。 x = 1 y = -1 x+y==0 and not x-y< 0 and (x >0 or y > 0) # # 練習 # 変数$x$,$y$,$z$について、$x$が$y$より大きく、$y$が$z$より大きいならば`True`となる式をブール演算子と比較演算子を使って記述してください 
# *** # *** # ## フロー制御 # ### 条件式 # ブール演算子を使った式は、条件式ともよばれます。条件式は、これから説明するフロー制御文(`if`文など)で使われます。条件式は、常に1つのブール値に評価され、その値が`True`か`False`によって、フロー制御文は次に何を実行するかを決定します。 # # ### コードブロック # Pythonのコードは1行以上をひとまとまりとしてブロックとすることができます。ブロックの区間は、コードのインデントで指定します。インデントとは、行の先頭に何個かのスペースを入れることです。Pythonの標準コーディングスタイルでは、4文字のスペースを入れます(ノートブックのセルではtabでインデントが挿入されます)。 # - ブロックはインデントで始まる # - ブロックの中には新しいブロックを含めることができる # - インデントがなくなるか、上位のブロックのインデントに戻るとそのブロックは終了する # # # ```Python # x=1 # if x >= 0: # if x == 0: # 第1ブロック開始 # print('0') # 第2ブロック開始 # else: # 第2ブロック終了 # print('positive') #第3ブロック開始 # ``` # # ### `if`文 # `if`文は最もよく用いるフロー制御文の1つです。`if`文に続くブロックは、`if`文の条件式が`True`の時に実行されます。条件式が`False`ならば、そのブロックの実行はスキップされます。フロー制御文はすべてコロン(`:`)で終わり、次にコードのブロックが続きます。 # # ### `else`文 # `if`文には、オプションとして`else`文を続けることができます。`else`文に続くブロックには、先の`if`文の条件式が`False`の時に実行されます。`else`文には条件式は必要ありません。 # # ### `elif`文 # 複数のブロックからいずれか1つを実行したい場合は、`if`文に`elif`文を続けて、それ以前の条件式が`False`だった場合に、別の条件式を判定させます。この時、`elif`文に続くブロックには、`elif`文の条件式が`True`の時に実行されます。 # ```Python # if 条件式1: # ブロック1 # elif 条件式2: # ブロック2 # elif 条件式3: # ブロック3 # else: # ブロック4 # ``` # + # 変数ageを初期化 age = -1 # ageの値で条件分岐 if age == 20: print('come of age') elif age > 65: print('elder') elif age > 20: print('adult') elif age >= 0: print('child') else: print('before birth') # - # # 練習 # 変数$x$が偶数だったら'even'、奇数だったら'odd'をprintする処理を`if`文を使って記述してください。なお、$x$は整数とします。 # ### `while`文 # `while`文を使うと、`while`文の条件式が`True`である限り、`while`文に続くブロックを何回も繰り返すことができます。`while`文のブロックの終わりでは、プログラムの実行は`while`文の最初に戻り、繰り返し`while`文の条件式を判定します。条件式が`False`になると、ブロックの実行をスキップして繰り返しを抜けます。 # ```Python # while 条件式: # ブロック # ``` # + # 変数countを初期化 count=1 # countが5以内であればwhile文内の処理を繰り返す while count <=5: print(count) count = count+1 # - # ### `break`文 # `break`文を使って、`while`文の繰り返しブロックから抜け出すことができます。Jupyter Notebookで無限ループに入りセル操作が受け付けられない時は、メニューのKernelからJupyter NotebookをRestartしてください。 # 変数countを初期化 count=1 while True: # countが5を超えたらwhile文の処理を抜ける if count > 5: break print(count) count = count+1 # ### for文 # 
`while`文では条件式が`True`のあいだ、ブロックの実行が繰り返されます。一定の回数だけ、ブロックの実行を繰り返したい場合は、`for`文と`range()`関数を用います。 # ```Python # for 変数名 in range()関数: # ブロック # ``` # # `range()`関数では、第1引数で繰り返し変数の開始値を指定し、第2引数では終了値より1大きい数を指定します。`for`に続く変数には、繰り返しごとに`range()`関数が返す値が代入されます。 # 変数countに1から5を順に代入してfor文内を繰り返す for count in range(1,6): print(count) # # 練習 # 1. while文を使って、1から5までの整数の平方を順番に出力する処理を記述してください。その際、break文を含めてください。 # 1. for文を使って、1から5までの整数の平方を順番に出力する処理を記述してください。その際、range関数を含めてください。 # *** # *** # ## 関数 # 関数はプログラムの処理や手続きの流れをまとめて小さなプログラムのようなものです。関数の主な目的は、頻繁に呼び出されるような処理のコードをまとめることです。 # # # `print()`や`range()`はPythonが持つ組み込み関数ですが、自分で関数を定義することもできます。以下は、'Hello'を出力するだけの単純な関数を定義しています。1行目では`def`で`hello()`という名前の関数を定義することを宣言しています。`def`文に続くブロックが関数の本体です。このブロックは関数が呼び出された時に実行されます。関数を定義すると、その関数名を使った関数を呼び出すことができます。 # + def hello(): # Helloを出力 print('Hello') hello() # - # ### 引数 # 関数を定義する際に、括弧の中に関数へ渡す変数の一覧を記述することができます。これらの変数は関数のローカル変数となります。ローカル変数とはプログラムの一部(ここでは関数内)でのみ利用可能な変数で、関数の外からは参照することはできません。また、関数が呼び出された後はその変数は消滅します。 # + def hello(greet): # 引数greetの値を出力 print(greet) hello("Hello") # - # ### 戻り値 # 関数は受け取った引数を元に処理を行い、その結果の戻り値を返すことができます。戻り値は、`return`で定義します。関数の戻り値がない場合は、`None`が返されます。 # + def hello(greet): # 引数greetの値を返す return greet echo = hello("Hello") print(echo) # - # ### 複数の引数 # 関数は任意の数の引数を受け取ることができます。複数の引数を受け取る場合は、引数をカンマで区切ります。これらの引数名は重複しないようにしましょう。 # + def hello(en, fr, de): print(en, fr, de) hello("Hello","Bonjour","Guten Tag") # - # ### キーワード引数 # 一般的な引数(位置引数とも呼ばれます)では、事前に定義した引数の順番に従って、関数は引数を受け取る必要があります。キーワード引数を使うと、関数は引数の変数名とその値の組みを受け取ることができます。その際、引数は順不同で関数に渡すことができます。 # + def hello(en, fr, de): print(en, fr, de, sep=', ') hello("Hello", de="Guten Tag", fr="Bonjour") # - # # 練習 # 引数xを受け取り、xが偶数だったら'even'、奇数だったら'odd'を返す関数even_oddを作成し、作成した関数に引数を渡して、関数から返ってきた値を表示してください # *** # *** # ## リスト # リストは値を格納するためのデータ構造の1つです。リストまたはタプルは複数の値を格納でき、大量のデータを保持しながら処理するプログラムを書くのに役立ちます。また、リストの中に他のリストを入れ子に含むこともできるので、階層的なデータ構造を表現するのにも使えます。 # # 
リストは以下のように、複数の値(要素と呼びます)をカンマ区切りにして、四角括弧`[]`で囲って記述します。リストはそれ自体が値なので、他の値のように、変数に代入したり、関数に渡すことができます。なお、関数にリストを渡して操作すると、その操作は元のリストに反映されることに注意してください。
#
# `[]`という値は空リストと呼び、要素が1つも入っていないリストを表します。

num=[1,2,3,4,5]
num

greet=['hello','bonjour','guten tag']
greet

# +
def hello(greet_list):
    print(greet_list)

hello(greet)
# -

# ### リストとインデックス
# 以下では、`ut_dep`という変数に`['理1','理2','理3','文1','文2','文3']`という文字列の要素からなるリストが入っています。ここで、`ut_dep[0]`を評価して値を見てみましょう。同様に、`ut_dep[1]`を評価するとどうなるでしょう。

# +
ut_dep=['理1','理2','理3','文1','文2','文3']

# リストの先頭要素を出力
print(ut_dep[0])

# リストの2番目の要素を出力
print(ut_dep[1])
# -

# リストに続く四角括弧内の整数はインデックスと呼ばれ、リストの各要素に対応しています。リストの先頭の要素のインデックスは0、2番目の要素のインデックスは1、3番目は2となります。このようにインデックスを使うことで、リスト内の任意の要素にアクセスできます。

print('文科:'+ut_dep[3]+","+ut_dep[4]+","+ut_dep[5])

# インデックスは0から始まりますが、インデックスとして負の整数も用いることができます。インデックス-1はリストの末尾の要素に対応し、-2は末尾から2番目の要素に対応しています。

print('文科:'+ut_dep[-3]+","+ut_dep[-2]+","+ut_dep[-1])

# リストには他のリストを入れ子に入れることができます。リストの中のリストの要素にアクセスするには以下のように複数のインデックスを使います。第1のインデックスにはどのリストを用いるのかを指定し、第2のインデックスにはそのリストの中の要素に対応するものを指定します。

ut_dep2=[['理1','理2','理3'],['文1','文2','文3']]
print('3類:'+ut_dep2[0][2]+","+ut_dep2[1][2])

# リストのインデックスを用いるとリストの要素の値を変更できます。リストの要素を削除する場合は`del`文を使います。削除した要素より後ろの要素は、ひとつずつ前にずれます。

# +
ut_dep=['理','文']
ut_dep[0]='Science'
ut_dep[1]='Human'
print(ut_dep)

del ut_dep[0]
print(ut_dep)
# -

# ### リストとスライス
# インデックスではリストの任意の要素にアクセスできましたが、スライスを用いるとリスト内の複数の要素にアクセスすることができます。スライスでは四角括弧の中に、2つの整数をコロンで区切って記述します。第1番目の整数はスライスの開始インデックスを、第2番目の整数はスライスの終端インデックスを表しますが、そのインデックス自身は含まれずに、それより1つ小さいインデックスまでを含みます。スライスを評価すると新しいリストとなります。

# +
ut_dep=['理1','理2','理3','文1','文2','文3']

# リストの先頭から3番目までの要素
ut_science = ut_dep[0:3]

#リストの4番目から6番目までの要素
ut_human = ut_dep[3:6]

print(ut_science)
print(ut_human)
# -

# スライスで第1のインデックスを省略するとインデックス0を指定したのと同じになり、リストの先頭からスライスとなります。また、第2のインデックスを省略するとリストの長さを指定したのと同じになり、リストの末尾までのスライスとなります。なお、リストの長さは`len()`関数で取得できます。

# +
ut_dep=['理1','理2','理3','文1','文2','文3']
print(len(ut_dep))

# リストの先頭から3番目までの要素
ut_science = ut_dep[:3]

#リストの4番目から終端までの要素
ut_human = ut_dep[3:]
print(ut_science) print(ut_human) # - # ### リストの連結 # 2つのリストに`+`演算子を適用すると、2つのリストが連結され新たに1つのリストが生成されます。また、`*`演算子をリストと整数に適用すると、リストを整数の数分だけ複製します。 ut_science=['理1','理2','理3'] ut_human=['文1','文2','文3'] ut_dep = ut_science+ut_human print(ut_dep) print(ut_dep*2) # ### リストと`for`文 # さきほど出てきた`for`文の繰り返しは、リストのように複数の要素を持つデータ構造から要素を1つずつ取り出してコードブロックを繰り返しています。そのため、リストと`for`文を組み合わせると、リストの要素を1つずつ取り出しながら処理することができます。 # リストの要素を1つずつ取り出し ut_dep=['理1','理2','理3','文1','文2','文3'] for dep in ut_dep: print(dep) # リストのインデックスを用いて、繰り返し処理をしたい場合は、`range()`関数と`len()`関数を用いて以下のように記述します。 ut_dep=['理1','理2','理3','文1','文2','文3'] for i in range(len(ut_dep)): print(i,ut_dep[i]) # ### リストと`in`演算子 # `in`演算子を使うと、ある要素がリストの中に含まれているかどうかを判定することができます。`in`は式として用い、調べたい要素と対象のリストの間に書きます。この式を評価するとブール値となります。 ut_dep=['理1','理2','理3','文1','文2','文3'] '理1' in ut_dep # ### リストとメソッド # メソッドは、あるオブジェクト(リストはオブジェクトの1つです)について呼び出し可能な専用の関数です。リストのようなデータ型には1連のメソッドが備わっていて、例えばリスト型には、検索(`index()`)、追加(`append()`, `insert()`)、削除(`remove()`)、並び替え(`sort()`)、取り出し(`pop()`)などのリストの要素を操作するための便利なメソッドが準備されています。 # #### index()メソッド # `index()`メソッドは、以下のように値を渡すと、リストの要素からその値のインデックスを返します。リストの中に値がなければ、`ValueError`を返します。リストの中に値が複数存在している場合は、最初に出現した方のインデックスを返します。 ut_dep=['理1','理2','理3','文1','文2','文3'] ut_dep.index('理2') # #### apped()メソッドとinsert()メソッド # `append()`メソッドや`insert()`メソッドを使うと、以下のようにリストに新しい要素を追加することができます。`append()`メソッドは、引数に渡された値をリストの末尾に要素として追加します。`index()`メソッドでは、第1引数で値を挿入するインデックスを指定することで、リストの任意の場所に要素を追加することができます。 # # `append()`メソッドや`insert()`メソッドは元のリストを書き換える操作をしていて、戻り値で新しいリストが返るわけではないことに注意してください。 # + ut_dep=['理2','理3','文1','文2'] # リストの末尾に要素を追加 ut_dep.append('文3') print(ut_dep) # リストの先頭に要素を追加 ut_dep.insert(0,'理1') print(ut_dep) # - # #### remove()メソッド # `remove()`メソッドは、渡された値をリストから削除します。リストに含まれない値を削除しようとすると、`ValueError`を返します。リストの中に値が複数含まれる場合は、最初のあたいだけが削除されます。 # # `del`文はリストから削除したい値のインデックスがわかっている時に使い、`remove()`メソッドはリストから削除したい値がわかっている時に使います。 ut_dep=['理1','理2','理3','文1','文2','文3'] ut_dep.remove('文1') print(ut_dep) # #### pop()メソッド # 
`pop()`メソッドは、渡された値のインデックスの要素をリストから削除し、削除した値を返します。`pop()`メソッドは、リストから任意の要素を取り出したい時にも使えます。 ut_dep=['理1','理2','理3','文1','文2','文3'] print(ut_dep.pop(3)) print(ut_dep) # #### sort()メソッド # `sort()`メソッドを使うと、リストの要素(数や文字列)を並べ換えることができます。 # # `append()`メソッドや`insert()`メソッドと同じく`sort()`メソッドは元のリストを書き換える操作をしていて、戻り値で新しいリストが返るわけではないことに注意してください。 num = [4,3,5,1,2] num.sort() num word = ['desert','banana','carrot','apple'] word.sort() word # 文字列の並び替えは、ASCIIコード順で行われます。アルファベット順で行いたい場合は、キーワード引数keyにstr.lowerを指定します。 word = ['Desert','Banana','carrot','apple'] word.sort() print(word) word.sort(key=str.lower) print(word) # # 練習 # 以下のリストデータdepについて # 1. 先頭と末尾の要素をインデックスを使って表示してください # 2. 先頭と末尾以外の要素をスライスを使って表示してください # 3. リストの長さを表示してください # 4. for文を使ってリストの要素を1つずつ取り出して表示してください # ```Python # dep = ['理', '工', '農', '薬', '医', '法', '経', '教', '養', '文'] # ``` dep = ['理', '工', '農', '薬', '医', '法', '経', '教', '養', '文'] # *** # *** # ## 辞書 # 辞書は、リスト同様に複数の要素の集合を格納するためのデータ構造の1つです。リストのインデックスが整数型だったのに対して、辞書のインデックスにはさまざまなデータ型(整数、浮動小数点数、文字列など)を用いることができます。辞書のインデックスをキーと呼び、キーには対応する値が存在します。リストと同様に辞書のキーとして整数値を使うこともできます。その場合は、任意の整数値をとることができます。 # # 辞書は以下のように、複数の要素(キーと値の組み)をカンマ区切りにして、波括弧`{}`で囲って記述します。ここでは、'semester', 'title', 'year', 'unit'というキーが、それぞれ'S', 'data mining', '2018', 2という値と対応している辞書を変数`course`に代入しています。 course = {'semster':'S', 'title':'data mining', 'year':'2018', 'unit':2} # 辞書のキーを使うことで、以下のようにキーに対応する値にアクセスすることができます。 print(course['title'] + ', ' + course['year']+course['semster']) # キーと値を更新 course['year']=2017 course['semster']='A' del course['unit'] course # ### 辞書とkeys, values, itemsメソッド # リストと異なり、辞書の要素には順序がありません。リストの先頭はインデックスが0で、末尾はインデックスが-1でしたが、辞書は先頭あるいは末尾の要素という概念がありません。また、2つの辞書の中身が同じか判定する際も、辞書では要素の順番は影響しません。このように辞書には要素の順序関係がないので、インデックスやスライスで部分を抜き出すことはできないことに注意してください。 # # 辞書にはインデックスやスライスで要素にアクセスできませんが、キー、キーに対応する値、キーと値の組み、それぞれにアクセスするためのメソッドがあります。 # - `keys()`:辞書のすべてのキーをリストで取得 # - `values()`:辞書のすべての値をリストで取得 # - `items()`:辞書のすべてキーと値の組みをリストで取得 # # これらのメソッドは以下のようにfor文と一緒に使うことができます。 # + course = {'semster':'S', 
'title':'data mining', 'year':'2018', 'unit':2} for k in course.keys(): print(k) for v in course.values(): print(v) for i in course.items(): print(i) for k, v in course.items(): print(k, v) # - # ### 辞書と`in`演算子 # リストにある要素が含まれるかどうか判定するのに、`in`演算子を使ったように、辞書にあるキーや値が存在するかを判定するのにも、`in`演算子を使うことができます。 # + course = {'semster':'S', 'title':'data mining', 'year':'2018', 'unit':2} # # キーに'year'があるか? print('year' in course.keys()) # # 値に'2017'があるか? print('2017' in course.values()) # - # ### 辞書のメソッド # リスト型でみたメソッドのように、辞書型にも辞書を操作するための便利なメソッドが準備されています。さきほどの`keys`, `values`, `items`メソッドはそのようなメソッドです。 # # #### get()メソッド # `get()`メソッドを使うと、辞書にキーの存在の確認ができます。第1引数には存在を確認したいキー、第2引数にはそのキーが存在しない時に用いる値を渡します。 course = {'semster':'S', 'title':'data mining', 'year':'2018'} print(course.get('year',2017)) print(course.get('unit',1)) # #### pop()メソッド # リストと同じく、`pop()`メソッドを使って、渡されたキーに対応する要素を辞書から削除し、削除した要素の値を返します。`pop()`メソッドは、辞書から任意の要素を取り出したい時にも使えます。 course = {'semster':'S', 'title':'data mining', 'year':'2018', 'unit':2} print(course.pop('unit')) print(course) # # 練習 # 以下の辞書データcourseについて # 1. キー'title'に対応する値を表示させてください # 1. for文を使って、辞書のすべてのキーと値の組みを表示してください # 1. 
if文を使って、辞書のキー'year'に対応する値が'2017'であれば`True`、そうでなければ`False`を表示する処理を記述してください # # # ```Python # course = {'semster':'A', 'title':'python programming', 'year':'2018', 'unit':1} # ``` course = {'semster':'A', 'title':'python programming', 'year':'2018', 'unit':1} # *** # *** # ## 文字列 # ### リストと文字列 # 文字列は1文字の要素が並んだリスト(正確にはタプル)とみなすことができます。そのため、インデックスによる要素の指定、スライスによる部分の取り出し、`for`文での要素の繰り返し処理、`len()`関数による長さの取得、`in`演算子による要素の検索など、リストに対して可能なことの多くは文字列に対しても可能です。 # # リストは変更可能なデータ型ですが、文字列はそれ自体が変更不可能です。文字列のように要素を変更したり追加したり削除したりできなくしたリストをタプルと呼びます。 # # リスト型や辞書型と同様に、文字列型にも文字列を操作するための便利なメソッドが準備されています。 # + course = 'datamining' print(course[0]) print(course[-1]) print(course[:4]) print(course[4:]) for i in course: print(i) for i in range(len(course)): print(i,course[i]) print('data' in course) # - # #### upper(), lower()メソッド # `upper()`, `lower()`メソッドは、元の文字列のすべての文字を、それぞれ大文字または小文字に変換した文字列を返します。なお、以下のように `ut.upper()`や`ut.lower()`だけでは、元の文字列`ut`は変更されません。`ut = ut.lower()`として変数に代入することで、`ut`を変更します。 ut = 'The University of Tokyo' print(ut.upper()) print(ut) ut = ut.lower() print(ut) # #### join(), split()メソッド # `join()`メソッドは、文字列のリストを渡すと、リスト中の文字列を元の文字列で連結したものを返します。以下では、リスト中の文字列を' : 'で連結しています。 ' : '.join(['理1','理2','理3','文1','文2','文3']) # `split()`メソッドは、文字列を渡すと、その文字列で、元の文字列を分割してリストとして返します。以下では、文字列を' : 'で分割してリストにしています。 '理1 : 理2 : 理3 : 文1 : 文2 : 文3'.split(' : ') # #### count()メソッド # `count()`メソッドは、文字列を検索して、引数で指定した文字列が含まれている個数を返します。 'the university of tokyo'.count('t') # #### replaceメソッド # `replace()`メソッドは、引数で指定した文字列で、元の文字列の指定部分を置き換えた新しい文字列を返します。元の文字列を直接置き換えないことに注意してください。 s = 'I love Tokyo' print(s.replace('Tokyo', 'Paris')) print(s) # # 練習 # 以下の文に含まれる単語を、1つずつ出力してください。その際、単語はすべて大文字にして、その単語の長さとともに出力してください。 # # 'Data mining is the process of discovering patterns in large data sets involving methods at the intersection of machine learning, statistics, and database systems' # *** # *** # ## ファイルの入出力 # Pythonでファイルを読み書きするには以下のステップがあります。なお、ここではテキストが含まれるプレーンテキストファイルの読み書きを扱うこととします。 # 1. 
`open()`関数を呼び出し、`File`オブジェクトを取得する # 1. `File`オブジェクトの`read()`や`write()`メソッドを呼び出して読み書きする # 1. `File`オブジェクトの`close()`オブジェクトを呼び出してファイルを閉じる # ### `open()`関数 # `open()`関数を使ってファイルを開くには、開きたいファイルへのパスを文字列として渡します。パスは絶対パスでも相対パスでも構いません。パスは例えば、以下のように指定します。 # # ```Python # # 同じフォルダ(ディレクトリ)ファイルがある場合 # data_file = open('data.txt') # # # 同じフォルダの下位フォルダ、フォルダ名:data、にファイルがある場合 # data_file = open('data/data.txt') # # # 上位のフォルダ、フォルダ名:data、にファイルがある場合 # data_file = open('../data/data.txt') # ``` # # `open()`関数を呼び出すと、ファイルを読み込みモードで開くことになります。この時、ファイルからは読み込みだけが可能になり、書き込んだり変更したりすることはできません。 # # `open()`関数の戻り値は`File`オブジェクトです。`File`オブジェクトはデータ型の一つで、ファイルを操作するためのメソッドが用意されています。 # ### read()メソッド, readlines()メソッド # `File`オブジェクトの`read()`メソッドを使うと、ファイル全体をひとつの文字列として読み込むことができます。`read`メソッドは、ファイルの内容をひとつの文字列として返します。 # Colaboratoryでは以下を実行して必要なファイルをダウンロード # !wget https://raw.githubusercontent.com/UTDataMining/2020A/master/week1/data_mining.txt infile = open('data_mining.txt') content = infile.read() print(content) # `File`オブジェクトの`readlines()`メソッドを使うと、1行ずつの文字列を要素とするリストとしてファイルを読み込むことができます。 # + dmfile = open('data_mining.txt') content = dmfile.readlines() print(content) # '\n'は改行を表す文字 print('\n') # 1行ずつ出力 for line in content: print(line) # - # ### write()メソッド # `File`オブジェクトの`write()`メソッドを使うと、ファイルに書き込むことができます。この時、`write()`メソッドは書き込まれた文字数を返します。ただし、読み込みモードで開いたファイルに書き込むことはできません。ファイルに書き込むには、書き込みモード、もしくは、追記モードでファイルを開く必要があります。 # # `open()`関数の第2引数に`'w'`を渡すと書き込みモードでファイルを開きます。書き込みモードでは、既存のファイルの内容を上書きして書き直します。 # ```Python # data_file = open('data.txt', 'w') # ``` # # `open()`関数の第2引数に`'a'`を渡すと追記モードでファイルを開きます。追記モードでは、既存のファイルの終端に追加して書き込みます。 # ```Python # data_file = open('data.txt', 'a') # ``` # # 書き込みモードでも追記モードでも、もし引数で指定したファイルが存在しなければ、新たに空のファイルが引数で示したパスに作成されます。 # # ファイルを読み書きした後は、`close()`メソッドを呼び出して閉じます。 # # + myfile = open('greet.txt', 'w') myfile.write('Hello\n') myfile.close() myfile = open('greet.txt', 'a') myfile.write('Bonjour\n') myfile.close() myfile = open('greet.txt') content=myfile.read() print(content) myfile.close() # 
- # # 練習 # 先ほどの"data_mining.txt"ファイルを開いて、以下のコードを参考にファイル中のテキストに単語"data"が出現する回数を数えてください。その際、大文字・小文字は区別しないこととします。 # # + dmfile = open('data_mining.txt') content = dmfile.readlines() freq = 0 for line in content: # rstripは文字列の末尾の指定した文字を削除 sentence = line.rstrip().rstrip('.').split(' ') for word in sentence: word=word.rstrip(',') print(word.lower()) print(freq) # - # ## モジュール # Pythonでは、これまでに見てきたような`print`, `len`, `range`などの組み込み関数と呼ばれる基本的な関数を使用することができます。さらに、Pythonには標準ライブラリと呼ばれるモジュール群も含まれています。 # # 各モジュールは関連する関数を備えたPythonプログラムで、それらの関数を利用し独自のPythonプログラムを作成することができます。例えば、`math`モジュールには、数学関連の関数が含まれています。 # # モジュールの中の関数を呼び出すには、まず`import`文を使ってモジュールを読み込む必要があります。 # ```Python # import モジュール名 # ``` # # モジュールを読み込んだら、その中の関数を使うことができます。以下では、`math`モジュールの`sqrt`関数(平方根を計算する関数)を`math.sqrt`と指定して使用しています。 import math print(math.sqrt(4)) # `import`は、以下のように`from`と組み合わせて書くこともできます。この時は、モジュールの関数を使う際にモジュール名を書く必要はありません。 # ```Python # from モジュール名 import * # ``` from math import * # mathモジュールのsqrt関数だけ使う場合は、from math import sqrt print(sqrt(4)) # # 練習 # mathモジュールのfactorial関数を用いて10の階乗を計算してください # ## CSVファイル # ### csvモジュール # CSV(Comma Separated Values)ファイルは、プレーンテキストファイルに記録された簡易的なスプレッドシート(エクセルの表など)です。CSVファイルの各行は、スプレッドシートの行を表していて、区切り文字(通常はカンマ)は、その行の要素を区切るものです。 # # ```Python # ## 成績CSVファイル # # ユーザID, 国語、算数 # 1, 60, 70 # 2, 70, 90 # 3, 80, 80 # ... 
# ``` # # csvモジュールを使うとCSVファイルを読み書き、解析することが容易になります。csvモジュールを使って、CSVファイルからデータを読み込むには、まず通常のテキストファイルと同様に、`open()`関数でCSVファイルを開きます。この`open()`関数の戻り値の`File`オブジェクトを、`csv.reader()`関数へ渡します。すると、`csv.reader()`は、`Reader`オブジェクトを生成し、この`Reader`オブジェクトを用いると、CSVファイルの行を順番に処理することができます。 # # `Reader`オブジェクトを`list()`関数に渡すと、元のcsvファイルの1行がリストとそのリスト要素としたリスト(リストのリスト)が返ってきます。このリストに対して、リストのインデックスを使うことで、特定の行と列の値に以下のようにアクセスすることができます。 # Colaboratoryでは以下を実行して必要なファイルをダウンロード # !wget https://raw.githubusercontent.com/UTDataMining/2020A/master/week1/simple_score.csv # + import csv score = open('simple_score.csv') # score = open('simple_score.csv', encoding='utf-8') # 文字コードを指定する場合 score_reader = csv.reader(score) score_data = list(score_reader) print(score_data) # 2行目の3列目 print(score_data[1][2]) # 3行目の4列目 print(score_data[2][3]) # - # `Reader`オブジェクトからは、以下のように`for`文をつかうことで1行ずつファイルを処理することもできます。この時、各行はリストとなって取り出されます。以下では、行番号を取得するのに、`Reader`オブジェクトの`line_num`変数を使っています。`Reader`オブジェクトは、1度繰り返し処理すると、再び利用することはできないため、元のCSVファイルを読み込み直すには、ファイルを開き直す必要があります。 import csv score = open('simple_score.csv') # score = open('simple_score.csv', encoding='utf-8') # 文字コードを指定する場合 score_reader = csv.reader(score) for row in score_reader: print(str(score_reader.line_num)+": "+str(row)) # `Writer`オブジェクトを用いると、データをCSVファイルに書き込むことができます。`Writer`オブジェクトを作るには`csv.writer()`関数を使います。CSVファイルにデータを書き込むには、まず、`open()`関数に `'w'`を渡して書き込みモードでファイルを開きます。この`File`オブジェクトを、`csv.writer()`関数に渡して`Writer`オブジェクトを生成します。 # # `Writer`オブジェクトの`writerow()`メソッドは、引数にリストをとります。この引数に渡されたリストの各要素の値が、出力するCSVファイルの各セルの値となります。 import csv score = open('my_score.csv', 'w') # score = open('my_score.csv', 'w', encoding='utf-8') # 文字コードを指定する場合 # score = open('my_score.csv', 'w', newline='') #for Windows score_writer = csv.writer(score) score_writer.writerow(['user','kokugo','sugaku']) score_writer.writerow([1,50,50]) score_writer.writerow([2,30,40]) score.close() # カンマの代わりにタブ区切りで出力したい場合は、以下のように`csv.writer()`関数の引数`delimiter`に区切り文字を指定します。 # ```Python # score_writer = csv.writer(score, 
delimiter='\t') # ``` # # 練習 # 先ほどの"simple_score.csv"ファイルを開いて、すべてのユーザIDとそのユーザの数学の点数を組みにして出力してください。 # ## エラーが出たら # プログラムが実行できない(エラーが出た)時は、バグを取り除くデバッギングが必要になります。例えば、以下に留意することでバグを防ぐことができます。 # # - "よい"コードを書く # - コードに説明のコメントを入れる # - 1行の文字数、インデント、空白などのフォーマットに気をつける # - 変数や関数の名前を適当につけない # - グローバル変数に留意する # - コードに固有の"マジックナンバー"を使わず、変数を使う # - コード内でのコピーアンドペーストを避ける # - コード内の不要な処理は削除する # - コードの冗長性を減らすようにする など # - 参考 # - [Google Python Style Guide](http://works.surgo.jp/translation/pyguide.html) # - [Official Style Guide for Python Code](http://pep8-ja.readthedocs.io/ja/latest/) # - 関数の単体テストを行う # - 一つの関数には一つの機能・タスクを持たせるようにする など # # エラーには大きく分けて、文法エラー、実行エラーがあります。以下、それぞれのエラーについて対処法を説明します。 # ### 文法エラー # 1. まず、エラーメッセージを確認しましょう # 2. エラーメッセージの最終行を見て、それがSyntaxErrorであることを確認しましょう # 3. エラーとなっているコードの行数を確認しましょう # 4. そして、当該行付近のコードを注意深く確認しましょう # # よくある文法エラーの例: # - クオーテーションや括弧の閉じ忘れ # - コロンのつけ忘れ # - =と==の混同 # - インデントの誤り など print('This is the error) # ### 実行エラー # 1. まず、エラーメッセージを確認しましょう # 2. エラーメッセージの最終行を見て、そのエラーのタイプを確認しましょう # 3. エラーとなっているコードの行数を確認しましょう # 4. そして、当該行付近のコードについて、どの部分が実行エラーのタイプに関係しているか確認しましょう。もし複数の原因がありそうであれば、行を分割、改行して再度実行し、エラーを確認しましょう # 5. 原因がわからない場合は、print文を挿入して処理の入出力の内容を確認しましょう # # よくある実行エラーの例: # - 文字列やリストの要素エラー # - 変数名・関数名の打ち間違え # - 無限の繰り返し # - 型と処理の不整合 # - ゼロ分割 # - ファイルの入出力誤り など print(1/0)
week1/python_for_data_analysis1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Auction-learning diagnostics
#
# Aggregates the per-iteration experiment logs (utility / revenue / regret /
# welfare) over all misreports and plots each metric across the
# Noise / Clip / Exp configurations.

from pathlib import Path
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# +
tests = pd.read_csv('tests.csv')
utility = pd.read_csv('utility.csv')
train = pd.read_csv('train.csv')
welfare = pd.read_csv('welfare.csv')

# Overwrite training rows with test-time measurements where rows overlap.
train.update(tests)
# -

midx_util = pd.MultiIndex.from_frame(utility.query("Agent == 0")[['Report','Utility']])

# +
# Find max utility over all misreports (Report > 0)
util = utility.query("Report > 0").groupby(['Iter','Agent','Noise','Clip','Exp'])['Utility'].max()

# reset_index() flattens the grouped Series back into a plain DataFrame with
# the group keys as columns (idiomatic replacement for the previous
# pd.DataFrame(pd.DataFrame(...).to_records()) round-trip).
util_data = util.reset_index()

# +
# Find min revenue over all misreports
tr = train.groupby(['Iter','Noise','Clip','Exp'])['Revenue'].min()

# Transform to df with index/columns
train_data = tr.reset_index()

# +
# Find max regret over all misreports
reg = utility.query("Report > 0").groupby(['Iter','Agent','Noise','Clip','Exp'])['Regret'].max()

# Transform to df with index/columns
reg_data = reg.reset_index()

# +
# Find max summed regret over all misreports
reg_s = train.groupby(['Iter','Noise','Clip','Exp'])['Regret'].max()

# Transform to df with index/columns
reg_sum_data = reg_s.reset_index()

# +
# Find min welfare over all misreports
wel = welfare.groupby(['Iter','Noise','Clip','Exp'])['Welfare'].min()

# Transform to df with index/columns
wel_data = wel.reset_index()
# -

# # Welfare

w = sns.FacetGrid(wel_data, col='Noise', row='Clip', hue='Exp')
w = w.map(plt.plot, 'Iter', 'Welfare')

# # Revenue

rev = sns.FacetGrid(train_data, col='Noise', row='Clip', hue='Exp')
rev = rev.map(plt.plot, 'Iter', 'Revenue')

# # Sum Regret

reg_sum = sns.FacetGrid(reg_sum_data, col='Noise', hue='Exp')
reg_sum = reg_sum.map(plt.plot, 'Iter', 'Regret')

# # Regret

reg = sns.FacetGrid(reg_data, col='Noise', row='Agent', hue='Exp')
reg = reg.map(plt.plot, 'Iter', 'Regret')

# # Utility Agent 0

ag0 = sns.FacetGrid(util_data.query('Agent == 0'), col='Noise', row='Clip', hue='Exp')
ag0 = ag0.map(plt.plot, 'Iter', 'Utility')

# # Utility All Agents

a = sns.FacetGrid(util_data, col='Noise', row='Agent', hue='Exp')
a = a.map(plt.plot, 'Iter', 'Utility')
regretNet/visualize_valuation_space.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project Report # # ## Salary predictor for tech employees in Canada based on survey data</center> # ### Summary # # Here we attempt to build the model to predict the income of tech employees in Canada by using a multi-linear regression model based on the following features: years of coding experience, programming languages used, education level, and role. After the hyperparameter tuning process, $R^2$ of the training data set increases from 0.67 to 0.72, and the model is also tested on the testing data set with $R^2 = 0.71$, which is consistent with the training result. Besides, there are 3 points that we want to explore further in the future: to find other explanatory variables that might give us a better score, to include the United States in our model, to identify the best features that contribute to the prediction. # ### Introduction # # The aim of this project is to allow tech employees in Canada to get a reasonable estimation of how much they will potentially earn given their skill set and years of experience. Fresh graduates and seasoned employees would benefit from an analysis tool that allows them to predict their earning potential. While the Human Resources (HR) department of companies has access to this market information, tech employees are mostly clueless about what their market value is. Therefore, a salary predictor tool could assist them in the negotiation process. # ### Methods # #### Data # The data set used in this project is from the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey), which is conducted annually. The survey data set has nearly 80,000 responses. 
There are several useful features that could be extracted from this survey such as education level, location, the language used, job type, all of which are potentially associated with annual compensation {cite:p}`stack_overflow_survey`. # # #### Exploratory Data Analysis # After performing EDA on the training data set, there are several points worth mentioning. The distribution of the response variable, salary, is positively skewed with a fat tail, as shown in Fig. 1 {cite:p}`vanderplas2018altair`. This attribute is undesirable, which makes the model less robust. So, extremely high salaries (top 8%) in our training data set will be defined as outliers that are removed in the preprocessing step.<br> # # ```{figure} ../results/salary_density_plot.png # --- # height: 400px # name: salary_density # --- # Density plot of annual compensation(USD): The distribution of annual compensation is right skewed with extremely large value as well as extremely small value. # ``` # # Among all the features investigated, it can be found that the salary is strongly correlated to the number of professional coding years. Fig.2 clearly shows that there is a linear relationship between the number of professional coding years and the salary. Figure.3 displays both effects of professional coding years and languages mastered on the salary. # # ```{figure} ../results/code_years_plot.png # --- # height: 400px # name: code_years_plot # --- # Coding year vs. annual compensation(USD): Number of coding years is strongly correlated to compensation, but becomes widely spread when the coding years are greater than 20. # ``` # # ```{figure} ../results/language_codeyears_plot.png # --- # height: 150px # name: language_code_years_plot # --- # Both effects of languages mastered and coding years on annual compensation: The more languages mastered and the more years in professional experience, the higher compensation expected. 
# ``` # # Figures below present how other 3 features we selected have significant effects on the income level. # ```{figure} ../results/edu_plot.png # --- # height: 150px # name: edu_plot # --- # Education level vs. annual compensation(USD): Education levels are positively related to compensation. # ``` # ```{figure} ../results/language_plot.png # --- # height: 150px # name: lang_plot # --- # Programming languages vs. annual compensation(USD): Programming languages is associated with compensation. The more programming languages mastered, the higher compensation. # ``` # ```{figure} ../results/role_plot.png # --- # height: 150px # name: role_plot # --- # Roles vs. annual compensation(USD): Roles are related to compensation. Mobile and back-end developers have greater compensation. # ``` # #### Model # In light of EDA and recommendations from Stack Overflow, 4 features are extracted that are duration for being a profession, education level, programming language worked with and job position. Then, the regression equation can be obtained:<br> # # $$ # y_{salary} = w^T X + b # $$ # # *where w is the weight vector, X is the feature vector, b is the error term, $y_{salary}$ is predicted variable.* <br> # # Ridge model is chosen in this case, which can help reduce over-fitting problems. Ridge solves a regression model where the loss function is the linear least squares function and regularization is given by the L2-norm. # # $$ # loss\ function = ||y - w^T X||^2_2 + alpha * ||w||^2_2 # $$ # # *where alpha is regularization strength to reduces the variance of the estimates. Larger values specify stronger regularization.* <br> # # Within the training data set, randomized hyperparameter searching was carried out based on the scoring matrix, $R^2$. Then, the model with the best performed parameter was used to make prediction on the test data set. 
# ### Results and Discussion # + tags=["hide-cell"] from joblib import dump, load import pandas as pd from myst_nb import glue import altair as alt from altair_saver import save alt.data_transformers.enable('data_server') alt.renderers.enable('mimetype') # Load regression training results pipe_loaded = load('../results/best_model_pipe.joblib') alpha = round(pipe_loaded.best_params_['ridge__alpha'], 3) rsquare = round(pipe_loaded.best_score_, 3) glue("alpha_coef", alpha); glue("R2", rsquare); # Load regression testing results test_result_loaded = load('../results/test_result.joblib') rsquare_test = round(test_result_loaded["r_sq_test"], 3) glue("R2_test", rsquare_test); # Draw the predicted value error plot test_df = pd.read_csv("../data/processed/test.csv") y_test = test_df.ConvertedComp.tolist() y_predict = test_result_loaded["predict_y"].tolist() result = {"true_y": y_test, "predicted_y": y_predict} df_result = pd.DataFrame(data=result) df_result.head(5) df_diag = pd.DataFrame(data={"true_y": [0, max(df_result.true_y)+500], "predicted_y":[0, max(df_result.true_y)+500]}) plt1 = alt.Chart(df_result).mark_point(opacity=0.5).encode( alt.X("predicted_y", title="Predicted salary"), alt.Y("true_y", title="True salary") ) + alt.Chart(df_diag).mark_line(color='red').encode( alt.X("predicted_y", title="Predicted salary"), alt.Y("true_y", title="True salary") ) plt1.save("../results/test_data_result.png") # - # As mentioned previously, Ridge model is selected in order to avoid conditioning problems and large estimator coefficients. Firstly, hypeparameterization of alpha is carried out. The hyperparameter tuning result shows that the model is at the best performance when alpha = {glue:text}`alpha_coef` with a training $R^2$ of {glue:text}`R2` as shown in the figure below. # # ```{figure} ../results/alpha-tuning.png # --- # height: 400px # name: alpha-tuning # --- # Hyperparameter searching: the model is well trained when the hyperparameter, alpha, equals to 0.091. 
With a greater alpha the model is under-fitted, whereas the model is over-fitted if alpha becomes smaller than 0.091.
# ```
# Applying the fitted model to the test data set, we get a testing $R^2$ of {glue:text}`R2_test`.<br>
# After identifying the most important features, we built multiple linear regression models with the annual salary as our response variable and the following predictors: years of coding experience, programming languages used, education level, and role. Since our target is a continuous variable, regression made sense here.<br>
#
# We carried out hyper-parameter tuning via cross-validation with `RandomizedSearchCV`. This allowed us to find optimal parameters which improved our validation score from 67% to 72%. We then evaluated the final model on our test data (20% of the survey data), and it performed well, with a test $R^2$ of 71%. As you can see in Fig 8, the model slightly under- or over-predicts in places, but the fit seems to be good. This is a decent score that indicates that our model generalizes well enough and should perform well on unseen examples.
#
# ```{figure} ../results/test_data_result.png
# ---
# height: 400px
# name: test_data_result
# ---
# Predicted salary vs. observed salary: the fitted model can fairly predict the salary.
# ```
# In the future, we plan to make two important changes: exploring other explanatory variables that might give us a better score, and including the United States in our model. In order to identify the best features that contribute to the prediction, we will include all the sensible columns in the original survey, and perform model and feature selection. We hope that this will help us discover more features that are important for predicting the annual compensation of tech employees.
# ## References
#
# ```{bibliography}
# :style: unsrtalpha
# :all:
#
# ```
docs/_build/jupyter_execute/report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

# Force CPU inference: hide all GPUs from TensorFlow before it initialises.
os.environ['CUDA_VISIBLE_DEVICES'] = ''

import tensorflow as tf
import tensorflow_hub as hub
from tokenizers import BertWordPieceTokenizer


def sample(gpt, sentence, number=1, length=20):
    """Generate `number` continuations of `sentence` with the exported GPT model.

    Each continuation extends the prompt by up to `length` tokens. Encoding and
    decoding go through the module-level `tokenizer`.
    """
    prompt_ids = tokenizer.encode(sentence).ids
    # One identical prompt row per requested sample.
    batch = tf.constant([prompt_ids] * number, dtype=tf.int64)
    max_tokens = tf.constant(length, dtype=tf.int64)
    generated = gpt.signatures['serving_default'](inp=batch, length=max_tokens)['output_0']
    texts = []
    for token_ids in generated.numpy():
        # WordPiece decoding inserts spaces between tokens; drop them for Chinese text.
        texts.append(tokenizer.decode(token_ids).replace(' ', ''))
    return texts


gpt = hub.load('./gpt_model_tf2/')
tokenizer = BertWordPieceTokenizer('./clue-vocab.txt',
                                   lowercase=True,
                                   add_special_tokens=False)

# %%time
ret = sample(gpt, '今天天气不错', 3, 50)
for s in ret:
    print(s)
    print()
tf2gpt/predict.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Titanic survival prediction
#
# EDA on the Kaggle Titanic data, feature engineering on the combined
# train+test frame, then a random forest with optional grid search.

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
# %matplotlib inline
matplotlib.style.use('ggplot')

# +
data = pd.read_csv('train.csv')
data.head()
#data.describe()

# +
# Visualising survival based on gender
survived_sex = data[data['Survived'] == 1]['Sex'].value_counts()
dead_sex = data[data['Survived'] == 0]['Sex'].value_counts()
df = pd.DataFrame([survived_sex, dead_sex])
df.index = ['Survived', 'Dead']
df.plot(kind='bar', stacked=True, figsize=(15, 8))
# -

# Survival based on Age
figure = plt.figure(figsize=(15, 8))
plt.hist([data[data['Survived'] == 1]['Age'], data[data['Survived'] == 0]['Age']],
         stacked=True, color=['g', 'r'], label=['Survived', 'Dead'])
plt.xlabel("Age")
plt.ylabel("Number of passengers")
plt.legend()

# +
# Survival based on fare
figure = plt.figure(figsize=(15, 8))
plt.hist([data[data['Survived'] == 1]['Fare'], data[data['Survived'] == 0]['Fare']],
         stacked=True, color=['g', 'r'], bins=70, label=['Survived', 'Dead'])
plt.xlabel('Fare')
plt.ylabel('Number of passengers')
plt.legend()
# -

data_test = pd.read_csv('test.csv')
data_test.head()


def status(feature):
    """Print a short progress marker after each processing step."""
    print("Processing " + feature + ": OK")


def combined_data():
    """Concatenate train and test (without the target) for joint feature engineering."""
    train = pd.read_csv('train.csv')
    test = pd.read_csv('test.csv')
    # axis is passed by keyword (the positional form is deprecated/removed)
    train.drop('Survived', axis=1, inplace=True)
    # DataFrame.append was removed in pandas 2.0 -> pd.concat; ignore_index
    # replaces the old reset_index / drop-'index' dance.
    combined = pd.concat([train, test], ignore_index=True)
    return combined


combined = combined_data()
combined.head()


def get_titles():
    """Extract the honorific from Name and bucket it into a coarse Title category."""
    combined['Title'] = combined['Name'].map(
        lambda name: name.split(',')[1].split('.')[0].strip())
    title_dictionary = {
        "Capt": "Officer",
        "Col": "Officer",
        "Major": "Officer",
        "Jonkheer": "Royalty",
        "Don": "Royalty",
        "Sir": "Royalty",
        "Dr": "Officer",
        "Rev": "Officer",
        "the Countess": "Royalty",
        "Dona": "Royalty",
        "Mme": "Mrs",
        "Mlle": "Miss",
        "Ms": "Mrs",
        "Mr": "Mr",
        "Mrs": "Mrs",
        "Miss": "Miss",
        "Master": "Master",
        "Lady": "Royalty"
    }
    combined['Title'] = combined.Title.map(title_dictionary)


get_titles()
combined.head()

# +
# Median Age (and other numeric columns) per (Sex, Pclass, Title), computed
# separately on the train and test rows; numeric_only skips string columns.
grouped_train = combined.head(891).groupby(['Sex', 'Pclass', 'Title'])
grouped_median_train = grouped_train.median(numeric_only=True)

grouped_test = combined.iloc[891:].groupby(['Sex', 'Pclass', 'Title'])
grouped_median_test = grouped_test.median(numeric_only=True)
# -

grouped_median_train

grouped_median_test


def process_age():
    """Fill missing Age values with the (Sex, Pclass, Title) group median."""
    global combined

    def fill_age(row, grouped_median):
        # One indexed lookup replaces the original 30-line if/elif chain.
        try:
            return grouped_median.loc[row['Sex'], row['Pclass'], row['Title']]['Age']
        except KeyError:
            # combination absent from the median table (the original chain
            # implicitly returned None here) -> leave the value missing
            return np.nan

    # BUG FIX: the original assigned to combined.head(891).Age and
    # combined.iloc[891:].Age, i.e. to temporary copies, so the imputed
    # ages were silently thrown away. Assign through .loc instead
    # (index is a clean RangeIndex: rows 0..890 are train, 891.. are test).
    combined.loc[:890, 'Age'] = combined.loc[:890].apply(
        lambda r: fill_age(r, grouped_median_train) if np.isnan(r['Age']) else r['Age'],
        axis=1)
    combined.loc[891:, 'Age'] = combined.loc[891:].apply(
        lambda r: fill_age(r, grouped_median_test) if np.isnan(r['Age']) else r['Age'],
        axis=1)
    status('age')


process_age()
combined.info()


# Add dummy titles viz. Title_Master, Title_Miss etc.
def process_names():
    """Replace Name/Title with one dummy column per Title category."""
    global combined
    combined.drop('Name', axis=1, inplace=True)
    title_dummies = pd.get_dummies(combined['Title'], prefix='Title')
    combined = pd.concat([combined, title_dummies], axis=1)
    combined.drop('Title', axis=1, inplace=True)
    status('names')


process_names()
combined.head()


# Replace the missing fare values with the mean fare of the matching split
def process_fares():
    global combined
    # BUG FIX: fillna(..., inplace=True) on combined.head(891).Fare modified
    # a temporary copy and never reached `combined`; assign through .loc.
    combined.loc[:890, 'Fare'] = combined.loc[:890, 'Fare'].fillna(
        combined.loc[:890, 'Fare'].mean())
    combined.loc[891:, 'Fare'] = combined.loc[891:, 'Fare'].fillna(
        combined.loc[891:, 'Fare'].mean())
    status('fare')


process_fares()


def process_embarked():
    """Fill the two missing Embarked values with the mode ('S') and dummy-encode."""
    global combined
    combined['Embarked'] = combined['Embarked'].fillna('S')
    embarked_dummies = pd.get_dummies(combined['Embarked'], prefix='Embarked')
    combined = pd.concat([combined, embarked_dummies], axis=1)
    combined.drop('Embarked', axis=1, inplace=True)
    status('embarked')


# BUG FIX: the original defined process_embarked but never called it,
# leaving Embarked as a raw (and partly missing) string column.
process_embarked()


def process_cabin():
    """Keep only the cabin deck letter ('U' for unknown) and dummy-encode it."""
    global combined
    combined['Cabin'] = combined['Cabin'].fillna('U')
    # mapping each Cabin value to its deck letter
    combined['Cabin'] = combined['Cabin'].map(lambda c: c[0])
    cabin_dummies = pd.get_dummies(combined['Cabin'], prefix='Cabin')
    combined = pd.concat([combined, cabin_dummies], axis=1)
    combined.drop('Cabin', axis=1, inplace=True)
    status('cabin')


process_cabin()
combined.head()


def process_sex():
    """Map Sex to a numeric flag (male=1, female=0)."""
    global combined
    combined['Sex'] = combined['Sex'].map({'male': 1, 'female': 0})
    status('sex')


# BUG FIX: like process_embarked, this was defined but never invoked.
process_sex()


def process_pclass():
    """Dummy-encode the three passenger classes."""
    global combined
    pclass_dummies = pd.get_dummies(combined['Pclass'], prefix="Pclass")
    combined = pd.concat([combined, pclass_dummies], axis=1)
    combined.drop('Pclass', axis=1, inplace=True)
    status('pclass')


process_pclass()


def process_ticket():
    """Reduce Ticket to its alphabetic prefix ('XXX' when purely numeric), then dummy-encode."""
    global combined

    def clean_ticket(ticket):
        parts = ticket.replace('.', '').replace('/', '').split()
        prefixes = [p.strip() for p in parts if not p.strip().isdigit()]
        return prefixes[0] if prefixes else 'XXX'

    combined['Ticket'] = combined['Ticket'].map(clean_ticket)
    tickets_dummies = pd.get_dummies(combined['Ticket'], prefix='Ticket')
    combined = pd.concat([combined, tickets_dummies], axis=1)
    combined.drop('Ticket', axis=1, inplace=True)
    status('ticket')


process_ticket()
combined.head()

from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
# sklearn.cross_validation and sklearn.grid_search were removed in
# scikit-learn 0.20 (contents moved to sklearn.model_selection), and
# sklearn.ensemble.gradient_boosting is a private path - import publicly.
from sklearn.model_selection import StratifiedKFold, GridSearchCV, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier


def compute_score(clf, X, y, scoring='accuracy'):
    """Mean 5-fold cross-validated score of clf on (X, y)."""
    xval = cross_val_score(clf, X, y, cv=5, scoring=scoring)
    return np.mean(xval)


def recover_train_test_target():
    """Split the engineered `combined` frame back into train / test / target."""
    global combined
    train0 = pd.read_csv('train.csv')
    targets = train0.Survived
    # .copy() so later column assignments do not write into a view of `combined`
    train = combined.head(891).copy()
    test = combined.iloc[891:].copy()
    return train, test, targets


train, test, targets = recover_train_test_target()

# +
# Encode any remaining string column, since fit() does not accept strings.
from sklearn.preprocessing import LabelEncoder

for column in train.columns:
    # BUG FIX: the original compared the dtype against type(object) (a typo
    # that only worked by accident) and then encoded twice - factorize()
    # followed by LabelEncoder - where a single encoder suffices.
    if train[column].dtype == object:
        train[column] = LabelEncoder().fit_transform(train[column].astype(str))

for column in test.columns:
    if test[column].dtype == object:
        test[column] = LabelEncoder().fit_transform(test[column].astype(str))

# Tree-based estimators can compute feature importances, which in turn
# can be used to discard irrelevant features.
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

clf = RandomForestClassifier(n_estimators=50, max_features='sqrt')
clf.fit(train, targets)
# -

features = pd.DataFrame()
features['feature'] = train.columns
features['importance'] = clf.feature_importances_
features.sort_values(by=['importance'], ascending=True, inplace=True)
features.set_index('feature', inplace=True)

# plot the importance of each feature
features.plot(kind='barh', figsize=(20, 20))

# Keep only the features the forest found informative
model = SelectFromModel(clf, prefit=True)
train_red = model.transform(train)
test_red = model.transform(test)

# +
run_gs = False

if run_gs:
    parameter_grid = {
        'max_depth': [4, 6, 8],
        'n_estimators': [50, 10],
        # 'auto' was removed in scikit-learn 1.1; None means "all features"
        'max_features': ['sqrt', 'log2', None],
        # min_samples_split must be >= 2 in current scikit-learn
        'min_samples_split': [2, 3, 10],
        'min_samples_leaf': [1, 3, 10],
        'bootstrap': [True, False],
    }
    forest = RandomForestClassifier()
    # modern StratifiedKFold API: targets go to fit()/split(), not the constructor
    cross_validation = StratifiedKFold(n_splits=5)

    grid_search = GridSearchCV(forest,
                               scoring='accuracy',
                               param_grid=parameter_grid,
                               cv=cross_validation)

    grid_search.fit(train, targets)
    model = grid_search
    parameters = grid_search.best_params_

    print('Best score: {}'.format(grid_search.best_score_))
    print('Best parameters: {}'.format(grid_search.best_params_))
else:
    parameters = {'bootstrap': False, 'min_samples_leaf': 3, 'n_estimators': 50,
                  'min_samples_split': 10, 'max_features': 'sqrt', 'max_depth': 6}
    model = RandomForestClassifier(**parameters)
    model.fit(train, targets)
# -

compute_score(model, train, targets, scoring='accuracy')

output = model.predict(test).astype(int)
df_output = pd.DataFrame()
aux = pd.read_csv('test.csv')
df_output['PassengerId'] = aux['PassengerId']
df_output['Survived'] = output
df_output[['PassengerId', 'Survived']].to_csv('../../data/output.csv', index=False)
Titanic Kaggle challenge_80.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Day 1. Chapter 6. R Programming
#
# ## Lesson 26. Logical operators
#
# Logical operators let us combine several comparison operators. The logical
# operators we will get to know are the following:
#
# * AND - &
# * OR  - |
# * NOT - !
#
# The best way to understand them is to look at a few examples:

# Let's start with a variable x
x <- 100

# Now we want to know whether x is smaller than 200 AND greater than 50:
x < 200

x > 50

x < 200 & x > 50

# For readability we can also add parentheses; they also guarantee the
# intended order of evaluation:
(x < 200 | x > 100) & x>50

(x < 200) & (x>5) & (x == 100)

# We can picture this as a series of boolean values: TRUE & TRUE & TRUE.
# A single TRUE comes back only when every value is TRUE. Here is an example
# where that is not the case:
x==2 & x > 1

# We get FALSE: x > 1 is TRUE, but not both comparisons are. With AND every
# part must be TRUE; if one TRUE is enough, we use OR instead:
x==2 | x > 1

# So only one side *or* the other has to be TRUE:
x==1 | x==12

# ## NOT!
#
# You can think of NOT as flipping whatever logical value follows it. An example:
(10==1)

!(10==1)

10!=1

# It can also be applied several times in a row (unusual, but possible)
!!(10==1)

10==1

# # Use-case example
#
# Here is a short example of a real use case for these operators. Consider the
# following data frame:
df <- mtcars

df

# It holds data for a range of car models (and ships with R). Let's now select
# all models with at least 20 MPG (11.76 L/100km):
df[df['mpg'] >= 20,] # mind the comma in the index

# Great! Now we can combine logical operators: let's look at all cars with at
# least 20 MPG and more than 100 hp!
df[(df['mpg'] >= 20) & (df['hp'] > 100),]

# Hopefully this gave you an impression of how useful these logical operators are!

# ## Logical operators with vectors
#
# We have two options for comparing vectors with logical operators: element by
# element, or only the first element of each vector, which guarantees a single
# logical value as the result. What exactly that is about is not so important
# for the moment; we will come back to it in a later lesson.
tf <- c(TRUE,FALSE)
tt <- c(TRUE,TRUE)
ft <- c(FALSE, TRUE)

tt & tf

tt | tf

# To compare only the first elements we use && or ||
# (NOTE(review): since R 4.3, && and || raise an error when given vectors of
# length > 1 - confirm the target R version before running this section)
ft && tt
ft & tt

tt && tf
tt & tf

tt || tf
tt | tf

tt || ft
tt | ft

# Congratulations! You have finished lesson 26!
1.6 R Programming/de-DE/1.6.26 R - Logical Operators.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="WskFs9pW1Yt4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620479433622, "user_tz": -540, "elapsed": 4223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghmk6UcXaYKgj2inQwOKS6s49TntUZgdmWo7mep=s64", "userId": "14225296630148170886"}} outputId="896a0826-bc8c-4284-b54b-06facc9615c7"
from collections import Counter


def count_congruent_pairs(values, modulus=200):
    """Count unordered index pairs (i, j), i < j, with values[i] ≡ values[j] (mod `modulus`).

    The original compared every pair, which is O(N^2). Two values differ by a
    multiple of `modulus` exactly when they share a residue, so grouping by
    residue and summing C(k, 2) per residue class gives the same count in O(N).
    """
    residue_counts = Counter(v % modulus for v in values)
    return sum(k * (k - 1) // 2 for k in residue_counts.values())


# The __main__ guard does not change notebook behaviour (a notebook runs as
# __main__) but keeps stdin reading out of imports.
if __name__ == "__main__":
    N = int(input())
    A = [int(token) for token in input().split(' ')]
    # only the first N values participate, matching the original loop bounds
    print(count_congruent_pairs(A[:N]))
AtCoder/20210508/Q3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 5 - Variational quantum eigensolver # # # ## Historical background # # During the last decade, quantum computers matured quickly and began to realize Feynman's initial dream of a computing system that could simulate the laws of nature in a quantum way. A 2014 paper first authored by <NAME> introduced the **Variational Quantum Eigensolver (VQE)**, an algorithm meant for finding the ground state energy (lowest energy) of a molecule, with much shallower circuits than other approaches.[1] And, in 2017, the IBM Quantum team used the VQE algorithm to simulate the ground state energy of the lithium hydride molecule.[2] # # VQE's magic comes from outsourcing some of the problem's processing workload to a classical computer. The algorithm starts with a parameterized quantum circuit called an ansatz (a best guess) then finds the optimal parameters for this circuit using a classical optimizer. The VQE's advantage over classical algorithms comes from the fact that a quantum processing unit can represent and store the problem's exact wavefunction, an exponentially hard problem for a classical computer. # # This exercise 5 allows you to realize Feynman's dream yourself, setting up a variational quantum eigensolver to determine the ground state and the energy of a molecule. This is interesting because the ground state can be used to calculate various molecular properties, for instance the exact forces on nuclei than can serve to run molecular dynamics simulations to explore what happens in chemical systems with time.[3] # # # ### References # # 1. Peruzzo, Alberto, et al. "A variational eigenvalue solver on a photonic quantum processor." Nature communications 5.1 (2014): 1-7. # 2. Kandala, Abhinav, et al. 
"Hardware-efficient variational quantum eigensolver for small molecules and quantum magnets." Nature 549.7671 (2017): 242-246. # 3. Sokolov, <NAME>., et al. "Microcanonical and finite-temperature ab initio molecular dynamics simulations on quantum computers." Physical Review Research 3.1 (2021): 013125. # # ## Introduction # # For the implementation of VQE, you will be able to make choices on how you want to compose your simulation, in particular focusing on the ansatz quantum circuits. # This is motivated by the fact that one of the important tasks when running VQE on noisy quantum computers is to reduce the loss of fidelity (which introduces errors) by finding the most compact quantum circuit capable of representing the ground state. # Practically, this entails to minimizing the number of two-qubit gates (e.g. CNOTs) while not loosing accuracy. # # <div class="alert alert-block alert-success"> # # <b>Goal</b> # # Find the shortest ansatz circuits for representing accurately the ground state of given problems. Be creative! # # <b>Plan</b> # # First you will learn how to compose a VQE simulation for the smallest molecule and then apply what you have learned to a case of a larger one. # # **1. Tutorial - VQE for H$_2$:** familiarize yourself with VQE and select the best combination of ansatz/classical optimizer by running statevector simulations. # # **2. Final Challenge - VQE for LiH:** perform similar investigation as in the first part but restricting to statevector simulator only. Use the qubit number reduction schemes available in Qiskit and find the optimal circuit for this larger system. Optimize the circuit and use your imagination to find ways to select the best building blocks of parameterized circuits and compose them to construct the most compact ansatz circuit for the ground state, better than the ones already available in Qiskit. 
# # </div> # # # <div class="alert alert-block alert-danger"> # # Below is an introduction to the theory behind VQE simulations. You don't have to understand the whole thing before moving on. Don't be scared! # # </div> # # # ## Theory # # Here below is the general workflow representing how the molecular simulations using VQE are performed on quantum computers. # # <img src="resources/workflow.png" width=800 height= 1400/> # # The core idea hybrid quantum-classical approach is to outsource to **CPU (classical processing unit)** and **QPU (quantum processing unit)** the parts that they can do best. The CPU takes care of listing the terms that need to be measured to compute the energy and also optimizing the circuit parameters. The QPU implements a quantum circuit representing the quantum state of a system and measures the energy. Some more details are given below: # # **CPU** can compute efficiently the energies associated to electron hopping and interactions (one-/two-body integrals by means of a Hartree-Fock calculation) that serve to represent the total energy operator, Hamiltonian. The [Hartree–Fock (HF) method](https://en.wikipedia.org/wiki/Hartree%E2%80%93Fock_method#:~:text=In%20computational%20physics%20and%20chemistry,system%20in%20a%20stationary%20state.) efficiently computes an approximate grounds state wavefunction by assuming that the latter can be represented by a single Slater determinant (e.g. for H$_2$ molecule in STO-3G basis with 4 spin-orbitals and qubits, $|\Psi_{HF} \rangle = |0101 \rangle$ where electrons occupy the lowest energy spin-orbitals). What QPU does later in VQE is finding a quantum state (corresponding circuit and its parameters) that can also represent other states associated missing electronic correlations (i.e. $\sum_i c_i |i\rangle$ states in $|\Psi \rangle = c_{HF}|\Psi_{HF} \rangle + \sum_i c_i |i\rangle $ where $i$ is a bitstring). 
# # After a HF calculation, operators in the Hamiltonian are mapped to measurements on a QPU using fermion-to-qubit transformations (see Hamiltonian section below). One can further analyze the properties of the system to reduce the number of qubits or shorten the ansatz circuit: # # - For Z2 symmetries and two-qubit reduction, see [Bravyi *et al*, 2017](https://arxiv.org/abs/1701.08213v1). # - For entanglement forging, see [Eddins *et al.*, 2021](https://arxiv.org/abs/2104.10220v1). # - For the adaptive ansatz see, [Grimsley *et al.*,2018](https://arxiv.org/abs/1812.11173v2), [Rattew *et al.*,2019](https://arxiv.org/abs/1910.09694), [Tang *et al.*,2019](https://arxiv.org/abs/1911.10205). You may use the ideas found in those works to find ways to shorten the quantum circuits. # # **QPU** implements quantum circuits (see Ansatzes section below), parameterized by angles $\vec\theta$, that would represent the ground state wavefunction by placing various single qubit rotations and entanglers (e.g. two-qubit gates). The quantum advantage lies in the fact that QPU can efficiently represent and store the exact wavefunction, which becomes intractable on a classical computer for systems that have more than a few atoms. Finally, QPU measures the operators of choice (e.g. ones representing a Hamiltonian). # # Below we go slightly more in mathematical details of each component of the VQE algorithm. It might be also helpful if you watch our [video episode about VQE](https://www.youtube.com/watch?v=Z-A6G0WVI9w). # # # ### Hamiltonian # # Here we explain how we obtain the operators that we need to measure to obtain the energy of a given system. 
# These terms are included in the molecular Hamiltonian defined as: # $$ # \begin{aligned} # \hat{H} &=\sum_{r s} h_{r s} \hat{a}_{r}^{\dagger} \hat{a}_{s} \\ # &+\frac{1}{2} \sum_{p q r s} g_{p q r s} \hat{a}_{p}^{\dagger} \hat{a}_{q}^{\dagger} \hat{a}_{r} \hat{a}_{s}+E_{N N} # \end{aligned} # $$ # with # $$ # h_{p q}=\int \phi_{p}^{*}(r)\left(-\frac{1}{2} \nabla^{2}-\sum_{I} \frac{Z_{I}}{R_{I}-r}\right) \phi_{q}(r) # $$ # $$ # g_{p q r s}=\int \frac{\phi_{p}^{*}\left(r_{1}\right) \phi_{q}^{*}\left(r_{2}\right) \phi_{r}\left(r_{2}\right) \phi_{s}\left(r_{1}\right)}{\left|r_{1}-r_{2}\right|} # $$ # # where the $h_{r s}$ and $g_{p q r s}$ are the one-/two-body integrals (using the Hartree-Fock method) and $E_{N N}$ the nuclear repulsion energy. # The one-body integrals represent the kinetic energy of the electrons and their interaction with nuclei. # The two-body integrals represent the electron-electron interaction. # The $\hat{a}_{r}^{\dagger}, \hat{a}_{r}$ operators represent creation and annihilation of electron in spin-orbital $r$ and require mappings to operators, so that we can measure them on a quantum computer. # Note that VQE minimizes the electronic energy so you have to retrieve and add the nuclear repulsion energy $E_{NN}$ to compute the total energy. # # # # So, for every non-zero matrix element in the $ h_{r s}$ and $g_{p q r s}$ tensors, we can construct corresponding Pauli string (tensor product of Pauli operators) with the following fermion-to-qubit transformation. # For instance, in Jordan-Wigner mapping for an orbital $r = 3$, we obtain the following Pauli string: # $$ # \hat a_{3}^{\dagger}= \hat \sigma_z \otimes \hat \sigma_z \otimes\left(\frac{ \hat \sigma_x-i \hat \sigma_y}{2}\right) \otimes 1 \otimes \cdots \otimes 1 # $$ # where $\hat \sigma_x, \hat \sigma_y, \hat \sigma_z$ are the well-known Pauli operators. The tensor products of $\hat \sigma_z$ operators are placed to enforce the fermionic anti-commutation relations. 
# A representation of the Jordan-Wigner mapping between the 14 spin-orbitals of a water molecule and some 14 qubits is given below: # # <img src="resources/mapping.png" width=600 height= 1200/> # # # Then, one simply replaces the one-/two-body excitations (e.g. $\hat{a}_{r}^{\dagger} \hat{a}_{s}$, $\hat{a}_{p}^{\dagger} \hat{a}_{q}^{\dagger} \hat{a}_{r} \hat{a}_{s}$) in the Hamiltonian by corresponding Pauli strings (i.e. $\hat{P}_i$, see picture above). The resulting operator set is ready to be measured on the QPU. # For additional details see [Seeley *et al.*, 2012](https://arxiv.org/abs/1208.5986v1). # # ### Ansatzes # # There are mainly 2 types of ansatzes you can use for chemical problems. # # - **q-UCC ansatzes** are physically inspired, and roughly map the electron excitations to quantum circuits. The q-UCCSD ansatz (`UCCSD`in Qiskit) possess all possible single and double electron excitations. The paired double q-pUCCD (`PUCCD`) and singlet q-UCCD0 (`SUCCD`) just consider a subset of such excitations (meaning significantly shorter circuits) and have proved to provide good results for dissociation profiles. For instance, q-pUCCD doesn't have single excitations and the double excitations are paired as in the image below. # - **Heuristic ansatzes (`TwoLocal`)** were invented to shorten the circuit depth but still be able to represent the ground state. # As in the figure below, the R gates represent the parametrized single qubit rotations and $U_{CNOT}$ the entanglers (two-qubit gates). The idea is that after repeating certain $D$-times the same block (with independent parameters) one can reach the ground state. # # For additional details refer to [Sokolov *et al.* (q-UCC ansatzes)](https://arxiv.org/abs/1911.10864v2) and [Barkoutsos *et al.* (Heuristic ansatzes)](https://arxiv.org/pdf/1805.04340.pdf). 
# # <img src="resources/ansatz.png" width=700 height= 1200/> # # # # ### VQE # # Given a Hermitian operator $\hat H$ with an unknown minimum eigenvalue $E_{min}$, associated with the eigenstate $|\psi_{min}\rangle$, VQE provides an estimate $E_{\theta}$, bounded by $E_{min}$: # # \begin{align*} # E_{min} \le E_{\theta} \equiv \langle \psi(\theta) |\hat H|\psi(\theta) \rangle # \end{align*} # # where $|\psi(\theta)\rangle$ is the trial state associated with $E_{\theta}$. By applying a parameterized circuit, represented by $U(\theta)$, to some arbitrary starting state $|\psi\rangle$, the algorithm obtains an estimate $U(\theta)|\psi\rangle \equiv |\psi(\theta)\rangle$ on $|\psi_{min}\rangle$. The estimate is iteratively optimized by a classical optimizer by changing the parameter $\theta$ and minimizing the expectation value of $\langle \psi(\theta) |\hat H|\psi(\theta) \rangle$. # # As applications of VQE, there are possibilities in molecular dynamics simulations, see [Sokolov *et al.*, 2021](https://arxiv.org/abs/2008.08144v1), and excited states calculations, see [Ollitrault *et al.*, 2019](https://arxiv.org/abs/1910.12890) to name a few. # # <div class="alert alert-block alert-danger"> # # <b> References for additional details</b> # # For the qiskit-nature tutorial that implements this algorithm see [here](https://qiskit.org/documentation/nature/tutorials/01_electronic_structure.html) # but this won't be sufficient and you might want to look on the [first page of github repository](https://github.com/Qiskit/qiskit-nature) and the [test folder](https://github.com/Qiskit/qiskit-nature/tree/main/test) containing tests that are written for each component, they provide the base code for the use of each functionality. # # </div> # ## Part 1: Tutorial - VQE for H$_2$ molecule # # # # In this part, you will simulate H$_2$ molecule using the STO-3G basis with the PySCF driver and Jordan-Wigner mapping. 
# We will guide you through the following parts so then you can tackle harder problems. # # # # #### 1. Driver # # The interfaces to the classical chemistry codes that are available in Qiskit are called drivers. # We have for example `PSI4Driver`, `PyQuanteDriver`, `PySCFDriver` are available. # # By running a driver (Hartree-Fock calculation for a given basis set and molecular geometry), in the cell below, we obtain all the necessary information about our molecule to apply then a quantum algorithm. # + from qiskit_nature.drivers import PySCFDriver molecule = "H .0 .0 .0; H .0 .0 0.739" driver = PySCFDriver(atom=molecule) qmolecule = driver.run() # - # <div class="alert alert-block alert-danger"> # # <b> Tutorial questions 1</b> # # Look into the attributes of `qmolecule` and answer the questions below. # # # 1. We need to know the basic characteristics of our molecule. What is the total number of electrons in your system? # 2. What is the number of molecular orbitals? # 3. What is the number of spin-orbitals? # 3. How many qubits would you need to simulate this molecule with Jordan-Wigner mapping? # 5. What is the value of the nuclear repulsion energy? # # You can find the answers at the end of this notebook. # </div> # + # WRITE YOUR CODE BETWEEN THESE LINES - START n_el = qmolecule.num_alpha + qmolecule.num_beta n_mo = qmolecule.num_molecular_orbitals n_so = 2 * qmolecule.num_molecular_orbitals n_q = 2* qmolecule.num_molecular_orbitals e_nn = qmolecule.nuclear_repulsion_energy print(n_el,n_mo,n_so,n_q,e_nn) # WRITE YOUR CODE BETWEEN THESE LINES - END # - # #### 2. Electronic structure problem # # You can then create an `ElectronicStructureProblem` that can produce the list of fermionic operators before mapping them to qubits (Pauli strings). 
# + from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem from qiskit_nature.transformers import FreezeCoreTransformer,ActiveSpaceTransformer tr = FreezeCoreTransformer(True,[3,4]) problem = ElectronicStructureProblem(driver,[tr]) # Generate the second-quantized operators second_q_ops = problem.second_q_ops() # Hamiltonian main_op = second_q_ops[0] #rint(second_q_ops[0]) # - # #### 3. QubitConverter # # Allows to define the mapping that you will use in the simulation. You can try different mapping but # we will stick to `JordanWignerMapper` as allows a simple correspondence: a qubit represents a spin-orbital in the molecule. # + from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter # Setup the mapper and qubit converter mapper_type = 'ParityMapper' if mapper_type == 'ParityMapper': mapper = ParityMapper() elif mapper_type == 'JordanWignerMapper': mapper = JordanWignerMapper() elif mapper_type == 'BravyiKitaevMapper': mapper = BravyiKitaevMapper() converter = QubitConverter(mapper=mapper, two_qubit_reduction=True, z2symmetry_reduction=[1,1]) # The fermionic operators are mapped to qubit operators num_particles = (problem.molecule_data_transformed.num_alpha, problem.molecule_data_transformed.num_beta) qubit_op = converter.convert(main_op, num_particles=num_particles) # - # #### 4. Initial state # As we described in the Theory section, a good initial state in chemistry is the HF state (i.e. $|\Psi_{HF} \rangle = |0101 \rangle$). 
We can initialize it as follows: # + from qiskit_nature.circuit.library import HartreeFock num_particles = (problem.molecule_data_transformed.num_alpha, problem.molecule_data_transformed.num_beta) num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals init_state = HartreeFock(num_spin_orbitals, num_particles, converter) print(init_state) # - # #### 5. Ansatz # One of the most important choices is the quantum circuit that you choose to approximate your ground state. # Here is the example of qiskit circuit library that contains many possibilities for making your own circuit. # + from qiskit.circuit.library import TwoLocal from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD # Choose the ansatz ansatz_type = "TwoLocal" # Parameters for q-UCC antatze num_particles = (problem.molecule_data_transformed.num_alpha, problem.molecule_data_transformed.num_beta) num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals # Put arguments for twolocal if ansatz_type == "TwoLocal": # Single qubit rotations that are placed on all qubits with independent parameters rotation_blocks = ['ry', 'rz'] # Entangling gates entanglement_blocks = 'cx' # How the qubits are entangled entanglement = 'linear' # Repetitions of rotation_blocks + entanglement_blocks with independent parameters repetitions = 1 # Skip the final rotation_blocks layer skip_final_rotation_layer = False ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks, entanglement_blocks, reps=repetitions, entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer) #ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks, reps=repetitions, # entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer) # Add the initial state ansatz.compose(init_state, front=True, inplace=True) elif ansatz_type == "UCCSD": ansatz = UCCSD(converter,num_particles,num_spin_orbitals,initial_state = init_state) elif ansatz_type == "PUCCD": ansatz = 
PUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state) elif ansatz_type == "SUCCD": ansatz = SUCCD(converter,num_particles,num_spin_orbitals,initial_state = init_state) elif ansatz_type == "Custom": # Example of how to write your own circuit from qiskit.circuit import Parameter, QuantumCircuit, QuantumRegister # Define the variational parameter theta = Parameter('a') n = qubit_op.num_qubits # Make an empty quantum circuit qc = QuantumCircuit(qubit_op.num_qubits) qubit_label = 0 # Place a Hadamard gate qc.h(qubit_label) # Place a CNOT ladder for i in range(n-1): qc.cx(i, i+1) # Visual separator qc.barrier() # rz rotations on all qubits qc.rz(theta, range(n)) ansatz = qc ansatz.compose(init_state, front=True, inplace=True) print(ansatz) # - # #### 6. Backend # This is where you specify the simulator or device where you want to run your algorithm. # We will focus on the `statevector_simulator` in this challenge. # from qiskit import Aer backend = Aer.get_backend('statevector_simulator') # #### 7. Optimizer # # The optimizer guides the evolution of the parameters of the ansatz so it is very important to investigate the energy convergence as it would define the number of measurements that have to be performed on the QPU. # A clever choice might reduce drastically the number of needed energy evaluations. # + from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP optimizer_type = 'COBYLA' # You may want to tune the parameters # of each optimizer, here the defaults are used if optimizer_type == 'COBYLA': optimizer = COBYLA(maxiter=10000) elif optimizer_type == 'L_BFGS_B': optimizer = L_BFGS_B(maxfun=500) elif optimizer_type == 'SPSA': optimizer = SPSA(maxiter=500) elif optimizer_type == 'SLSQP': optimizer = SLSQP(maxiter=500) # - # #### 8. Exact eigensolver # For learning purposes, we can solve the problem exactly with the exact diagonalization of the Hamiltonian matrix so we know where to aim with VQE. 
# Of course, the dimensions of this matrix scale exponentially in the number of molecular orbitals so you can try doing this for a large molecule of your choice and see how slow this becomes.
# For very large systems you would run out of memory trying to store their wavefunctions.

# +
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
import numpy as np


def exact_diagonalizer(problem, converter):
    """Solve the electronic-structure problem by exact (NumPy) diagonalization.

    Parameters
    ----------
    problem : ElectronicStructureProblem
        The second-quantized problem to solve.
    converter : QubitConverter
        Mapper/reduction used to translate the fermionic operators to qubits.

    Returns
    -------
    The ground-state result; ``result.eigenenergies[0]`` is the exact
    electronic ground-state energy used as the VQE reference below.
    """
    solver = NumPyMinimumEigensolverFactory()
    calc = GroundStateEigensolver(converter, solver)
    result = calc.solve(problem)
    return result


result_exact = exact_diagonalizer(problem, converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact electronic energy", exact_energy)
print(result_exact)

# The targeted electronic energy for H2 is -1.85336 Ha
# Check with your VQE result.
# -

# #### 9. VQE and initial parameters for the ansatz
# Now we can import the VQE class and run the algorithm.

# +
from qiskit.algorithms import VQE
from IPython.display import display, clear_output


# Print and save the data in lists
def callback(eval_count, parameters, mean, std):
    """VQE iteration callback: display progress and record the optimizer trace.

    Appends to the module-level ``counts``/``values``/``params``/``deviation``
    lists defined below (they exist by the time VQE first invokes this).
    """
    # Overwrites the same line when printing
    display("Evaluation: {}, Energy: {}, Std: {}".format(eval_count, mean, std))
    clear_output(wait=True)
    counts.append(eval_count)
    values.append(mean)
    params.append(parameters)
    deviation.append(std)


counts = []
values = []
params = []
deviation = []

# Set initial parameters of the ansatz
# We choose a fixed small displacement
# So all participants start from similar starting point
try:
    # TwoLocal-style ansatzes expose ``ordered_parameters``...
    initial_point = [0.01] * len(ansatz.ordered_parameters)
except AttributeError:
    # ...q-UCC / custom circuits only expose ``num_parameters``.
    # (Was a bare ``except:``, which would also swallow e.g. KeyboardInterrupt.)
    initial_point = [0.01] * ansatz.num_parameters

algorithm = VQE(ansatz,
                optimizer=optimizer,
                quantum_instance=backend,
                callback=callback,
                initial_point=initial_point)

result = algorithm.compute_minimum_eigenvalue(qubit_op)
print(result)
# -

# #### 9.
Scoring function # We need to judge how good are your VQE simulations, your choice of ansatz/optimizer. # For this, we implemented the following simple scoring function: # # $$ score = N_{CNOT}$$ # # where $N_{CNOT}$ is the number of CNOTs. # But you have to reach the chemical accuracy which is $\delta E_{chem} = 0.004$ Ha $= 4$ mHa, which may be hard to reach depending on the problem. # You have to reach the accuracy we set in a minimal number of CNOTs to win the challenge. # The lower the score the better! # + # Store results in a dictionary from qiskit.transpiler import PassManager from qiskit.transpiler.passes import Unroller # Unroller transpile your circuit into CNOTs and U gates pass_ = Unroller(['u', 'cx']) pm = PassManager(pass_) ansatz_tp = pm.run(ansatz) cnots = ansatz_tp.count_ops()['cx'] score = cnots accuracy_threshold = 4.0 # in mHa energy = result.optimal_value if ansatz_type == "TwoLocal": result_dict = { 'optimizer': optimizer.__class__.__name__, 'mapping': converter.mapper.__class__.__name__, 'ansatz': ansatz.__class__.__name__, 'rotation blocks': rotation_blocks, 'entanglement_blocks': entanglement_blocks, 'entanglement': entanglement, 'repetitions': repetitions, 'skip_final_rotation_layer': skip_final_rotation_layer, 'energy (Ha)': energy, 'error (mHa)': (energy-exact_energy)*1000, 'pass': (energy-exact_energy)*1000 <= accuracy_threshold, '# of parameters': len(result.optimal_point), 'final parameters': result.optimal_point, '# of evaluations': result.optimizer_evals, 'optimizer time': result.optimizer_time, '# of qubits': int(qubit_op.num_qubits), '# of CNOTs': cnots, 'score': score} else: result_dict = { 'optimizer': optimizer.__class__.__name__, 'mapping': converter.mapper.__class__.__name__, 'ansatz': ansatz.__class__.__name__, 'rotation blocks': None, 'entanglement_blocks': None, 'entanglement': None, 'repetitions': None, 'skip_final_rotation_layer': None, 'energy (Ha)': energy, 'error (mHa)': (energy-exact_energy)*1000, 'pass': 
(energy-exact_energy)*1000 <= accuracy_threshold, '# of parameters': len(result.optimal_point), 'final parameters': result.optimal_point, '# of evaluations': result.optimizer_evals, 'optimizer time': result.optimizer_time, '# of qubits': int(qubit_op.num_qubits), '# of CNOTs': cnots, 'score': score} # Plot the results import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1) ax.set_xlabel('Iterations') ax.set_ylabel('Energy') ax.grid() fig.text(0.7, 0.75, f'Energy: {result.optimal_value:.3f}\nScore: {score:.0f}') plt.title(f"{result_dict['optimizer']}-{result_dict['mapping']}\n{result_dict['ansatz']}") ax.plot(counts, values) ax.axhline(exact_energy, linestyle='--') fig_title = f"\ {result_dict['optimizer']}-\ {result_dict['mapping']}-\ {result_dict['ansatz']}-\ Energy({result_dict['energy (Ha)']:.3f})-\ Score({result_dict['score']:.0f})\ .png" fig.savefig(fig_title, dpi=300) # Display and save the data import pandas as pd import os.path filename = 'results_h2.csv' if os.path.isfile(filename): result_df = pd.read_csv(filename) result_df = result_df.append([result_dict]) else: result_df = pd.DataFrame.from_dict([result_dict]) result_df.to_csv(filename) result_df[['optimizer','ansatz', '# of qubits', '# of parameters','rotation blocks', 'entanglement_blocks', 'entanglement', 'repetitions', 'error (mHa)', 'pass', 'score']] # - # <div class="alert alert-block alert-danger"> # # <b>Tutorial questions 2</b> # # Experiment with all the parameters and then: # # 1. Can you find your best (best score) heuristic ansatz (by modifying parameters of `TwoLocal` ansatz) and optimizer? # 2. Can you find your best q-UCC ansatz (choose among `UCCSD, PUCCD or SUCCD` ansatzes) and optimizer? # 3. In the cell where we define the ansatz, # can you modify the `Custom` ansatz by placing gates yourself to write a better circuit than your `TwoLocal` circuit? # # For each question, give `ansatz` objects. 
# Remember, you have to reach the chemical accuracy $|E_{exact} - E_{VQE}| \leq 0.004 $ Ha $= 4$ mHa. # # </div> # # # + # WRITE YOUR CODE BETWEEN THESE LINES - START # WRITE YOUR CODE BETWEEN THESE LINES - END # - # ## Part 2: Final Challenge - VQE for LiH molecule # # # In this part, you will simulate LiH molecule using the STO-3G basis with the PySCF driver. # # </div> # # <div class="alert alert-block alert-success"> # # <b>Goal</b> # # Experiment with all the parameters and then find your best ansatz. You can be as creative as you want! # # For each question, give `ansatz` objects as for Part 1. Your final score will be based only on Part 2. # # </div> # # Be aware that the system is larger now. Work out how many qubits you would need for this system by retrieving the number of spin-orbitals. # # ### Reducing the problem size # # You might want to reduce the number of qubits for your simulation: # - you could freeze the core electrons that do not contribute significantly to chemistry and consider only the valence electrons. Qiskit  already has this functionality implemented. So inspect the different transformers in `qiskit_nature.transformers` and find the one that performs the freeze core approximation. # - you could use `ParityMapper` with `two_qubit_reduction=True` to eliminate 2 qubits. # - you could reduce the number of qubits by inspecting the symmetries of your Hamiltonian. Find a way to use `Z2Symmetries` in Qiskit. # # ### Custom ansatz # # You might want to explore the ideas proposed in [Grimsley *et al.*,2018](https://arxiv.org/abs/1812.11173v2), [<NAME> *et al.*,2019](https://arxiv.org/abs/1911.10205), [Rattew *et al.*,2019](https://arxiv.org/abs/1910.09694), [Tang *et al.*,2019](https://arxiv.org/abs/1911.10205). # You can even get try machine learning algorithms to generate best ansatz circuits. # # ### Setup the simulation # # Let's now run the Hartree-Fock calculation and the rest is up to you! 
# # <div class="alert alert-block alert-danger"> # # <b>Attention</b> # # We give below the `driver`, the `initial_point`, the `initial_state` that should remain as given. # You are free then to explore all other things available in Qiskit. # So you have to start from this initial point (all parameters set to 0.01): # # `initial_point = [0.01] * len(ansatz.ordered_parameters)` # or # `initial_point = [0.01] * ansatz.num_parameters` # # and your initial state has to be the Hartree-Fock state: # # `init_state = HartreeFock(num_spin_orbitals, num_particles, converter)` # # For each question, give `ansatz` object. # Remember you have to reach the chemical accuracy $|E_{exact} - E_{VQE}| \leq 0.004 $ Ha $= 4$ mHa. # # </div> # + from qiskit_nature.drivers import PySCFDriver molecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 1.5474' driver = PySCFDriver(atom=molecule) qmolecule = driver.run() # + n_el = qmolecule.num_alpha + qmolecule.num_beta n_mo = qmolecule.num_molecular_orbitals n_so = 2 * qmolecule.num_molecular_orbitals n_q = 2* qmolecule.num_molecular_orbitals e_nn = qmolecule.nuclear_repulsion_energy print(n_el,n_mo,n_so,n_q,e_nn) # - # Check your answer using following code from qc_grader import grade_ex5 freeze_core = True # change to True if you freezed core electrons grade_ex5(ansatz,qubit_op,result,freeze_core) # Submit your answer. You can re-submit at any time. from qc_grader import submit_ex5 submit_ex5(ansatz,qubit_op,result,freeze_core) # ## Answers for Part 1 # # <div class="alert alert-block alert-danger"> # # <b>Questions</b> # # Look into the attributes of `qmolecule` and answer the questions below. # # # 1. We need to know the basic characteristics of our molecule. What is the total number of electrons in your system? # 2. What is the number of molecular orbitals? # 3. What is the number of spin-orbitals? # 3. How many qubits would you need to simulate this molecule with Jordan-Wigner mapping? # 5. What is the value of the nuclear repulsion energy? 
# # </div> # # <div class="alert alert-block alert-success"> # # <b>Answers </b> # # 1. `n_el = qmolecule.num_alpha + qmolecule.num_beta` # # 2. `n_mo = qmolecule.num_molecular_orbitals` # # 3. `n_so = 2 * qmolecule.num_molecular_orbitals` # # 4. `n_q = 2* qmolecule.num_molecular_orbitals` # # 5. `e_nn = qmolecule.nuclear_repulsion_energy` # # # </div> # ## Additional information # # **Created by:** <NAME>, <NAME>, <NAME> # # **Version:** 1.0.1
solutions by participants/ex5/ex5-RiyaMalani-3cnot-2.44mHa-16params.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Correlation between entropy/FD metrics
#
# Requires the [seaborn](https://seaborn.pydata.org/) and [pingouin](https://pingouin-stats.org) packages.

# +
from entropy import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(context='talk')

# Generate 1000 1Hz sines with increasing noise
np.random.seed(123)
N = 1000
sf = 100
n_sines = 100
noises_factor = np.linspace(0, 1, n_sines)
noises = np.random.rand(n_sines, N)
sines = np.zeros(shape=(n_sines, N))
for i in range(n_sines):
    sines[i] = np.sin(2 * np.pi * np.arange(N) / sf) + noises_factor[i] * noises[i, :]

# Plot the first and last sines
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 6), sharex=True, sharey=False)
ax1.plot(sines[0])
ax2.plot(sines[-1])

# +
# Compute the entropy metrics.
# Collect one record per sine and build the frame in a single call:
# ``DataFrame.append`` was deprecated (and removed in pandas 2.0) and grows
# the frame quadratically when called inside a loop.
rows = []
for i in range(n_sines):
    rows.append({'PermEnt': perm_entropy(sines[i], order=3, normalize=True),
                 'SVDEnt': svd_entropy(sines[i], order=3, normalize=True),
                 'SpecEnt': spectral_entropy(sines[i], sf, method='welch', normalize=True),
                 'AppEnt': app_entropy(sines[i], order=2),
                 'SampleEnt': sample_entropy(sines[i], order=2),
                 'PetrosianFD': petrosian_fd(sines[i]),
                 'KatzFD': katz_fd(sines[i]),
                 'HiguchiFD': higuchi_fd(sines[i]),
                 # 'DFA': detrended_fluctuation(sines[i])
                 })
df = pd.DataFrame(rows)

df.head()
# -

# ## Pairwise correlations

sns.pairplot(df, markers='+')

from pingouin import pairwise_corr
pairwise_corr(df).sort_values(by=['r2'], ascending=False)

# ## Clustermap

sns.clustermap(df, row_cluster=False, standard_scale=1)
notebooks/complexity_metrics_correlation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Particle on a sphere # # The Hamiltonial $H$ for a particle on a sphere is given by # # \begin{align} # H &= \frac{p^2}{2m} = \frac{L^2}{2 I} # \end{align} # # # # ## Exercise 1 # $\mathbf{L} = \mathbf{r} \times \mathbf{p}$ is the angular momentum operator # where $\mathbf{r} = (x, y, z)$ is the positional operator and $\mathbf{p} = (p_x, p_y, p_z)$ is the momentum operator # # Show that: # # \begin{align} # L_x &= - i \hbar \left(y \frac{\partial}{\partial z} - z \frac{\partial}{\partial y} \right) \\ # L_y &= - i \hbar \left(z \frac{\partial}{\partial x} - x \frac{\partial}{\partial z} \right) \\ # L_z &= - i \hbar \left(x \frac{\partial}{\partial y} - y \frac{\partial}{\partial x} \right) # \end{align} # # Hint: $p_x = -i\frac{\partial}{\partial x}$ # # Hint 2: To do this exercise you need to do a cross product # ## Exercise 2 # # Find the commutator relations $[L_x, L_y]$ and $[L^2, L_z]$. # # Does $L_x$ and $L_y$ commute? # # Does $L^2$ and $L_z$ commute? # # Hint: $[a, b] = ab - ba$ # # # The particle on a sphere is given by: # # \begin{align} # L^2 Y_{m_l}^l (\varphi, \theta) = \hbar^2 l(l+1) Y_{m_l}^l (\varphi, \theta) \\ # L_z Y_{m_l}^l (\varphi, \theta) = \hbar m_l Y_{m_l}^l (\varphi, \theta) # \end{align} # # where $l = 0, 1, 2, ...$ and $m_l = -l, -l+1,...,0,..., l-1, l$ # # # ## Exercise 3 # # Make a surface plot of a sphere where the particle can be located # ## Exercise 4 # # Find the analytic expressions for $Y_{m_l}^l (\varphi, \theta)$, for instance in the lecture notes on explain everything (code ATEHWGWY). 
# #
# # Make python functions for $L = 0$, $L = 1$ and $L = 2$ with all combinations of $m_l$

# +
def Y_10(phi, theta):
    """Spherical harmonic for (presumably) $l=1$, $m_l=0$ — fill in the formula.

    NOTE(review): the original stub had no body, which made this whole cell a
    SyntaxError; the stubs now raise until the exercise is solved.
    """
    raise NotImplementedError


def Y_11(phi, theta):
    """Spherical harmonic for (presumably) $l=1$, $m_l=+1$ — fill in the formula."""
    raise NotImplementedError


def Y_1m1(phi, theta):
    """Spherical harmonic for (presumably) $l=1$, $m_l=-1$ — fill in the formula."""
    raise NotImplementedError


# or more generally using sph_harm from scipy.special
from scipy.special import sph_harm
# -

# ## Exercise 5
# The parametrization for the probability densities is:
#
# \begin{align}
# x &= \cos \theta \sin \phi |Y_{m_l}^l (\varphi, \theta)|^2 \\
# y &= \sin \theta \sin \phi |Y_{m_l}^l (\varphi, \theta)|^2 \\
# z &= \cos \phi |Y_{m_l}^l (\varphi, \theta)|^2
# \end{align}
#
# Give an explanation to why the parametrization looks like this.
#
#
# Plot the probability density of $|Y_{m_l}^l (\varphi, \theta)|^2$ for $L = 0$, $L = 1$
# and $L=2$ for $m_l = -l, -l+1,...,0,..., l-1, l$.
#
# Try to plot them with the sphere from exercise 3, here it is a good idea to add the keyword argument $alpha = 0.3$ for the sphere.

# ## Exercise 6
#
# Looking at the formulas for $Y_{m_l}^l (\varphi, \theta)$
#
# What are the probability densities $P_x$ and $P_y$? Plot these for your favorite values of $l$ and $m_l$.
problem_candidates/Particle on a sphere exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.insert(0,r"C:\Users\LENOVO\Downloads") import numpy as np import pandas as pd import csv from sklearn.model_selection import train_test_split import class_report as cr from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix, accuracy_score, auc, f1_score, log_loss from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import StratifiedKFold from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.neural_network import MLPClassifier from sklearn.ensemble import RandomForestClassifier # + ## Lectura de datos base_noroccidente = pd.read_csv(r'C:\Users\LENOVO\Downloads\BASE_NOROCCIDENTE.txt', sep=r'¡', engine='python') base_noroccidente.head() ## Eliminación de variables: base_noroccidente2 = base_noroccidente[[ #'CUCONUSE', 'SESUSERV', 'SERVDESC', 'SESUSUSC', 'SESUFEIN','DEPADESC', 'SESUCUSA', 'SESUSAPE','SESUCICL', 'SUSCNITC', #'VECTOR','SESUCATE', 'SESUSUCA','SESULOCA','SESUSAAN','V1', 'V2', 'V3', 'V4', 'V5', 'V6',#'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'TIPIFICACION_CLIENTE', 'SUMA','CANTIDAD','CALIFICACION_VECTOR_SERVICIO', #'VECTOR_CUALITATIVO_CONTRATO',#'BANCO_1', 'DIA_PAGO_1', 'DIA_PAGO_2','DIA_PAGO_3', 'FECHA_SUSPENSION','RANGO_EDAD', 'GENERO', 'PROM_SUSC', 'CANAL_ENVIO', 'SEGMENTACION', 'REGIONAL','ESTRATO_AGRUPADO','DEPARTAMENTO_AGR','ANTIGUEDAD_DIAS', 'CANAL_PAGO']] # - # Vista previa base_noroccidente2.head() ## Eliminacion de clientes con datos NA base_noroccidente3 = base_noroccidente2.dropna() # + ## Group by por nit para calificacion b4 = 
base_noroccidente3[['SUSCNITC','CALIFICACION_VECTOR_SERVICIO']] group_nit = b4.groupby(['SUSCNITC'])['CALIFICACION_VECTOR_SERVICIO'].mean().reset_index() ## Join de la nueva calificacion base_noroccidente4 = pd.merge(base_noroccidente3, group_nit, how='left', on=['SUSCNITC']) ## Prueba promedio b5 = base_noroccidente4[['SUSCNITC','CALIFICACION_VECTOR_SERVICIO_x','CALIFICACION_VECTOR_SERVICIO_y']] ### Creacion de la etiqueta de clasificacion base_noroccidente4['y'] = pd.cut(x=base_noroccidente4['CALIFICACION_VECTOR_SERVICIO_y'], bins=[-1,50,76,100], labels=['No pago','Pago inoportuno','Pago']) # + dataTypeSeries = base_noroccidente4.dtypes dataTypeSeries cat = base_noroccidente4.select_dtypes(include=['object','category']) cuanti = base_noroccidente4.select_dtypes(include=['int64','float64']) cat = cat.apply(lambda col: LabelEncoder().fit_transform(col)) # - cuanti.corr(method = 'pearson') # + base_noroccidente5=cuanti.join(cat) print(base_noroccidente5.dtypes) #base_noroccidente5.to_csv(r'C:\Users\pc\Desktop\base_sur_process.csv') #base_noroccidente5 = pd.read_csv(r'C:\Users\pc\Desktop\base_sur_process.csv', sep=',', engine='python') X = base_noroccidente5[[ #'CUCONUSE', 'SESUSERV', 'SERVDESC', #'SESUSUSC', 'DEPADESC', 'SESUCUSA', 'SESUSAPE','SESUCICL', 'TIPIFICACION_CLIENTE', #'SESUFEIN','SUSCNITC', 'VECTOR','SESUCATE', 'SESUSUCA','SESULOCA','SESUSAAN','RANGO_EDAD', 'GENERO', #'V1', 'V2', 'V3', 'V4', 'V5', 'V6','V7', 'V8', 'V9', 'V10', 'V11', 'V12', #'REGIONAL', # 'SUMA', #'CANTIDAD', #'CALIFICACION_VECTOR_SERVICIO_x','CALIFICACION_VECTOR_SERVICIO_y','DEPARTAMENTO_AGR' #'VECTOR_CUALITATIVO_CONTRATO','BANCO_1', 'DIA_PAGO_1', 'DIA_PAGO_2','DIA_PAGO_3', 'FECHA_SUSPENSION', 'PROM_SUSC', 'CANAL_ENVIO', 'SEGMENTACION', 'ESTRATO_AGRUPADO','ANTIGUEDAD_DIAS', 'CANAL_PAGO']] y = base_noroccidente5['y'] # - ## Vista previa de las variables preliminares para clasificacion X.head() # + from datetime import datetime instanteInicial = datetime.now() X_ = 
StandardScaler().fit_transform(X) #X_ = X X_train, X_test, y_train, y_test = train_test_split(X_, y, test_size=0.2,random_state=321) classifiers = [ KNeighborsClassifier(3), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1, max_iter=500), LogisticRegression(multi_class="multinomial", solver="lbfgs", C=5), GaussianNB() ] # Competencia de clasificadores log_cols=["Classifier", "Accuracy", "Log Loss", "F1_SCORE"] log = pd.DataFrame(columns=log_cols) for clf in classifiers: clf.fit(X_train, y_train) name = clf.__class__.__name__ print("="*30) print(name) print('****Resultados****') train_predictions = clf.predict_proba(X_test) acc = cr.class_report(y_true = y_test, y_pred = clf.predict(X_test), y_score = train_predictions) print("Métricas: {}".format(acc)) log_entry = pd.DataFrame([[name, acc]]) log = log.append(log_entry) print("="*30) # + # Importancia de variables model = DecisionTreeClassifier(max_depth=5).fit(X_train, y_train) importance = model.feature_importances_ # Total importancia de variables for i,v in enumerate(importance): print('Variable: %0d, Score: %.5f' % (i,v)) # + ## El mejor modelo de clasificacion fue el KNN # + classifiers = [ KNeighborsClassifier(2), KNeighborsClassifier(4), KNeighborsClassifier(5), ] # Competencia de clasificadores log_cols=["Classifier", "Accuracy", "Log Loss", "F1_SCORE"] log = pd.DataFrame(columns=log_cols) for clf in classifiers: clf.fit(X_train, y_train) name = clf.__class__.__name__ print("="*30) print(name) print('****Resultados****') train_predictions = clf.predict_proba(X_test) acc = cr.class_report(y_true = y_test, y_pred = clf.predict(X_test), y_score = train_predictions) print("Métricas: {}".format(acc)) log_entry = pd.DataFrame([[name, acc]]) log = log.append(log_entry) print("="*30) # + kf = StratifiedKFold(n_splits=5,shuffle=False) summary_model = [] # Iteraciones del entrenamiento sobre los kfolds #X_fin = X[['SESUSERV','SESUCUSA', 
'SESUSAPE','SESUCICL', 'TIPIFICACION_CLIENTE', 'PROM_SUSC', #'CANAL_ENVIO','SEGMENTACION', 'ESTRATO_AGRUPADO','ANTIGUEDAD_DIAS', 'CANAL_PAGO']] X_fin = X for train_index, test_index in kf.split(X_fin, y) : # Particion train test para validacion cruzada X_train, X_test = X.iloc[train_index], X.iloc[test_index] y_train, y_test = y[train_index], y[test_index] # Entrenamiento de la red MLP model_final = KNeighborsClassifier(5).fit(X_train, y_train) summary_model.append(cr.class_report(y_true = y_test, y_pred = model_final.predict(X_test), y_score = model_final.predict_proba(X_test))) # Mostrar resumen de metricas print(summary_model) # - from sklearn.metrics import confusion_matrix confusion = confusion_matrix(y_test, model_final.predict(X_test)) confusion
Mdl_preliminar_noroccidente.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # # System Properties # # *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelors module Signals and Systems, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).* # - # ## Idealized Systems # # Idealized systems are systems with idealized properties that typically render their practical implementation infeasible. They play an important role in various fields of signal processing as they allow a convenient formulation of major concepts and principles. In the following, the ideal low-pass is introduced as prototype for an idealized frequency selective system. Other frequency selective systems can be deduced directly from this prototype. # ### Ideal Low-Pass # # The transfer function $H(j \omega)$ of a real-valued [ideal low-pass](https://en.wikipedia.org/wiki/Sinc_filter) reads # # \begin{equation} # H(j \omega) = \text{rect} \left( \frac{\omega}{2 \omega_\text{c}} \right) # \end{equation} # # where $\omega_\text{c} > 0$ denotes its cut-off frequency. The ideal low-pass removes all frequency components above the cut-off frequency $\omega_\text{c}$, without affecting lower frequencies. 
The impulse response $h(t) = \mathcal{F}^{-1} \{ H(j \omega) \}$ is computed by applying the [duality principle](../fourier_transform/properties.ipynb#Duality) to the [Fourier transform of the rectangular signal](../fourier_transform/definition.ipynb#Transformation-of-the-Rectangular-Signal) # # \begin{equation} # h(t) = \frac{\omega_\text{c}}{\pi} \cdot \text{sinc} ( \omega_\text{c} t ) # \end{equation} # # Since the impulse response is an acausal signal, the ideal low-pass is an [acausal system](causality_stability.ipynb#Condition-for-the-Impulse-Response). The sinc-function is furthermore not absolutely integrable. Hence the ideal-low pass is not a [stable system](causality_stability.ipynb#Condition-for-the-Impulse-Response) in the sense of the bounded-input bounded-output (BIBO) criterion. In conclusion, the ideal low-pass is not realizable. It can only be approximated in practical systems. It plays nevertheless an important role in the theory of sampling and interpolation. # #### Realizable low-pass # # Various techniques have been developed in order to approximate the ideal low-pass by a realizable system. One is the *windowed sinc filter*. In order to make the ideal-low pass filter causal and stable, its impulse response is windowed to a finite-length $T$ followed by a temporal shift of $\frac{T}{2}$. 
Using the [rectangular signal](../continuous_signals/standard_signals.ipynb#Rectangular-Signal) to truncate (window) the impulse response, the impulse response of the realizable low-pass is given as
# +
from numpy import linspace, array
import matplotlib.pyplot as plt

nu = sym.symbols('nu', real=True)

# evaluate |H| on a grid of positive frequencies up to 1.5 * cut-off;
# the integral has no closed form, so each sample is computed numerically
w = linspace(0, 1.5*wc, 100)
H = [(T/(2*sym.pi)).evalf(2) * sym.exp(-sym.I*wi*T/2).evalf(2) *
     sym.Integral(sym.sinc(T/2*(nu-wi)), (nu, -wc, wc)).evalf(2) for wi in w]

plt.plot(w, abs(array(H)))
# raw strings: '\o' in a plain literal is an invalid escape sequence
# (SyntaxWarning on recent Python versions)
plt.xlabel(r'$\omega$')
plt.ylabel(r'$|H(j \omega)|$')
plt.grid()
# -

# **Exercise**
#
# * Discuss the properties of the magnitude response $|H(j \omega)|$ of the realizable low-pass in dependence of its length $T$.

# ### Ideal Band-Pass
#
# The transfer function $H(j \omega)$ of a real-valued ideal band-pass reads
#
# \begin{equation}
# H(j \omega) =
# \begin{cases}
# 1 & \text{for } \omega_\text{c} - \frac{\Delta \omega}{2} < |\omega| < \omega_\text{c} + \frac{\Delta \omega}{2} \\
# 0 & \text{otherwise}
# \end{cases}
# \end{equation}
#
# The ideal band-pass does not affect the frequency components of a signal around a given center frequency $\omega_\text{c}$ where the total width of this transition band is $\Delta \omega$. Components outside the transition band are removed. The transfer function can be rewritten as
#
# \begin{equation}
# H(j \omega) =
# \text{rect} \left( \frac{\omega - \omega_\text{c}}{\Delta \omega} \right) + \text{rect} \left( \frac{\omega + \omega_\text{c}}{\Delta \omega} \right)
# = \text{rect} \left( \frac{\omega}{\Delta \omega} \right) * \left( \delta(\omega - \omega_\text{c}) + \delta(\omega + \omega_\text{c}) \right)
# \end{equation}
#
# Its impulse response is computed by inverse Fourier transformation
#
# \begin{equation}
# h(t) = \frac{\Delta \omega}{\pi} \cdot \text{sinc} \left( \frac{\Delta \omega}{2} t \right) \cdot \cos(\omega_\text{c} t)
# \end{equation}
#
# The ideal band-pass can be interpreted as a modulated low-pass filter. Due to its direct relation to the ideal low-pass, it is neither causal nor stable. The ideal band-pass can only be approximated in practical realizations.
# It plays an important role in the theoretical foundations of wireless communications.

# **Example**
#
# For illustration, the impulse response of the ideal band-pass for $\omega_\text{c} = 10$ and $\Delta \omega = 2$ is plotted

# +
wc = 10
dw = 2

# Amplitude follows from the inverse Fourier transform of the band-pass
# transfer function: h(t) = (dw/pi) * sinc(dw/2 * t) * cos(wc * t).
# (The factor is dw/pi, not pi*dw, consistent with the low-pass result
# h(t) = wc/pi * sinc(wc*t) derived earlier.)
h = dw/sym.pi * sym.sinc(dw/2*t) * sym.cos(wc*t)
sym.plot(h, (t, -10, 10), xlabel='$t$', ylabel='$h(t)$');
# -

# **Exercise**
#
# * In the same manner as for the low-pass, derive the impulse response and transfer function of a realizable band-pass.

# ### Ideal High-Pass
#
# The transfer function $H(j \omega)$ of a real-valued ideal high-pass reads
#
# \begin{equation}
# H(j \omega) = 1 - \text{rect} \left( \frac{\omega}{2 \omega_\text{c}} \right)
# \end{equation}
#
# where $\omega_\text{c} > 0$ denotes its cut-off frequency. The ideal high-pass removes all frequency components below the cut-off frequency $\omega_\text{c}$, without affecting higher frequencies. Its impulse response can be derived in a straightforward manner from the impulse response of the ideal low-pass
#
# \begin{equation}
# h(t) = \delta(t) - \frac{\omega_\text{c}}{\pi} \cdot \text{sinc} ( \omega_\text{c} t )
# \end{equation}
#
# Due to its relation to the ideal low-pass, the ideal high-pass is neither causal nor stable. The ideal high-pass can only be approximated in practical realizations.

# ### Ideal Band-Stop
#
# The transfer function $H(j \omega)$ of a real-valued ideal band-stop is derived from the transfer function of the ideal band-pass in the same manner as the ideal high-pass. It reads
#
# \begin{equation}
# H(j \omega) = 1 - \text{rect} \left( \frac{\omega - \omega_\text{c}}{\Delta \omega} \right) - \text{rect} \left( \frac{\omega + \omega_\text{c}}{\Delta \omega} \right)
# \end{equation}
#
# The ideal band-stop removes the frequency components of a signal around a given center frequency $\omega_\text{c}$ where the total width of this stop band is $\Delta \omega$. Components outside the stop band are not affected by the system.
The impulse response of the ideal band-stop can be derived in a straightforward manner from the impulse response of the ideal band-pass as
#
# \begin{equation}
# h(t) = \delta(t) - \frac{\Delta \omega}{\pi} \cdot \text{sinc} \left( \frac{\Delta \omega}{2} t \right) \cdot \cos(\omega_\text{c} t)
# \end{equation}
#
# Due to its relation to the ideal band-pass, the ideal band-stop is neither causal nor stable. The ideal band-stop can only be approximated in practical realizations. The ideal band-stop is for instance used to remove undesired signal components, e.g. [mains hum](https://en.wikipedia.org/wiki/Mains_hum).

# + [markdown] nbsphinx="hidden"
# **Copyright**
#
# This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Continuous- and Discrete-Time Signals and Systems - Theory and Computational Examples*.
systems_properties/idealized_systems.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Event-related Analysis # This example can be referenced by [citing the package](https://github.com/neuropsychology/NeuroKit#citation). # # This example shows how to use Neurokit to extract epochs from data based on events localisation and its corresponding physiological signals. That way, you can compare *experimental conditions* with one another. # Load NeuroKit and other useful packages import neurokit2 as nk import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [15, 5] # Bigger images plt.rcParams['font.size']= 14 # ## The Dataset # Use the `nk.data()` function to load the dataset located on Neurokit data folder. # # It contains 2.5 minutes of biosignals recorded at a frequency of 100Hz (2.5 x 60 x 100 = 15000 data points). # # Biosignals : **ECG, RSP, EDA + Photosensor (event signal)** # Get data data = nk.data("bio_eventrelated_100hz") # This is the data from ***1*** **participant** to whom was presented 4 images (emotional stimuli, [IAPS-like emotional faces](https://en.wikipedia.org/wiki/International_Affective_Picture_System)), which we will refer to as **events**. # # Importantly, the images were marked by a small black rectangle on the screen, which led to the photosensor signal to go down (and then up again after the image). This is what will allow us to retrieve the location of these events. # # They were 2 types (the **condition**) of images that were shown to the participant: **"Negative"** vs. **"Neutral"** in terms of emotion. Each picture was presented for 3 seconds. The following list is the condition order. 
condition_list = ["Negative", "Neutral", "Neutral", "Negative"] # ## Find Events # These events can be localized and extracted using [events_find()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.events_find). # # *Note that you should also specify whether to select events that are higher or below the threshold using the `threshold_keep` argument.* # Find events events = nk.events_find(data["Photosensor"], threshold_keep='below', event_conditions=condition_list) events # As we can see, `events_find()` returns a dict containing onsets and durations for each corresponding event, based on the label for event identifiers and each event condition. Each event here lasts for 300 data points (equivalent to 3 seconds sampled at 100Hz). # Plot the location of event with the signals plot = nk.events_plot(events, data) # The output of [events_plot()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.events_plot) shows the corresponding events in the signal, with the blue dashed line representing a Negative event and red dashed line representing a Neutral event. # ## Process the Signals # Now that we have the events location, we can go ahead and process the data. # # Biosignals processing can be done quite easily using NeuroKit with the [bio_process()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.bio_process) function. Simply provide the appropriate biosignal channels and additional channels that you want to keep (for example, the photosensor), and `bio_process()` will take care of the rest. It will return a dataframe containing processed signals and a dictionary containing useful information. # + # Process the signal df, info = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=100) # Visualize df.plot() # - # ## Create Epochs # We now have to transform this dataframe into **epochs**, i.e. 
# segments (chunks) of data around the **events** using [epochs_create()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.epochs_create).
#
# 1. We want it to start *1 second before the event onset*
#
# 2. and end *6 seconds* afterwards
# These are passed into the `epochs_start` and `epochs_end` arguments, respectively.
#
# Our epochs will then cover the region from **-1 s** to **+6 s** (i.e., 700 data points since the signal is sampled at 100Hz).

# Build and plot epochs
epochs = nk.epochs_create(df, events, sampling_rate=100, epochs_start=-1, epochs_end=6)

# Let's plot some of the signals of each epoch (and transform them to the same scale for visualization purposes).

for i, epoch in enumerate (epochs):
    # `epochs` is a dict keyed by epoch label; look up the DataFrame for this key
    epoch = epochs[epoch]
    # keep only the signal columns of interest for plotting
    epoch = epoch[['ECG_Clean', 'ECG_Rate', 'RSP_Rate', 'RSP_Phase', 'EDA_Phasic', 'EDA_Tonic']]
    # title the figure with this epoch's condition ("Negative"/"Neutral")
    title = events['condition'][i]
    # z-score the signals so they share one scale in a single plot
    nk.standardize(epoch).plot(title=title, legend=True)

# ## Extract Event Related Features
# With these segments, we are able to compare how the physiological signals vary across the different events. We do this by:
# 1. **Iterating through our object epochs**
#
# 2. **Storing the mean value of $X$ feature of each condition in a new dictionary**
#
# 3. **Saving the results in a readable format**
#
#
# We can call them *epochs-dictionary*, the *mean-dictionary* and our *results-dataframe*.
# # # + df = {} # Initialize an empty dict, for epoch_index in epochs: df[epoch_index] = {} # then Initialize an empty dict inside of it with the iterative # Save a temp var with dictionary called <epoch_index> in epochs-dictionary epoch = epochs[epoch_index] # We want its features: # Feature 1 ECG ecg_baseline = epoch["ECG_Rate"].loc[-100:0].mean() # Baseline ecg_mean = epoch["ECG_Rate"].loc[0:400].mean() # Mean heart rate in the 0-4 seconds # Store ECG in df df[epoch_index]["ECG_Rate"] = ecg_mean - ecg_baseline # Correct for baseline # Feature 2 EDA - SCR scr_max = epoch["SCR_Amplitude"].loc[0:600].max() # Maximum SCR peak # If no SCR, consider the magnitude, i.e. that the value is 0 if np.isnan(scr_max): scr_max = 0 # Store SCR in df df[epoch_index]["SCR_Magnitude"] = scr_max # Feature 3 RSP rsp_baseline = epoch["RSP_Rate"].loc[-100:0].mean() # Baseline rsp_rate = epoch["RSP_Rate"].loc[0:600].mean() # Store RSP in df df[epoch_index]["RSP_Rate"] = rsp_rate - rsp_baseline # Correct for baseline df = pd.DataFrame.from_dict(df, orient="index") # Convert to a dataframe df["Condition"] = condition_list # Add the conditions df # Print DataFrame # - # ## Plot Event Related Features # # You can now plot and compare how these features differ according to the event of interest. sns.boxplot(x="Condition", y="ECG_Rate", data=df) sns.boxplot(x="Condition", y="RSP_Rate", data=df) sns.boxplot(x="Condition", y="SCR_Magnitude", data=df) # *Then interpret* : As we can see, there seems to be a difference between the negative and the neutral pictures. Negative stimuli, as compared to neutral stimuli, were related to a stronger cardiac deceleration (i.e., higher heart rate variability), an accelerated breathing rate, and higher SCR magnitude. # # ## Important remarks: # You can't break anything if you're on Binder, so have fun. Keep in mind that *this is for illustration purposes only*. 
# # Data size limits on Github force us to downsample and have only one participant (sample rate would have to be >250 Hz, and you can't do stats with 4 observations in 1 subjects). # # We invite you to read on reporting guidelines for biosignal measures. For ECG-PPG/HRV : [<NAME>, 2016 - GRAPH](https://www.ncbi.nlm.nih.gov/pubmed/27163204)
docs/examples/eventrelated.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Initialising Your Agent as an Issuing Authority # # This template walks you through the basic steps you need to take to configure your agent as an issuing authority on the Sovrin StagingNet. If using a different network you will need to update this template. The steps include: # # * Writing your DID to the Sovrin StagingNet # * Accepting the Transaction Author Agreement # * Authoring schema to the ledger # * Authoring credential definitions for the schema this agent intends to issue # * Persisting Identifiers for use throughout the playground. # # It is recommended that this initialisation notebook be run **once**. If you are following the default docker-compose services then your agents wallet storage will be persisted in a postgres database as long as you run `./manage.sh stop` rather than `./manage.sh down`. # # # ### Imports from aries_cloudcontroller import AriesAgentController import os from termcolor import colored # ### Initialise the Agent Controller # + api_key = os.getenv("ACAPY_ADMIN_API_KEY") admin_url = os.getenv("ADMIN_URL") print( f"Initialising a controller with admin api at {admin_url} and an api key of {api_key}" ) agent_controller = AriesAgentController(admin_url, api_key) # - # ## Write DID to the Public Ledger # # Note: if defined a ACAPY_WALLET_SEED value for your agent then this function will return a DID, but this DID still needs to be written to the ledger. If you did not define a seed you will need to create a DID first. 
public_did_response = await agent_controller.wallet.get_public_did() if public_did_response["result"]: did_obj = public_did_response["result"] else: create_did_response = await agent_controller.wallet.create_did() did_obj = create_did_response["result"] print("DID", did_obj) # + # write new DID to Sovrin Stagingnet import requests import json url = "https://selfserve.sovrin.org/nym" payload = { "network": "stagingnet", "did": did_obj["did"], "verkey": did_obj["verkey"], "paymentaddr": "", } # Adding empty header as parameters are being sent in payload headers = {} r = requests.post(url, data=json.dumps(payload), headers=headers) print(r.json()) print(r["body"]["reason"]) # - # ## Accept Transaction Author Agreement # # Although the Sovrin StagingNet is permissionless, before DID's have the authority to write to the ledger they must accept something called a transaction author agreement by signing it using the DID they have on the ledger. # # As a global public ledger, the Sovrin Ledger and all its participants are subject to privacy and data protection regulations such as the EU General Data Protection Regulation (GDPR). These regulations require that the participants be explicit about responsibilities for Personal Data. # # To clarify these responsibilities and provide protection for all parties, the Sovrin Governance Framework Working Group developed an agreement between Transaction Authors and the Sovrin Foundation. The TAA can be found at Sovrin.org. It ensures that users are aware of and consent to the fact that all data written to the Sovrin Ledger cannot be removed, even if the original author of the transaction requests its removal. # # The TAA outlines the policies that users must follow when interacting with the Sovrin Ledger. When a user’s client software is preparing a transaction for submission to the network, it must include a demonstration that the user had the opportunity to review the current TAA and accept it. 
This is done by including some additional fields in the ledger write transaction: # # * A hash of the agreement # * A date when the agreement was accepted, and # * A string indicating the user interaction that was followed to obtain the acceptance. # # The Indy client API used by Sovrin has been extended to allow users to review current and past agreements and to indicate acceptance through an approved user interaction pattern. - source: https://sovrin.org/preparing-for-the-sovrin-transaction-author-agreement/ # # For more details on TAA please read more at the following links: # * [Preparing for the Sovrin Transaction Author Agreement](https://sovrin.org/preparing-for-the-sovrin-transaction-author-agreement/) # * [How the recent approval of the Sovrin Governance Framework v2 affects Transaction Authors # ](https://sovrin.org/how-the-recent-approval-of-the-sovrin-governance-framework-v2-affects-transaction-authors/) # * [TAA v2](https://github.com/sovrin-foundation/sovrin/blob/master/TAA/TAA.md) # * [TAA Acceptance Mechanism List (AML)](https://github.com/sovrin-foundation/sovrin/blob/master/TAA/AML.md) taa_response = await agent_controller.ledger.get_taa() TAA = taa_response["result"]["taa_record"] TAA["mechanism"] = "service_agreement" await agent_controller.ledger.accept_taa(TAA) # ## Assign Agent Public DID if Not Set # # Will only be ran if ACAPY_WALLET_SEED not initially set. if did_obj["posture"] != "public": response = await agent_controller.wallet.assign_public_did(did_obj["did"]) print("Successfully intialised agent with Public DID : ", did_obj["did"]) # ## Writing Schema # # Write as many schema to the ledger as you like. Be sure to store each schema_id, you will need these when it comes to authoring credential defintition transactions and issuing credentials against this schema. # # Uncomment and copy the below cell as many times as you need. Be sure to update any arugments surrounded by <> with your own details. 
# + # # Define you schema name - must be unique on the ledger # schema_name = "<YOUR SCHEMA NAME>" # # Can version the schema if you wish to update it # schema_version = "0.0.1" # # Define any list of attributes you wish to include in your schema # attributes = ["<ATTR_1", "<ATTR_2>", ..., "<ATTR_N>"] # response = await agent_controller.schema.write_schema(schema_name, attributes, schema_version) # <YOUR SCHEMA ID> = response["schema_id"] # - # ## Using External Schema # # You do not have to author the schema for the credentials you wish to issue, instead you can identify extisting schema on the ledger that you want to issue against. To do this you must set the schema identifier for any schema you want to use and these MUST be on the ledger your agent is pointing to. # + # <EXTERNAL SCHEMA ID> = "<SOME SCHEMA ID>" # - # ## Writing Credential Definitions # # For all schema you intend to issue credentials against your agent must author a credential definition transaction to the public ledger. This specifies the public cryptographic material your agent will use to sign all credentials issued against a specific schema. # # Again uncomment and copy this cell as often as you need. Remebering to update the arguments in <> to specify your schema identifiers. Store each credential definition identifier in a unique variable. # + # cred_def_response = await agent_controller.definitions.write_cred_def(<SCHEMA ID>) # <YOUR CRED DEF ID> = cred_def_response["credential_definition_id"] # - # ## Persist Identifiers for use throughout other business logic notebooks associated with this agent # # The schema_id and cred_def_id value pairs are required whenever issuing credentials, and also can be used to constrain acceptable proof requests. In a real application these values might be stored in environment variables or most likely in a database. 
For notebooks we have found it easier to store as string values in a cell and then load these values into the jupyter store so that they can be fetched across multiple notebooks. # # As such you are recommended to print out each of the schema and cred def identifiers used by your agent and copy them across to your **main** business logic notebook where you should store them in a variable and save them to the jupyter store. Remember, you should only be running this notebook once so having this logic in here will not be useful. # # + # print(<YOUR SCHEMA ID>) # print(<YOUR CRED DEF ID>) # - # ## Terminate Controller # await agent_controller.terminate()
packages/syft/examples/hyperledger-aries/recipes/issue-credential/issuer_initialisation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pysc2_env # language: python # name: pysc2_env # --- # + # Tutorial by www.pylessons.com # Tutorial written for - Tensorflow 2.3.1 import os import random import gym import pylab import numpy as np import tensorflow as tf from tensorflow.keras.models import Model, load_model from tensorflow.keras.layers import Input, Dense, Lambda, Add, Conv2D, Flatten, LSTM, Reshape from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras import backend as K import cv2 import threading from threading import Thread, Lock import time import tensorflow_probability as tfp from typing import Any, List, Sequence, Tuple import deepmind_lab gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4000)]) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # -1:cpu, 0:first gpu tfd = tfp.distributions class ActorCritic(tf.keras.Model): def __init__(self, input_shape, action_space): super(ActorCritic, self).__init__() self.conv_1 = Conv2D(8, 3, 2, padding="valid", activation="relu") self.conv_2 = Conv2D(16, 3, 2, padding="valid", activation="relu") self.lstm = LSTM(64, name="lstm", return_sequences=True, return_state=True) self.flatten = Flatten() self.reshape = Reshape((15*15,16)) self.dense_0 = Dense(256, activation='relu', name="dense_0") self.dense_1 = Dense(action_space, name="dense_1") self.dense_2 = Dense(1, name="dense_2") def call(self, X_input, memory_state, carry_state, training): batch_size = tf.shape(X_input)[0] conv_1 = self.conv_1(X_input) conv_2 = self.conv_2(conv_1) #print("conv_2.shape:" , conv_2.shape) conv_2_reshaped = self.reshape(conv_2) initial_state = (memory_state, carry_state) lstm_output, next_memory_state, 
next_carry_state = self.lstm(conv_2_reshaped, initial_state=initial_state, training=training) X_input = self.flatten(lstm_output) X_input = self.dense_0(X_input) action_logit = self.dense_1(X_input) value = self.dense_2(X_input) return action_logit, value, next_memory_state, next_carry_state def safe_log(x): """Computes a safe logarithm which returns 0 if x is zero.""" return tf.where( tf.math.equal(x, 0), tf.zeros_like(x), tf.math.log(tf.math.maximum(1e-12, x))) def take_vector_elements(vectors, indices): """ For a batch of vectors, take a single vector component out of each vector. Args: vectors: a [batch x dims] Tensor. indices: an int32 Tensor with `batch` entries. Returns: A Tensor with `batch` entries, one for each vector. """ return tf.gather_nd(vectors, tf.stack([tf.range(tf.shape(vectors)[0]), indices], axis=1)) huber_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM) sparse_ce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.SUM) mse_loss = tf.keras.losses.MeanSquaredError() workspace_path = '/home/kimbring2/stabilizing_transformers' writer = tf.summary.create_file_writer(workspace_path + "/tensorboard/pong") def _action(*entries): return np.array(entries, dtype=np.intc) ACTIONS = { 'look_left': _action(-20, 0, 0, 0, 0, 0, 0), 'look_right': _action(20, 0, 0, 0, 0, 0, 0), 'look_up': _action(0, 10, 0, 0, 0, 0, 0), 'look_down': _action(0, -10, 0, 0, 0, 0, 0), 'strafe_left': _action(0, 0, -1, 0, 0, 0, 0), 'strafe_right': _action(0, 0, 1, 0, 0, 0, 0), 'forward': _action(0, 0, 0, 1, 0, 0, 0), 'backward': _action(0, 0, 0, -1, 0, 0, 0), 'fire': _action(0, 0, 0, 0, 1, 0, 0), 'jump': _action(0, 0, 0, 0, 0, 1, 0), 'crouch': _action(0, 0, 0, 0, 0, 0, 1) } ACTION_LIST = list(ACTIONS) class PPOAgent: # PPO Main Optimization Algorithm def __init__(self, env_name): # Initialization # Environment and PPO parameters self.env_name = env_name self.env = deepmind_lab.Lab(self.env_name, 
['RGB_INTERLEAVED'], {'fps': '15', 'width': '64', 'height': '64'}) #env.reset(seed=1) self.action_size = len(ACTION_LIST) self.EPISODES, self.episode, self.max_average = 20000, 0, -21.0 # specific for pong self.lock = Lock() self.learning_rate = 0.0001 self.ROWS = 80 self.COLS = 60 self.REM_STEP = 3 # Instantiate plot memory self.scores, self.episodes, self.average = [], [], [] self.Save_Path = 'Models' self.state_size = (self.REM_STEP, self.ROWS, self.COLS) if not os.path.exists(self.Save_Path): os.makedirs(self.Save_Path) self.path = '{}_A3C_{}'.format(self.env_name, self.learning_rate) self.model_name = os.path.join(self.Save_Path, self.path) # Create Actor-Critic network model self.policy = ActorCritic(input_shape=self.state_size, action_space=self.action_size) self.policy_old = ActorCritic(input_shape=self.state_size, action_space=self.action_size) self.optimizer = tf.keras.optimizers.Adam(self.learning_rate) @tf.function def act_old(self, state, memory_state, carry_state, training): # Use the network to predict the next action to take, using the model prediction = self.policy_old(tf.expand_dims(state, 0), memory_state, carry_state, training=training) action = tf.random.categorical(prediction[0], 1) return action[0][0], prediction @tf.function def act_new(self, state, memory_state, carry_state, training): # Use the network to predict the next action to take, using the model prediction = self.policy(tf.expand_dims(state, 0), memory_state, carry_state, training=training) action = tf.random.categorical(prediction[0], 1) return action[0][0], prediction def discount_rewards(self, reward): # Compute the gamma-discounted rewards over an episode gamma = 0.99 # discount rate running_add = 0 discounted_r = np.zeros_like(reward) for i in reversed(range(0, len(reward))): if reward[i] != 0: # reset the sum, since this was a game boundary (pong specific!) 
running_add = 0 running_add = running_add * gamma + reward[i] discounted_r[i] = running_add if np.std(discounted_r) != 0: discounted_r -= np.mean(discounted_r) # normalizing the result discounted_r /= np.std(discounted_r) # divide by standard deviation return discounted_r def get_loss(self, states, actions, discounted_r, memory_state_old, carry_state_old, memory_state_new, carry_state_new): batch_size = states.shape[0] action_logits_old = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) values_old = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) action_logits = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) for i in range(0, batch_size): action_old, prediction_old = self.act_old(states[i,:,:,:], memory_state_old, carry_state_old, training=True) action_logit_old = prediction_old[0] value_old = prediction_old[1] memory_state_old = prediction_old[2] carry_state_old = prediction_old[3] action_logits_old = action_logits_old.write(i, action_logit_old[0]) values_old = values_old.write(i, tf.squeeze(value_old)) action_new, prediction_new = self.act_new(states[i,:,:,:], memory_state_new, carry_state_new, training=True) action_logit = prediction_new[0] value = prediction_new[1] memory_state_new = prediction_new[2] carry_state_new = prediction_new[3] action_logits = action_logits.write(i, action_logit[0]) values = values.write(i, tf.squeeze(value)) action_logits_old = action_logits_old.stack() values_old = values_old.stack() action_logits = action_logits.stack() values = values.stack() action_logits_selected_old = take_vector_elements(action_logits_old, actions) action_logits_selected = take_vector_elements(action_logits, actions) advantages = discounted_r - tf.stop_gradient(values_old) entropy_loss_old = tf.keras.losses.categorical_crossentropy(action_logits_old, action_logits_old) action_logits_selected_old = tf.nn.softmax(action_logits_selected_old) 
action_log_selected_old = tf.math.log(action_logits_selected_old) action_logits_selected = tf.nn.softmax(action_logits_selected) action_log_selected = tf.math.log(action_logits_selected) ratios = tf.math.exp(action_log_selected - tf.stop_gradient(action_log_selected_old)) eps_clip = 0.2 surr1 = ratios * advantages surr2 = tf.clip_by_value(ratios, 1 - eps_clip, 1 + eps_clip) * advantages actor_loss = -tf.math.minimum(surr1, surr2) actor_loss = tf.math.reduce_mean(actor_loss) actor_loss = tf.cast(actor_loss, 'float32') #print("actor_loss: ", actor_loss) critic_loss = mse_loss(values, discounted_r) total_loss = actor_loss + critic_loss return total_loss, memory_state_old, carry_state_old, memory_state_new, carry_state_new def replay(self, states, actions, rewards, memory_state, carry_state): # reshape memory to appropriate shape for training states = np.vstack(states) #print("states.shape: ", states.shape) # Compute discounted rewards discounted_r = self.discount_rewards(rewards) discounted_r = discounted_r.astype(np.float32) #print("discounted_r: ", discounted_r) divide_size = 500 batch_size = states.shape[0] #print("batch_size: ", batch_size) epoch_size = batch_size // divide_size #print("epoch_size: ", epoch_size) remain_size = batch_size - epoch_size * divide_size #print("remain_size: ", remain_size) memory_state_old = memory_state carry_state_old = carry_state memory_state_new = memory_state carry_state_new = carry_state for e in range(0, epoch_size): with tf.GradientTape() as tape: return_value = self.get_loss(states[divide_size*e:divide_size*(e+1),:,:,:], actions[divide_size*e:divide_size*(e+1)], discounted_r[divide_size*e:divide_size*(e+1)], memory_state_old, carry_state_old, memory_state_new, carry_state_new) total_loss = return_value[0] memory_state_old = return_value[1] carry_state_old = return_value[2] memory_state_new = return_value[3] carry_state_new = return_value[4] #print("total_loss: ", total_loss) #print("") grads = tape.gradient(total_loss, 
self.policy.trainable_variables) self.optimizer.apply_gradients(zip(grads, self.policy.trainable_variables)) if remain_size != 0: with tf.GradientTape() as tape: total_loss = self.get_loss(states[divide_size*epoch_size:divide_size*epoch_size+remain_size,:,:,:], actions[divide_size*epoch_size:divide_size*epoch_size+remain_size], discounted_r[divide_size*epoch_size:divide_size*epoch_size+remain_size], memory_state_old, carry_state_old, memory_state_new, carry_state_new) for a, b in zip(self.policy_old.variables, self.policy.variables): a.assign(b) # copies the variables of model_b into model_a return total_loss def load(self, model_name): self.ActorCritic = load_model(model_name, compile=False) #self.Critic = load_model(Critic_name, compile=False) def save(self): self.ActorCritic.save(self.model_name) #self.Critic.save(self.Model_name + '_Critic.h5') def GetScoreAverage(self, score, episode): self.scores.append(score) self.episodes.append(episode) self.average.append(sum(self.scores[-50:]) / len(self.scores[-50:])) return self.average[-1] def imshow(self, image, rem_step=0): cv2.imshow(self.model_name + str(rem_step), image[rem_step,...]) if cv2.waitKey(25) & 0xFF == ord("q"): cv2.destroyAllWindows() return def reset(self, env): image_memory = np.zeros(self.state_size) env.reset() state = env.observations() state = state['RGB_INTERLEAVED'] return state def step(self, action, env, image_memory): reward = env.step(ACTIONS[ACTION_LIST[action]]) if not env.is_running(): done = True next_state = np.zeros((64,64,3)) reward = 0 else: done = False next_state = env.observations() next_state = next_state['RGB_INTERLEAVED'] return next_state, reward, done def train(self, n_threads): self.env.close() # Instantiate one environment per thread envs = [deepmind_lab.Lab(self.env_name, ['RGB_INTERLEAVED'], {'fps': '15', 'width': '64', 'height': '64'}) for i in range(n_threads)] # Create threads threads = [threading.Thread( target=self.train_threading, daemon=True, args=(self, envs[i], 
i)) for i in range(n_threads)] for t in threads: time.sleep(2) t.start() for t in threads: time.sleep(10) t.join() def render(self, obs): im_rgb = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB) cv2.imshow("obs", im_rgb) cv2.waitKey(1) def train_threading(self, agent, env, thread): total_step = 0 while self.episode < self.EPISODES: # Reset episode score, done, SAVING = 0, False, '' state = self.reset(env) state = state / 255.0 state = state.astype(np.float32) states, actions, rewards = [], [], [] memory_state = np.zeros([1,64], dtype=np.float32) carry_state = np.zeros([1,64], dtype=np.float32) initial_memory_state = memory_state initial_carry_state = carry_state while True: action, prediction = agent.act_old(state, memory_state, carry_state, training=False) #print("action: ", action) memory_state = prediction[2] carry_state = prediction[3] #print("action: ", action) next_state, reward, done = self.step(action.numpy(), env, state) #print("next_state: ", next_state) next_state = next_state / 255.0 next_state = next_state.astype(np.float32) #print("next_state.shape: ", next_state.shape) if thread == 0: self.render(next_state) if done == True: break states.append(np.array([state])) actions.append(action) rewards.append(reward) score += reward state = next_state self.lock.acquire() self.replay(states, actions, rewards, initial_memory_state, initial_carry_state) states, actions, rewards = [], [], [] self.lock.release() # Update episode count with self.lock: average = self.GetScoreAverage(score, self.episode) with writer.as_default(): # other model code would go here tf.summary.scalar("average", average, step=self.episode) writer.flush() # saving best models if average >= self.max_average: self.max_average = average #self.save() SAVING = "SAVING" else: SAVING = "" print("episode: {}/{}, thread: {}, score: {}, average: {:.2f} {}".format(self.episode, self.EPISODES, thread, score, average, SAVING)) if(self.episode < self.EPISODES): self.episode += 1 env.close() def test(self, 
Actor_name, Critic_name): self.load(Actor_name, Critic_name) for e in range(100): state = self.reset(self.env) done = False score = 0 while not done: self.env.render() action = np.argmax(self.Actor.predict(state)) state, reward, done, _ = self.step(action, self.env, state) score += reward if done: print("episode: {}/{}, score: {}".format(e, self.EPISODES, score)) break self.env.close() if __name__ == "__main__": env_name = 'ctf_simple' agent = PPOAgent(env_name) #agent.run() # use as A2C agent.train(n_threads=5) # use as A3C #agent.test('Models/Pong-v0_A3C_2.5e-05_Actor.h5', '') # -
DMLab_Distributed_PPO_TF2_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Set up # %pylab # %pylab inline # %matplotlib inline # %load_ext autoreload # %autoreload 2 import tqdm import spectral from sklearn.cluster import KMeans, MiniBatchKMeans # # 1. Load data <br/> # Link: http://apex.vgt.vito.be/content/free-data-cubes?fbclid=IwAR1yKu7SKlNokPOSOY4fXDrQg1Di-8X6FiA9j8_kBKlWbcciK5au9n0vRaI img = spectral.open_image( 'APEX_OSD_Package_1.0/APEX_OSD_V1_calibr_cube.hdr' ) data = img.load() X = data.reshape(-1, data.shape[-1]) X_std = (X - X.mean()) / X.std() X_norm = (X - np.min(X)) / (np.max(X) - np.min(X)) def show_results(model): ''' Use given model to break down the spectral photo into layers ''' Y = model.labels_.reshape(data.shape[0], data.shape[1]).astype(np.uint8) labels_no = np.unique(model.labels_).size table_width = 3 table_hight = (labels_no + table_width - 1) // table_width fig, axes = plt.subplots(table_hight, table_width) fig.set_size_inches(15, table_hight * 5) for i in tqdm.tqdm(np.unique(model.labels_), desc='Plotting groups', position=0, leave=True): map_feature = np.zeros(Y.shape, dtype=np.uint8) map_feature[Y == i] = 1 row, col = i // table_width, i % table_width if table_hight == 1: axes[col].imshow(map_feature, cmap='summer') axes[col].set_title(f'Group: {i}') else: axes[row][col].imshow(map_feature, cmap='summer') axes[row][col].set_title(f'Group: {i}') plt.figure(figsize=(15, 10)) plt.title('All groups') plt.imshow(Y, cmap='gist_earth') # %%time # Breaking the photo into 3 layers model = MiniBatchKMeans(n_clusters=3, n_init=10) model.fit(X) show_results(model) # %%time # Breaking the photo into 12 layers model = MiniBatchKMeans(n_clusters=12, n_init=10) model.fit(X) show_results(model) # %%time # Using regular KMeans takes much longer model = KMeans(n_clusters=3, n_init=10) model.fit(X) 
show_results(model)
Projects/KMEANS/multispectral_image.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Memoization # - based on the Latin word memorandum, meaning "to be remembered" <img src="brain.jpg" width="25%" style="float:right"> # - similar to word memorization, its a technique used in coding to improve program runtime by memorizing intermediate solutions # - if an intermediate results (from some computations) are used over and again, the results can be remembered/stored to avoid unnecessary repeating calculations # - using dict type datastructure, one can memoize intermediate results from functions esp. in recurisive solutions # ## naive recursive fib function # + count = 0 def fib(n): global count count += 1 if n <= 1: return 1 f = fib(n-1) + fib(n-2) return f n=30 #40, 50? takes a while print("fib at {}th position = {}".format(n, fib(n))) print("fib function count = {}".format(count)) # - # ## theoritical computational complexity # - Worst case Big-Oh Time Complexity: O(n) = time to calculate Fib(n-1) + Fib(n-2) + time to add them: O(1) # - T(n) = T(n-1) + T(n-2) + O(1) # - T(n) = O(2<sup>n-1</sup>) + O(2<sup>n-2</sup>) + O(1) # - T(n) = O(2<sup>n</sup>) # - Space Complexity = O(n) due to call stack # ## finding actual time - timeit # - timeit - measures execution time of small code snippets # - timeit.timeit(stmt='pass', setup='pass', timer=[default timer], number=1000000, globals=None) # - returns time in seconds import timeit help(timeit) #print(globals()) import timeit print(timeit.timeit('fib(30)', number=1, globals=globals())) # ## memoized recursive fib function # + memo = {} count = 0 def MemoizedFib(n): global memo, count count += 1 if n <= 1: return 1 if n in memo: return memo[n] memo[n] = MemoizedFib(n-1) + MemoizedFib(n-2) return memo[n] n=60 #try 40, 50, 60 print("fib at {}th position = {}".format(n, MemoizedFib(n))) 
print("fib function count = {}".format(count)) # - import timeit print(timeit.timeit('MemoizedFib(1000)', number=1, globals=globals())) # ## computational complexity of memoized fib # - Time Complexity - O(n) # - Space Complexity - O(n) # ## exercise # - Write a program that finds factorials of a bunch of positive integer numbers? Would memoization improve time complexity of the program? # find the factorials of the first 20 positive numbers # optimize the program so it finds the factorials in the # shortest time possible nums = list(range(1, 21)) print(nums)
Memoization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Titanic Survival

# ## Data Exploration and Visualization

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

train = pd.read_csv('titanic_train.csv')
train.head(3)

# missing value plotting
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

""" Yellow lines represent the missing values """

sns.set_style('whitegrid')

# count plot of survival or non-survival
sns.countplot(x='Survived', data=train)

# survival or non-survival order by sex
sns.countplot(x='Survived', hue='Sex', data=train)

# survival or non-survival order by class
sns.countplot(x='Survived', hue='Pclass', data=train)

# distribution plot
sns.displot(train['Age'].dropna(),bins=30)

# pandas own hist plot
train['Age'].plot.hist(bins=30)

train.info()

sns.countplot(x='SibSp',data=train)

train['Fare'].hist(bins=30,figsize=(10,4))

import cufflinks as cf
cf.go_offline()

# hist plot using cufflinks
train['Fare'].iplot(kind='hist',bins=50)

# ## Data Cleaning

# Mean age per passenger class — these motivate the hard-coded
# imputation values used in impute_age below.
train[train['Pclass']==1]['Age'].mean()
train[train['Pclass']==2]['Age'].mean()
train[train['Pclass']==3]['Age'].mean()

# #### Fill missing age

def impute_age(cols):
    """Impute a missing Age with the rounded mean age of the passenger's class.

    `cols` is a (Age, Pclass) row from train[['Age', 'Pclass']]; positional
    access below relies on that exact column order.
    """
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        # Rounded per-class mean ages, computed in the cells above.
        if Pclass == 1:
            return 38
        elif Pclass==2:
            return 29
        else:
            return 25
    else:
        return Age

train['Age'] = train[['Age','Pclass']].apply(impute_age,axis=1)

# Re-check missingness: only Cabin (and a couple of Embarked rows) remain.
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

# Drop Missing value Column
train.drop('Cabin',axis=1,inplace=True)
train.dropna(inplace=True)

# Dummy Data — drop_first avoids the dummy-variable trap (collinearity).
sex = pd.get_dummies(train['Sex'], drop_first=True)
sex.head()

embark=pd.get_dummies(train['Embarked'],drop_first=True)
embark.head()

# Concatenate
train = pd.concat([train,sex,embark],axis=1)
train.head(3)

# Drop Column — remove the original categorical/text columns now encoded,
# plus identifiers that carry no predictive signal.
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
train.drop('PassengerId',axis=1,inplace=True)
train.head(3)

# Machine Learning
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

X = train.drop('Survived',axis=1)
y = train['Survived']

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=101)

logmodel = LogisticRegression(solver='lbfgs',max_iter=1000)
logmodel.fit(X_train,y_train)

predictions = logmodel.predict(X_test)

from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))

# NOTE(review): the test file below is only cleaned/visualised — it is not
# pushed through the same encoding pipeline or scored; confirm intent.
test = pd.read_csv('titanic_test.csv')
test.head(5)

sns.heatmap(test.isnull(),yticklabels = False, cbar=False, cmap='viridis')

test.drop('Cabin',axis=1,inplace=True)

sns.heatmap(test.isnull(),yticklabels = False, cbar=False, cmap='viridis')
Logistic Regression/.ipynb_checkpoints/Logistic Regression-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # declare list with any types of elements and use if-else and if-elif-else to get the result of your condition

list_1 = [1, 2, 3, 4, 5, 10, 20, 50, 100]

# Membership check: echo the element when present, otherwise a fixed message.
if 500 not in list_1:
    print("not in the list")
else:
    print(500)

# Bare expression so the notebook still displays the sum of the elements.
total = sum(list_1)
total

# Classify the element sum into one of three ranges.
if total == 0:
    print("The sum of elements is zero")
elif total < 150:
    print("The sum is less than 150")
else:
    print("The sum is not within 0-150")
Lesson 3(if-else-elif).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.3 64-bit (''geopandas-alive'': conda)' # name: python38364bitgeopandasalivecondaf3e8ba5bf7cb4f5380e5054548cb8b2d # --- # Geopandas and Pandas_Alive # # Following on from a previous post on [making animated charts with `pandas_alive`](https://jackmckew.dev/creating-animated-plots-with-pandas_alive.html), let's go into generating animated charts specifically for geospatial data with `geopandas`. Support for `geopandas` was introduced into `pandas_alive` in version 0.2.0, along with functionality to interface with `contextily` for enabling basemaps. The visualisation(s) we will make today, are initially was `pandas_alive` was created for! # # When setting up `geopandas` & `pandas_alive` on Windows, the recommended set up is using [Anaconda](https://www.anaconda.com/) as `geopandas` requires GDAL, which is not a trivial process to set up on Windows. Luckily Anaconda distributes GDAL along with geopandas so we don't have to worry about it. We also need to install `descartes` (support for plotting polygons) and `contextily` for basemap support. These can be installed with: # # - `descartes` : `conda install -c conda-forge descartes` # - `contextily` : `conda install -c conda-forge contextily` # # `pandas_alive` also supports progress bars with `tqdm`, this can be installed via `conda install tqdm` and enabled using the enable_progress_bar=True keyword in `plot_animated()` # # First off let's check out the end-result visualisation we'll be building today: # # ![NSW COVID Visualisation]({static img/gpd-nsw-covid.gif}) # # Now let's get started, as always we begin by importing all the neccessary libraries. 
# + import geopandas import pandas as pd import pandas_alive import contextily import matplotlib.pyplot as plt import urllib.request, json # - # The data we wish to visualise is hosted through an API, so we will use `urllib` to load the json response and then find the dataset link (provided as a csv). Once we determine what the link is, we can use `pandas` to read the csv directly from the url. We also read in a dataset of matching geospatial co-ordinates to the postcodes. # + with urllib.request.urlopen( "https://data.nsw.gov.au/data/api/3/action/package_show?id=aefcde60-3b0c-4bc0-9af1-6fe652944ec2" ) as url: data = json.loads(url.read().decode()) # Extract url to csv component covid_nsw_data_url = data["result"]["resources"][0]["url"] # Read csv from data API url nsw_covid = pd.read_csv(covid_nsw_data_url) # Source for postcode dataset https://www.matthewproctor.com/australian_postcodes postcode_dataset = pd.read_csv("data/postcode-data.csv") display(nsw_covid.head()) display(postcode_dataset.head()) # - # This data isn't in the format we need it to be, so let's do some preprocessing, in particular we: # # - Fill in any gaps (with error value 9999) # - Convert the date string to a datetime object # - Groupby to get number of cases by date & postcode # - Unstack the multi-index that groupby returns # - Drop the unused column level # - Fill any missing values now with 0 cases (as these would be unprovided) # + # Prepare data from NSW health dataset nsw_covid = nsw_covid.fillna(9999) nsw_covid["postcode"] = nsw_covid["postcode"].astype(int) # Convert the date time string to a datetime object nsw_covid['notification_date'] = pd.to_datetime(nsw_covid['notification_date'],dayfirst=True) grouped_df = nsw_covid.groupby(["notification_date", "postcode"]).size() grouped_df = pd.DataFrame(grouped_df).unstack() grouped_df.columns = grouped_df.columns.droplevel().astype(str) grouped_df = grouped_df.fillna(0) grouped_df.index = pd.to_datetime(grouped_df.index) cases_df = 
grouped_df cases_df.to_csv('data/nsw-covid-cases-by-postcode.csv') # - # Now we can start by creating an area chart, and labelling any events in particular with vertical bars. # + from datetime import datetime bar_chart = cases_df.sum(axis=1).plot_animated( filename='area-chart.gif', kind='line', label_events={ 'Ruby Princess Disembark':datetime.strptime("19/03/2020", "%d/%m/%Y"), 'Lockdown':datetime.strptime("31/03/2020", "%d/%m/%Y") }, fill_under_line_color="blue", enable_progress_bar=True ) # - # ![Area Chart]({static img/gpd-area-chart.gif}) # Now it's time to prepare the dataset for our geospatial visualisations with `geopandas`. In particular: # # - Drop any invalid longitudes / latitudes from our postcode dataset # - Drop any longitudes / latitudes that are 0 # - Match the postcodes in each dataset to retrieve the equivalent longitude / latitude # - Remove the redundant/duplicated columns # - Package into a geopackage (ensure to keep the index column separate) # + # Clean data in postcode dataset prior to matching grouped_df = grouped_df.T postcode_dataset = postcode_dataset[postcode_dataset['Longitude'].notna()] postcode_dataset = postcode_dataset[postcode_dataset['Longitude'] != 0] postcode_dataset = postcode_dataset[postcode_dataset['Latitude'].notna()] postcode_dataset = postcode_dataset[postcode_dataset['Latitude'] != 0] postcode_dataset['Postcode'] = postcode_dataset['Postcode'].astype(str) # Build GeoDataFrame from Lat Long dataset and make map chart grouped_df['Longitude'] = grouped_df.index.map(postcode_dataset.set_index('Postcode')['Longitude'].to_dict()) grouped_df['Latitude'] = grouped_df.index.map(postcode_dataset.set_index('Postcode')['Latitude'].to_dict()) gdf = geopandas.GeoDataFrame( grouped_df, geometry=geopandas.points_from_xy(grouped_df.Longitude, grouped_df.Latitude),crs="EPSG:4326") gdf = gdf.dropna() # Prepare GeoDataFrame for writing to geopackage gdf = gdf.drop(['Longitude','Latitude'],axis=1) gdf.columns = gdf.columns.astype(str) 
gdf['postcode'] = gdf.index gdf.to_file("data/nsw-covid19-cases-by-postcode.gpkg", layer='nsw-postcode-covid', driver="GPKG") # - # Before we merge together all the charts, let's plot the prepared geospatial data on it's own. # + # Prepare GeoDataFrame for plotting gdf.index = gdf.postcode gdf = gdf.drop('postcode',axis=1) gdf = gdf.to_crs("EPSG:3857") #Web Mercator map_chart = gdf.plot_animated(filename='map-chart.gif',title="Cases by Location",basemap_format={'source':contextily.providers.Stamen.Terrain},cmap='cool') # - # ![Map Chart]({static img/gpd-map-chart.gif}) # Finally let's merge all these charts together into a single chart! # + grouped_df = pd.read_csv('data/nsw-covid-cases-by-postcode.csv', index_col=0, parse_dates=[0]) line_chart = ( grouped_df.sum(axis=1) .cumsum() .fillna(0) .plot_animated(kind="line", period_label=False, title="Cumulative Total Cases") ) def current_total(values): total = values.sum() s = f'Total : {int(total)}' return {'x': .85, 'y': .2, 's': s, 'ha': 'right', 'size': 11} race_chart = grouped_df.cumsum().plot_animated( n_visible=5, title="Cases by Postcode", period_label=False,period_summary_func=current_total ) import time timestr = time.strftime("%d/%m/%Y") plots = [bar_chart, line_chart, map_chart, race_chart] from matplotlib import rcParams rcParams.update({"figure.autolayout": False}) figs = plt.figure() gs = figs.add_gridspec(2, 3, hspace=0.5) f3_ax1 = figs.add_subplot(gs[0, :]) f3_ax1.set_title(bar_chart.title) bar_chart.ax = f3_ax1 f3_ax2 = figs.add_subplot(gs[1, 0]) f3_ax2.set_title(line_chart.title) line_chart.ax = f3_ax2 f3_ax3 = figs.add_subplot(gs[1, 1]) f3_ax3.set_title(map_chart.title) map_chart.ax = f3_ax3 f3_ax4 = figs.add_subplot(gs[1, 2]) f3_ax4.set_title(race_chart.title) race_chart.ax = f3_ax4 timestr = cases_df.index.max().strftime("%d/%m/%Y") figs.suptitle(f"NSW COVID-19 Confirmed Cases up to {timestr}") pandas_alive.animate_multiple_plots( 'nsw-covid.gif', plots, figs ) # - # ![NSW COVID Chart]({static 
img/gpd-nsw-covid.gif}) # Pandas_Alive also supports animating polygon GeoDataFrames! # + import geopandas import pandas_alive import contextily gdf = geopandas.read_file('data/italy-covid-region.gpkg') gdf.index = gdf.region gdf = gdf.drop('region',axis=1) map_chart = gdf.plot_animated(filename='examples/example-geo-polygon-chart.gif',basemap_format={'source':contextily.providers.Stamen.Terrain}) # - # ![Geopandas Polygon Chart]({static img/gpd-polygon-chart.gif})
content/2020/geopandas-and-pandas-alive/notebooks/geopandas-alive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="6bYaCABobL5q" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab_type="code" id="FlUw7tSKbtg4" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="xc1srSc51n_4" # # Using the SavedModel format # + [markdown] colab_type="text" id="-nBUqG2rchGH" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/alpha/guide/saved_model"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="CPE-fshLTsXU" # A SavedModel contains a complete TensorFlow program, including weights and computation. 
It does not require the original model building code to run, which makes it useful for sharing or deploying (with [TFLite](https://tensorflow.org/lite), [TensorFlow.js](https://js.tensorflow.org/), [TensorFlow Serving](https://www.tensorflow.org/tfx/serving/tutorials/Serving_REST_simple), or [TFHub](https://tensorflow.org/hub)). # # If you have code for a model in Python and want to load weights into it, see the [guide to training checkpoints](./checkpoints.ipynb). # # # # For a quick introduction, this section exports a pre-trained Keras model and serves image classification requests with it. The rest of the guide will fill in details and discuss other ways to create SavedModels. # + colab_type="code" id="Le5OB-fBHHW7" colab={} from __future__ import absolute_import, division, print_function, unicode_literals # !pip install tensorflow==2.0.0-alpha0 import tensorflow as tf from matplotlib import pyplot as plt import numpy as np # + colab_type="code" id="SofdPKo0G8Lb" colab={} file = tf.keras.utils.get_file( "grace_hopper.jpg", "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg") img = tf.keras.preprocessing.image.load_img(file, target_size=[224, 224]) plt.imshow(img) plt.axis('off') x = tf.keras.preprocessing.image.img_to_array(img) x = tf.keras.applications.mobilenet.preprocess_input( x[tf.newaxis,...]) # + [markdown] colab_type="text" id="sqVcFL10JkF0" # We'll use an image of Grace Hopper as a running example, and a Keras pre-trained image classification model since it's easy to use. Custom models work too, and are covered in detail later. 
# + colab_type="code" id="JhVecdzJTsKE" colab={} #tf.keras.applications.vgg19.decode_predictions labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt') imagenet_labels = np.array(open(labels_path).read().splitlines()) # + colab_type="code" id="aEHSYjW6JZHV" colab={} pretrained_model = tf.keras.applications.MobileNet() result_before_save = pretrained_model(x) print() decoded = imagenet_labels[np.argsort(result_before_save)[0,::-1][:5]+1] print("Result before saving:\n", decoded) # + [markdown] colab_type="text" id="r4KIsQDZJ5PS" # The top prediction for this image is "military uniform". # + colab_type="code" id="8nfznDmHCW6F" colab={} tf.saved_model.save(pretrained_model, "/tmp/mobilenet/1/") # + [markdown] colab_type="text" id="pyX-ETE3wX63" # The save-path follows a convention used by TensorFlow Serving where the last path component (`1/` here) is a version number for your model - it allows tools like Tensorflow Serving to reason about the relative freshness. # # SavedModels have named functions called signatures. Keras models export their forward pass under the `serving_default` signature key. The [SavedModel command line interface](#saved_model_cli) is useful for inspecting SavedModels on disk: # + colab_type="code" id="djmcTavtIZyT" colab={} # !saved_model_cli show --dir /tmp/mobilenet/1 --tag_set serve --signature_def serving_default # + [markdown] colab_type="text" id="VCZZ8avqLF1g" # We can load the SavedModel back into Python with `tf.saved_model.load` and see how Admiral Hopper's image is classified. # + colab_type="code" id="NP2UpVFRV7N_" colab={} loaded = tf.saved_model.load("/tmp/mobilenet/1/") print(list(loaded.signatures.keys())) # ["serving_default"] # + [markdown] colab_type="text" id="K5srGzowfWff" # Imported signatures always return dictionaries. 
# + colab_type="code" id="ChFLpegYfQGR" colab={} infer = loaded.signatures["serving_default"] print(infer.structured_outputs) # + [markdown] colab_type="text" id="cJYyZnptfuru" # Running inference from the SavedModel gives the same result as the original model. # + colab_type="code" id="9WjGEaS3XfX7" colab={} labeling = infer(tf.constant(x))["reshape_2"] decoded = imagenet_labels[np.argsort(labeling)[0,::-1][:5]+1] print("Result after saving and loading:\n", decoded) # + [markdown] colab_type="text" id="SJEkdXjTWbtl" # ## Serving the model # # SavedModels are usable from Python, but production environments typically use a dedicated service for inference. This is easy to set up from a SavedModel using TensorFlow Serving. # # See the [TensorFlow Serving REST tutorial](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/tutorials/Serving_REST_simple.ipynb) for more details about serving, including instructions for installing `tensorflow_model_server` in a notebook or on your local machine. As a quick sketch, to serve the `mobilenet` model exported above just point the model server at the SavedModel directory: # # ```bash # nohup tensorflow_model_server \ # --rest_api_port=8501 \ # --model_name=mobilenet \ # --model_base_path="/tmp/mobilenet" >server.log 2>&1 # ``` # # Then send a request. # # ```python # # # !pip install requests # import json # import numpy # import requests # data = json.dumps({"signature_name": "serving_default", # "instances": x.tolist()}) # headers = {"content-type": "application/json"} # json_response = requests.post('http://localhost:8501/v1/models/mobilenet:predict', # data=data, headers=headers) # predictions = numpy.array(json.loads(json_response.text)["predictions"]) # ``` # # The resulting `predictions` are identical to the results from Python. 
# + [markdown] colab_type="text" id="Bi0ILzu1XdWw" # ### SavedModel format # # A SavedModel is a directory containing serialized signatures and the state needed to run them, including variable values and vocabularies. # # + colab_type="code" id="6u3YZuYZXyTO" colab={} # !ls /tmp/mobilenet/1 # assets saved_model.pb variables # + [markdown] colab_type="text" id="ple4X5utX8ue" # The `saved_model.pb` file contains a set of named signatures, each identifying a function. # # SavedModels may contain multiple sets of signatures (multiple MetaGraphs, identified with the `tag_set` argument to `saved_model_cli`), but this is rare. APIs which create multiple sets of signatures include [`tf.Estimator.experimental_export_all_saved_models`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#experimental_export_all_saved_models) and in TensorFlow 1.x `tf.saved_model.Builder`. # + colab_type="code" id="Pus0dOYTYXbI" colab={} # !saved_model_cli show --dir /tmp/mobilenet/1 --tag_set serve # + [markdown] colab_type="text" id="eALHpGvRZOhk" # The `variables` directory contains a standard training checkpoint (see the [guide to training checkpoints](./checkpoints.ipynb)). # + colab_type="code" id="EDYqhDlNZAC2" colab={} # !ls /tmp/mobilenet/1/variables # + [markdown] colab_type="text" id="VKmaZQpHahGh" # The `assets` directory contains files used by the TensorFlow graph, for example text files used to initialize vocabulary tables. It is unused in this example. # # SavedModels may have an `assets.extra` directory for any files not used by the TensorFlow graph, for example information for consumers about what to do with the SavedModel. TensorFlow itself does not use this directory. # + [markdown] colab_type="text" id="zIceoF_CYmaF" # ### Exporting custom models # # In the first section, `tf.saved_model.save` automatically determined a signature for the `tf.keras.Model` object. 
This worked because Keras `Model` objects have an unambiguous method to export and known input shapes. `tf.saved_model.save` works just as well with low-level model building APIs, but you will need to indicate which function to use as a signature if you're planning to serve a model. # + colab_type="code" id="6EPvKiqXMm3d" colab={} class CustomModule(tf.Module): def __init__(self): super(CustomModule, self).__init__() self.v = tf.Variable(1.) @tf.function def __call__(self, x): return x * self.v @tf.function(input_signature=[tf.TensorSpec([], tf.float32)]) def mutate(self, new_v): self.v.assign(new_v) module = CustomModule() # + [markdown] colab_type="text" id="fUrCTSK2HV2b" # This module has two methods decorated with `tf.function`. While these functions will be included in the SavedModel and available if the SavedModel is reloaded via `tf.saved_model.load` into a Python program, without explicitly declaring the serving signature tools like Tensorflow Serving and `saved_model_cli` cannot access them. # # `module.mutate` has an `input_signature`, and so there is enough information to save its computation graph in the SavedModel already. `__call__` has no signature and so this method needs to be called before saving. # + colab_type="code" id="85PUO9iWH7xn" colab={} module(tf.constant(0.)) tf.saved_model.save(module, "/tmp/module_no_signatures") # + [markdown] colab_type="text" id="eyWD4wr-Ng7m" # For functions without an `input_signature`, any input shapes used before saving will be available after loading. Since we called `__call__` with just a scalar, it will accept only scalar values. # + colab_type="code" id="xy7oCex1Ibj1" colab={} imported = tf.saved_model.load("/tmp/module_no_signatures") assert 3. == imported(tf.constant(3.)).numpy() imported.mutate(tf.constant(2.)) assert 6. == imported(tf.constant(3.)).numpy() # + [markdown] colab_type="text" id="lbLNVfVJOTfb" # The function will not accept new shapes like vectors. 
# # ```python # imported(tf.constant([3.])) # ``` # # <pre> # ValueError: Could not find matching function to call for canonicalized inputs ((<tf.Tensor 'args_0:0' shape=(1,) dtype=float32>,), {}). Only existing signatures are [((TensorSpec(shape=(), dtype=tf.float32, name=u'x'),), {})]. # </pre> # + [markdown] colab_type="text" id="WrJqD-epPGnr" # `get_concrete_function` lets you add input shapes to a function without calling it. It takes `tf.TensorSpec` objects in place of `Tensor` arguments, indicating the shapes and dtypes of inputs. Shapes can either be `None`, indicating that any shape is acceptable, or a list of axis sizes. If an axis size is `None` then any size is acceptable for that axis. `tf.TensorSpecs` can also have names, which default to the function's argument keywords ("x" here). # + colab_type="code" id="1m9Okb75PFmb" colab={} module.__call__.get_concrete_function(x=tf.TensorSpec([None], tf.float32)) tf.saved_model.save(module, "/tmp/module_no_signatures") imported = tf.saved_model.load("/tmp/module_no_signatures") assert [3.] == imported(tf.constant([3.])).numpy() # + [markdown] colab_type="text" id="gvy3GFl4IfSW" # Functions and variables attached to objects like `tf.keras.Model` and `tf.Module` are available on import, but many Python types and attributes are lost. The Python program itself is not saved in the SavedModel. # # We didn't identify any of the functions we exported as a signature, so it has none. # + colab_type="code" id="uNTV6o_TIeRu" colab={} # !saved_model_cli show --dir /tmp/module_no_signatures --tag_set serve # + [markdown] colab_type="text" id="BiNtaMZSI8Tb" # ## Identifying a signature to export # # To indicate that a function should be a signature, specify the `signatures` argument when saving. 
# + colab_type="code" id="_pAdgIORR2yH" colab={} call = module.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32)) tf.saved_model.save(module, "/tmp/module_with_signature", signatures=call) # + [markdown] colab_type="text" id="lHiBm-kdKBmG" # Notice that we first converted the `tf.function` to a `ConcreteFunction` with `get_concrete_function`. This is necessary because the function was created without a fixed `input_signature`, and so did not have a definite set of `Tensor` inputs associated with it. # + colab_type="code" id="nAzRHR0UT4hv" colab={} # !saved_model_cli show --dir /tmp/module_with_signature --tag_set serve --signature_def serving_default # + colab_type="code" id="0B25WsscTZoC" colab={} imported = tf.saved_model.load("/tmp/module_with_signature") signature = imported.signatures["serving_default"] assert [3.] == signature(x=tf.constant([3.]))["output_0"].numpy() imported.mutate(tf.constant(2.)) assert [6.] == signature(x=tf.constant([3.]))["output_0"].numpy() assert 2. == imported.v.numpy() # + [markdown] colab_type="text" id="_gH91j1IR4tq" # We exported a single signature, and its key defaulted to "serving_default". To export multiple signatures, pass a dictionary. # + colab_type="code" id="6VYAiQmLUiox" colab={} @tf.function(input_signature=[tf.TensorSpec([], tf.string)]) def parse_string(string_input): return imported(tf.strings.to_number(string_input)) signatures = {"serving_default": parse_string, "from_float": imported.signatures["serving_default"]} tf.saved_model.save(imported, "/tmp/module_with_multiple_signatures", signatures) # + colab_type="code" id="8IPx_0RWEx07" colab={} # !saved_model_cli show --dir /tmp/module_with_multiple_signatures --tag_set serve # + [markdown] colab_type="text" id="sRdSPjKFfQpx" # `saved_model_cli` can also run SavedModels directly from the command line. 
# + colab_type="code" id="hcHmgVytfODo" colab={} # !saved_model_cli run --dir /tmp/module_with_multiple_signatures --tag_set serve --signature_def serving_default --input_exprs="string_input='3.'" # !saved_model_cli run --dir /tmp/module_with_multiple_signatures --tag_set serve --signature_def from_float --input_exprs="x=3." # + [markdown] colab_type="text" id="WiNhHa_Ne82K" # ## Fine-tuning imported models # # Variable objects are available, and we can backprop through imported functions. # + colab_type="code" id="mSchcIB2e-n0" colab={} optimizer = tf.optimizers.SGD(0.05) def train_step(): with tf.GradientTape() as tape: loss = (10. - imported(tf.constant(2.))) ** 2 variables = tape.watched_variables() grads = tape.gradient(loss, variables) optimizer.apply_gradients(zip(grads, variables)) return loss # + colab_type="code" id="Yx9bO2taJJxm" colab={} for _ in range(10): # "v" approaches 5, "loss" approaches 0 print("loss={:.2f} v={:.2f}".format(train_step(), imported.v.numpy())) # + [markdown] colab_type="text" id="qyL9tOPrg5Zw" # ## Control flow in SavedModels # # Anything that can go in a `tf.function` can go in a SavedModel. With [AutoGraph](./autograph.ipynb) this includes conditional logic which depends on Tensors, specified with regular Python control flow. # + colab_type="code" id="tfbh3uGMgBpH" colab={} @tf.function(input_signature=[tf.TensorSpec([], tf.int32)]) def control_flow(x): if x < 0: tf.print("Invalid!") else: tf.print(x % 3) to_export = tf.Module() to_export.control_flow = control_flow tf.saved_model.save(to_export, "/tmp/control_flow") # + colab_type="code" id="bv4EXevIjHch" colab={} imported = tf.saved_model.load("/tmp/control_flow") imported.control_flow(tf.constant(-1)) # Invalid! 
imported.control_flow(tf.constant(2)) # 2 imported.control_flow(tf.constant(3)) # 0 # + [markdown] colab_type="text" id="Dk5wWyuMpuHx" # ## SavedModels from Estimators # # Estimators export SavedModels through [`tf.Estimator.export_saved_model`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#export_saved_model). See the [guide to Estimator](https://www.tensorflow.org/guide/estimators) for details. # + colab_type="code" id="B9KQq5qzpzbK" colab={} input_column = tf.feature_column.numeric_column("x") estimator = tf.estimator.LinearClassifier(feature_columns=[input_column]) def input_fn(): return tf.data.Dataset.from_tensor_slices( ({"x": [1., 2., 3., 4.]}, [1, 1, 0, 0])).repeat(200).shuffle(64).batch(16) estimator.train(input_fn) serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn( tf.feature_column.make_parse_example_spec([input_column])) export_path = estimator.export_saved_model( "/tmp/from_estimator/", serving_input_fn) # + [markdown] colab_type="text" id="XJ4PJ-Cl4060" # This SavedModel accepts serialized `tf.Example` protocol buffers, which are useful for serving. But we can also load it with `tf.saved_model.load` and run it from Python. # + colab_type="code" id="c_BUBBNB1UH9" colab={} imported = tf.saved_model.load(export_path) def predict(x): example = tf.train.Example() example.features.feature["x"].float_list.value.extend([x]) return imported.signatures["predict"]( examples=tf.constant([example.SerializeToString()])) # + colab_type="code" id="C1ylWZCQ1ahG" colab={} print(predict(1.5)) print(predict(3.5)) # + [markdown] colab_type="text" id="_IrCCm0-isqA" # `tf.estimator.export.build_raw_serving_input_receiver_fn` allows you to create input functions which take raw tensors rather than `tf.train.Example`s. 
# + [markdown] colab_type="text" id="Co6fDbzw_UnD" # ## Load a SavedModel in C++ # # The C++ version of the SavedModel [loader](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/saved_model/loader.h) provides an API to load a SavedModel from a path, while allowing SessionOptions and RunOptions. You have to specify the tags associated with the graph to be loaded. The loaded version of SavedModel is referred to as SavedModelBundle and contains the MetaGraphDef and the session within which it is loaded. # # ```C++ # const string export_dir = ... # SavedModelBundle bundle; # ... # LoadSavedModel(session_options, run_options, export_dir, {kSavedModelTagTrain}, # &bundle); # ``` # + [markdown] colab_type="text" id="b33KuyEuAO3Z" # <a id=saved_model_cli/> # # ## Details of the SavedModel command line interface # # You can use the SavedModel Command Line Interface (CLI) to inspect and # execute a SavedModel. # For example, you can use the CLI to inspect the model's `SignatureDef`s. # The CLI enables you to quickly confirm that the input # Tensor dtype and shape match the model. Moreover, if you # want to test your model, you can use the CLI to do a sanity check by # passing in sample inputs in various formats (for example, Python # expressions) and then fetching the output. # # # ### Install the SavedModel CLI # # Broadly speaking, you can install TensorFlow in either of the following # two ways: # # * By installing a pre-built TensorFlow binary. # * By building TensorFlow from source code. # # If you installed TensorFlow through a pre-built TensorFlow binary, # then the SavedModel CLI is already installed on your system # at pathname `bin\saved_model_cli`. 
# # If you built TensorFlow from source code, you must run the following # additional command to build `saved_model_cli`: # # ``` # $ bazel build tensorflow/python/tools:saved_model_cli # ``` # # ### Overview of commands # # The SavedModel CLI supports the following two commands on a # `MetaGraphDef` in a SavedModel: # # * `show`, which shows a computation on a `MetaGraphDef` in a SavedModel. # * `run`, which runs a computation on a `MetaGraphDef`. # # # ### `show` command # # A SavedModel contains one or more `MetaGraphDef`s, identified by their tag-sets. # To serve a model, you # might wonder what kind of `SignatureDef`s are in each model, and what are their # inputs and outputs. The `show` command let you examine the contents of the # SavedModel in hierarchical order. Here's the syntax: # # ``` # usage: saved_model_cli show [-h] --dir DIR [--all] # [--tag_set TAG_SET] [--signature_def SIGNATURE_DEF_KEY] # ``` # # For example, the following command shows all available # MetaGraphDef tag-sets in the SavedModel: # # ``` # $ saved_model_cli show --dir /tmp/saved_model_dir # The given SavedModel contains the following tag-sets: # serve # serve, gpu # ``` # # The following command shows all available `SignatureDef` keys in # a `MetaGraphDef`: # # ``` # $ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve # The given SavedModel `MetaGraphDef` contains `SignatureDefs` with the # following keys: # SignatureDef key: "classify_x2_to_y3" # SignatureDef key: "classify_x_to_y" # SignatureDef key: "regress_x2_to_y3" # SignatureDef key: "regress_x_to_y" # SignatureDef key: "regress_x_to_y2" # SignatureDef key: "serving_default" # ``` # # If a `MetaGraphDef` has *multiple* tags in the tag-set, you must specify # all tags, each tag separated by a comma. 
For example: # # <pre> # $ saved_model_cli show --dir /tmp/saved_model_dir --tag_set serve,gpu # </pre> # # To show all inputs and outputs TensorInfo for a specific `SignatureDef`, pass in # the `SignatureDef` key to `signature_def` option. This is very useful when you # want to know the tensor key value, dtype and shape of the input tensors for # executing the computation graph later. For example: # # ``` # $ saved_model_cli show --dir \ # /tmp/saved_model_dir --tag_set serve --signature_def serving_default # The given SavedModel SignatureDef contains the following input(s): # inputs['x'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: x:0 # The given SavedModel SignatureDef contains the following output(s): # outputs['y'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: y:0 # Method name is: tensorflow/serving/predict # ``` # # To show all available information in the SavedModel, use the `--all` option. # For example: # # <pre> # $ saved_model_cli show --dir /tmp/saved_model_dir --all # MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs: # # signature_def['classify_x2_to_y3']: # The given SavedModel SignatureDef contains the following input(s): # inputs['inputs'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: x2:0 # The given SavedModel SignatureDef contains the following output(s): # outputs['scores'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: y3:0 # Method name is: tensorflow/serving/classify # # ... 
# # signature_def['serving_default']: # The given SavedModel SignatureDef contains the following input(s): # inputs['x'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: x:0 # The given SavedModel SignatureDef contains the following output(s): # outputs['y'] tensor_info: # dtype: DT_FLOAT # shape: (-1, 1) # name: y:0 # Method name is: tensorflow/serving/predict # </pre> # # # ### `run` command # # Invoke the `run` command to run a graph computation, passing # inputs and then displaying (and optionally saving) the outputs. # Here's the syntax: # # ``` # usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET --signature_def # SIGNATURE_DEF_KEY [--inputs INPUTS] # [--input_exprs INPUT_EXPRS] # [--input_examples INPUT_EXAMPLES] [--outdir OUTDIR] # [--overwrite] [--tf_debug] # ``` # # The `run` command provides the following three ways to pass inputs to the model: # # * `--inputs` option enables you to pass numpy ndarray in files. # * `--input_exprs` option enables you to pass Python expressions. # * `--input_examples` option enables you to pass `tf.train.Example`. # # #### `--inputs` # # To pass input data in files, specify the `--inputs` option, which takes the # following general format: # # ```bsh # --inputs <INPUTS> # ``` # # where *INPUTS* is either of the following formats: # # * `<input_key>=<filename>` # * `<input_key>=<filename>[<variable_name>]` # # You may pass multiple *INPUTS*. If you do pass multiple inputs, use a semicolon # to separate each of the *INPUTS*. # # `saved_model_cli` uses `numpy.load` to load the *filename*. # The *filename* may be in any of the following formats: # # * `.npy` # * `.npz` # * pickle format # # A `.npy` file always contains a numpy ndarray. Therefore, when loading from # a `.npy` file, the content will be directly assigned to the specified input # tensor. If you specify a *variable_name* with that `.npy` file, the # *variable_name* will be ignored and a warning will be issued. 
# # When loading from a `.npz` (zip) file, you may optionally specify a # *variable_name* to identify the variable within the zip file to load for # the input tensor key. If you don't specify a *variable_name*, the SavedModel # CLI will check that only one file is included in the zip file and load it # for the specified input tensor key. # # When loading from a pickle file, if no `variable_name` is specified in the # square brackets, whatever that is inside the pickle file will be passed to the # specified input tensor key. Otherwise, the SavedModel CLI will assume a # dictionary is stored in the pickle file and the value corresponding to # the *variable_name* will be used. # # # #### `--input_exprs` # # To pass inputs through Python expressions, specify the `--input_exprs` option. # This can be useful for when you don't have data # files lying around, but still want to sanity check the model with some simple # inputs that match the dtype and shape of the model's `SignatureDef`s. # For example: # # ```bsh # `<input_key>=[[1],[2],[3]]` # ``` # # In addition to Python expressions, you may also pass numpy functions. For # example: # # ```bsh # `<input_key>=np.ones((32,32,3))` # ``` # # (Note that the `numpy` module is already available to you as `np`.) # # # #### `--input_examples` # # To pass `tf.train.Example` as inputs, specify the `--input_examples` option. # For each input key, it takes a list of dictionary, where each dictionary is an # instance of `tf.train.Example`. The dictionary keys are the features and the # values are the value lists for each feature. # For example: # # ```bsh # `<input_key>=[{"age":[22,24],"education":["BS","MS"]}]` # ``` # # #### Save output # # By default, the SavedModel CLI writes output to stdout. If a directory is # passed to `--outdir` option, the outputs will be saved as `.npy` files named after # output tensor keys under the given directory. # # Use `--overwrite` to overwrite existing output files. #
site/en/r2/guide/saved_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Itto-ryu/OOP-1-2/blob/main/OOP_Concepts_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="gXm-FvHuVGSW"
# ##Classes with Multiple Objects

# + colab={"base_uri": "https://localhost:8080/"} id="AUyUGvKuVO8L" outputId="9162814f-f46b-49d5-ca22-a364df6994ad"
class Birds:
    # One class, many independent instances: each object keeps its own
    # `bird_name` state but shares the method definitions.
    def __init__(self, bird_name):
        self.bird_name = bird_name

    def flying_birds(self):
        print(f"{self.bird_name} flies above the sky")

    def non_flying_birds(self):
        print(f"{self.bird_name} is the national bird of the Philippines")

# Three distinct objects built from the same class.
vulture = Birds("Griffon Vulture")
crane = Birds("Commmon Crane")
emu = Birds("Emu")
vulture.flying_birds()
crane.flying_birds()
emu.non_flying_birds()

# + [markdown] id="TXI4ZIwdXST7"
# ##Encapsulation

# + colab={"base_uri": "https://localhost:8080/"} id="_QwysgfKXJdM" outputId="a48b2351-79f1-4d53-c31e-406f9b334bc8"
class foo:
    # Plain public attributes: callers can read and rebind them freely.
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def add(self):
        return self.a + self.b

number = foo(3, 4)
number.add()
number.a = 9  # public attribute really changes: add() now returns 9 + 4 = 13
number.add()

# + [markdown] id="i8o2kSYaYy2D"
# ##Encapsulation using Mangling (use of Double Underscores)

# + colab={"base_uri": "https://localhost:8080/"} id="EryUrjyaYvgU" outputId="bc563a1a-3b28-4062-ae75-e90ab84c4cd6"
class foo:
    def __init__(self, a, b):
        # Double-underscore names are mangled to _foo__a / _foo__b,
        # shielding them from accidental outside assignment.
        self.__a = a
        self.__b = b

    def add(self):
        return self.__a + self.__b

number = foo(3, 4)
number.add()
number.a = 7  # creates a brand-new attribute 'a'; the mangled _foo__a is untouched, so add() still returns 7
number.add()

# + [markdown] id="P52DK12oZbRU"
# ##Encapsulation with Private Attributes

# + colab={"base_uri": "https://localhost:8080/"} id="DC6FY5WHZCzr" outputId="df48a340-0541-428b-9979-e793ee8ff1a1"
class Counter:
    # Counter with a *public* attribute: outside code could still tamper
    # with `current` directly (contrast with the mangled version below).
    def __init__(self):
        self.current = 0

    def increment(self):
        self.current += 1

    def value(self):
        return self.current

    def reset(self):
        self.current = 0

counter = Counter()
counter.increment()  # counter = counter + 1
counter.increment()
counter.increment()
counter.value()

# + [markdown] id="HU2KaMfbcbE7"
# ##Encapsulation with Private Attributes (use of Mangling)

# + colab={"base_uri": "https://localhost:8080/"} id="od_laYjYanOU" outputId="614c2724-5f20-4c33-d3aa-ca09c13fa06f"
class Counter:
    # Same counter, but the state lives in a mangled attribute
    # (_Counter__current) that outside assignments cannot reach by accident.
    def __init__(self):
        self.__current = 0

    def increment(self):
        self.__current += 1

    def value(self):
        return self.__current

    def reset(self):
        self.__current = 0

counter = Counter()
counter.increment()
counter.increment()
counter.increment()
counter.current = 1  # does not affect the mangled counter; value() remains 3
counter.current = 1
counter.value()

# + [markdown] id="-8-jhZEGdOWl"
# ##Inheritance

# + colab={"base_uri": "https://localhost:8080/"} id="1MTTP-D5dQt7" outputId="7a4843b2-1a54-4923-f134-0f5114a7dc29"
class Person:
    def __init__(self, firstname, surname):
        self.firstname = firstname
        self.surname = surname

    def printname(self):
        print(self.firstname, self.surname)

me = Person("Ryu", "Santos")
me.printname()

# Empty subclasses inherit __init__ and printname unchanged from Person.
class Friend(Person):
    pass

him = Friend("Ashdayle", "Daradal")
him.printname()

class Friend2(Person):
    pass

him2 = Friend2("Keith", "Robles")
him2.printname()

# + [markdown] id="jUHzArJEfLV0"
# ##Polymorphism

# + colab={"base_uri": "https://localhost:8080/"} id="KTXVkaqteQHD" outputId="4b44f928-0c61-4f82-a41f-4f9393011ccf"
class RegularPolygon:
    # Base class holds the shared state; each subclass supplies its own
    # area() so the same call works polymorphically on either shape.
    def __init__(self, side):
        self.side = side

class Square(RegularPolygon):
    def area(self):
        return self.side * self.side

class EquilateralTriangle(RegularPolygon):
    def area(self):
        # 0.433 approximates sqrt(3)/4, the equilateral-triangle area factor.
        return self.side * self.side * 0.433

# NOTE(review): the name 'object' shadows the built-in of the same name.
object = Square(4)
print(object.area())
object2 = EquilateralTriangle(3)
print(object2.area())
OOP_Concepts_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + deletable=true editable=true
# Symbolic derivation of a beamsplitter scattering matrix with backscatter,
# checking (approximate) unitarity with and without loss terms.
import sys
import numpy as np
import sys  # NOTE(review): duplicate of the `import sys` two lines up
print(sys.version)
from phasor.utilities.ipynb.displays import *
from phasor.utilities.ipynb.filters import *
from phasor.utilities.ipynb.hdf import *
#from YALL.utilities.tabulate import tabulate

from declarative import (
    OverridableObject,
    mproperty
)

import sympy
from phasor.utilities.ipynb.displays import *
from phasor.utilities.ipynb.ipy_sympy import *

import scipy.linalg

# + deletable=true editable=true
# Field reflectivity/transmissivity (r, t) and backscatter amplitude r_bs.
r, t, rbs = sympy.var('r, t, r_bs', real = True)
# Sign pattern of the backscatter diagonal; the symbolic a,b,c,d are
# immediately overwritten with the concrete choice (1, -1, -1, 1).
a, b, c, d = sympy.var('a,b,c,d')
#rbs = 0
a, b, c, d = 1, -1, -1, 1
#r = sympy.sqrt(1 - t**2)
rp = sympy.var('r_p', real = True)  # r * sympy.sqrt(1 - rbs**2)
tp = sympy.var('t_p', real = True)  # t * sympy.sqrt(1 - rbs**2)
theta = sympy.var('theta', real = True)  # t * sympy.sqrt(1 - rbs**2)
# NOTE(review): `sympy.ps_In` is not a standard sympy attribute; presumably it
# is injected by the phasor ipy_sympy star-import as the imaginary unit —
# confirm before running outside that environment.
X = sympy.exp(sympy.ps_In * theta)
Xc = X.conjugate()
# NOTE(review): this rebinds `theta` to the symbol named 'phi'; it was likely
# meant to read `phi = sympy.var('phi', ...)`. The next line still works only
# because sympy.var() injects the name `phi` into the calling namespace.
theta = sympy.var('phi', real = True)  # t * sympy.sqrt(1 - rbs**2)
Y = sympy.exp(sympy.ps_In * phi)
Yc = Y.conjugate()
# Inner 4x4 scattering matrix: backscatter amplitudes on the diagonal,
# reflection/transmission amplitudes coupling the four ports.
R_matInner = sympy.Matrix([
    [a*rbs, rp, tp, 0],
    [rp, b*rbs, 0, tp],
    [tp, 0, c*rbs, -rp],
    [0, tp, -rp, d*rbs],
])
# Left/right phase rotations built from Y = exp(I*phi) ...
Rl_matY = sympy.Matrix([
    [Yc, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, Yc, 0],
    [0, 0, 0, 1],
])
Rr_matY = sympy.Matrix([
    [Y, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, Y, 0],
    [0, 0, 0, 1],
])
# ... and from X = exp(I*theta).
Rl_matX = sympy.Matrix([
    [X, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, X],
])
Rr_matX = sympy.Matrix([
    [1, 0, 0, 0],
    [0, Xc, 0, 0],
    [0, 0, Xc, 0],
    [0, 0, 0, 1],
])
# Full scattering matrix: phase rotations sandwiching the inner matrix.
R_mat = Rl_matX * Rl_matY * R_matInner * Rr_matY * Rr_matX
R_mat

# + deletable=true editable=true
# Unitarity check: R^dagger R should reduce to the identity after
# substituting the lossless relations for r_p, t_p and r.
M = (R_mat.transpose().conjugate() * R_mat)
M = M.subs(rp, r * sympy.sqrt(1 - rbs**2))
M = M.subs(tp, t * sympy.sqrt(1 - rbs**2))
M = M.subs(r, sympy.sqrt(1 - t**2))
M.simplify()
M

# + deletable=true editable=true
# Repeat the construction with loss amplitudes (Lt, Lr) coupling to a
# second, enlarged 8x8 port space.
R_matInner = sympy.Matrix([
    [a*rbs, rp, tp, 0],
    [rp, b*rbs, 0, tp],
    [tp, 0, c*rbs, -rp],
    [0, tp, -rp, d*rbs],
])
I4 = sympy.eye(4)
O4 = sympy.zeros(4)
Lt = sympy.var('Lt', real = True)
Lr = sympy.var('Lr', real = True)
Lx = sympy.var('Lx', real = True)
rL = sympy.var('r_L', real = True)
tL = sympy.var('t_L', real = True)

def enlarge(mat):
    # Embed a 4x4 matrix as the upper-left block of an 8x8 block-diagonal
    # matrix (identity on the loss ports).
    return sympy.Matrix(numpy.bmat([[mat, O4], [O4, I4]]))

# NOTE(review): the unaliased name `numpy` is only in scope via the phasor
# star-imports above; the explicit import is `numpy as np` — confirm.
R_matBig = sympy.Matrix(numpy.bmat([[R_matInner, Lx*I4], [-Lx*I4, R_matInner]]))
R_matBig
R_mat = enlarge(Rl_matX) * enlarge(Rl_matY) * R_matBig * enlarge(Rr_matY) * enlarge(Rr_matX)
M = R_mat
#R_mat = R_matBig
# Substitute the lossy relations: total loss Lx, reduced t_p and r_p.
M = M.subs(Lx, sympy.sqrt(Lt**2 + Lr**2))
M = M.subs(tp, sympy.sqrt(t**2 - Lt**2))
M = M.subs(rp, sympy.sqrt(r**2 - Lr**2 - rbs**2))
M = M.subs(r, sympy.sqrt(1 - t**2))
M

# + editable=true
# Unitarity check of the enlarged (lossy) scattering matrix.
M = (R_mat.transpose().conjugate() * R_mat)
M = M.subs(Lx, sympy.sqrt(Lt**2 + Lr**2))
M = M.subs(tp, sympy.sqrt(t**2 - Lt**2))
M = M.subs(rp, sympy.sqrt(r**2 - Lr**2 - rbs**2))
M = M.subs(r, sympy.sqrt(1 - t**2))
#M = M.expand()
M.simplify()
M

# + deletable=true editable=true
phasor/nonlinear_crystal/derivations/Backscatter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # plotts.py in a notebook

# **plotts.py** is a python utility that is distributed with GIAnT for interactive visualization of the estimated time-series. We have translated that script to a python notebook for use with this tutorial.
#
# The command line usage for **plotts.py** is shown below.
#
#

# +
##The usual imports
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider
from matplotlib.ticker import FormatStrFormatter
import sys
import h5py
import datetime
# -

class TimeSeriesViewer:
    """Interactive viewer for a GIAnT time-series HDF5 stack.

    Displays a map slice of cumulative displacement with a time slider;
    clicking a pixel on the map plots that pixel's displacement history
    in a second figure.
    """

    def __init__(self, filename=None, showerror=False, multiplier=0.1,
                 ylimits=(-25, 25), markersize=5, showraw=False,
                 showmodel=False, dataset='recons'):
        """
        Parameters
        ----------
        filename : str
            Path to the GIAnT HDF5 stack (e.g. TS-PARAMS.h5).
        showerror : bool
            Plot error bars from the 'error' layer if present.
        multiplier : float
            Scale factor applied to displacements (0.1 -> mm to cm).
        ylimits : sequence of two floats
            Color limits for the map and y-limits for the time-series plot.
            (Default changed from a mutable list to a tuple; values unchanged.)
        markersize : int
            Marker size for the error-bar plot.
        showraw : bool
            Also plot the unfiltered 'rawts' layer in red.
        showmodel : bool
            Unused here; kept for signature compatibility with plotts.py.
        dataset : str
            Name of the filtered dataset to display.
        """
        # Open HDF5 file (kept open for lazy slicing of the big datasets).
        self.h5file = h5py.File(filename, 'r')

        # Time array. h5py >= 3.0 removed Dataset.value; ds[()] reads the
        # whole dataset into memory instead.
        self.tims = self.h5file['tims'][()]

        # Master (reference) scene index
        self.masterind = self.h5file['masterind'][()]

        # Raw dataset (left as an h5py Dataset; sliced on demand)
        self.rawds = None
        if showraw:
            self.rawds = self.h5file['rawts']

        # Filtered dataset
        self.filtds = self.h5file[dataset]

        # Common mask; scalar 1.0 when no mask layer is present
        self.cmask = 1.0
        if 'cmask' in self.h5file.keys():
            self.cmask = self.h5file['cmask']

        # Error estimates (optional layer)
        self.errds = None
        if showerror:
            if 'error' not in self.h5file.keys():
                print('Error layer requested but not found in HDF5 file. Continuing ...')
            else:
                self.errds = self.h5file['error']

        # Dates are stored as proleptic Gregorian ordinals.
        self.dates = self.h5file['dates'][()]
        t0 = datetime.date.fromordinal(int(self.dates[0]))
        # Convert the first date to decimal years; np.choose picks 366.0
        # for leap years. NOTE(review): the (year % 4 == 0) test ignores
        # the century rule — adequate for the satellite era, not general.
        t0 = t0.year + t0.timetuple().tm_yday / (np.choose((t0.year % 4) == 0, [365.0, 366.0]))
        self.tims = self.tims + t0

        # Reference slice subtracted from every displayed epoch
        self.dref = self.filtds[self.masterind, :, :]

        ##Axes for plotting
        self.slicefig = None
        self.sliceaxes = None
        self.sliceimg = None
        self.timeaxes = None
        # Fixed: __init__ previously initialized 'timeslider' while every
        # other method uses 'tslider'; keep one consistent attribute.
        self.tslider = None
        self.tsfig = None
        self.tsaxes = None

        #Constants
        self.multiplier = multiplier
        self.ylim = ylimits
        self.markersize = markersize

        #Intialize plot
        self.initPlot()

    def close(self):
        '''
        Close the HDF5 file
        '''
        self.h5file.close()

    def __del__(self):
        '''
        Destructor
        '''
        # Guard: __init__ may have raised before h5file was assigned.
        if getattr(self, 'h5file', None) is not None:
            self.close()

    def initPlot(self):
        '''
        Create the first plot.
        '''
        # Start from the middle epoch of the time-series
        avgind = self.filtds.shape[0] // 2
        self.slicefig = plt.figure('Cumulative Displacement')
        self.sliceaxes = self.slicefig.add_axes([0.125, 0.25, 0.75, 0.65])
        self.sliceimg = self.sliceaxes.imshow(
            (self.filtds[avgind, :, :] - self.dref) * self.multiplier * self.cmask,
            clim=self.ylim)
        dstr = datetime.date.fromordinal(int(self.dates[avgind])).strftime('%b-%d-%Y')
        self.sliceaxes.set_title('Time = %s' % dstr)

        #Draw colorbar
        cbr = self.slicefig.colorbar(self.sliceimg, orientation='vertical')

        #Draw time slider
        self.timeaxes = self.slicefig.add_axes([0.2, 0.1, 0.6, 0.07], yticks=[])
        self.tslider = Slider(self.timeaxes, 'Time', self.tims[0], self.tims[-1],
                              valinit=self.tims[avgind])
        # Tick marks on the slider for each acquisition epoch
        self.tslider.ax.bar(self.tims, np.ones(len(self.tims)),
                            facecolor='black', width=0.01, ecolor=None)
        self.tslider.ax.set_xticks(np.round(np.linspace(self.tims[0], self.tims[-1], num=5) * 100) / 100)
        self.tslider.on_changed(self.timeSliderUpdate)

        #Draw time-series viewer
        self.tsfig = plt.figure('Time-series')
        self.tsaxes = self.tsfig.add_subplot(111)
        self.tsaxes.scatter(self.tims, np.zeros(len(self.tims)))

        #Connect image canvas to time-series viewer
        cid = self.slicefig.canvas.mpl_connect('button_press_event',
                                               self.timeSeriesUpdate)
        plt.show()

    def timeSliderUpdate(self, val):
        '''
        Update function for the time slider.
        '''
        timein = self.tslider.val
        # Snap the requested time to the nearest actual acquisition epoch
        timenearest = np.argmin(np.abs(self.tims - timein))
        dstr = datetime.date.fromordinal(int(self.dates[timenearest])).strftime('%b-%d-%Y')
        self.sliceaxes.set_title('Time = %s' % (dstr))
        newv = (self.filtds[timenearest, :, :] - self.dref) * self.cmask * self.multiplier
        self.sliceimg.set_data(newv)
        self.slicefig.canvas.draw()

    def timeSeriesUpdate(self, event):
        '''
        Response to point click on slice.
        '''
        # Ignore clicks that land outside the map axes
        if event.inaxes != self.sliceaxes:
            return

        ii = int(np.floor(event.ydata))
        jj = int(np.floor(event.xdata))

        # Only plot pixels that are unmasked (or when no mask is used)
        if np.isscalar(self.cmask) or np.isfinite(self.cmask[ii, jj]):
            dph = self.multiplier * self.filtds[:, ii, jj]
            if self.rawds is not None:
                dphraw = self.multiplier * self.rawds[:, ii, jj]
            if self.errds is not None:
                derr = np.abs(self.multiplier * self.errds[:, ii, jj])

            self.tsaxes.cla()
            if self.errds is None:
                self.tsaxes.scatter(self.tims, dph)
            else:
                self.tsaxes.errorbar(self.tims, dph, yerr=derr, fmt='o',
                                     ms=self.markersize, barsabove=True)

            if self.rawds is not None:
                self.tsaxes.scatter(self.tims, dphraw, c='r')

            self.tsaxes.set_ylim(self.ylim)
            self.tsaxes.set_title('Line = %d, Pix = %d' % (ii, jj))
            self.tsaxes.set_xlabel('Time in years')
            if np.abs(self.multiplier) == 0.1:
                self.tsaxes.set_ylabel('Displacement in cm')
            else:
                self.tsaxes.set_ylabel('Scaled Displacement')
        else:
            # Masked pixel: show an empty placeholder series
            self.tsaxes.cla()
            self.tsaxes.scatter(self.tims, np.zeros(len(self.tims)))
            self.tsaxes.set_title('NaN: L = %d, P = %d' % (ii, jj))

        self.tsaxes.xaxis.set_major_formatter(FormatStrFormatter('%4.2f'))
        self.tsfig.canvas.draw()

viewer = TimeSeriesViewer(filename='GRFN/GIAnT/Stack/TS-PARAMS.h5', ylimits=[-35,5], showraw=False)
docs/Notebooks/TimeSeries/plotts_notebook.ipynb
# Latent ODE example (torchdiffeq): fit a variational autoencoder whose
# latent state evolves under a learned ODE to noisy 2d spiral data.
import os
import argparse
import logging
import time
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

parser = argparse.ArgumentParser()
# NOTE(review): type=eval executes the CLI string as Python ("True"/"False");
# convenient for booleans, but do not pass untrusted input here.
parser.add_argument('--adjoint', type=eval, default=False)
parser.add_argument('--visualize', type=eval, default=False)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--train_dir', type=str, default=None)
args = parser.parse_args()

# The adjoint solver backpropagates through the ODE with O(1) memory.
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint


def generate_spiral2d(nspiral=1000,
                      ntotal=500,
                      nsample=100,
                      start=0.,
                      stop=1,  # caller typically passes ~6*pi (see __main__)
                      noise_std=.1,
                      a=0.,
                      b=1.,
                      savefig=True):
    """Parametric formula for 2d spiral is `r = a + b * theta`.

    Args:
      nspiral: number of spirals, i.e. batch dimension
      ntotal: total number of datapoints per spiral
      nsample: number of sampled datapoints for model fitting per spiral
      start: spiral starting theta value
      stop: spiral ending theta value
      noise_std: observation noise standard deviation
      a, b: parameters of the Archimedean spiral
      savefig: plot the ground truth for sanity check

    Returns:
      Tuple where first element is true trajectory of size (nspiral, ntotal, 2),
      second element is noisy observations of size (nspiral, nsample, 2),
      third element is timestamps of size (ntotal,),
      and fourth element is timestamps of size (nsample,)
    """

    orig_ts = np.linspace(start, stop, num=ntotal)
    samp_ts = orig_ts[:nsample]

    # generate clock-wise and counter clock-wise spirals in observation space
    # with two sets of time-invariant latent dynamics.
    # The `stop + 1.` shift keeps zs_cw strictly positive, avoiding a
    # division by zero in rs_cw below.
    zs_cw = stop + 1. - orig_ts
    rs_cw = a + b * 50. / zs_cw
    xs, ys = rs_cw * np.cos(zs_cw) - 5., rs_cw * np.sin(zs_cw)
    orig_traj_cw = np.stack((xs, ys), axis=1)

    zs_cc = orig_ts
    rw_cc = a + b * zs_cc
    xs, ys = rw_cc * np.cos(zs_cc) + 5., rw_cc * np.sin(zs_cc)
    orig_traj_cc = np.stack((xs, ys), axis=1)

    if savefig:
        plt.figure()
        plt.plot(orig_traj_cw[:, 0], orig_traj_cw[:, 1], label='clock')
        plt.plot(orig_traj_cc[:, 0], orig_traj_cc[:, 1], label='counter clock')
        plt.legend()
        plt.savefig('./ground_truth.png', dpi=500)
        print('Saved ground truth spiral at {}'.format('./ground_truth.png'))

    # sample starting timestamps
    orig_trajs = []
    samp_trajs = []
    for _ in range(nspiral):
        # don't sample t0 very near the start or the end:
        # t0_idx is drawn uniformly from [nsample, ntotal - nsample)
        t0_idx = npr.multinomial(
            1, [1. / (ntotal - 2. * nsample)] * (ntotal - int(2 * nsample)))
        t0_idx = np.argmax(t0_idx) + nsample

        cc = bool(npr.rand() > .5)  # uniformly select rotation
        orig_traj = orig_traj_cc if cc else orig_traj_cw
        orig_trajs.append(orig_traj)

        # noisy window of nsample consecutive observations starting at t0_idx
        samp_traj = orig_traj[t0_idx:t0_idx + nsample, :].copy()
        samp_traj += npr.randn(*samp_traj.shape) * noise_std
        samp_trajs.append(samp_traj)

    # batching for sample trajectories is good for RNN; batching for original
    # trajectories only for ease of indexing
    orig_trajs = np.stack(orig_trajs, axis=0)
    samp_trajs = np.stack(samp_trajs, axis=0)

    return orig_trajs, samp_trajs, orig_ts, samp_ts


class LatentODEfunc(nn.Module):
    """ODE dynamics f(t, z) in latent space: a 3-layer MLP with ELU.

    `nfe` counts the number of function evaluations made by the solver
    (a useful diagnostic for how hard the ODE is to integrate).
    """

    def __init__(self, latent_dim=4, nhidden=20):
        super(LatentODEfunc, self).__init__()
        self.elu = nn.ELU(inplace=True)
        self.fc1 = nn.Linear(latent_dim, nhidden)
        self.fc2 = nn.Linear(nhidden, nhidden)
        self.fc3 = nn.Linear(nhidden, latent_dim)
        self.nfe = 0

    def forward(self, t, x):
        # `t` is required by the odeint interface; the dynamics here are
        # time-invariant, so it is unused.
        self.nfe += 1
        out = self.fc1(x)
        out = self.elu(out)
        out = self.fc2(out)
        out = self.elu(out)
        out = self.fc3(out)
        return out


class RecognitionRNN(nn.Module):
    """Recognition (encoder) RNN producing the parameters of q(z0 | x).

    The output has size `latent_dim * 2`; the caller splits it into the
    mean and log-variance of the approximate posterior over z0.
    """

    def __init__(self, latent_dim=4, obs_dim=2, nhidden=25, nbatch=1):
        super(RecognitionRNN, self).__init__()
        self.nhidden = nhidden
        self.nbatch = nbatch
        self.i2h = nn.Linear(obs_dim + nhidden, nhidden)
        self.h2o = nn.Linear(nhidden, latent_dim * 2)

    def forward(self, x, h):
        """One RNN step: consume observation `x`, update hidden state `h`."""
        combined = torch.cat((x, h), dim=1)
        h = torch.tanh(self.i2h(combined))
        out = self.h2o(h)
        return out, h

    def initHidden(self):
        # Fresh all-zero hidden state, one row per trajectory in the batch.
        return torch.zeros(self.nbatch, self.nhidden)


class Decoder(nn.Module):
    """Maps a latent state z back to observation space (2d points)."""

    def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
        super(Decoder, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.fc1 = nn.Linear(latent_dim, nhidden)
        self.fc2 = nn.Linear(nhidden, obs_dim)

    def forward(self, z):
        out = self.fc1(z)
        out = self.relu(out)
        out = self.fc2(out)
        return out


class RunningAverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        self.val = None
        self.avg = 0

    def update(self, val):
        # Exponential moving average; the first update seeds the average.
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.avg * self.momentum + val * (1 - self.momentum)
        self.val = val


def log_normal_pdf(x, mean, logvar):
    """Elementwise log-density of N(mean, exp(logvar)) evaluated at x."""
    const = torch.from_numpy(np.array([2. * np.pi])).float().to(x.device)
    const = torch.log(const)
    return -.5 * (const + logvar + (x - mean) ** 2. / torch.exp(logvar))


def normal_kl(mu1, lv1, mu2, lv2):
    """Elementwise KL( N(mu1, exp(lv1)) || N(mu2, exp(lv2)) )."""
    v1 = torch.exp(lv1)
    v2 = torch.exp(lv2)
    lstd1 = lv1 / 2.
    lstd2 = lv2 / 2.

    kl = lstd2 - lstd1 + ((v1 + (mu1 - mu2) ** 2.) / (2. * v2)) - .5
    return kl


if __name__ == '__main__':
    # Model / data hyperparameters.
    latent_dim = 4
    nhidden = 20
    rnn_nhidden = 25
    obs_dim = 2
    nspiral = 1000
    start = 0.
    stop = 6 * np.pi
    noise_std = .3
    a = 0.
    b = .3
    ntotal = 1000
    nsample = 100
    device = torch.device('cuda:' + str(args.gpu)
                          if torch.cuda.is_available() else 'cpu')

    # generate toy spiral data
    orig_trajs, samp_trajs, orig_ts, samp_ts = generate_spiral2d(
        nspiral=nspiral,
        start=start,
        stop=stop,
        noise_std=noise_std,
        a=a, b=b
    )
    orig_trajs = torch.from_numpy(orig_trajs).float().to(device)
    samp_trajs = torch.from_numpy(samp_trajs).float().to(device)
    samp_ts = torch.from_numpy(samp_ts).float().to(device)

    # model
    func = LatentODEfunc(latent_dim, nhidden).to(device)
    rec = RecognitionRNN(latent_dim, obs_dim, rnn_nhidden, nspiral).to(device)
    dec = Decoder(latent_dim, obs_dim, nhidden).to(device)
    params = (list(func.parameters()) + list(dec.parameters()) + list(rec.parameters()))
    optimizer = optim.Adam(params, lr=args.lr)
    loss_meter = RunningAverageMeter()

    # Optionally resume training (and the exact dataset) from a checkpoint.
    if args.train_dir is not None:
        if not os.path.exists(args.train_dir):
            os.makedirs(args.train_dir)
        ckpt_path = os.path.join(args.train_dir, 'ckpt.pth')
        if os.path.exists(ckpt_path):
            checkpoint = torch.load(ckpt_path)
            func.load_state_dict(checkpoint['func_state_dict'])
            rec.load_state_dict(checkpoint['rec_state_dict'])
            dec.load_state_dict(checkpoint['dec_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            orig_trajs = checkpoint['orig_trajs']
            samp_trajs = checkpoint['samp_trajs']
            orig_ts = checkpoint['orig_ts']
            samp_ts = checkpoint['samp_ts']
            print('Loaded ckpt from {}'.format(ckpt_path))

    try:
        for itr in range(1, args.niters + 1):
            optimizer.zero_grad()
            # backward in time to infer q(z_0)
            h = rec.initHidden().to(device)
            for t in reversed(range(samp_trajs.size(1))):
                obs = samp_trajs[:, t, :]
                out, h = rec.forward(obs, h)
            qz0_mean, qz0_logvar = out[:, :latent_dim], out[:, latent_dim:]
            # Reparameterization trick: z0 = mean + eps * std.
            epsilon = torch.randn(qz0_mean.size()).to(device)
            z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean

            # forward in time and solve ode for reconstructions
            pred_z = odeint(func, z0, samp_ts).permute(1, 0, 2)
            pred_x = dec(pred_z)

            # compute loss (negative ELBO: -log p(x|z) + KL(q(z0) || p(z0)))
            noise_std_ = torch.zeros(pred_x.size()).to(device) + noise_std
            noise_logvar = 2. * torch.log(noise_std_).to(device)
            logpx = log_normal_pdf(
                samp_trajs, pred_x, noise_logvar).sum(-1).sum(-1)
            pz0_mean = pz0_logvar = torch.zeros(z0.size()).to(device)
            analytic_kl = normal_kl(qz0_mean, qz0_logvar,
                                    pz0_mean, pz0_logvar).sum(-1)
            loss = torch.mean(-logpx + analytic_kl, dim=0)
            loss.backward()
            optimizer.step()
            loss_meter.update(loss.item())

            print('Iter: {}, running avg elbo: {:.4f}'.format(itr, -loss_meter.avg))

    except KeyboardInterrupt:
        # Save a checkpoint on Ctrl-C so training can be resumed later.
        if args.train_dir is not None:
            ckpt_path = os.path.join(args.train_dir, 'ckpt.pth')
            torch.save({
                'func_state_dict': func.state_dict(),
                'rec_state_dict': rec.state_dict(),
                'dec_state_dict': dec.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'orig_trajs': orig_trajs,
                'samp_trajs': samp_trajs,
                'orig_ts': orig_ts,
                'samp_ts': samp_ts,
            }, ckpt_path)
            print('Stored ckpt at {}'.format(ckpt_path))
    print('Training complete after {} iters.'.format(itr))

    if args.visualize:
        with torch.no_grad():
            # sample from trajectorys' approx. posterior
            h = rec.initHidden().to(device)
            for t in reversed(range(samp_trajs.size(1))):
                obs = samp_trajs[:, t, :]
                out, h = rec.forward(obs, h)
            qz0_mean, qz0_logvar = out[:, :latent_dim], out[:, latent_dim:]
            epsilon = torch.randn(qz0_mean.size()).to(device)
            z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean
            orig_ts = torch.from_numpy(orig_ts).float().to(device)

            # take first trajectory for visualization
            z0 = z0[0]

            # Integrate forwards (t > 0) and backwards (t < 0) from z0.
            ts_pos = np.linspace(0., 2. * np.pi, num=2000)
            ts_neg = np.linspace(-np.pi, 0., num=2000)[::-1].copy()
            ts_pos = torch.from_numpy(ts_pos).float().to(device)
            ts_neg = torch.from_numpy(ts_neg).float().to(device)

            zs_pos = odeint(func, z0, ts_pos)
            zs_neg = odeint(func, z0, ts_neg)

            xs_pos = dec(zs_pos)
            # zs_neg was integrated backwards; flip so time runs forward.
            xs_neg = torch.flip(dec(zs_neg), dims=[0])

        xs_pos = xs_pos.cpu().numpy()
        xs_neg = xs_neg.cpu().numpy()
        orig_traj = orig_trajs[0].cpu().numpy()
        samp_traj = samp_trajs[0].cpu().numpy()

        plt.figure()
        plt.plot(orig_traj[:, 0], orig_traj[:, 1],
                 'g', label='true trajectory')
        plt.plot(xs_pos[:, 0], xs_pos[:, 1], 'r',
                 label='learned trajectory (t>0)')
        plt.plot(xs_neg[:, 0], xs_neg[:, 1], 'c',
                 label='learned trajectory (t<0)')
        plt.scatter(samp_traj[:, 0], samp_traj[
                    :, 1], label='sampled data', s=3)
        plt.legend()
        plt.savefig('./vis.png', dpi=500)
        print('Saved visualization figure at {}'.format('./vis.png'))
examples/latent_ode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from scipy import stats from scipy import optimize as opt from scipy.stats import beta, uniform # ベータ分布と一様分布 import matplotlib.pyplot as plt # %matplotlib inline plt.style.use("ggplot") np.random.seed(123) # 目標分布 a, b = 1.5, 2.0 x = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b), 100) # ベータ分布x=0.001-0.999まで100個準備 plt.plot(x, beta.pdf(x, a, b)) # 上記ベータ分布の最大値のxを求める f = beta(a=a, b=b).pdf res = opt.fmin(lambda x: -f(x), 0.3) # 最大値求めるのを最小値求めるのに変えるために-f(x)にしている y_max = f(res) y_max NMCS = 5000 x_mcs = uniform.rvs(size=NMCS) # uniform.rvs:一様分布に従うサンプリング r = uniform.rvs(size=NMCS) * y_max accept = x_mcs[r <= f(x_mcs)] plt.hist(accept, bins=30, rwidth=0.8, label="rejection sampling") x = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b), 100) plt.plot(x, beta.pdf(x, a, b), label="Target dis") plt.legend()
bayes_pystan_sample/502_.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_tensorflow2_p36
#     language: python
#     name: conda_tensorflow2_p36
# ---

# # Amazon SageMaker Notebook for ProcGen Starter Kit with homogeneous scaling of multiple GPU instances

# +
import os
import time
import yaml

import sagemaker
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
import boto3

from IPython.display import HTML, Markdown
from source.common.docker_utils import build_and_push_docker_image
from source.common.markdown_helper import generate_help_for_s3_endpoint_permissions, create_s3_endpoint_manually
# -

# Notebook-level configuration (S3 bucket, training instance type, ...).
with open(os.path.join("config", "sagemaker_config.yaml")) as f:
    sagemaker_config = yaml.safe_load(f)

# ## Initialize Amazon SageMaker

# +
sm_session = sagemaker.session.Session()

s3_bucket = sagemaker_config["S3_BUCKET"]
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))

# +
job_name_prefix = 'sm-ray-gpu-dist-procgen'

role = sagemaker.get_execution_role()
print(role)
# -

# #### Note that `local_mode = True` does not work with heterogeneous scaling

instance_type = sagemaker_config["GPU_TRAINING_INSTANCE"]

# # Configure the framework you want to use
#
# Set `framework` to `"tf"` or `"torch"` for tensorflow or pytorch respectively.
#
# You will also have to edit your entry point i.e., `train-sagemaker-distributed-gpu.py` with the configuration parameter `"use_pytorch"` to match the framework that you have selected.

framework = "tf"

# # Train your homogeneous scaling job here
# ### Edit the training code
#
# The training code is written in the file `train-sagemaker-distributed-gpu.py` which is uploaded in the /source directory.
#
# *Note that ray will automatically set `"ray_num_cpus"` and `"ray_num_gpus"` in `_get_ray_config`*

# !pygmentize source/train-sagemaker-distributed-gpu.py

# ### Train the RL model using the Python SDK Script mode
#
# When using SageMaker for distributed training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
#
# 1. Specify the source directory where the environment, presets and training code is uploaded.
# 2. Specify the entry point as the training code
# 3. Specify the image (CPU or GPU) to be used for the training environment.
# 4. Define the training parameters such as the instance count, job name, S3 path for output and job name.
# 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.

# #### GPU docker image

# Build the GPU training image and push it to ECR.
gpu_repository_short_name = "sagemaker-procgen-ray-%s" % "gpu"
docker_build_args = {
    'CPU_OR_GPU': "gpu",
    'AWS_REGION': boto3.Session().region_name,
    'FRAMEWORK': framework
}
image_name = build_and_push_docker_image(gpu_repository_short_name, build_args=docker_build_args)
print("Using GPU ECR image %s" % image_name)

# Metrics scraped from the training logs by SageMaker (viewable in CloudWatch).
# FIX: the 'training_iteration' entry was listed twice; the duplicate
# (identical) definition has been removed.
metric_definitions = [
    {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episodes_total', 'Regex': 'episodes_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'num_steps_trained', 'Regex': 'num_steps_trained: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'timesteps_total', 'Regex': 'timesteps_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'episode_reward_min', 'Regex': 'episode_reward_min: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
]

# ### Ray homogeneous scaling - Specify `train_instance_count` > 1
#
# Homogeneous scaling allows us to use multiple instances of the same type.
#
# Spot instances are unused EC2 instances that could be used at 90% discount compared to On-Demand prices (more information about spot instances can be found [here](https://aws.amazon.com/ec2/spot/?cards.sort-by=item.additionalFields.startDateTime&cards.sort-order=asc) and [here](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html))
#
# To use spot instances, set `train_use_spot_instances = True`. To use On-Demand instances, `train_use_spot_instances = False`.

# +
train_instance_count = 2
train_use_spot_instances = False

# Select which procgen environments to run in `envs_to_run`
'''
envs_to_run = ["coinrun", "bigfish", "bossfight", "caveflyer",
               "chaser", "climber", "dodgeball", "fruitbot",
               "heist", "jumper", "leaper", "maze",
               "miner", "ninja", "plunder", "starpilot"]
'''
envs_to_run = ["coinrun"]

# Launch one (asynchronous) training job per selected environment.
for env in envs_to_run:
    if train_use_spot_instances:
        print('*** Using spot instances ... ')
        # Spot jobs need an explicit name and a checkpoint URI so training
        # can resume after an interruption.
        job_name = 'sm-ray-dist-procgen-spot-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) + "-" + env
        checkpoint_s3_uri = 's3://{}/sagemaker-procgen/checkpoints/{}'.format(s3_bucket, job_name)
        training_params = {"train_use_spot_instances": True,
                           "train_max_run": 3600 * 5,
                           "train_max_wait": 7200 * 5,
                           "checkpoint_s3_uri": checkpoint_s3_uri
                           }
        hyperparameters = {
            "rl.training.upload_dir": checkpoint_s3_uri,  # Necessary for syncing between spot instances
            "rl.training.config.env_config.env_name": env,
        }
    else:
        training_params = {"base_job_name": job_name_prefix + "-" + env}
        hyperparameters = {
            # "rl.training.upload_dir": s3_output_path + "/tensorboard_sync",  # Uncomment to view tensorboard
            "rl.training.config.env_config.env_name": env,
        }

    # Defining the RLEstimator
    estimator = RLEstimator(entry_point="train-sagemaker-distributed-gpu.py",
                            source_dir='source',
                            dependencies=["source/utils", "source/common/", "neurips2020-procgen-starter-kit/"],
                            image_uri=image_name,
                            role=role,
                            instance_type=instance_type,
                            instance_count=train_instance_count,
                            output_path=s3_output_path,
                            metric_definitions=metric_definitions,
                            hyperparameters=hyperparameters,
                            **training_params
                            )

    # wait=False: kick the job off and return immediately.
    if train_use_spot_instances:
        estimator.fit(job_name=job_name, wait=False)
    else:
        estimator.fit(wait=False)

    print(' ')
    print(estimator.latest_training_job.job_name)
    print('type=', instance_type, 'count=', train_instance_count)
    print(' ')
# -
sagemaker/train-homo-distributed-gpu.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # # - 임베딩 벡터의 시각화(Embedding Visualization) # - 구글은 임베딩 프로젝터(embedding projector)라는 데이터 시각화 도구를 지원합니다. # - 이번 챕터에서는 임베딩 프로젝터를 사용하여 학습한 임베딩 벡터들을 시각화해보겠습니다. # # - 시각화를 위해서는 이미 모델을 학습하고, # - 파일로 저장되어져 있어야 합니다. # - 모델이 저장되어져 있다면 아래 커맨드를 통해 시각화에 필요한 파일들을 생성할 수 있습니다. # # ``` # python -m gensim.scripts.word2vec2tensor --input .eng_w2v --output .eng_w2v # mv .eng_w2v_metadata.tsv dot_eng_w2v_metadata.tsv # mv .eng_w2v_tensor.tsv dot_eng_w2v_tensor.tsv # ``` # # - 임베딩 프로젝터를 사용하여 시각화하기 # - https://projector.tensorflow.org/ 오픈 # - 로드 버튼클릭 # - dot_eng_w2v_metadata.tsv, dot_eng_w2v_tensor.tsv 로드 #
0905_embedding_vector_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Control Flow # # # <br> <a href='#ControlStatements'>Control Statements</a> # <br> &emsp;&emsp; <a href='#ifelse'>__`if` and `else` Statements__</a> # <br> &emsp;&emsp; <a href='#Loops'>Loops</a> # <br> &emsp;&emsp; &emsp;&emsp; <a href='#range'>__`range`__</a> # <br> &emsp;&emsp; &emsp;&emsp; <a href='#forLoops'>__`for` Loops__</a> # <br> &emsp;&emsp; &emsp;&emsp; &emsp;&emsp;<a href='#ExampleConversionTable'>__Example: Conversion Table__</a> # <br> &emsp;&emsp; &emsp;&emsp; <a href='#whileLoops'>__`while` Loops__</a> # <br> &emsp;&emsp; &emsp;&emsp; <a href='#breakcontinue'>__`break` and `continue`__</a> # <br> &emsp;&emsp; &emsp;&emsp; &emsp;&emsp;<a href='#ExamplePrimeNumbers'>__Example: Prime Numbers__</a> # <br> <a href='#Summary'>Summary</a> # <br> <a href='#UpdatingClassNotes'>Updating your Class Notes</a> # <br> <a href='#Test-YourselfExercises'>Test-Yourself Exercises</a> # <br> <a href='#ReviewExercises'>Review Exercises</a> # # # + [markdown] slideshow={"slide_type": "slide"} # <a id='ControlStatements'></a> # ## Control Statements # # What is a *__control statement__*? # # Let's start with an example you are already familiar with... # + [markdown] slideshow={"slide_type": "slide"} # # Considerthe time-telling computer program that returned Boolean (True or False) variables... # # # + [markdown] slideshow={"slide_type": "slide"} # ### Time-telling program # # Based on the current time of day, the program answers two questions: # # >__Is it lunchtime?__ # # >`True` # # if it is lunch time. 
# # <br> # # >__Is it time for work?__ # # >`True` # # if it is `not`: # - before work (`time < work_starts`) # - after work (`time > work_ends `) # - lunchtime (the previous question assigns the value `True` or `False` to variable `lunchtime`). # + slideshow={"slide_type": "slide"} # Time-telling program time = 20.00 # current time work_starts = 8.00 # time work starts work_ends = 17.00 # time work ends lunch_starts = 13.00 # time lunch starts lunch_ends = 14.00 # time lunch ends # lunchtime if the time is between the start and end of lunchtime lunchtime = time >= lunch_starts and time < lunch_ends # work_time if the time is not... work_time = not ( time < work_starts # ... before work or time > work_ends # ... or after work or lunchtime) # ... or lunchtime if lunchtime: print("eat lunch") elif work_time: print("do work") else: print("go home") # print("Is it work time?") # print(work_time) # print("Is it lunchtime?") # print(lunchtime) # + [markdown] slideshow={"slide_type": "slide"} # What if we now want our computer program to do something based on these answers? # # To do this, we need to use *control statements*. # # Control statements allow us to make decisions in a program. # # This decision making is known as *control flow*. # # Control statements are a fundamental part of programming. # + [markdown] slideshow={"slide_type": "slide"} # Here is a control statement in pseudo code: # # This is an `if` statement. # # if A is true # Perform task X # # For example # # if lunchtime is true # Eat lunch # # <p align="center"> # <img src="img/flow_diag_if_lunctime.png" alt="Drawing" style="width: 300px;"/> # </p> # + [markdown] slideshow={"slide_type": "slide"} # We can check if an alternative to the `if` statement is true using an `else if` statement. 
# # # if A is true # Perform task X (only) # # else if B is true # Perform task Y (only) # # + [markdown] slideshow={"slide_type": "slide"} # Example: # # if lunchtime is true # Eat lunch # # else if work_time is true # Do work # # <p align="center"> # <img src="img/flow_diag_if_lunctime_elif_work.png" alt="Drawing" style="width: 300px;"/> # </p> # + [markdown] slideshow={"slide_type": "slide"} # Often it is useful to include an `else` statement. # # If none of the `if` and `else if` statements are satisfied, the code following the `else` statement will be executed. # # if A is true # Perform task X (only) # # else if B is true # Perform task Y (only) # # else # Perform task Z (only) # # # # # # + [markdown] slideshow={"slide_type": "slide"} # if lunchtime is true # Eat lunch # # else if work_time is true # Do work # # else # Go home # # <p align="center"> # <img src="img/flow_diag_if_lunctime_elif_work_else_home.png" alt="Drawing" style="width: 400px;"/> # </p> # + [markdown] slideshow={"slide_type": "slide"} # Let's get a better understanding of control flow statements by completing some examples. # + [markdown] slideshow={"slide_type": "slide"} # <a id='ifelse'></a> # ## `if` and `else` statements # # Here is what these control statements look like if we include them in the time-telling program... # # __Note:__ In Python, "else if" is written: `elif` # + slideshow={"slide_type": "slide"} # Time-telling program time = 13.05 # current time work_starts = 8.00 # time work starts work_ends = 17.00 # time work ends lunch_starts = 13.00 # time lunch starts lunch_ends = 14.00 # time lunch ends # variable lunchtime is True if the time is between the start and end of lunchtime lunchtime = time >= lunch_starts and time < lunch_ends # variable work_time is True if the time is not... work_time = not ( time < work_starts # ... before work or time > work_ends # ... or after work or lunchtime) # ... 
or lunchtime #print("Is it work time?") #print(work_time) #print("Is it lunchtime?") #print(lunchtime) if lunchtime: print("Eat lunch") elif work_time: print("Do work") else: print("Go home") # + [markdown] slideshow={"slide_type": "slide"} # __Remember:__ The program assigns the variables lunchtime and work_time the values `True` or `False`. # # Therefore when we type: # <br>`if lunchtime` # # <br>the meaning is the same as: # <br>`if lunchtime == True` # + [markdown] slideshow={"slide_type": "slide"} # Here is another example, using algebraic operators to modify the value of an initial variable, `x`. # # The __modification of `x`__ and the __message printed__ depend on the initial value of `x`. # # <p align="center"> # <img src="img/flow_diag_x_modify.png" alt="Drawing" style="width: 400px;"/> # </p> # + slideshow={"slide_type": "slide"} #Example : Modify input variable, `x`. x = -10.0 # Initial x value # x is greater than zero if (x>1) is True: x -=20 # x is less than zero elif # x is not less than zero and not greater than zero, therefore it must be zero print("Modified x = ", x) # + slideshow={"slide_type": "slide"} # Example solution # Example : Modify input variable, `x`. x = -10.0 # Initial x value # x is greater than zero if x > 0: x -= 20 # x is less than zero elif x < 0: x += 21 # x is not less than zero and not greater than zero, therefore it must be zero else: x *= 2.5 print("Modified x = ", x) # - # (Solution in 01b_ControlFlow_SOLS.ipynb) # + [markdown] slideshow={"slide_type": "slide"} # # # __Note:__ A full explanation of shortcut operators is given in 01a_BasicOperators_DataTypes # + [markdown] slideshow={"slide_type": "subslide"} # __Try it yourself__ # # In the cell code cell above, try: # # - changing the operations performed on `x` # # - changing the value of `x` a few times. # # Re-run the cell to see the different paths the program can follow. 
# + [markdown] slideshow={"slide_type": "slide"} # ### Look carefully at the structure of the `if`, `elif`, `else`, control statement: # # # __The control statement begins with an `if`__, followed by the expression to check. <br> # At the end of the `if` statement you must put a colon (`:`) <br> # ````python # if x > 0.0: # ```` # + [markdown] slideshow={"slide_type": "slide"} # After the `if` statement, indent the code to be run in the case that the `if` statement is `True`. <br> # # # To end the code to be run, simply stop indenting: # # ````python # if x > 0.0: # print('Initial x is greater than zero') # x -= 20.0 # ```` # + [markdown] slideshow={"slide_type": "slide"} # The indent can be any number of spaces. # # The number of spaces must be the same for all lines of code to be run if the `if` statement is True. # # Jupyter Notebooks automatically indent 4 spaces. # # This is considered best practise. # + [markdown] slideshow={"slide_type": "subslide"} # `if x > 0.0` is: # - `True`: # - The indented code is executed. # - The control block is exited. # - The program moves past any subsequent `elif` or `else` statements. # <br> # # # - `False`: # the program moves past the inented code to the next (non-indented) part of the program... <br> # + [markdown] slideshow={"slide_type": "subslide"} # In this the next (non-indented) part of the program is `elif` (else if). # # The elif statement is evaluated. # # (Notice that the code is structured in the same way as the `if` statement.): # # ```python # if x > 0.0: # print('Initial x is greater than zero') # x -= 20.0 # # elif x < 0.0: # print('Initial x is less than zero') # x += 21.0 # ``` # + [markdown] slideshow={"slide_type": "subslide"} # `elif x < 0.0`: # # - `True`: # - The indented code is executed. # - The control block is exited. # - The program moves past any subsequent `elif` or `else` statements. # # # - `False`: # the program moves past the indented code to the next (non-indented) part of the program. 
<br> # # # # + [markdown] slideshow={"slide_type": "subslide"} # If none of the preceding `if` or `elif` stements are true. # <br> e.g. in this example: # - `x > 0.0` is `False` # - `x < 0.0` is `False` # # the code following the `else` statement is executed. # # ```python # if x > 0.0: # print('Initial x is greater than zero') # x -= 20.0 # # elif x < 0.0: # print('Initial x is less than zero') # x += 21.0 # # else: # print('Initial x is not less than zero and not greater than zero, therefore it must be zero') # ``` # + [markdown] slideshow={"slide_type": "slide"} # <a id='Loops'></a> # ## Loops # # *Loops* are used to execute a command repeatedly. # <br> # A loop is a block that repeats an operation a specified number of times (loops). # # There are two main types of loops in Python: # - `for` loops : repeat a certain number of times # - `while` loops : repeat until something happens # # # # Examples: # - __`for` loop__ : print an ascending sequence of numbers between two values. # - __`while` loop__ : print the sum of each number and a positive constant *if* the answer is less than 200. # + [markdown] slideshow={"slide_type": "slide"} # <a id='range'></a> # ### `range` # # The function `range` gives us a sequence of *integer* numbers. # # `range(3, 6)` returns integer values starting from 3 and ending at 6. # # i.e. # # > 3, 4, 5 # # Note this does not include the second number in the () parentheses # <br>(which in this case is 6). # # # + [markdown] slideshow={"slide_type": "slide"} # We can change the starting value. # # For example for integer values starting at 0 and ending at 4: # # `range(0,4)` # # returns: # # > 0, 1, 2, 3 # # `range(4)` is a __shortcut__ for range(0, 4) # + [markdown] slideshow={"slide_type": "slide"} # <a id='Loops'></a> # ### `for` Loops # + [markdown] slideshow={"slide_type": "-"} # The statement # ```python # for i in range(0, 5): # ``` # says that we want to run the indented code five times. 
# # # <p align="center"> # <img src="img/flow_diag_for_loop.png" alt="Drawing" style="width: 400px;"/> # </p> # + slideshow={"slide_type": "slide"} for i in range(5): print(i) # + [markdown] slideshow={"slide_type": "subslide"} # The first time through, the value of i is equal to 0. # <br> # The second time through, its value is 1. # <br> # Each loop the value `i` increases by 1 (0, 1, 2, 3, 4) until the last time when its value is 4. # + [markdown] slideshow={"slide_type": "slide"} # A similar structure to `if` is used: # - `for` is followed by the condition being checked. # - : colon at the end of the `for` statement. # - The indented code that follows is run each time the code loops. <br> # (The __same of spaces__ should be used for all indents) # <br> # - To end the `for` loop, simply stop indenting. # + slideshow={"slide_type": "-"} for i in range(-2, 3): print(i) print('The end of the loop') # + [markdown] slideshow={"slide_type": "subslide"} # The above loop starts from -2 and executes the indented code for each value of i in the range (-2, -1, 0, 1, 2). # <br> # When the loop has executed the code for the final value `i = 2`, it moves on to the next unindented line of code. # + slideshow={"slide_type": "subslide"} for n in range(4): print("----") print(n, n**2) # + [markdown] slideshow={"slide_type": "subslide"} # The above executes 4 loops. # # The statement # ```python # for n in range(4): # ``` # says that we want to loop over four integers, starting from 0. # # Each loop the value `n` increases by 1 (0, 1, 2 3). # # # # + [markdown] slideshow={"slide_type": "subslide"} # __Try it yourself__ # <br> # Go back and change the __range__ of input values in the last three cells and observe the change in output. 
# # + [markdown] slideshow={"slide_type": "slide"} # If we want to step by three rather than one: # + slideshow={"slide_type": "-"} for n in range(0, 10, 3): print(n) # + [markdown] slideshow={"slide_type": "slide"} # If we want to step backwards rather than forwards we __must__ include the step size: # + slideshow={"slide_type": "-"} for n in range(10, 0, -1): print(n) # + [markdown] slideshow={"slide_type": "subslide"} # For example... # - for n in range(10, 0): print(n) # ...does not return any values because there are no values that lie between 10 and 0 when counting in the positive direction from 10. # + [markdown] slideshow={"slide_type": "subslide"} # __Try it yourself.__ # # In the cell below write a `for` loop that: # - starts at `n = 9` # - ends at `n = 3` (and includes `n = 3`) # - loops __backwards__ through the range in steps of -3 # - prints `n`$^2$ at each loop. # # + # For loop # + [markdown] slideshow={"slide_type": "slide"} # For loops are useful for performing operations on large data sets. # # We often encounter large data sets in real-world mathematical problems. # + [markdown] slideshow={"slide_type": "slide"} # A simple example of this is converting multiple values using the same mathematical equation to create a look-up table... # + [markdown] slideshow={"slide_type": "slide"} # <a id='ExampleConversionTable'></a> # ### Example: Conversion table from degrees Fahrenheit to degrees Celsius # # We can use a `for` loop to create a conversion table from degrees Fahrenheit ($Tf$) to degrees Celsius ($Tc$). 
# # Conversion formula: # # $$ # T_c = 5(T_f - 32)/9 # $$ # # # + [markdown] slideshow={"slide_type": "slide"} # __Conversion table : Fahrenheit ($Tf$) to degrees Celsius ($Tc$)__ # <br>-100 F to 200 F # <br>Steps of 20 F (not including 200 F): # # <p align="center"> # <img src="img/flow_diag_for_loop_temperature.png" alt="Drawing" style="width: 300px;"/> # </p> # + slideshow={"slide_type": "slide"} # print heading print("Tf \t Tc") print("-----------------") # convert all items in range for Tf in range(-100, 200, 20): Tc = ((Tf -32) * 5 / 9) Tc = round(Tc, 3) print(f"{Tf} \t {Tc}") # + [markdown] slideshow={"slide_type": "slide"} # <a id='whileLoops'></a> # # ## `while` Loops # # A __`for`__ loop performs an operation a specified number of times. # # ```python # for x in range(5): # print(x) # ``` # # A __`while`__ loop performs a task *while* a specified statement is true. # # ```python # x = 0 # while x < 5: # print(x) # ``` # # <p align="center"> # <img src="img/flow_diag_while_loop.png" alt="Drawing" style="width: 400px;"/> # </p> # + [markdown] slideshow={"slide_type": "slide"} # We use the same structure as for `for` loops and `if-elif-else`: # - `while` is followed by the condition being checked. # - : colon at the end of the `while` statement. # - The indented code that follows is repeatedly executed until the `while` statement (e.g. `x < 5`) is `False`. <br> # # # + [markdown] slideshow={"slide_type": "slide"} # It can be quite easy to crash your computer using a `while` loop. # # e.g. if we don't modify the value of x each time the code loops: # ```python # x = 0 # while x < 5: # print(x) # # x += 1 # ``` # will continue indefinitely since `x < 5 == False` will never be satisfied. # # This is called an *infinite loop*. 
# # # + [markdown] slideshow={"slide_type": "slide"} # To perform the same function as the `for` loop we need to increment the value of `x` within the loop: # + slideshow={"slide_type": "-"} x = 0 print("Start of while statement") while x < 5: print(x) x += 1 # Increment x print("End of while statement") # + [markdown] slideshow={"slide_type": "slide"} # `for` loops are often safer when performing an operation on a set range of values. # + slideshow={"slide_type": "-"} x = -2 print("Start of for loop") for y in range(x,5): print(y) print("End of for loop") # + [markdown] slideshow={"slide_type": "slide"} # `While` loops are more appropriate when the number of loops required is not known beforehand (e.g. before `x > 0.001` becomes false). # # # + x = 0.9 while x > 0.001: # Square x (shortcut x *= x) x = x * x print(round(x, 6)) # + [markdown] slideshow={"slide_type": "slide"} # __Note:__ In this example, if we use an initial value of $x \ge 1$, an infinite loop will be generated. # # e.g. # ```python # x = 2 # # while x > 0.001: # x = x * x # print(x) # ``` # # `x` will increase with each loop, meaning `x` will always be greater than 0.001. # + [markdown] slideshow={"slide_type": "subslide"} # To avoid errors, it is good practice to check that $x < 1$ before entering the `while` loop e.g. # + x = 0.9 if x < 1: while x > 0.001: # Square x (shortcut x *= x) x = x * x print(round(x, 6)) else: print("x is greater than one, infinite loop avoided") # + [markdown] slideshow={"slide_type": "subslide"} # __Try it for yourself:__ # # In the cell above change the value of x to above or below 1. # # Observe the output. # # + [markdown] slideshow={"slide_type": "subslide"} # __Try it for yourself:__ # # In the cell below: # - Create a variable,`x`, with the initial value 50 # - Each loop: # 1. print x # 1. 
reduce the value of x by half # - Exit the loop when `x` < 3 # + # While loop # + [markdown] slideshow={"slide_type": "slide"} # <a id='breakcontinue'></a> # ## `break` and `continue`. # + [markdown] slideshow={"slide_type": "slide"} # <a id='break'></a> # ### `break` # # Sometimes we want to exit a `for` or `while` loop prematurely. # # <img src="img/algorithm-break-statement.jpg" alt="Drawing" style="width: 300px;"/> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="img/flow_diag_break.png" alt="Drawing" style="width: 300px;"/> # </p> # + slideshow={"slide_type": "slide"} for x in range(10): print(x) if x == 5: print("Time to break out") break # + [markdown] slideshow={"slide_type": "slide"} # Let's look at how we can use this in a program... # # + [markdown] slideshow={"slide_type": "slide"} # <a id='ExamplePrimeNumbers'></a> # ### Example: Prime Numbers # We are going to look at a program to __find prime numbers__. # # >__Prime number:__ A positive integer, greater than 1, that has no positive divisors other than 1 and itself (2, 3, 5, 11, 13, 17....) # # The program checks (integer) numbers, up to a limit `N`, and prints the prime numbers. # # We will first look at an inefficient solution. # # We will then write an improved solution using `break`. # # # # + [markdown] slideshow={"slide_type": "slide"} # We can determine in `n` is a prime nunber by dividing it by every number in the range 2 to `n`. # # If any of these calculations has a remainder equal to zero, n is *not* a prime number. # # <p align="center"> # <img src="img/flow_diag_prime_numbers_.png" alt="Drawing" style="width: 800px;"/> # </p> # + slideshow={"slide_type": "slide"} N = 50 # for loop 1 for n in range(2, N): n_is_prime = True # for loop 2 for m in range(2, n): if not(n % m): # if remainder == 0... 
n_is_prime = False if n_is_prime: print(n) # + slideshow={"slide_type": "subslide"} # Here is the same code with comments to explain each part N = 50 # Check numbers up 50 for primes (excludes 50) # Loop over all numbers from 2 to 50 (excluding 50) for n in range(2, N): # Assume that n is prime n_is_prime = True # Check if n divided by (any number in the range 2 to n) returns a remainder equal to 0 for m in range(2, n): # If the remainder is zero, n is not a prime number if not(n % m): n_is_prime = False # If n is prime, print to screen if n_is_prime: print(n) # + [markdown] slideshow={"slide_type": "slide"} # Notice that our program contains a second `for` loop. # # For each value of n, it loops through incrementing values of m in the range (2 to n): # # ```python # # Check if n can be divided by m # # m ranges from 2 to n (excluding n) # for m in range(2, n): # ``` # before incrementing to the next value of n. # # We call this a *nested* loop. # # The indents in the code show where loops are nested. # + [markdown] slideshow={"slide_type": "slide"} # As n gets larger, dividing it by *every* number in the range (2, n) becomes more and more inefficient. # <br> # <br> # # # __Using a `break` statement to make code more efficient__ # <br>A `break` statement exits the loop as soon as a remainder equal to zero is returned <br>(indicating that n is not a prime number). # # # + [markdown] slideshow={"slide_type": "slide"} # As soon as a number is found to be not prime, the program: # - breaks out of loop 2 # - goes to the next value of n in loop 1. # # <p align="center"> # <img src="img/flow_diag_prime_numbers_break_.png" alt="Drawing" style="width: 800px;"/> # </p> # + slideshow={"slide_type": "slide"} # Example : Prime numbers # Modify the original program (below) to use break N = 50 # for loop 1 for n in range(2, N): n_is_prime = True # for loop 2 for m in range(2, n): if not(n % m): # if remainder == 0... 
#n_is_prime = False break #if n_is_prime: else: print(n) # - # (Solution in 01b_ControlFlow_SOLS.ipynb) # + [markdown] slideshow={"slide_type": "subslide"} # ### `for-else` and `while-else`. # # This is an example of the `for-else` construction... # # `for-else` and `while-else` are similar to `if-else`: # - The `for` / `while` loop is executed. # - *If* the loop is not exited due to the `break` the `else` will be executed. # # # By placing `else` *one level up* from `if` the program will iterate through all values of m before printing n if n is prime. # + [markdown] slideshow={"slide_type": "slide"} # <a id='Continue'></a> # ### `continue` # # Sometimes, instead of *skipping all remaining values*, we want to skip *just one value* in a loop. # # For this we use `continue`. # # <p align="center"> # <img src="img/flow_diag_continue.png" alt="Drawing" style="width: 300px;"/> # </p> # # # + [markdown] slideshow={"slide_type": "slide"} # <img src="img/algorithm-continue-statement.jpg" alt="Drawing" style="width: 300px;"/> # + [markdown] slideshow={"slide_type": "slide"} # Let's compare break and continue... # + [markdown] slideshow={"slide_type": "slide"} # # This program loops through numbers in the range 0 to 19. # # It prints a message about each number. # # It *stops* when it reaches a number that is not a multiple of 4. # + slideshow={"slide_type": "-"} for j in range(1, 20): if j % 4 == 0: # Check remainer of j/4 break # continue to next value of j print(j, "is not a multiple of 4") # + [markdown] slideshow={"slide_type": "slide"} # This program loops through numbers in the range 0 to 19. # # It prints a message about each number. # # It *skips* this operation whenever it reaches a number that is not a multiple of 4. # # If the number is divisible by 4 it *continues* to the next value in the loop, without printing. 
# + slideshow={"slide_type": "-"}
for j in range(1, 20):
    if j % 4 == 0: # Check remainder of j/4
        continue # continue to next value of j
    print(j, "is not a multiple of 4")

# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# We can use a `for` loop to perform an operation on each character of a string.
#
# ```Python
# string = "string"
#
# for i in range(len(string)):
#     print(string[i])
# ```

# + [markdown] slideshow={"slide_type": "subslide"}
# In the cell below, loop through the characters of the string.
# Use `continue` to only print the letters of the word *sting*.
# -

# Print the letters of the word sting
string = "string"

# + [markdown] slideshow={"slide_type": "slide"}
# <a id='Summary'></a>
# # Summary
#
# [*McGrath, Python in easy steps, 2013*]
#
# - The Python `if` keyword performs a conditional test on an expression for a Boolean value of True or False.
# - Alternatives to an `if` test are provided using `elif` and `else` tests.
# - A `while` loop repeats until a test expression returns `False`.
# - A `for`...`in`... loop iterates over each item in a specified data structure (or string).
# - The `range()` function generates a numerical sequence that can be used to specify the length of the `for` loop.
# - The `break` and `continue` keywords interrupt loop iterations.

# + [markdown] slideshow={"slide_type": "slide"}
# <a id='UpdatingClassNotes'></a>
# # Updating your Class Notes
#
# To add the examples that we covered in class today...
#
# In the terminal:
#
# 1. Navigate to *inside* of the ILAS_python_for_engineers folder on your computer.
#
# 2. Type:
# >`git add -A`
#
# >`git commit -m "commit"`
#
# >`git fetch upstream`
#
# >`git merge -X theirs upstream/master`
#

# + [markdown] slideshow={"slide_type": "slide"}
# <a id='Test-YourselfExercises'></a>
# # Test-Yourself Exercises
#
# Complete the Test-Yourself exercises below.
# # Save your answers as .py files and email them to: # <br><EMAIL> # # # + [markdown] slideshow={"slide_type": "slide"} # ### Test-Yourself Exercise : Currency Trading 両替 # To make a comission (profit), a currency trader sells US dollars (USD) to travellers above the market rate. # # The multiplier used to calculate the amount recieved by customer is shown in the table: # # |Amount (JPY) |Multiplier | # |--------------------------------------------|-------------------------| # | $ < 10,000$ | 0.9 | # | $\geq 10,000$ and $ < 100,000$ | 0.925 | # | $\geq 100,000$ and $ < 1,000,000$ | 0.95 | # | $\geq 1,000,000$ and $ < 10,000,000$ | 0.97 | # | $\geq 10,000,000$ | 0.98 | # # __Current market rate:__ 1 JPY = 0.0091 USD. # # USD received by customer = `JPY * market_rate * multiplier` # - # <p align="center"> # <img src="img/flow_diag_currency_trading.png" alt="Drawing" style="width: 600px;"/> # </p> # + [markdown] slideshow={"slide_type": "subslide"} # #### Part A # Use the flow diagram to write a program using `if`, `elif` and `else`. # <br>Your program should: # - calculate the amount (USD) received by a customer for a given sum paid in JPY. # - print the amount paid by the customer (JPY). # - print the amount received by the customer (USD). # - print the *effective rate* = $\frac{USD}{JPY}$. # # # # # # + slideshow={"slide_type": "subslide"} ## Test-Yourself Exercise : Currency Trading, Part A market_rate = 0.0091 # 1 JPY is worth this many dollars at the market rate # Select the appropriate multiplier # Calculate the total amount sold to the customer # Print a breakdown of the transaction print("Amount in JPY sold:") print("Amount in USD purchased:") print("Effective rate:") # + [markdown] slideshow={"slide_type": "subslide"} # #### Part B # The currency trader reduces the exchange rate by an __additional__ 10% if the customer pays with cash. # <br>(If the transaction is made electronically, this extra reduction is not applied). 
# # Use the flow diagram to edit your program to include the charge if a cash transaction is made. # # Try changing the values of `JPY` and `cash` a few times. # # Re-run the cell to see the different paths the program can follow. # # # # # - # <p align="center"> # <img src="img/flow_diag_currency_trading_cash.png" alt="Drawing" style="width: 600px;"/> # </p> # + slideshow={"slide_type": "subslide"} # Apply the appropriate reduction if the transaction is made in cash # + # Test-Yourself Exercise : Currency Trading # Example solution JPY = 10_000 # The amount in JPY to be changed into USD cash = False # True if transaction is in cash, otherwise False market_rate = 0.0091 # 1 JPY is worth this many dollars at the market rate # Apply the appropriate reduction depending on the amount being sold if JPY < 10_000: multiplier = 0.9 elif JPY < 100_000: multiplier = 0.925 elif JPY < 1_000_000: multiplier = 0.95 elif JPY < 10_000_000: multiplier = 0.97 else: # JPY > 10,000,000 multiplier = 0.98 # Apply the appropriate reduction if the transaction is made in cash if cash: cash_multiplier = 0.9 else: cash_multiplier = 1 # Calculate the total amount sold to the customer USD = JPY * market_rate * multiplier * cash_multiplier # Print a breakdown of the transaction print("Amount in JPY sold:", JPY) print("Amount in USD purchased:", USD) print("Effective rate:", USD/JPY) # + [markdown] slideshow={"slide_type": "subslide"} # __Note:__ # - We can use multiple `elif` statements within a control block. # - We can use multipe `if` statements. <br>When the program executes and exits a control block, it moves to the next `if` statement. # - __Readability:__ <br>Underscores _ are placed between 0s in long numbers to make them easier to read. # <br>You DO NOT need to include underscores for Python to interpret the number correctly. # <br>You can place the underscores wherever you like in the sequence of digits that make up the number. 
#
#

# + [markdown] slideshow={"slide_type": "slide"}
# ### Test-Yourself Exercise : Classifier
# Classification is the problem of sorting items into a set of categories or sub-populations e.g. in machine learning and statistics.
#
# Write a `for` loop to iterate through a range 1 to 30 (excluding 30) in steps of 2.
#
# Classify the range of numbers into groups A, B and C, using the flow diagram.
# -

# <p align="center">
#   <img src="img/flow_diag_classifier__.png" alt="Drawing" style="width: 600px;"/>
# </p>

# + slideshow={"slide_type": "subslide"}
# Test-Yourself Exercise : Classifier

# + [markdown] slideshow={"slide_type": "slide"}
# <a id='ReviewExercises'></a>
# # Review Exercises
# Here are a series of short problems for you to practise the new Python skills that you have learnt today in your own time.

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: `while` loops.
# In the cell below, write a while loop that with each loop:
# - prints the value of `x`
# - then decreases the value of x by 0.5
#
# as long as `x` remains positive.
#
# <a href='#whileLoops'>Jump to while Loops</a>
# -

x = 4

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: `for` loops
# In the cell below, write a `for` loop to print the even numbers from 2 to 100, inclusive.

# +
# for loop to print the even numbers from 2 to 100, inclusive.

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: `for` loops and `if` statements
# In the cell below, write a for loop to alternately print `Red` then `Blue` 3 times.
# <br>i.e.
# <br>Red
# <br>Blue
# <br>Red
# <br>Blue
# <br>Red
# <br>Blue

# +
# Alternately print Red and Blue

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: `continue`
# In the cell below, loop through the characters of the string.
# <br>Use `continue` to only print the letters of the word *sing*.
# <br>Hint: Refer to __Logical Operators__ (Seminar 2).
# # <a href='#Continue'>Jump to continue</a> # - # Print the letters of the word sing string = "string" # + [markdown] slideshow={"slide_type": "subslide"} # ### Review Exercise: `for` loops and `if`, `else` and `continue` statements. # __(A)__ In the cell below, use a for loop to print the square roots of the first 25 odd positive integers. # <br> (Remember, the square root of a number, $x$ can be found by $x^{1/2}$) # # __(B)__ If the number generated is greater than 3 and smaller than 5, print "`skip`" and __`continue`__ to the next iteration *without* printing the number. # <br>Hint: Refer to __Logical Operators__ (Seminar 2). # # <a href='#rorLoops'>Jump to for loops</a> # # <a href='#ifelse'>Jump to if and else statements</a> # # <a href='#Continue'>Jump to continue</a> # + # square roots of the first 25 odd positive integers
01b_ControlFlow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Generate docs from taxo
#
# Reads every XBRL instance in the data directory, extracts the reporting
# templates and datapoint labels from the taxonomy, lists the validation
# rules (formulae), and writes reStructuredText documentation pages per
# template.

# ## Read the XBRL-instance

from arelle import ModelManager, Cntlr, ModelFormulaObject, ModelXbrl, ViewFileFormulae, XbrlConst
from arelle import RenderingEvaluator
import src
import pandas as pd
from os import listdir, walk, makedirs, environ
from os.path import isfile, join, exists, basename
from io import StringIO

# make sure you have a 'arelle' directory in the data_path! (This is where the taxonomy is stored)
XBRL_DATA_PATH = 'C:\\Users\\wjwil\\20_local_data\\xbrl\\'
#DATA_PATH = 'H:\\20_local_data\\xbrl\\'
XBRL_RESULTS_PATH = 'C:\\Users\\wjwil\\50_results\\xbrl\\'
LANGUAGE = "en-GB"

# set the location of taxonomy
environ['XDG_CONFIG_HOME'] = XBRL_DATA_PATH

DOCS_PATH ="..\\docs\\"
DATA_PATH ="..\\data\\"
RULES_PATH = "..\\Solvency2-rules\\"
DIR_NAME = "EIOPA Validation rules_2.4.0\\"

# +
# Now we make a modelmanager (offline: the taxonomy is read from the local
# web cache set up above, no downloads are attempted)
controller = Cntlr.Cntlr()
controller.webCache.workOffline = True
modelmanager = ModelManager.initialize(controller)
modelmanager.defaultLang = LANGUAGE
modelmanager.formulaOptions = ModelFormulaObject.FormulaOptions()
modelmanager.loadCustomTransforms()
# -

# template code -> template label, filled per instance below
template_dict = {}
# rc-code -> datapoint label, filled per instance below
datapoint_dict = {}

instances = [file for file in listdir(XBRL_DATA_PATH) if file.endswith(".xbrl")]

# And we read the XBRL instance
for file in instances:
    print(file)
    xbrl_instance = ModelXbrl.load(modelmanager, join(XBRL_DATA_PATH, file))
    linkRoleUris = xbrl_instance.relationshipSet("Table-rendering").linkRoleUris
    for role_uri in linkRoleUris:
        # Try the EU table-axis arcrole first; fall back to the various
        # table-breakdown arcrole versions used by older taxonomies
        tblAxisRelSet = xbrl_instance.relationshipSet(XbrlConst.euTableAxis, role_uri)
        if len(tblAxisRelSet.modelRelationships)==0:
            tblAxisRelSet = xbrl_instance.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011), role_uri)
        for rootconcept in tblAxisRelSet.rootConcepts:
            # rootconcept is a modelTable-object; map template code -> label
            template_dict[rootconcept.definitionLabelsView[2][1]] = rootconcept.definitionLabelsView[3][1]
    tables = list(xbrl_instance.relationshipSet("Table-rendering").linkRoleUris)
    RenderingEvaluator.init(xbrl_instance)
    datapoint_dict.update(src.rc2label.rc2label_dict(xbrl_instance))

# ## List validation rules in taxonomy

# Dump all formulae of the (last loaded) instance to csv, then keep only the
# business-validation rules (expressions starting with "BV")
ViewFileFormulae.viewFormulae(xbrl_instance, XBRL_RESULTS_PATH + "formulae.csv", "header", None)

formulae = pd.read_csv(XBRL_RESULTS_PATH + "formulae.csv")
df_xbrl = formulae[formulae['Expression'].str[0:2]=="BV"]

len(df_xbrl.index)

# Parse each rule expression once and build the rules table in a single pass.
# (Collecting rows in a list and constructing the DataFrame once replaces the
# deprecated and quadratic `df = df.append(...)` pattern.)
rows = []
for row in df_xbrl.index:
    expr = df_xbrl.loc[row, 'Expression']
    label = df_xbrl.loc[row, 'Label']
    rule_templates, rule_datapoints, rule_id, rule_ref = src.parse_formula(expr, syntax = "XBRL")
    rows.append([rule_id, label, rule_templates, rule_datapoints, rule_ref, expr])
df = pd.DataFrame(data = rows,
                  columns = ['Rule id', 'Rule label', 'Rule templates',
                             'Rule datapoints', 'Rule references', 'Rule expression'])

df

# Report datapoints referenced by rules that are missing from the datapoint
# dictionary (the original referenced an undefined name `d` here, which
# raised a NameError; `datapoint_dict` is the dictionary being checked)
for row in df.index:
    for item in df.loc[row, 'Rule datapoints']:
        if item not in datapoint_dict:
            print(item)

# ## Generate subdirectories for XBRL Formulae

# +
# One output directory per template that occurs in at least one rule
all_templates = []
for row in df.index:
    all_templates.extend(df.loc[row, "Rule templates"])
templates = list(pd.Series(data = all_templates).sort_values().unique())
for template in templates:
    if not exists(join(DOCS_PATH, DIR_NAME, template)):
        makedirs(join(DOCS_PATH, DIR_NAME, template))
# -

src.write_rst(join(DOCS_PATH, DIR_NAME), df, template_dict, datapoint_dict)

datapoint_dict
notebooks/rules_documentation/Generate pages from taxo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework - 2

# ## Summary
# This report utilizes the Twitter API to gather tweet data filtered by "avocado" and then analyze the data. Section 1 is focused on getting the data and section 2 is about analyzing the data. I would like to identify what are the attitudes of tweets with the keyword "avocado" by calculating the polarity value for each tweet.

# ### Section - 2 Analyzing the data

# Install textblob into the running kernel. (`%pip` is the supported IPython
# magic; a bare `pip install ...` line is not valid Python in a code cell.)
# %pip install -U textblob

# +
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import collections
import webbrowser

import tweepy as tw
import nltk
from nltk.corpus import stopwords
import re
import networkx
from textblob import TextBlob

import warnings

warnings.filterwarnings("ignore")

sns.set(font_scale=1.5)
sns.set_style("whitegrid")
# -

# Sentiment scores produced in Section 1; one row per tweet with its polarity
sentiment_df = pd.read_csv("data/tweet_sentiment.csv")

sentiment_df.head()

# Summary statistics of the polarity values.
# (`describe` must be *called*: the original `sentiment_df.describe` without
# parentheses only displayed the bound method, not the statistics.)
sentiment_df.describe()

# #### Mean polarity of avocado tweets = 0.319645

# numeric_only restricts the mean to numeric columns so any text columns in
# the csv do not raise on recent pandas versions
sentiment_df.mean(numeric_only=True)

# #### Histogram of polarity

# +
fig, ax = plt.subplots(figsize=(8, 6))

# Plot histogram of the polarity values
sentiment_df.hist(bins=[-1, -0.75, -0.5, -0.25, 0.25, 0.5, 0.75, 1],
                  ax=ax, color="green")
plt.title("Sentiments from Tweets on avocados")
plt.show()
# -

# Remove polarity values equal to zero (neutral tweets) before re-plotting
sentiment_df = sentiment_df[sentiment_df.polarity != 0]

# +
fig, ax = plt.subplots(figsize=(8, 6))

# Plot histogram with break at zero
sentiment_df.hist(bins=[-1, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1],
                  ax=ax, color="green")
plt.title("Sentiments from Tweets on avocados")
plt.show()
# -

# ### Conclusion
# My assumption is people would have a more positive attitude about avocados if they were tweeting about avocados. Based on the dataset and analysis, people who tweet about avocados do indeed have a more favorable attitude of avocados, with mean value of 0.319645. The next step would be to understand where tweets are concentrated geographically; thereby connecting consumers to sellers in a more targeted manner.

# ##### Big thanks to Earth Data Science lesson 5.
# https://www.earthdatascience.org/courses/use-data-open-source-python/intro-to-apis/analyze-tweet-sentiment-in-python/
Technical Notebook/Hwk2_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from numpy import random as rnd import matplotlib.pyplot as plt from IPython.display import display # Имена и количество рожденных names = ['Bob','Jessica','Mary','John','Mel'] births = [968, 155, 77, 578, 973] # Упаковка BabyDataSet = list(zip(names,births)) BabyDataSet[:10] # Датафрейм df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births']) df # Проверка данных в колонках df.dtypes # Проверка данныхконкретной колонки df.Births.dtype # Сортировка Sorted = df.sort_values(['Births'], ascending=False) display(Sorted) Sorted.head(1) # нахождение только максимального значения df['Births'].max() # нахождение только максимального значения со строкой df[df['Births']==df['Births'].max()] # нахождение только имени максимального значения df[df['Births']==df['Births'].max()]['Names'].values # + # df['Births'].plot() # MaxValue = df['Births'].max() # MaxName = df['Names'][df['Births'] == df['Births'].max()].values # Text = str(MaxValue) + " - " + MaxName # plt.annotate(Text, xy=(1, MaxValue), xytext=(8, 0), xycoords=('axes fraction', 'data'), textcoords='offset points') plt.show() print("Самое популярное имя") display(df[df['Births'] == df['Births'].max()])
learning python/learning pandas/pandas_pz_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/portkata/KataGo/blob/master/Rating_games.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rDYNTCaU8KCr" # # KataGo Training Contribution using Colab # # Step 1. Join to https://katagotraining.org # <br>(You can skip this if you don't want.) # # Step 2. Copy this colab image to your Google drive and edit code cell # <br>(change katagotraining id and password to yours if you want) # # Step 3. Run the code cell # + id="nPaa2KJt8Kyn" colab={"base_uri": "https://localhost:8080/"} outputId="888cc03f-890f-426a-c9c9-9186cc24d1b7" # supports: OPENCL, CUDA or AUTO KATAGO_BACKEND="CUDA" import subprocess gpu_name=str(subprocess.check_output("nvidia-smi -q | grep \"Product Name\" | cut -d\":\" -f2 | tr -cd '[:alnum:]._-'", shell=True), encoding='utf-8') if KATAGO_BACKEND == "AUTO": if gpu_name == "TeslaT4": KATAGO_BACKEND="CUDA" else: KATAGO_BACKEND="OPENCL" # !echo "Using Katago Backend : " $KATAGO_BACKEND # !echo "GPU : " $gpu_name # !wget -q https://github.com/wonsiks/katago-colab/releases/download/v1.8.0/libzip.so.5.0 -O /usr/lib/x86_64-linux-gnu/libzip.so.5 # %cd /content # !rm -rf katago-colab # !git clone https://github.com/kinfkong/katago-colab.git 1>/dev/null #download the binarires # !wget -q https://github.com/wonsiks/katago-colab/releases/download/v1.8.0/katago-$KATAGO_BACKEND -O katago # !chmod +x /content/katago # !mkdir -p /root/.katago/ # !cp -r /content/katago-colab/opencltuning /root/.katago/ if KATAGO_BACKEND == "CUDA": # %cd /usr/lib/x86_64-linux-gnu/ # !wget -q https://github.com/wonsiks/katago-colab/releases/download/v1.8.0/libcublas.so.11.3.0.106 -O libcublas.so.11 # 
!wget -q https://github.com/wonsiks/katago-colab/releases/download/v1.8.0/libcublasLt.so.11.3.0.106 -O libcublasLt.so.11 # %cd /content # !wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/libcudnn8_8.0.5.39-1+cuda11.1_amd64.deb # !dpkg -i libcudnn8_8.0.5.39-1+cuda11.1_amd64.deb else: # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaT4_x19_y19_c256_mv10.txt -O /root/.katago/opencltuning/tune8_gpuTeslaT4_x19_y19_c256_mv10.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaT4_x19_y19_c256_mv8.txt -O /root/.katago/opencltuning/tune8_gpuTeslaT4_x19_y19_c256_mv8.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaP4_x19_y19_c256_mv10.txt -O /root/.katago/opencltuning/tune8_gpuTeslaP4_x19_y19_c256_mv10.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaK80_x19_y19_c256_mv10.txt -O /root/.katago/opencltuning/tune8_gpuTeslaK80_x19_y19_c256_mv10.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaK80_x19_y19_c256_mv8.txt -O /root/.katago/opencltuning/tune8_gpuTeslaK80_x19_y19_c256_mv8.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaP100PCIE16GB_x19_y19_c256_mv10.txt -O /root/.katago/opencltuning/tune8_gpuTeslaP100PCIE16GB_x19_y19_c256_mv10.txt # !wget -q https://raw.githubusercontent.com/wonsiks/katago-colab/master/opencltuning/tune8_gpuTeslaP100PCIE16GB_x19_y19_c256_mv8.txt -O /root/.katago/opencltuning/tune8_gpuTeslaP100PCIE16GB_x19_y19_c256_mv8.txt # Change username and password to yours from belows if you want with open('/content/contribute.cfg', mode='w') as f: f.write('serverUrl = https://katagotraining.org/') f.write('\n') f.write('username = xxxxxxxx') f.write('\n') f.write('password = <PASSWORD>') f.write('\n') 
f.write('maxSimultaneousGames = 8') f.write('\n') f.write('onlyPlayRatingMatches = true') f.write('\n') f.write('watchOngoingGameInFile = false\n') f.write('watchOngoingGameInFileName = watchgame.txt\n') # !echo -e "\n[KataGo Contribute Config]" # !cat /content/contribute.cfg # !echo -e "\nStarting KataGo training..." # !/content/katago contribute -config /content/contribute.cfg # + [markdown] id="7xRjpefp0JiH" # # # ``` # function ClickConnect(){ # console.log("Clicked on connect button"); # document.querySelector("colab-connect-button").click() # } # setInterval(ClickConnect,60000) # ``` # #
Rating_games.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# VAE toy example: train a variational autoencoder with a 2-D latent space on
# a categorical toy dataset (4 one-hot classes) and periodically save scatter
# plots of the inferred latent codes, one colour per class.
# NOTE(review): this is TensorFlow 1.x code (tf.contrib.slim, sessions,
# tf.random_uniform); it will not run under TF 2.x without migration.

import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
from tqdm import tqdm_notebook
from matplotlib import pyplot as plt
import os

BATCH_SIZE = 512  # samples per training step
LR = 2e-5         # Adam learning rate


# +
def get_data_samples(N):
    # Draw N random class labels in {0, 1, 2, 3}
    data = tf.random_uniform([N], minval=0, maxval=4, dtype=tf.int32)
    return data

def encoder_func(x):
    # Three ELU hidden layers, then two linear heads giving the mean and
    # log-std of the 2-D Gaussian posterior q(z|x)
    net = x
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    zmean = slim.fully_connected(net, 2, activation_fn=None)
    zlogstd = slim.fully_connected(net, 2, activation_fn=None)
    return zmean, zlogstd

def decoder_func(z):
    # Mirror of the encoder; outputs logits over the 4 classes
    net = z
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    net = slim.fully_connected(net, 64, activation_fn=tf.nn.elu)
    xlogits = slim.fully_connected(net, 4, activation_fn=None)
    return xlogits

def create_scatter(x_test_labels, eps_test, savepath=None):
    # Scatter the inferred latent codes for each of the 4 classes in one
    # figure. Relies on the module-level `sess`, `z_inferred`,
    # `x_real_labels` and `eps` defined below.
    plt.figure(figsize=(5,5), facecolor='w')
    for i in range(4):
        z_out = sess.run(z_inferred, feed_dict={x_real_labels: x_test_labels[i], eps: eps_test})
        plt.scatter(z_out[:, 0], z_out[:, 1], edgecolor='none', alpha=0.5)
    plt.xlim(-3, 3); plt.ylim(-3.5, 3.5)
    plt.axis('off')
    if savepath:
        plt.savefig(savepath)

# Templates share the encoder/decoder variables across all call sites
encoder = tf.make_template('encoder', encoder_func)
decoder = tf.make_template('decoder', decoder_func)
# -

# +
# Reparameterisation: z = mu + eps * sigma with eps ~ N(0, I)
eps = tf.random_normal([BATCH_SIZE, 2])
x_real_labels = get_data_samples(BATCH_SIZE)
x_real = tf.one_hot(x_real_labels, 4)
zmean, zlogstd = encoder(x_real)
z_inferred = zmean + eps*tf.exp(zlogstd)
x_reconstr_logits = decoder(z_inferred)

# Reconstruction term of the negative ELBO.
# NOTE(review): sigmoid (per-class Bernoulli) rather than softmax
# cross-entropy is used on one-hot labels — presumably intentional for this
# toy; confirm.
reconstr_err = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=x_real, logits=x_reconstr_logits), axis=1
)
# KL term.
# NOTE(review): this is a single-sample estimate built from z_inferred; the
# closed-form Gaussian KL would be expressed in zmean/zlogstd only — confirm
# this estimator is intended.
KL = tf.reduce_sum(0.5*tf.square(z_inferred) - zlogstd - 0.5, 1)
loss = tf.reduce_mean(reconstr_err + KL)
optimizer = tf.train.AdamOptimizer(LR)
train_op = optimizer.minimize(loss)
# -

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# +
# Fixed evaluation inputs: one full batch per class, and a fixed noise draw
# so that successive scatter plots are comparable
x_test_labels = [[i] * BATCH_SIZE for i in range(4)]
eps_test = np.random.randn(BATCH_SIZE, 2)

outdir = './out_vae'
if not os.path.exists(outdir):
    os.makedirs(outdir)

progress = tqdm_notebook(range(100000))
for i in progress:
    ELBO_out, _ = sess.run([loss, train_op])
    progress.set_description('ELBO = %.2f' % ELBO_out)
    if i % 100 == 0:
        # Save a latent-space snapshot every 100 steps
        create_scatter(x_test_labels, eps_test, savepath=os.path.join(outdir, '%08d.png' % i))
# -
notebooks/VAE_toy_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# # Data Preparation and Setup
#
# Configures the environment and data for the many-models accelerator:
#   1. connect to the AML Workspace,
#   2. provision a compute cluster for training/forecasting,
#   3. download, split, and register the datasets.
#
# Prerequisite: the 00_Setup_AML_Workspace notebook has been run.

# ## 1.0 Connect to your Workspace

# +
from azureml.core.workspace import Workspace

ws = Workspace.from_config()
ws.get_details()  # inspect the workspace
# -

# ## 2.0 Create compute
#
# One-time creation of a STANDARD_D13_V2 cluster (D-series VMs suit
# compute-/disk-intensive workloads) reused by the training and
# forecasting pipelines.

# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

cpu_cluster_name = "cpucluster"

# Reuse the cluster if it already exists; otherwise provision a new one.
try:
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found an existing cluster, using it instead.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size='STANDARD_D13_V2',
        min_nodes=0,
        max_nodes=5,
    )
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
    cpu_cluster.wait_for_completion(show_output=True)
# -

# ## 3.0 Create Datasets
#
# Simulated orange-juice weekly sales from Azure Open Datasets
# (3,991 stores x 3 brands = 11,973 possible models). Only the first
# 10 files are downloaded here; edit `.take(10)` to scale up.

# +
# #%pip install --upgrade azureml-opendatasets

# +
import os

from azureml.core.dataset import Dataset
from azureml.opendatasets import OjSalesSimulated

# Full file dataset, and a small 10-file sample for this walkthrough.
oj_sales_files = OjSalesSimulated.get_file_dataset()
oj_sales_files_small = OjSalesSimulated.get_file_dataset().take(10)

# Local download folder.
target_path = 'oj_sales_data_2'
if not os.path.exists(target_path):
    os.mkdir(target_path)

oj_sales_files_small.download(target_path, overwrite=True)
# -

# Split each file into train/test (the test split keeps the last 20 weeks,
# respecting temporal order via the timestamp column) and upload both to
# the default Datastore.

# +
from scripts.helper import split_data_upload_to_datastore

datastore = ws.get_default_datastore()

# Datastore upload paths for the two splits.
ds_train_path = f'{target_path}_train'
ds_test_path = f'{target_path}_test'

timestamp_column = 'WeekStarting'  # temporal ordering key
n_test_periods = 20                # weeks reserved for the test set

split_data_upload_to_datastore(
    target_path, timestamp_column, n_test_periods,
    datastore, ds_train_path, ds_test_path,
)
# -

# Register FileDatasets (the recommended format for the many-models
# pattern) under simple names for later training/forecasting notebooks.

# +
ds_train = Dataset.File.from_files(path=datastore.path(ds_train_path), validate=False)
ds_test = Dataset.File.from_files(path=datastore.path(ds_test_path), validate=False)

dataset_name = 'oj_data_small'
train_dataset_name = f'{dataset_name}_train'
test_dataset_name = f'{dataset_name}_test'

ds_train.register(ws, train_dataset_name, create_new_version=True)
ds_test.register(ws, test_dataset_name, create_new_version=True)
# -

# Next: 02_Training_Pipeline.ipynb to train the models.
Custom_Script/01_Data_Preparation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:DeepFilter] *
#     language: python
#     name: conda-env-DeepFilter-py
# ---

# Evaluation notebook: compares deep-learning ECG denoisers (DRNN, FCN-DAE,
# DeepFilter variants) against classical FIR/IIR filtering on three noise
# types: BW (baseline wander), EM (electrode motion), MA (muscle artifact).
# NOTE(review): loop-body indentation was reconstructed from a
# whitespace-mangled dump — confirm against the original notebook.

# + Distortion metrics. Inputs are presumably (beats, samples, 1) arrays —
# COS_SIM squeezes the trailing channel axis and the others reduce over
# axis 1, the per-beat sample dimension. TODO confirm shapes at call sites.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity


def SSD(y, y_pred):
    """Sum of squared differences per beat."""
    return np.sum(np.square(y - y_pred), axis=1)  # axis 1 is the signal dimension


def MAD(y, y_pred):
    """Maximum absolute deviation per beat."""
    return np.max(np.abs(y - y_pred), axis=1)  # axis 1 is the signal dimension


def PRD(y, y_pred):
    """Percentage RMS difference per beat.

    NOTE(review): the denominator subtracts np.mean(y) — the GLOBAL mean over
    all beats and samples, not a per-beat mean. Verify this is intended.
    """
    N = np.sum(np.square(y_pred - y), axis=1)
    D = np.sum(np.square(y_pred - np.mean(y)), axis=1)
    PRD = np.sqrt(N/D) * 100
    return PRD


def COS_SIM(y, y_pred):
    """Cosine similarity between each reference beat and its estimate."""
    cos_sim = []
    y = np.squeeze(y, axis=-1)
    y_pred = np.squeeze(y_pred, axis=-1)
    for idx in range(len(y)):
        # kl_temp is a 1x1 similarity matrix (the "kl" name is historical).
        kl_temp = cosine_similarity(y[idx].reshape(1, -1), y_pred[idx].reshape(1, -1))
        cos_sim.append(kl_temp)
    cos_sim = np.array(cos_sim)
    return cos_sim
# -

# + Plotting and table helpers. Each generate_* helper rotates the metric
# matrix with np.rot90 so rows become observations and columns the
# models/methods named in `description`.
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from prettytable import PrettyTable


def generate_violinplots(np_data, description, ylabel, log):
    """Violin plot of one metric across models; `log` switches the y scale."""
    # Process the results and store in Panda objects
    col = description
    loss_val_np = np.rot90(np_data)
    pd_df = pd.DataFrame.from_records(loss_val_np, columns=col)
    # Set up the matplotlib figure
    f, ax = plt.subplots()
    sns.set(style="whitegrid")
    ax = sns.violinplot(data=pd_df, palette="Set3", bw=.2, cut=1, linewidth=1)
    if log:
        ax.set_yscale("log")
    ax.set(xlabel='Models/Methods', ylabel=ylabel)
    ax = sns.despine(left=True, bottom=True)
    plt.show()
    #plt.savefig(store_folder + 'violinplot_fco' + info + description + '.png')


def generate_barplot(np_data, description, ylabel, log):
    """Bar plot of one metric across models; `log` switches the y scale."""
    # Process the results and store in Panda objects
    col = description
    loss_val_np = np.rot90(np_data)
    pd_df = pd.DataFrame.from_records(loss_val_np, columns=col)
    # Set up the matplotlib figure
    f, ax = plt.subplots()
    sns.set(style="whitegrid")
    ax = sns.barplot(data=pd_df)
    if log:
        ax.set_yscale("log")
    ax.set(xlabel='Models/Methods', ylabel=ylabel)
    ax = sns.despine(left=True, bottom=True)
    plt.show()
    #plt.savefig(store_folder + 'violinplot_fco' + info + description + '.png')


def generate_boxplot(np_data, description, ylabel, log):
    """Box plot of one metric across models; `log` switches the y scale."""
    # Process the results and store in Panda objects
    col = description
    loss_val_np = np.rot90(np_data)
    pd_df = pd.DataFrame.from_records(loss_val_np, columns=col)
    # Set up the matplotlib figure
    f, ax = plt.subplots()
    sns.set(style="whitegrid")
    ax = sns.boxplot(data=pd_df)
    if log:
        ax.set_yscale("log")
    ax.set(xlabel='Models/Methods', ylabel=ylabel)
    ax = sns.despine(left=True, bottom=True)
    plt.show()
    #plt.savefig(store_folder + 'violinplot_fco' + info + description + '.png')


def generate_hboxplot(np_data, description, ylabel, log, set_x_axis_size=None):
    """Horizontal box plot; `set_x_axis_size` optionally fixes the x limits."""
    # Process the results and store in Panda objects
    col = description
    loss_val_np = np.rot90(np_data)
    pd_df = pd.DataFrame.from_records(loss_val_np, columns=col)
    # Set up the matplotlib figure
    sns.set(style="whitegrid")
    f, ax = plt.subplots(figsize=(15, 6))
    ax = sns.boxplot(data=pd_df, orient="h", width=0.4)
    if log:
        ax.set_xscale("log")
    if set_x_axis_size != None:
        ax.set_xlim(set_x_axis_size)
    ax.set(ylabel='Models/Methods', xlabel=ylabel)
    ax = sns.despine(left=True, bottom=True)
    plt.show()
    #plt.savefig(store_folder + 'violinplot_fco' + info + description + '.png')


def ecg_view(ecg, ecg_blw, ecg_dl, ecg_f, signal_name=None, beat_no=None):
    """Overlay original, noisy, DL-filtered and IIR-filtered ECG traces."""
    fig, ax = plt.subplots(figsize=(16, 9))
    plt.plot(ecg_blw, 'k', label='ECG + BW')
    plt.plot(ecg, 'g', label='ECG orig')
    plt.plot(ecg_dl, 'b', label='ECG DL Filtered')
    plt.plot(ecg_f, 'r', label='ECG IIR Filtered')
    plt.grid(True)
    plt.ylabel('au')
    plt.xlabel('samples')
    leg = ax.legend()
    if signal_name != None and beat_no != None:
        plt.title('Signal ' + str(signal_name) + 'beat ' + str(beat_no))
    else:
        plt.title('ECG signal for comparison')
    plt.show()


def ecg_view_diff(ecg, ecg_blw, ecg_dl, ecg_f, signal_name=None, beat_no=None):
    """Like ecg_view, but also plots the residuals of each filter.

    NOTE(review): `ecg_blw` is accepted but never plotted here — presumably
    intentional (residuals replace the noisy trace); confirm.
    """
    fig, ax = plt.subplots(figsize=(16, 9))
    plt.plot(ecg, 'g', label='ECG orig')
    plt.plot(ecg_dl, 'b', label='ECG DL Filtered')
    plt.plot(ecg_f, 'r', label='ECG IIR Filtered')
    plt.plot(ecg - ecg_dl, color='#0099ff', lw=3, label='Difference ECG - DL Filter')
    plt.plot(ecg - ecg_f, color='#cb828d', lw=3, label='Difference ECG - IIR Filter')
    plt.grid(True)
    plt.ylabel('Amplitude (au)')
    plt.xlabel('samples')
    leg = ax.legend()
    if signal_name != None and beat_no != None:
        plt.title('Signal ' + str(signal_name) + 'beat ' + str(beat_no))
    else:
        plt.title('ECG signal for comparison')
    plt.show()


def generate_table(metrics, metric_values, Exp_names):
    """Print one row per experiment: 'mean (standard error)' per metric."""
    # Print tabular results in the console, in a pretty way
    tb = PrettyTable()
    ind = 0
    for exp_name in Exp_names:
        tb.field_names = ['Method/Model'] + metrics
        tb_row = []
        tb_row.append(exp_name)
        for metric in metric_values:
            # metric_values[metric][model][beat]
            m_mean = np.mean(metric[ind])
            m_std = np.std(metric[ind])
            # Parenthesised value is the standard error of the mean.
            tb_row.append('{:.4f}'.format(m_mean) + ' (' + '{:.4f}'.format(m_std / np.sqrt(len(metric[ind]))) + ')')
        tb.add_row(tb_row)
        ind += 1
    print(tb)


def generate_table_time(column_names, all_values, Exp_names, gpu=True):
    """Print train/test wall-times per experiment.

    NOTE(review): mutates `all_values` in place (rotates FIR/IIR from the
    end to the front so rows line up with Exp_names) — callers should not
    invoke this twice on the same list.
    """
    # Print tabular results in the console, in a pretty way
    # The FIR and IIR are the last on all_values
    # We need circular shift them to the right
    all_values[0] = all_values[0][-2::] + all_values[0][0:-2]
    all_values[1] = all_values[1][-2::] + all_values[1][0:-2]
    tb = PrettyTable()
    ind = 0
    if gpu:
        device = 'GPU'
    else:
        device = 'CPU'
    for exp_name in Exp_names:
        tb.field_names = ['Method/Model'] + [column_names[0] + '(' + device + ') h:m:s:ms'] + [
            column_names[1] + '(' + device + ') h:m:s:ms']
        tb_row = []
        tb_row.append(exp_name)
        tb_row.append(all_values[0][ind])
        tb_row.append(all_values[1][ind])
        tb.add_row(tb_row)
        ind += 1
    print(tb)
    if gpu:
        print('* For FIR and IIR Filters is CPU since scipy filters are CPU based implementations')
# -

# + Load pickled results. Each experiment was run on two noise versions
# (nv1/nv2); per noise type the pickles hold [X_test, y_test, y_pred]
# triples that are concatenated across the two versions.
# NOTE(review): `datetime` is imported but unused below; `as input` shadows
# the builtin inside each `with` block (harmless here, but fragile).
import _pickle as pickle
from datetime import datetime
import numpy as np

dl_experiments = ['DRNN',
                  'FCN-DAE',
                  'Vanilla L',
                  'Vanilla NL',
                  'Multibranch LANL',
                  'Multibranch LANLD'
                  ]

noise_types = ['BW', 'EM', 'MA']
folders = ['BA-DeepFilter', 'BA-DeepFilter_EM', 'BA-DeepFilter_MA']

timing = {}
test_DRNN = {}
test_FCN_DAE = {}
test_Vanilla_L = {}
test_Vanilla_NL = {}
test_Multibranch_LANL = {}
test_Multibranch_LANLD = {}
test_FIR = {}
test_IIR = {}

for noise in range(len(noise_types)):
    # Load timing: element-wise sum of nv1 + nv2 train/test times.
    with open(folders[noise]+'/timing_nv1.pkl', 'rb') as input:
        timing_nv1 = pickle.load(input)
    [train_time_list_nv1, test_time_list_nv1] = timing_nv1
    with open(folders[noise]+'/timing_nv2.pkl', 'rb') as input:
        timing_nv2 = pickle.load(input)
    [train_time_list_nv2, test_time_list_nv2] = timing_nv2
    train_time_list = []
    test_time_list = []
    for i in range(len(train_time_list_nv1)):
        train_time_list.append(train_time_list_nv1[i] + train_time_list_nv2[i])
    for i in range(len(test_time_list_nv1)):
        test_time_list.append(test_time_list_nv1[i] + test_time_list_nv2[i])
    timing[noise_types[noise]] = [train_time_list, test_time_list]

    # Load Results DRNN
    with open(folders[noise]+'/test_results_' + dl_experiments[0] + '_nv1.pkl', 'rb') as input:
        test_DRNN_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[0] + '_nv2.pkl', 'rb') as input:
        test_DRNN_nv2 = pickle.load(input)
    test_DRNN[noise_types[noise]] = [np.concatenate((test_DRNN_nv1[0], test_DRNN_nv2[0])),
                                     np.concatenate((test_DRNN_nv1[1], test_DRNN_nv2[1])),
                                     np.concatenate((test_DRNN_nv1[2], test_DRNN_nv2[2]))]

    # Load Results FCN_DAE
    with open(folders[noise]+'/test_results_' + dl_experiments[1] + '_nv1.pkl', 'rb') as input:
        test_FCN_DAE_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[1] + '_nv2.pkl', 'rb') as input:
        test_FCN_DAE_nv2 = pickle.load(input)
    test_FCN_DAE[noise_types[noise]] = [np.concatenate((test_FCN_DAE_nv1[0], test_FCN_DAE_nv2[0])),
                                        np.concatenate((test_FCN_DAE_nv1[1], test_FCN_DAE_nv2[1])),
                                        np.concatenate((test_FCN_DAE_nv1[2], test_FCN_DAE_nv2[2]))]

    # Load Results Vanilla L
    with open(folders[noise]+'/test_results_' + dl_experiments[2] + '_nv1.pkl', 'rb') as input:
        test_Vanilla_L_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[2] + '_nv2.pkl', 'rb') as input:
        test_Vanilla_L_nv2 = pickle.load(input)
    test_Vanilla_L[noise_types[noise]] = [np.concatenate((test_Vanilla_L_nv1[0], test_Vanilla_L_nv2[0])),
                                          np.concatenate((test_Vanilla_L_nv1[1], test_Vanilla_L_nv2[1])),
                                          np.concatenate((test_Vanilla_L_nv1[2], test_Vanilla_L_nv2[2]))]

    # Load Results Exp Vanilla NL
    with open(folders[noise]+'/test_results_' + dl_experiments[3] + '_nv1.pkl', 'rb') as input:
        test_Vanilla_NL_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[3] + '_nv2.pkl', 'rb') as input:
        test_Vanilla_NL_nv2 = pickle.load(input)
    test_Vanilla_NL[noise_types[noise]] = [np.concatenate((test_Vanilla_NL_nv1[0], test_Vanilla_NL_nv2[0])),
                                           np.concatenate((test_Vanilla_NL_nv1[1], test_Vanilla_NL_nv2[1])),
                                           np.concatenate((test_Vanilla_NL_nv1[2], test_Vanilla_NL_nv2[2]))]

    # Load Results Multibranch LANL
    with open(folders[noise]+'/test_results_' + dl_experiments[4] + '_nv1.pkl', 'rb') as input:
        test_Multibranch_LANL_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[4] + '_nv2.pkl', 'rb') as input:
        test_Multibranch_LANL_nv2 = pickle.load(input)
    test_Multibranch_LANL[noise_types[noise]] = [np.concatenate((test_Multibranch_LANL_nv1[0], test_Multibranch_LANL_nv2[0])),
                                                 np.concatenate((test_Multibranch_LANL_nv1[1], test_Multibranch_LANL_nv2[1])),
                                                 np.concatenate((test_Multibranch_LANL_nv1[2], test_Multibranch_LANL_nv2[2]))]

    # Load Results Multibranch LANLD
    with open(folders[noise]+'/test_results_' + dl_experiments[5] + '_nv1.pkl', 'rb') as input:
        test_Multibranch_LANLD_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_' + dl_experiments[5] + '_nv2.pkl', 'rb') as input:
        test_Multibranch_LANLD_nv2 = pickle.load(input)
    test_Multibranch_LANLD[noise_types[noise]] = [np.concatenate((test_Multibranch_LANLD_nv1[0], test_Multibranch_LANLD_nv2[0])),
                                                  np.concatenate((test_Multibranch_LANLD_nv1[1], test_Multibranch_LANLD_nv2[1])),
                                                  np.concatenate((test_Multibranch_LANLD_nv1[2], test_Multibranch_LANLD_nv2[2]))]

    # Load Result FIR Filter
    with open(folders[noise]+'/test_results_FIR_nv1.pkl', 'rb') as input:
        test_FIR_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_FIR_nv2.pkl', 'rb') as input:
        test_FIR_nv2 = pickle.load(input)
    test_FIR[noise_types[noise]] = [np.concatenate((test_FIR_nv1[0], test_FIR_nv2[0])),
                                    np.concatenate((test_FIR_nv1[1], test_FIR_nv2[1])),
                                    np.concatenate((test_FIR_nv1[2], test_FIR_nv2[2]))]

    # Load Result IIR Filter
    with open(folders[noise]+'/test_results_IIR_nv1.pkl', 'rb') as input:
        test_IIR_nv1 = pickle.load(input)
    with open(folders[noise]+'/test_results_IIR_nv2.pkl', 'rb') as input:
        test_IIR_nv2 = pickle.load(input)
    test_IIR[noise_types[noise]] = [np.concatenate((test_IIR_nv1[0], test_IIR_nv2[0])),
                                    np.concatenate((test_IIR_nv1[1], test_IIR_nv2[1])),
                                    np.concatenate((test_IIR_nv1[2], test_IIR_nv2[2]))]
# -

# + Compute all four metrics for every model and noise type.
SSD_values_DL_DRNN = {}
MAD_values_DL_DRNN = {}
PRD_values_DL_DRNN = {}
COS_SIM_values_DL_DRNN = {}

SSD_values_DL_FCN_DAE = {}
MAD_values_DL_FCN_DAE = {}
PRD_values_DL_FCN_DAE = {}
COS_SIM_values_DL_FCN_DAE = {}

SSD_values_DL_exp_1 = {}
MAD_values_DL_exp_1 = {}
PRD_values_DL_exp_1 = {}
COS_SIM_values_DL_exp_1 = {}

SSD_values_DL_exp_2 = {}
MAD_values_DL_exp_2 = {}
PRD_values_DL_exp_2 = {}
COS_SIM_values_DL_exp_2 = {}

SSD_values_DL_exp_3 = {}
MAD_values_DL_exp_3 = {}
PRD_values_DL_exp_3 = {}
COS_SIM_values_DL_exp_3 = {}

SSD_values_DL_exp_4 = {}
MAD_values_DL_exp_4 = {}
PRD_values_DL_exp_4 = {}
COS_SIM_values_DL_exp_4 = {}

SSD_values_FIR = {}
MAD_values_FIR = {}
PRD_values_FIR = {}
COS_SIM_values_FIR = {}

SSD_values_IIR = {}
MAD_values_IIR = {}
PRD_values_IIR = {}
COS_SIM_values_IIR = {}

for noise in noise_types:
    # DL Metrics

    # DRNN
    [X_test, y_test, y_pred] = test_DRNN[noise]
    SSD_values_DL_DRNN[noise] = SSD(y_test, y_pred)
    MAD_values_DL_DRNN[noise] = MAD(y_test, y_pred)
    PRD_values_DL_DRNN[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_DRNN[noise] = COS_SIM(y_test, y_pred)

    # Exp FCN-DAE
    [X_test, y_test, y_pred] = test_FCN_DAE[noise]
    SSD_values_DL_FCN_DAE[noise] = SSD(y_test, y_pred)
    MAD_values_DL_FCN_DAE[noise] = MAD(y_test, y_pred)
    PRD_values_DL_FCN_DAE[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_FCN_DAE[noise] = COS_SIM(y_test, y_pred)

    # Vanilla L
    [X_test, y_test, y_pred] = test_Vanilla_L[noise]
    SSD_values_DL_exp_1[noise] = SSD(y_test, y_pred)
    MAD_values_DL_exp_1[noise] = MAD(y_test, y_pred)
    PRD_values_DL_exp_1[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_exp_1[noise] = COS_SIM(y_test, y_pred)

    # Vanilla_NL
    [X_test, y_test, y_pred] = test_Vanilla_NL[noise]
    SSD_values_DL_exp_2[noise] = SSD(y_test, y_pred)
    MAD_values_DL_exp_2[noise] = MAD(y_test, y_pred)
    PRD_values_DL_exp_2[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_exp_2[noise] = COS_SIM(y_test, y_pred)

    # Multibranch_LANL
    [X_test, y_test, y_pred] = test_Multibranch_LANL[noise]
    SSD_values_DL_exp_3[noise] = SSD(y_test, y_pred)
    MAD_values_DL_exp_3[noise] = MAD(y_test, y_pred)
    PRD_values_DL_exp_3[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_exp_3[noise] = COS_SIM(y_test, y_pred)

    # Multibranch_LANLD
    [X_test, y_test, y_pred] = test_Multibranch_LANLD[noise]
    SSD_values_DL_exp_4[noise] = SSD(y_test, y_pred)
    MAD_values_DL_exp_4[noise] = MAD(y_test, y_pred)
    PRD_values_DL_exp_4[noise] = PRD(y_test, y_pred)
    COS_SIM_values_DL_exp_4[noise] = COS_SIM(y_test, y_pred)

    # Digital Filtering

    # FIR Filtering Metrics
    [X_test, y_test, y_filter] = test_FIR[noise]
    SSD_values_FIR[noise] = SSD(y_test, y_filter)
    MAD_values_FIR[noise] = MAD(y_test, y_filter)
    PRD_values_FIR[noise] = PRD(y_test, y_filter)
    COS_SIM_values_FIR[noise] = COS_SIM(y_test, y_filter)

    # IIR Filtering Metrics (Best)
    [X_test, y_test, y_filter] = test_IIR[noise]
    SSD_values_IIR[noise] = SSD(y_test, y_filter)
    MAD_values_IIR[noise] = MAD(y_test, y_filter)
    PRD_values_IIR[noise] = PRD(y_test, y_filter)
    COS_SIM_values_IIR[noise] = COS_SIM(y_test, y_filter)
# -

# + Assemble per-noise metric lists (ordering matches Exp_names below:
# FIR, IIR, DRNN, FCN-DAE, DeepFilter). The three intermediate DeepFilter
# ablations (exp_1..exp_3) are deliberately excluded — kept commented out.
SSD_all = {}
MAD_all = {}
PRD_all = {}
COS_SIM_all = {}

for noise in noise_types:
    SSD_all[noise] = [SSD_values_FIR[noise],
                      SSD_values_IIR[noise],
                      SSD_values_DL_DRNN[noise],
                      SSD_values_DL_FCN_DAE[noise],
                      #SSD_values_DL_exp_1[noise],
                      #SSD_values_DL_exp_2[noise],
                      #SSD_values_DL_exp_3[noise],
                      SSD_values_DL_exp_4[noise],
                      ]

    MAD_all[noise] = [MAD_values_FIR[noise],
                      MAD_values_IIR[noise],
                      MAD_values_DL_DRNN[noise],
                      MAD_values_DL_FCN_DAE[noise],
                      #MAD_values_DL_exp_1[noise],
                      #MAD_values_DL_exp_2[noise],
                      #MAD_values_DL_exp_3[noise],
                      MAD_values_DL_exp_4[noise],
                      ]

    PRD_all[noise] = [PRD_values_FIR[noise],
                      PRD_values_IIR[noise],
                      PRD_values_DL_DRNN[noise],
                      PRD_values_DL_FCN_DAE[noise],
                      #PRD_values_DL_exp_1[noise],
                      #PRD_values_DL_exp_2[noise],
                      #PRD_values_DL_exp_3[noise],
                      PRD_values_DL_exp_4[noise],
                      ]

    COS_SIM_all[noise] = [COS_SIM_values_FIR[noise],
                          COS_SIM_values_IIR[noise],
                          COS_SIM_values_DL_DRNN[noise],
                          COS_SIM_values_DL_FCN_DAE[noise],
                          #COS_SIM_values_DL_exp_1[noise],
                          #COS_SIM_values_DL_exp_2[noise],
                          #COS_SIM_values_DL_exp_3[noise],
                          COS_SIM_values_DL_exp_4[noise],
                          ]

    Exp_names = ['FIR Filter', 'IIR Filter'] + ['DRNN', 'FCN-DAE', 'DeepFilter']
    metrics = ['SSD', 'COS_SIM']
    metric_values = [SSD_all[noise], COS_SIM_all[noise]]

    print(noise+':')
    # Metrics table
    generate_table(metrics, metric_values, Exp_names)
    # Timing table (NOTE: generate_table_time reorders timing[noise] in place)
    timing_var = ['training', 'test']
    generate_table_time(timing_var, timing[noise], Exp_names, gpu=True)
# -

# + Group per-beat metrics by the noise amplitude ('rnd', i.e. SNR bucket)
# drawn for each beat, to study performance as a function of noise level.
# rnd_test is duplicated because nv1 and nv2 results were concatenated above.
import pandas as pd

result_means = {}
result_stds = {}
result_size = {}

SSD_columns = ['SSD_values_FIR', 'SSD_values_IIR', 'SSD_values_DL_DRNN', 'SSD_values_DL_FCN_DAE', 'SSD_values_DL_exp_4']
MAD_columns = ['MAD_values_FIR', 'MAD_values_IIR', 'MAD_values_DL_DRNN', 'MAD_values_DL_FCN_DAE', 'MAD_values_DL_exp_4']
PRD_columns = ['PRD_values_FIR', 'PRD_values_IIR', 'PRD_values_DL_DRNN', 'PRD_values_DL_FCN_DAE', 'PRD_values_DL_exp_4']
COS_SIM_columns = ['COS_SIM_values_FIR', 'COS_SIM_values_IIR', 'COS_SIM_values_DL_DRNN', 'COS_SIM_values_DL_FCN_DAE', 'COS_SIM_values_DL_exp_4']

for noise in range(len(noise_types)):
    rnd_test = np.load(folders[noise]+'/rnd_test.npy')
    rnd_test = np.concatenate([rnd_test, rnd_test])
    df = pd.DataFrame({'rnd': rnd_test})
    for r in range(len(Exp_names)):
        df[SSD_columns[r]] = SSD_all[noise_types[noise]][r]
        df[MAD_columns[r]] = MAD_all[noise_types[noise]][r]
        df[PRD_columns[r]] = PRD_all[noise_types[noise]][r]
        # Flatten the (n, 1, 1) cosine-similarity arrays to match the others.
        COS_SIM_all[noise_types[noise]][r] = np.reshape(COS_SIM_all[noise_types[noise]][r], np.shape(PRD_all[noise_types[noise]][r]))
        df[COS_SIM_columns[r]] = COS_SIM_all[noise_types[noise]][r]
    result_means[noise_types[noise]] = df.groupby('rnd').mean()
    result_stds[noise_types[noise]] = df.groupby('rnd').std()
    result_size[noise_types[noise]] = df.groupby('rnd').size()
# -

len(rnd_test)  # notebook echo: sanity-check the number of test beats

# + SSD vs noise level, one figure per noise type, one series per model.
import matplotlib.pyplot as plt

for noise in noise_types:
    fig = plt.figure(figsize=(15, 7))
    plt.rc('font', size=20)
    for name in range(len(SSD_columns)):
        # Standard error of the mean per noise-level bucket.
        yerr = np.array(result_stds[noise][SSD_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
        plt.errorbar(result_means[noise].index, result_means[noise][SSD_columns[name]].tolist(),
                     yerr=yerr, label=Exp_names[name], fmt='.', elinewidth=1.2, capsize=1)
    plt.legend()
    plt.title(noise)
    plt.yscale('log')
    plt.ylabel('SSD')
    plt.xlabel('SNR [dB]')
    plt.ylim(1, 260)
    plt.savefig('Figures/SingleECG_SSD_{}.png'.format(noise), bbox_inches='tight')
# -

# Two-panel figure per noise type: (a) SSD and (b) cosine similarity vs SNR.
# NOTE(review): the two yerr_* assignments before the inner loop use `name`
# left over from the previous cell and are immediately overwritten — dead code.
for noise in noise_types:
    yerr_ssd = np.array(result_stds[noise][SSD_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
    yerr_cos_sim = np.array(result_stds[noise][COS_SIM_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
    plt.rc('font', size=20)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15,14))
    fig.subplots_adjust(hspace=0.7)
    for name in range(len(SSD_columns)):
        yerr_ssd = np.array(result_stds[noise][SSD_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
        yerr_cos_sim = np.array(result_stds[noise][COS_SIM_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
        ax1.errorbar(result_means[noise].index, result_means[noise][SSD_columns[name]].tolist(),
                     yerr=yerr_ssd, label=Exp_names[name], fmt='.', elinewidth=1.2, capsize=1)
        ax1.set_xlabel('SNR [dB]')
        ax1.set_ylabel('SSD')
        ax1.title.set_text('(a)')
        ax1.set_yscale('log')
        ax1.set_ylim(1, 260)
        ax1.legend()
        ax2.errorbar(result_means[noise].index, result_means[noise][COS_SIM_columns[name]].tolist(),
                     yerr=yerr_cos_sim, label=Exp_names[name], fmt='.', elinewidth=1.2, capsize=1)
        ax2.set_xlabel('SNR [dB]')
        ax2.set_ylabel('CosSim')
        ax2.title.set_text('(b)')
        ax2.legend()
        ax2.set_ylim(0.3, 1)
        #ax2.set_yscale('log')
    plt.tight_layout()
    plt.savefig('Figures/SingleECG_Metrics_{}.png'.format(noise), bbox_inches='tight')
    plt.show()

# + Inverse view: SSD vs noise level, one figure per MODEL, one series per
# noise type.
import matplotlib.pyplot as plt

for name in range(len(SSD_columns)):
    fig = plt.figure(figsize=(15, 7))
    plt.rc('font', size=15)
    for noise in noise_types:
        yerr = np.array(result_stds[noise][SSD_columns[name]].tolist()) / np.array(np.sqrt(result_size[noise]).tolist())
        plt.errorbar(result_means[noise].index, result_means[noise][SSD_columns[name]].tolist(),
                     yerr=yerr, label=noise, fmt='.', elinewidth=1.2, capsize=1)
    plt.legend()
    plt.title(Exp_names[name])
    plt.yscale('log')
    plt.ylabel('SSD')
    plt.xlabel('Noise / Signal')
    plt.ylim(1, 250)
# -

# + Qualitative examples: one low-noise and one high-noise DRNN-denoised
# beat, side by side, annotated with SSD / cosine similarity.
# 512 samples at 360 Hz -> time axis in seconds.
# NOTE(review): the loop variable `noise_i` is unused — `noise = 1`
# hard-codes EM, so all three iterations redraw the same noise type, and the
# SSD/CosSim annotations index SSD_all['EM'] to match. Confirm intent.
time = np.arange(0, 512) / 360
N_S = [0, 20]

for noise_i in range(len(noise_types)):
    noise = 1
    rnd_test = np.load(folders[noise]+'/rnd_test.npy')
    rnd_test = np.concatenate([rnd_test, rnd_test])
    # Pick a representative beat at the weakest (19.75) and strongest (0)
    # noise amplitudes, near the middle of each group.
    low_noise = np.transpose(np.where(rnd_test == 19.75))
    high_noise = np.transpose(np.where(rnd_test == 0))
    low_noise_idx = low_noise[int((len(low_noise))/2)+1]
    high_noise_idx = high_noise[int((len(high_noise))/2)-2]

    [X_test, y_test, y_pred] = test_DRNN[noise_types[noise]]

    plt.rc('font', size=20)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,7), sharey = 'all')
    fig.subplots_adjust(hspace=0.7)

    ax1.plot(time, np.reshape(X_test[low_noise_idx], len(time)), label='Noised')
    ax1.plot(time, np.reshape(y_test[low_noise_idx], len(time)), label='No Noise')
    ax1.plot(time, np.reshape(y_pred[low_noise_idx], len(time)), label='Denoised')
    #ax1.set_xlim(0, 5)
    ax1.set_xlabel('Time [s]')
    ax1.set_ylabel('Amplitude [mV]')
    ax1.legend()
    ax1.text(1, -0.25, 'SSD: {:.2f}\nCosSim: {:.2f}'.format(float(SSD_all['EM'][2][low_noise_idx]),
                                                            float(COS_SIM_all['EM'][2][low_noise_idx])))
    ax1.title.set_text('Low Noise ' + noise_types[noise])

    ax2.plot(time, np.reshape(X_test[high_noise_idx], len(time)), label='Noised')
    ax2.plot(time, np.reshape(y_test[high_noise_idx], len(time)), label='No Noise')
    ax2.plot(time, np.reshape(y_pred[high_noise_idx], len(time)), label='Denoised')
    #ax1.set_xlim(0, 5)
    ax2.set_xlabel('Time [s]')
    #ax2.set_ylabel('Amplitude [mV]')
    ax2.legend()
    ax2.title.set_text('High Noise ' + noise_types[noise])
    ax2.text(1, -0.25, 'SSD: {:.2f}\nCosSim: {:.2f}'.format(float(SSD_all['EM'][2][high_noise_idx]),
                                                            float(COS_SIM_all['EM'][2][high_noise_idx])))
    plt.tight_layout()
    #plt.savefig('Figures/Noise_Range_{}.png'.format(noise_types[noise]), bbox_inches='tight')
    #plt.savefig('Figures/singleECG_EM_denoised.png', bbox_inches='tight')
    plt.show()
# -
Evaluation_single.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="raCVgtYPPiRl" # # 파이썬 기본 문법 복습 # + [markdown] colab_type="text" id="I0VxNgsuPvYZ" # ### Q1 문자열 바꾸기 # 다음과 같은 문자열이 있다. # ``` # a:b:c:d # ``` # 문자열의 split와 join 함수를 사용하여 위 문자열을 다음과 같이 고치시오. # ``` # `a#b#c#d` # ``` # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 1561, "status": "ok", "timestamp": 1592227657836, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="Nlm7szZzPK6Q" outputId="6e8b17b1-4374-4887-d814-c9211cfeef44" a = "a:b:c:d" b = a.split(":") c = "#".join(b) print(c) # + [markdown] colab_type="text" id="aRv_5CznQGjG" # ### Q2 딕셔너리 값 추출하기 # # 다음은 딕셔너리의 a에서 'C'라는 key에 해당하는 value를 출력하는 프로그램이다. # ``` # >>> a = {'A':90, 'B':80} # >>> a['C'] # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # KeyError: 'C' # ``` # # a 딕셔너리에는 'C'라는 key가 없으므로 위와 같은 오류가 발생한다. # # 'C'에 해당하는 key 값이 없을 경우 오류 대신 70을 얻을 수 있도록 수정하시오.(힌트 get() 메소드) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 1610, "status": "ok", "timestamp": 1592227842861, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="Yy9c272mQQE7" outputId="9ca265ed-3f04-45ba-e87b-82995aeb7037" a = {'A':90, 'B':80} a.get('C', 70) # + [markdown] colab_type="text" id="ZXwU6VL1ToC2" # ### Q3 리스트의 더하기와 extend 함수 # # 다음과 같은 리스트 a가 있다. 
# ``` # >>> a = [1, 2, 3] # >>> a = a + [4,5] # >>> a.extend([4, 5]) # ``` # # '+' 기호를 사용하여 더한 것과 extend한 것의 차이점이 있을까? 있다면 그 차이점을 설명하시오. # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 1010, "status": "ok", "timestamp": 1592227924606, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="lt2NjzkvUIXh" outputId="f3527986-3502-4b6f-a08b-b18bb80aa00b" a = [1, 2, 3] a = a + [4,5] print(a) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 817, "status": "ok", "timestamp": 1592227927095, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="zr6wDOE_LVXg" outputId="ecf2840d-3bd9-4910-b193-72c44eaedce6" a = [1, 2, 3] a.extend([4, 5]) print(a) # + [markdown] colab_type="text" id="CcFUiWawUIyN" # ### Q4 반복문 # # 1 ~ 10까지의 숫자에 대해 모두 더한 값을 출력하는 프로그램을 for 문을 사용하여 작성하시오. # # # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 822, "status": "ok", "timestamp": 1592228055074, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="bMzR9P5gUctH" outputId="baa33666-e875-4959-def3-ece594dca622" x = 0 for i in range(1, 11) : x += i print(x) # + [markdown] colab_type="text" id="MruQJN5lUc2z" # ### Q5 반복문과 조건문 # # 1부터 30까지의 숫자 중 3의 배수만 출력하는 코드를 작성하시오. 
# + colab={"base_uri": "https://localhost:8080/", "height": 197} colab_type="code" executionInfo={"elapsed": 831, "status": "ok", "timestamp": 1592228154824, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="7Ayuf4F0UowG" outputId="b14f5758-96da-44ab-bf20-1046099910fe" for i in range(1, 31) : if i % 3 != 0 : continue print(i) # + [markdown] colab_type="text" id="Skimr74MUsB6" # ### Q6 피보나치 함수 # # 첫 번째 항의 값이 0이고 두 번째 항의 값이 1일 때, 이후에 이어지는 항은 이전의 두 항을 더한 값으로 이루어지는 수열을 피보나치 수열이라고 한다. # ``` # 0, 1, 1, 2, 3, 5, 8, 13, ... # ``` # 입력을 정수 n으로 받았을 때, n 이하까지의 피보나치 수열을 출력하는 함수를 작성해 보자. # + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" executionInfo={"elapsed": 4096, "status": "ok", "timestamp": 1592228479868, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="CkVZmMpyUyMu" outputId="ff943b15-74a6-4523-ce19-b15b92d1bdf0" array = [0,1] def fibonach(n): i=0 while array[i]+array[i+1] <= n: array.append(array[i]+array[i+1]) i+=1 n = int(input()) fibonach(n) print(array) # + [markdown] colab_type="text" id="hFm_LaDpU7lU" # ### Q7 반복문 # # A 학급에 총 10명의 학생이 있다. 이 학생들의 중간고사 점수는 다음과 같다. # ``` # >>> A = [70, 60, 55, 75, 95, 90, 80, 80, 85, 100] # ``` # for문을 이용하여 A 학급의 평균 점수를 구해보자. 
#

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 795, "status": "ok", "timestamp": 1592228591279, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="M3g6VV0vVJoR" outputId="84dafa82-fec5-4dbc-e3ec-13fb030ebae5"
# Sum the ten scores with a for loop, then print the class average.
A = [70, 60, 55, 75, 95, 90, 80, 80, 85, 100]
total = 0
for score in A:
    total += score
print('A 학급의 평균 점수', total/len(A))

# + [markdown] colab_type="text" id="QU6MBo0ZVJv8"
# ### Q8 파일의 입출력 활용
#
# 아래와 같이 이루어진 sample.txt 파일을 작성하라. (write 함수 사용)
# ```
# 70
# 60
# 55
# 75
# 95
# 90
# 80
# 100
# ```
#
# 이제 sample.txt 파일의 숫자값을 모두 읽어 총합과 평균값을 구한 후 평균값을
# result.txt라는 파일에 쓰는 프로그램을 작성해 보자.
#

# + colab={} colab_type="code" id="6k9pmfWiOFT0"
# Write the scores to sample.txt, one per line.
sample = [70, 60, 55, 75, 95, 90, 80, 100]
with open('sample.txt', 'w') as f:
    for value in sample:
        f.write(str(value) + '\n')

# Read sample.txt back, one score per line. Using `with` here (the
# original paired bare open()/close() calls) guarantees the file is
# closed even if reading raises.
with open("sample.txt") as f:
    lines = f.readlines()

# Total and average of the scores read from the file.
total = 0
for line in lines:
    total += int(line)
average = total / len(lines)

# Store the average in result.txt.
with open("result.txt", "w") as f:
    f.write(str(average))

# + [markdown] colab_type="text" id="uq7OXvV4Vtci"
# ### Q9 리스트 총합 구하기
#
# 다음은 A학급 학생의 점수를 나타내는 리스트이다. 다음 리스트에서 50점 이상 점수의 총합을 구하시오.
# ```
# A = [20, 55, 67, 82, 45, 33, 90, 87, 100, 25]
# ```

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 779, "status": "ok", "timestamp": 1592229083690, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="s9-LyLuYV-41" outputId="06d3a92c-287c-448b-e40b-759a55bf30db"
# Add up only the scores of 50 or more; sum() over a generator
# expression replaces the explicit accumulator loop.
A = [20, 55, 67, 82, 45, 33, 90, 87, 100, 25]
total = sum(score for score in A if score >= 50)
print(total)

# + [markdown] colab_type="text" id="KmIRZpXpV_EX"
# ### Q10 list comprehension
#
# 리스트 중에서 홀수에만 2를 곱하여 저장하는 다음 코드가 있다.
# ```
# numbers = [1, 2, 3, 4, 5]
# result = []
# for n in numbers:
#     if n % 2 == 1:
#         result.append(n*2)
# ```
# 위 코드를 list comprehension를 사용하여 표현해 보자.

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 1008, "status": "ok", "timestamp": 1592229280822, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://<KEY>", "userId": "08084686575025891086"}, "user_tz": -540} id="hfCjGRuZWkI1" outputId="ec63e246-f127-482a-85c3-c1c4fc2c2900"
# Keep only the odd numbers and double each one.
numbers = [1, 2, 3, 4, 5]
result = [value * 2 for value in numbers if value % 2 == 1]
print(result)
01Review/01PythonBasic_sol.ipynb