code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.2 # language: sage # name: sagemath # --- # This is a SageMath notebook that attempts to solve xDCP instances: given an elliptic curve $E/\mathbb{F}_q$, an integer $k$ and a polynomial $f\in\mathbb{F}_q[x_1,x_2]$, find $P=(x_1,y_1), Q=(x_2,y_2) \in E(\mathbb{F}_q)$ such that $f(x_1,x_2)=0$ and $k*P = Q$. # # generate a curve of given size def gen_setup(bits): q = next_prime(2 ** bits) F = GF(q) for b in F: try: E = EllipticCurve(F,[-3,b]) except ArithmeticError: pass if is_prime(E.order()) and E.order() != q: break n = E.order() return q, E, n # + #find l such that both l and k*l are small modulo q def find_l(k, q): M = Matrix(ZZ,2) M[0] = [1, k] M[1] = [0, q] return M.LLL()[0][0] def make_small(x, K): return ZZ(min(K(x), -K(x))) #construct an auxilliary polynomial as a part of solving xDCP def construct_poly(E, F, n, l, k, order_multiple=0, reduceby=None): R.<x1,x2> = PolynomialRing(F) K = GF(n) first = E.multiplication_by_m(make_small(l, K))[0](x=x1) second = E.multiplication_by_m(ZZ(make_small(l*k, K)) + order_multiple * n)[0](x=x1) if order_multiple > 0: index = order_multiple * n - make_small(l*k, K) else: index = make_small(l*k, K) if F(index) == 0: index += n # second = E.multiplication_by_m(index)[0](x=x1) # print(first,second) if reduceby: I = R.ideal(reduceby) second_num = I.reduce(second.numerator()) second_den = I.reduce(second.denominator()) second = second_num / second_den poly = (I.reduce(f(x1=first, x2=second).numerator()).univariate_polynomial())(x=x1) else: poly = f(x1=first, x2=second).numerator().univariate_polynomial() return poly def solve_xdcp(f, E, F, n, k, l, verbose=True): # catch multiplication_by_0 assert F(l*k) != 0, "Please choose k,l with nonzero product mod q" assert l*k % n != 0, "Please choose k,l with nonzero product mod n" poly1 = construct_poly(E, F, 
n, l, k) poly2 = construct_poly(E, F, n, l, k, order_multiple=1, reduceby=poly1) if k == 1: poly = poly1 else: poly = gcd(poly1, poly2) # if the x-coord is valid, reconstruct the original point roots = poly1.roots() for r in roots: x_coord = r[0] if not E.is_x_coord(x_coord): continue R = E.lift_x(x_coord) P = ZZ(l)*R Q = ZZ(l*k)*R assert f(P[0],Q[0]) == 0, "The found points do not satisfy xdcp" if verbose: print(f"Points P = {P}, Q = {Q} satisfy xDCP for f = {f}, k = {k}, l = {l}") return P, Q if verbose: print(f"No points P, Q satisfy xDCP for f = {f}, k = {k}") return None, None # + #test the attack q, E, n = gen_setup(5) F = GF(q) R.<x1,x2> = PolynomialRing(F) f = x1*x2 + 1 for k in [1..n-1]: l = find_l(k, q) try: solve_xdcp(f, E, F, n, k=k, l=l, verbose=True) except: pass # -
xdcp/xdcp_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Monte Carlo Simulation of Time Series # # **<NAME>, PhD** # # This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>. # # Original (Matlab) CompEcon file: **demqua10.m** # # Running this file requires the Python version of CompEcon. This can be installed with pip by running # # # !pip install compecon --upgrade # # <i>Last updated: 2021-Oct-01</i> # <hr> # # ## About # Simulate time series using Monte Carlo Method. # # A commodity price is governed by weekly price movements # \begin{equation*} # \log(p_{t+1}) = \log(p_t) + \tilde \epsilon_t # \end{equation*} # where the $\tilde \epsilon_t$ are i.i.d. normal with mean $\mu=0.005$ and standard deviation $\sigma=0.02$. # # To simulate three time series of T=40 weekly price changes, starting from a price of 2, execute the script # ## Initial tasks import numpy as np from compecon import demo from scipy.stats import norm import matplotlib.pyplot as plt # ## Simulation m, T = 3, 40 mu, sigma = 0.005, 0.02 e = norm.rvs(mu,sigma,size=[T,m]) logp = np.zeros([T+1,m]) logp[0] = np.log(2) for t in range(T): logp[t+1] = logp[t] + e[t] # ## Make figure fig, ax = plt.subplots() ax.set(xlabel='Week', ylabel='Price', xlim=[0,T]) ax.plot(np.exp(logp)); #demo.savefig([fig],name='demqua10')
_build/jupyter_execute/notebooks/qua/10 Monte Carlo Simulation of Time Series.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # [View in Colaboratory](https://colab.research.google.com/github/mizunashi92/dlhub/blob/master/Sentiment.ipynb) # + id="ddnua-UY4f4x" colab_type="code" colab={} import sys import re # !git clone https://github.com/openai/generating-reviews-discovering-sentiment.git import numpy as np import pandas as pd from google.colab import files # !pip install reactionrnn # + id="xACQ5YSQ4k7G" colab_type="code" colab={} ''' Google Translate based on codes by mouuff at https://github.com/mouuff/mtranslate ''' if (sys.version_info[0] < 3): import urllib2 import urllib import HTMLParser else: import html.parser import urllib.request import urllib.parse agent = {'User-Agent': "Mozilla/4.0 (\ compatible;\ MSIE 6.0;\ Windows NT 5.1;\ SV1;\ .NET CLR 1.1.4322;\ .NET CLR 2.0.50727;\ .NET CLR 3.0.04506.30\ )"} def unescape(text): if (sys.version_info[0] < 3): parser = HTMLParser.HTMLParser() else: parser = html.parser.HTMLParser() return (parser.unescape(text)) def translate(to_translate, to_language="auto", from_language="auto"): """Returns the translation using google translate you must shortcut the language you define (French = fr, English = en, Spanish = es, etc...) if not defined it will detect it or use english by default Example: print(translate("salut tu vas bien?", "en")) hello you alright? 
""" base_link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" if (sys.version_info[0] < 3): to_translate = urllib.quote_plus(to_translate) link = base_link % (to_language, from_language, to_translate) request = urllib2.Request(link, headers=agent) raw_data = urllib2.urlopen(request).read() else: to_translate = urllib.parse.quote(to_translate) link = base_link % (to_language, from_language, to_translate) request = urllib.request.Request(link, headers=agent) raw_data = urllib.request.urlopen(request).read() data = raw_data.decode("utf-8") expr = r'class="t0">(.*?)<' re_result = re.findall(expr, data) if (len(re_result) == 0): result = "" else: result = unescape(re_result[0]) return (result) # + id="LLyEl-nm4utR" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 89} outputId="53237d49-d696-43ef-dbe4-8ee237efa0fc" from google.colab import files uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) # + id="eUqngxj84x3G" colab_type="code" colab={} data = pd.read_csv("datasource.csv", sep='|') data = data.drop_duplicates(subset='tweet_id', keep="last") # + id="rA_9Orhbx8sY" colab_type="code" colab={} english = [] for i in range(len(data)): a = data['tweet_text'].iat[i].lower().replace('#indonesiatanpastres','').replace('\n',' ').replace(' ',' ') english.append(a) english_tweet = [] for i in range(len(english)): b = translate(english[i]) english_tweet.append(b) if i%50 == 0: print('The machine has pre-processed {n} data. 
There are {s} data left'.format(n=i, s=len(english)-i)) # + [markdown] id="Z4_XlAxA4-1s" colab_type="text" # <h2>Working on the Sentiment Analysis Part</h2> # + id="nQ6N-qVvzGs0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="db5d0cbf-e46f-46a3-8d4f-6f86bc4fa34d" # %cd generating-reviews-discovering-sentiment # + id="d8qNn5i4zIVQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5ede0f31-a3f4-4eba-b799-36e6c5e4d25e" # !pip install tqdm # + id="vxshJVZkzKiM" colab_type="code" colab={} from encoder import Model model = Model() # + id="Pf_p_q4xzM65" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="baca9620-f4ab-4f99-b924-da0e47a3791b" ''' We will loop through the english tweets to get the reaction predictions and store them in text_features array ''' text_features = model.transform(english_tweet) # + id="mV02dIBnz2c8" colab_type="code" colab={} a = text_features[:, 2388] # + id="uHyBI6oHElte" colab_type="code" colab={} ''' We will re-combine the values with the initial comments in original language for easy readings. 
''' b = data.index.get_values() #collect the indexes of the original table data2 = pd.DataFrame(data=a[0:], index=b[0:], columns=['Value']) #Saving the results as a new pd dataframe based on the index of original file stored on b data3 = pd.DataFrame(data=np.array(english)[0:], index = b[0:], columns=['tweet_text']) #Fetch the original tweet texts as a new pandas dataframe # + [markdown] id="QMjQUHXt5MCX" colab_type="text" # <h2>Working on the Reaction Analysis</h2> # + id="DFRoXlHF5ch5" colab_type="code" colab={} # !wget https://modeldepot.io/assets/uploads/models/models/e76620d2-41b0-4876-a4ba-d51388fe3444_reactionrnn_weights.hdf5 from reactionrnn import reactionrnn reaction_weights='e76620d2-41b0-4876-a4ba-d51388fe3444_reactionrnn_weights.hdf5' react = reactionrnn(reaction_weights) # + id="orxhFncv5luX" colab_type="code" colab={} ''' We will loop through the english tweets to get the reaction predictions and store them in ro array ''' ro =[] for i in range(len(english_tweet)): ve = react.predict(english_tweet[i]) ro.append(ve) # + id="WwyFxuvnDX24" colab_type="code" colab={} data10 = pd.DataFrame(data=np.array(ro)[0:], index=b[0:], columns=['emo']) #Saving the results as a new pd dataframe based on the index of original file stored on b # + [markdown] id="wz1SKR856Zx3" colab_type="text" # <h2>Create the result table</h2> # + id="ylwqIHgRD9yF" colab_type="code" colab={} result_table = pd.concat([data3, data2, data10], axis=1) # + id="H875p1rUFMuH" colab_type="code" colab={} result_table.to_csv('result2.csv', sep ="|", encoding='utf-8') # + id="Eq9d5a7qIC4W" colab_type="code" colab={} files.download('result2.csv') # + id="1CpDzErsT-0Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1969} outputId="51a7ce21-34f5-4160-f6f3-f2283b378838" result_table # + id="Ur5A3qoCT_2W" colab_type="code" colab={}
Various DL Projects/Sentiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JasperLS/data_science_intros/blob/main/MNIST_and_Fashion_MNIST_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="fNQ1UckPS99F" # # MNIST & Fashion MNIST Classification Notebook # This notebook is losely based on the example provided at https://victorzhou.com/blog/keras-neural-network-tutorial/: # # MNIST is publicly available image data set which consists of 28 by 28 pixels image of hand-written single digits. So each image in MNIST is an image of a digit from 0 to 9. MNIST challenge is to develop a machine learning algorithm that can classify these images into 10 classes (0 to 9). # # Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. 
# + [markdown] id="Z78pX8IjiBjr" # ## Setup # + id="sl2oW1o-gPl6" outputId="4eb2694e-3027-4572-a95e-3901242f2771" colab={"base_uri": "https://localhost:8080/"} # !pip install mnist # + id="8fPVR2wjhLqr" import numpy as np import mnist from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,Conv2D,MaxPooling2D,Flatten from tensorflow.keras.utils import to_categorical # + id="ZFfCrGCJ5nV1" train_images = mnist.train_images() train_labels = mnist.train_labels() test_images = mnist.test_images() test_labels = mnist.test_labels() # + id="68kPGl-AJUTM" test_images.shape import pandas as pd print(pd.Series(test_labels).loc[test_labels == 4].sample(4)) from matplotlib import pyplot as plt plt.imshow(test_images[95]) # + id="FHi8wQoF0NND" # load alternative fashion mnist dataset import tensorflow as tf fashion_mnist = tf.keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() # + [markdown] id="8ES1Axj7iMsH" # ## Understand the Data # + id="IUUl3gDJi-Kx" outputId="3e4df123-aa09-4ade-d925-fe4ccb085180" colab={"base_uri": "https://localhost:8080/"} # how many examples do we have? print(len(train_images)) print(len(train_labels)) # + id="TnyDFPccIymb" outputId="fdea02ed-0e7d-4857-c4db-250e751be372" colab={"base_uri": "https://localhost:8080/"} train_labels.shape # + id="MaQsOC7BiUPy" outputId="d8a9f0dd-b24b-4c88-87f3-bb5900fe701c" colab={"base_uri": "https://localhost:8080/"} # what resolution do our images have? train_images.shape # + id="jKiD8ZBicg0C" outputId="d5491176-4bf8-4a7c-8595-6929d9a47663" colab={"base_uri": "https://localhost:8080/"} np.random.randn(4,4).reshape((-1,8)).flatten().mean() # + id="xrRpVt-FizYx" # let's look at some examples from matplotlib import pyplot as plt num = 132 print('Label:',train_labels[num]) plt.imshow(train_images[num]) plt.show() # + id="HCKQgmVQjoEg" # how many do we have of each example? 
np.unique(train_labels, return_counts=True) # + id="_77Auz8jnGbf" # how is our image data represented? np.unique(train_images) # + [markdown] id="KGvVYkgQiRYo" # ## Data Preparation # + id="WyFEyFpxjjGF" # normalize the images train_images = (train_images-train_images.mean())/train_images.std() test_images = (test_images-train_images.mean())/train_images.std() # + id="nqyb_D83u0SE" # flatten the images train_images = train_images.reshape((-1, 28*28)) test_images = test_images.reshape((-1, 28*28)) # + id="txBchLoguyLj" # do we need to transform labels (later)? print(to_categorical(train_labels)[:2]) (train_labels[:2]) # + [markdown] id="ffrUd_DGqqqX" # ## Get the Model # + id="XzU0XPuqrH_D" outputId="4d971698-6a01-46a2-8584-91979a4d71f1" colab={"base_uri": "https://localhost:8080/"} # build the model model = Sequential([ Dense(64, activation='relu', input_shape=(784,)), Dense(64, activation='relu'), Dense(10, activation='softmax'), ]) # compile the model model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], ) # let's take a look at our model model.summary() # why do we get the specific number of parameters? # + [markdown] id="1H8GjzSYuoQR" # ## Train the Model # + id="RXoZkZ2evqkY" outputId="6afd9b5f-4227-4e1a-de6a-04259af25256" colab={"base_uri": "https://localhost:8080/"} model.fit( train_images, to_categorical(train_labels), epochs=5, batch_size=64, validation_data=(test_images, to_categorical(test_labels)), ) # + [markdown] id="6jDf1xVlxLUL" # ## Evaluate Model # + id="03roFy8jgfUm" outputId="48352bbd-630f-4dd7-c190-9a45d85c1e3e" colab={"base_uri": "https://localhost:8080/"} model.evaluate( test_images, to_categorical(test_labels) ) # # predict on the first 5 test images. 
# predictions = model.predict(test_images[:5]) # # print our model's predictions # print(np.argmax(predictions, axis=1)) # # check our predictions against the ground truths # print(test_labels[:5]) # + id="Qtf1l7GsmhMi" # wouldn't it be nice to track test loss while training? # + [markdown] id="X0v3QSH_dJHE" # ## Save (and Load) Model # + id="_eM3O0gydN--" model.save('mnist_pretrained_model') # + id="NMnqYTkafBtv" # It can be used to reconstruct the model identically. reconstructed_model = keras.models.load_model("mnist_pretrained_model") # + id="VXw02EtNfw8v" outputId="05623fce-8bc5-41bd-e26c-a4ed47bd0bb7" colab={"base_uri": "https://localhost:8080/"} reconstructed_model.evaluate( test_images, to_categorical(test_labels) ) # predict on the first 5 test images. predictions = reconstructed_model.predict(test_images[:5]) # print our model's predictions print(np.argmax(predictions, axis=1)) # check our predictions against the ground truths print(test_labels[:5]) # + id="L6UFLtw9f1dx" reconstructed_model.summary() # + [markdown] id="ucWGgYkd1Lw3" # ## Try Alternative Model incl. CNNs # + [markdown] id="Gq-0jtJuBnNI" # ##### Get Model inlc. 
CNN # + id="blHfeE1z3VRV" model = Sequential([ Conv2D(64, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)), MaxPooling2D((2,2)), Flatten(), Dense(64, activation='relu'), Dense(64, activation='relu'), Dense(10, activation='softmax'), ]) # compile the model model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], ) # let's take a look at our model # + [markdown] id="-zrOxQU6BsOw" # ##### Train CNN Model # + id="CPoBdXtR8gFC" model.fit( train_images.reshape(train_images.shape[0],28,28,1), to_categorical(train_labels), epochs=5, batch_size=64, validation_data=(test_images.reshape(test_images.shape[0],28,28,1), to_categorical(test_labels)), ) # + [markdown] id="pX8qk0RABu8e" # ##### Evaluate CNN Model # + id="HbtT_mdWBlO_" model.evaluate( test_images, to_categorical(test_labels) ) # predict on the first 5 test images. predictions = model.predict(test_images[:5]) # print our model's predictions print(np.argmax(predictions, axis=1)) # [7, 2, 1, 0, 4] # check our predictions against the ground truths. print(test_labels[:5]) # [7, 2, 1, 0, 4]
MNIST_and_Fashion_MNIST_Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="W-HaxNdSPbl9" colab_type="code" outputId="96d76804-f608-47d9-f486-dd2dcc368e41" colab={"base_uri": "https://localhost:8080/", "height": 105} # This is the two-step process used to prepare the # data for use with the convolutional neural network. # First step is to convert Python Image Library (PIL) format # to PyTorch tensors. # Second step is used to normalize the data by specifying a # mean and standard deviation for each of the three channels. # This will convert the data from [0,1] to [-1,1] # Normalization of data should help speed up conversion and # reduce the chance of vanishing gradients with certain # activation functions. transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='/home/CIFAR-10 Classifier Using CNN in PyTorch/data/', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') # + id="PDWxHKm_SPa3" colab_type="code" outputId="f40dbd8b-458c-4cbf-cc70-2ceae796bfe9" colab={"base_uri": "https://localhost:8080/", "height": 182} import matplotlib.pyplot as plt import numpy as np def convert_to_imshow_format(image): # first convert back to [0,1] range from [-1,1] range image = image / 2 + 0.5 image = image.numpy() # convert from CHW to HWC # from 3x32x32 to 32x32x3 return image.transpose(1,2,0) dataiter = iter(trainloader) images, labels = dataiter.next() fig, axes = plt.subplots(1, len(images), 
figsize=(12,2.5)) for idx, image in enumerate(images): axes[idx].imshow(convert_to_imshow_format(image)) axes[idx].set_title(classes[labels[idx]]) axes[idx].set_xticks([]) axes[idx].set_yticks([]) # + id="nF07YF5NScK2" colab_type="code" colab={} import math import torch import torch.nn as nn import torch.nn.init as init import torch.utils.model_zoo as model_zoo __all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] model_urls = { 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', } class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([ self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x)) ], 1) class SqueezeNet(nn.Module): def __init__(self, version=1.0, num_classes=1000): super(SqueezeNet, self).__init__() if version not in [1.0, 1.1]: raise ValueError("Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected".format(version=version)) self.num_classes = num_classes if version == 1.0: self.features = nn.Sequential( nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 
256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256), ) else: self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256), ) # Final convolution is initialized differently form the rest final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) self.classifier = nn.Sequential( nn.Dropout(p=0.5), final_conv, nn.ReLU(inplace=True), nn.AvgPool2d(13, stride=1) ) for m in self.modules(): if isinstance(m, nn.Conv2d): if m is final_conv: init.normal(m.weight.data, mean=0.0, std=0.01) else: init.kaiming_uniform(m.weight.data) if m.bias is not None: m.bias.data.zero_() def forward(self, x): x = self.features(x) x = self.classifier(x) return x.view(x.size(0), self.num_classes) def squeezenet1_0(pretrained=False, **kwargs): r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = SqueezeNet(version=1.0, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0'])) return model def squeezenet1_1(pretrained=False, **kwargs): r"""SqueezeNet 1.1 model from the `official SqueezeNet repo <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_. SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters than SqueezeNet 1.0, without sacrificing accuracy. 
Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = SqueezeNet(version=1.1, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1'])) return model # + id="eS9RjtLOSq8K" colab_type="code" outputId="4e0f7186-7553-4f2a-a30d-10983343b248" colab={"base_uri": "https://localhost:8080/", "height": 1360} #********************************************************************* # model part import torchvision.models as models # use pretrained model: net = models.squeezenet1_0(pretrained = True) print(net) # + id="pyEFIDeYTKqN" colab_type="code" colab={} import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) # + id="D5UorjOdTODT" colab_type="code" outputId="9c695f5d-f38c-4de9-9c4b-edde9ef58f58" colab={"base_uri": "https://localhost:8080/", "height": 255} import os model_directory_path = '/home/CIFAR-10 Classifier Using CNN in PyTorch/model/' model_path = model_directory_path + 'cifar-10-cnn-model.pt' if not os.path.exists(model_directory_path): os.makedirs(model_directory_path) if os.path.isfile(model_path): # load trained model parameters from disk net.load_state_dict(torch.load(model_path)) print('Loaded model parameters from disk.') else: for epoch in range(2): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 2000 == 1999: # print every 2000 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print('Finished Training.') torch.save(net.state_dict(), model_path) print('Saved model parameters to disk.') # + id="JzGgH0RPTTUb" colab_type="code" 
outputId="61884d9b-24ba-4049-a693-453f06f63359" colab={"base_uri": "https://localhost:8080/", "height": 182} dataiter = iter(testloader) images, labels = dataiter.next() fig, axes = plt.subplots(1, len(images), figsize=(12,2.5)) for idx, image in enumerate(images): axes[idx].imshow(convert_to_imshow_format(image)) axes[idx].set_title(classes[labels[idx]]) axes[idx].set_xticks([]) axes[idx].set_yticks([]) # + id="ftbX2MWCTXrI" colab_type="code" colab={} outputs = net(images) # + id="dnjdFZOjTZiz" colab_type="code" outputId="922109b8-1025-46b6-a274-8ee044f91d0d" colab={"base_uri": "https://localhost:8080/", "height": 153} outputs # + id="nBQetmV3TeiX" colab_type="code" outputId="bf17016e-432f-41d3-820f-e8aa87380712" colab={"base_uri": "https://localhost:8080/", "height": 153} sm = nn.Softmax(dim=1) sm_outputs = sm(outputs) print(sm_outputs) # + id="8UT0M5i2Tgy-" colab_type="code" outputId="348cafb9-69bc-4771-99ce-5d2d9ac37916" colab={"base_uri": "https://localhost:8080/", "height": 85} probs, index = torch.max(sm_outputs, dim=1) for p, i in zip(probs, index): print('{0} - {1:.4f}'.format(classes[i], p)) # + id="ItpRbbn0Tjd9" colab_type="code" outputId="dae10a92-4a94-4dc4-c925-ffc7c473025a" colab={"base_uri": "https://localhost:8080/", "height": 34} total_correct = 0 total_images = 0 confusion_matrix = np.zeros([10,10], int) with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs.data, 1) total_images += labels.size(0) total_correct += (predicted == labels).sum().item() for i, l in enumerate(labels): confusion_matrix[l.item(), predicted[i].item()] += 1 model_accuracy = total_correct / total_images * 100 print('Model accuracy on {0} test images: {1:.2f}%'.format(total_images, model_accuracy)) # + id="7IoypgrKToFK" colab_type="code" outputId="d44fceb8-2cc2-41dc-b947-532b51b3f8c4" colab={"base_uri": "https://localhost:8080/", "height": 204} print('{0:10s} - {1}'.format('Category','Accuracy')) for i, r in 
enumerate(confusion_matrix): print('{0:10s} - {1:.1f}'.format(classes[i], r[i]/np.sum(r)*100)) # + id="Kqb-2bHdTqJ5" colab_type="code" outputId="3bc168b2-0138-4ef1-837a-5b5bdf23d555" colab={"base_uri": "https://localhost:8080/", "height": 388} fig, ax = plt.subplots(1,1,figsize=(8,6)) ax.matshow(confusion_matrix, aspect='auto', vmin=0, vmax=1000, cmap=plt.get_cmap('Blues')) plt.ylabel('Actual Category') plt.yticks(range(10), classes) plt.xlabel('Predicted Category') plt.xticks(range(10), classes) plt.show() # + id="rJMTtNtXTuFO" colab_type="code" outputId="90173efa-3d6d-45e1-df02-94670ce1aa48" colab={"base_uri": "https://localhost:8080/", "height": 374} print('actual/pred'.ljust(16), end='') for i,c in enumerate(classes): print(c.ljust(10), end='') print() for i,r in enumerate(confusion_matrix): print(classes[i].ljust(16), end='') for idx, p in enumerate(r): print(str(p).ljust(10), end='') print() r = r/np.sum(r) print(''.ljust(16), end='') for idx, p in enumerate(r): print(str(p).ljust(10), end='') print()
ablation/checkpoint1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.10 (''.venv'': poetry)' # language: python # name: python3 # --- import requests from config import Notion # + database_id = "d8c3f00b92004a2daf1be3fa5f3caf1b" url = f'https://api.notion.com/v1/databases/{database_id}/query' payload = {"page_size": 100} header = {"Authorization": Notion.API_KEY, "Notion-Version":"2021-05-13"} body = {"filter" : {"property": "Day", "checkbox": {"equals" : True}}} # - response = requests.post(url, headers=header, json=body) response_json = response.json() # + # response_json['results'][0]['properties']['Name']['title'][0]['plain_text'] # response_json['results'][0]['id'] response_json['results'][0] # for task in response_json['results']: # print(f"{task['properties']['Name']['title'][0]['plain_text']} | ID: {task['id']}") # + page_id = "d67069d1-d5a7-4030-b5d5-98c7fcdb5caa" url = f'https://api.notion.com/v1/pages/{page_id}' header = {"Authorization": Notion.API_KEY, "Notion-Version":"2022-02-22"} body = { "properties" : { "Day" : { "checkbox" : { "equals" : False } } } } payload = {"properties": {"Day" : True}} # - response = requests.request("PATCH", url, headers=header, json=payload) # response_json = response.json() response.json() response_json
sandbox/notion/notion_api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:.conda-thesis] *
#     language: python
#     name: conda-env-.conda-thesis-py
# ---

# # 3 Task Based Language Model
#
# 1. Initialisation
# 2. Training
# 3. Fine-tuning
# 4. Evaluation
#
# Dataset of interest:
# 1. Long non-coding RNA (lncRNA) vs. messenger RNA (mRNA)
# -

# ## 3.1 Initialisation

# ### 3.1.1 Imports

# Set it to a particular device
import torch
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# +
import pandas as pd
from pathlib import Path
from functools import partial

from utils import tok_fixed, tok_variable, get_model_LM
import sys; sys.path.append("../tools");
from config import *
from utils import *
# -

# ### 3.1.2 mRNA/lncRNA Data initialisation

# +
data_df = pd.read_csv(HUMAN/'lncRNA.csv', usecols=['Sequence','Name'])

# data for LM fine-tuning
df_ulm = (data_df[data_df['Name'].str.contains('TRAIN.fa')].pipe(partition_data))
df_tr_,df_va_ = df_ulm[df_ulm.set == 'train'], df_ulm[df_ulm.set == 'valid']

# dfs for classification
df_clas = (data_df[data_df['Name'].str.contains('train16K')].pipe(partition_data))
df_clas['Target'] = df_clas['Name'].map(lambda x : x.split('.')[0][:-1])
df_tr,df_va = df_clas[df_clas.set == 'train'], df_clas[df_clas.set == 'valid']
df_te = data_df[data_df['Name'].str.contains('TEST500')]

# +
import matplotlib.pyplot as plt

fig, axes = plt.subplots(nrows=2, figsize=(16,8));
# FIX: `suptitle` was commented out but is still used below to build the
# savefig filename, which raised a NameError. Keep the name defined even
# though the figure-level title itself stays disabled.
suptitle = 'Domain Training Data: Distribution by Sample Length'
# _=fig.suptitle(suptitle)
plt.subplots_adjust(hspace=0.4)

_=df_tr_['Sequence'].str.len().sort_values().head(-50).hist(bins=100, log=True, ax=axes[0], alpha=0.5, label='training')
_=df_va_['Sequence'].str.len().sort_values().head(-50).hist(bins=100, log=True, ax=axes[0], alpha=0.5, label='validation')
_=axes[0].set_title('Unsupervised Fine-Tuning'); _=axes[0].set_xlabel('Sample length (base-pairs)'); _=axes[0].legend();
axes[0].grid(False)
# axes[0].set_xscale('log')

_=df_tr['Sequence'].str.len().sort_values().hist(bins=100, ax=axes[1], alpha=0.5, label='training')
_=df_va['Sequence'].str.len().sort_values().hist(bins=100, ax=axes[1], alpha=0.5, label='validation')
_=axes[1].set_title('Classification Fine-Tuning'); _=axes[1].set_xlabel('Sample length (base-pairs)'); _=axes[1].legend()
axes[1].grid(False)

fig.savefig(FIGURES/'ulmfit'/suptitle.lower().replace(' ','_'), dpi=fig.dpi, bbox_inches='tight', pad_inches=0.5)
# -

# ## 3.2 LM Fine-Tuning

# +
# %%time
def make_experiments(df_tr, df_va):
    """Construct experiment based on tokenisation parameters explored.

    Returns a list of dicts, one per tokenisation scheme, each holding a
    `title`, the tokenised data bunch (`xdata`) and its `vocab`.
    """
    experiments = []
    # fixed length
    for i,ngram_stride in enumerate(NGRAM_STRIDE):
        experiment = {}
        experiment['title'] = 'fixed_{}_{}_rows_{}'.format(*ngram_stride,NROWS_TRAIN)
        experiment['xdata'], experiment['vocab'] = tok_fixed(df_tr, df_va, *ngram_stride, bs=BS[i])
        experiments.append(experiment)
    # variable length
    for i,max_vocab in enumerate(MAX_VOCAB):
        experiment = {}
        experiment['title'] = 'variable_{}_rows_{}'.format(max_vocab,NROWS_TRAIN)
        experiment['xdata'], experiment['vocab'] = tok_variable(df_tr, df_va, max_vocab, bs=BS[i])
        experiments.append(experiment)
    return experiments

experiments = make_experiments(df_tr_, df_va_)

# +
TUNE_CONFIG = dict(emb_sz=400, n_hid=1150, n_layers=3, pad_token=0, qrnn=False,
                   output_p=0.25, hidden_p=0.1, input_p=0.2, embed_p=0.02,
                   weight_p=0.15, tie_weights=True, out_bias=True)
TUNE_DROP_MULT = 0.25

def tune_model(experiment, epochs=1):
    """Fine-tune the language model for one experiment and save it.

    Resumes from a previously saved model of the same title when one
    exists, logs per-epoch metrics to a CSV, then saves the fine-tuned
    model and its encoder. Frees CUDA memory before returning.
    """
    config = TUNE_CONFIG.copy()
    drop_mult = TUNE_DROP_MULT
    data = experiment['xdata']
    learn = get_model_LM(data, drop_mult, config)
    learn = learn.to_fp16(dynamic=True);  # convert model weights to 16-bit float
    model = 'models/' + experiment['title'] + '.pth'
    if os.path.exists(HUMAN/model):
        print('model found: loading model: {}'.format(experiment['title']))
        learn.load(experiment['title'])
        learn.data = data
    # add callbacks
    from fastai.callbacks.csv_logger import CSVLogger
    learn.callback_fns.append(partial(CSVLogger, filename='history_tune_' + experiment['title'], append=True))
    learn.fit(epochs=epochs,wd=1e-4)
    learn.save('tune_'+experiment['title'])
    learn.save_encoder('tune_'+experiment['title']+'_enc')
    # free up cuda
    del learn; del data; torch.cuda.empty_cache()

for experiment in experiments[-1:]:
    print(experiment['title'])
    tune_model(experiment, epochs=4)
# -

# ## 3.3 Classification

# +
# %%time
def make_experiments(df_tr, df_va):
    """Construct experiment based on tokenisation parameters explored.

    Classification variant: tokenises with `clas=True` and batch size 400.
    """
    experiments = []
    # fixed length
    for i,ngram_stride in enumerate(NGRAM_STRIDE):
        experiment = {}
        experiment['title'] = 'fixed_{}_{}_rows_{}'.format(*ngram_stride,NROWS_TRAIN)
        experiment['xdata'], experiment['vocab'] = tok_fixed(df_tr, df_va, *ngram_stride, bs=400, clas=True)
        experiments.append(experiment)
    # variable length
    for i,max_vocab in enumerate(MAX_VOCAB):
        experiment = {}
        experiment['title'] = 'variable_{}_rows_{}'.format(max_vocab,NROWS_TRAIN)
        experiment['xdata'], experiment['vocab'] = tok_variable(df_tr, df_va, max_vocab, bs=400, clas=True)
        experiments.append(experiment)
    return experiments

experiments = make_experiments(df_tr, df_va)

# +
CLAS_CONFIG = dict(emb_sz=400, n_hid=1150, n_layers=3, pad_token=0, qrnn=False,
                   output_p=0.4, hidden_p=0.2, input_p=0.6, embed_p=0.1, weight_p=0.5)
CLAS_DROP_MULT = 0.5

def tune_classifier(experiment, epochs=1):
    """Train a classifier head on top of a fine-tuned encoder.

    Returns the trained `Learner` so the evaluation cells below can use it
    (previously nothing was returned and `learn` was never bound at module
    level, so `get_scores(learn)` raised a NameError).
    """
    # FIX: the local copies were created but the module-level globals were
    # passed instead; use the locals so the .copy() actually protects the
    # shared config dict.
    config = CLAS_CONFIG.copy()
    drop_mult = CLAS_DROP_MULT
    data = experiment['xdata']
    learn = get_model_clas(data, drop_mult, config, max_len=4000*70)
    learn.load_encoder(experiment['title']+'_enc')
    learn = learn.to_fp16(dynamic=True);
    # add callbacks
    from fastai.callbacks.csv_logger import CSVLogger
    # FIX: keep the CSV name consistent with 'history_tune_' above
    # ('history_clas' was missing the separating underscore).
    learn.callback_fns.append(partial(CSVLogger, filename='history_clas_' + experiment['title'], append=True))
    learn.freeze()
    learn.fit_one_cycle(epochs, 5e-2, moms=(0.8, 0.7))
    learn.save('clas_'+experiment['title'])
    learn.save_encoder('clas_'+experiment['title']+'_enc')
    return learn

learn = tune_classifier(experiments[1], epochs=4)
# -

CLAS_CONFIG = dict(emb_sz=400, n_hid=1150, n_layers=3, pad_token=0, qrnn=False,
                   output_p=0.4, hidden_p=0.2, input_p=0.6, embed_p=0.1, weight_p=0.5)
CLAS_DROP_MULT = 0.5
learn = tune_classifier(experiments[1], epochs=4)

# ## 3.4 Evaluation

# We now evaluate every model trained for classification performance on the `TEST500` dataset.
# All models have been trained for 10 epochs unsupervised, then fine tuned for an additional
# 8 epochs on long read ncRNA and mRNA data. We plot confusion matrices for each model, as
# well as a comparative accuracy plot.

get_scores(learn)

# ## 3.5 Explainability

# We would like to visualise the model's attention at the intersection between coding and
# non-coding regions.
# - We would like to identify a subset of the model's embedding that captures the majority
#   of this variance
# - We could call this the `coding neuron`
notebooks/2_seq_modelling/3_task_lm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/krishnamanchikalapudi/examples.py/blob/develop/MiniProjects/Calculator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="ZdfrN9Ef1y3c"
# # Problem
# Ask user to input the number for simple arithmetic operations

# + colab={"base_uri": "https://localhost:8080/"} id="oHGmsk9115nw" outputId="bc6956d1-c9cf-48f7-a0fe-c5434f718eea"
class SimpleCalculator:
    """Interactive four-function calculator over two numbers."""

    def __init__(self):
        # Blank spacer line, preserving the original notebook's output.
        print('')

    def add(self, x, y):
        """Return the sum of x and y."""
        return x + y

    def subtract(self, x, y):
        """Return x minus y."""
        return x - y

    def multiply(self, x, y):
        """Return the product of x and y."""
        return x * y

    def divide(self, x, y):
        """Return x divided by y (raises ZeroDivisionError when y == 0)."""
        return x / y

    def calculate(self, num1, num2, userchoice):
        """Dispatch on the menu choice ('1'-'4') and print the result."""
        if '1' == userchoice:
            answer = self.add(num1, num2)
            print('\nformula:: num1 + num2 = answer ')
            print('{} + {} = {}'.format(num1, num2, answer))
        elif '2' == userchoice:
            answer = self.subtract(num1, num2)
            print('\nformula:: num1 - num2 = answer ')
            print('{} - {} = {}'.format(num1, num2, answer))
        elif '3' == userchoice:
            answer = self.multiply(num1, num2)
            print('\nformula:: num1 * num2 = answer ')
            print('{} * {} = {}'.format(num1, num2, answer))
        elif '4' == userchoice:
            # FIX: guard the only operation that can raise on valid float
            # input instead of crashing the interactive loop.
            if num2 == 0:
                print('Cannot divide by zero!')
            else:
                answer = self.divide(num1, num2)
                print('\nformula:: num1 / num2 = answer ')
                print('{} / {} = {}'.format(num1, num2, answer))
        else:
            print('Invalid input!')


def main():
    """Prompt for an operation and two numbers until the user enters 0."""
    sc = SimpleCalculator()
    while True:
        # FIX: the menu string had only four '{}' placeholders for five
        # options, so '0. EXIT' was silently dropped from the display.
        print('\n\nSelect operation.\n\t {} \n\t {} \n\t {} \n\t {} \n\t {} '.
              format('1.Add', '2.Subtract', '3.Multiply', '4.Divide', '0. EXIT'))
        oper = input("Enter choice(0, 1, 2, 3, or 4): ")  # Take input from the user
        if oper != "0":
            num1 = float(input("Enter first number: "))
            num2 = float(input("Enter second number: "))
            sc.calculate(num1, num2, oper)
        else:
            print('Exited! Happy using Calculator :-)')
            break


# Guard so the module can be imported without starting the prompt loop;
# running the cell/script still behaves exactly as before.
if __name__ == "__main__":
    main()
MiniProjects/Calculator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/archivesunleashed/notebooks/blob/master/datathon-nyc/parquet_pandas_stonewall.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="EN0gajhr7pKd" colab_type="text" # # Working with Archives Unleashed Parquet Derivatives # # In this notebook, we'll setup an environment, then download a dataset of web archive collection derivatives that were produced with the [Archives Unleashed Toolkit](https://github.com/archivesunleashed/aut/). These derivatives are in the [Apache Parquet](https://parquet.apache.org/) format, which is a [columnar storage](http://en.wikipedia.org/wiki/Column-oriented_DBMS) format. These derivatives are generally small enough to work with on your local machine, and can be easily converted to Pandas DataFrames as demonstrated below. # # This notebook is useful for exploring the following derivatives. 
# # **[Binary Analysis](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#binary-analysis)** # - [Audio](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-audio-information) # - [Images](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-image-information) # - [PDFs](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-pdf-information) # - [Presentation program files](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-presentation-program-files-information) # - [Spreadsheets](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-spreadsheet-information) # - [Text files](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-text-files-information) # - [Word processor files](https://github.com/archivesunleashed/aut-docs/blob/master/current/binary-analysis.md#extract-word-processor-files-information) # # **Web Pages** # # `.webpages().select($"crawl_date", $"url", $"mime_type_web_server", $"mime_type_tika", RemoveHTMLDF(RemoveHTTPHeaderDF(($"content"))).alias("content"))` # # Produces a DataFrame with the following columns: # - `crawl_date` # - `url` # - `mime_type_web_server` # - `mime_type_tika` # - `content` # # As the `webpages` derivative is especially rich - it contains the full text of all webpages - we have a separate notebook for [text analysis](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis.ipynb) here. 
# # **Web Graph** # # `.webgraph()` # # Produces a DataFrame with the following columns: # - `crawl_date` # - `src` # - `dest` # - `anchor` # # **Image Links** # # `.imageLinks()` # # Produces a DataFrame with the following columns: # - `src` # - `image_url` # # **Domains** # # `.webpages().groupBy(ExtractDomainDF($"url").alias("url")).count().sort($"count".desc)` # # Produces a DataFrame with the following columns: # - domain # - count # # We recommend running through the notebook with the provided sample dataset. You may then want to substitute it with your own dataset. # + [markdown] id="OctPUqrG-K2W" colab_type="text" # # Dataset # # Web archive derivatives of the [Stonewall 50 Commemoration collection](https://archive-it.org/collections/12143) from [Columbia University Libraries](https://archive-it.org/home/Columbia). The derivatives were created with the [Archives Unleashed Toolkit](https://github.com/archivesunleashed/aut/) and [Archives Unleashed Cloud](https://cloud.archivesunleashed.org/). # # # [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3631347.svg)](https://doi.org/10.5281/zenodo.3631347) # # # Curious about the size the derivative Parquet output compared to the size of the web archive collection? # # The total size of all 11 Parquet deriatives is 2.2G, with `webpages` being the largest (1.5G) since it has a column with full text (`content`). # # ``` # 16K parquet/presentation-program-files # 1.5G parquet/webpages # 16K parquet/spreadsheet # 784K parquet/pdf # 24K parquet/word-processor # 2.4M parquet/text-files # 105M parquet/image # 180M parquet/imagelinks # 1.7M parquet/audio # 433M parquet/webgraph # 308K parquet/domains # 2.2G parquet/ # ``` # # The total size of the web archive collection is 128G. # # The following command downloads all of the parquets file from the Zenodo data repository. To run a 'cell,' you can click the play button next to the cell or you can press your shift key and enter key at the same time. 
# # Whenever you see code snippets like this, you should do the same thing to run it. # # + id="m3FRzjAYyPid" colab_type="code" colab={} # %%capture # !curl -L "https://zenodo.org/record/3631347/files/cul-12143-parquet.tar.gz?download=1" > cul-12143-parquet.tar.gz # !tar -xzf cul-12143-parquet.tar.gz # + [markdown] id="h3Nr_JCbsWx8" colab_type="text" # The following command provides a list of all the downloaded parquet files. You should see a list of all the different derivatives here - note that they line up with the list provided at the beginning of this notebook. # # + id="2RK74TCSuIXE" colab_type="code" outputId="6d3b7d5e-fc15-473e-e7c7-a782867094a7" colab={"base_uri": "https://localhost:8080/", "height": 219} # !ls -1 parquet # + [markdown] colab_type="text" id="JS0T6i4xPTVx" # # Environment # # Next, we'll setup our environment so we can work with the Parquet output with [Pandas](https://pandas.pydata.org). # + colab_type="code" id="n56-DeEFPTCq" colab={} import numpy as np import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import matplotlib.pyplot as plt # + [markdown] id="oFAFb2X3_VJC" colab_type="text" # # Loading our Archives Unleashed Datasets as DataFrames # # Next, we'll load up our datasets to work with and show a preview of each. We'll load the network, domains, web graph, and images. # # The remainder of the binary datasets (audio, video, spreadsheets, etc.) will all follow the same pattern as the images dataset, except that they do not have the height and width columns. A useful exercise when trying to learn how to use this would be to swap out images for audio, for example, and see how you can explore these other file types. # # We've provided a [separate notebook](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis.ipynb) to work with the pages dataset because it tends to be resource intensive. 
# # + [markdown] id="SnEntLU0U2ox" colab_type="text" # ## Images # # The following commands create a variable called `images` that contain a DataFrame with all of the image information from the web archive. # # **Reminder:** If you want to look at a differnt derivative, you can, for instance, replace out `images` for `audio`. # + id="GhE_Vhv8Awkx" colab_type="code" outputId="62243d04-9f46-4cb3-d281-5dac666eae76" colab={"base_uri": "https://localhost:8080/", "height": 424} images_parquet = pq.read_table('parquet/image') images = images_parquet.to_pandas() images # + [markdown] id="y5CmfUPNVAQH" colab_type="text" # ## Web Graph # # The next data that we will explore will be the "web graph." This is a DataFrame containing all the hyperlinks within a collection - from `src` (or the page that _contains_ the link) to `dest` (or the page that the link is linking _to_). It also includes the date when this link was crawled, as well as the `anchor` text (what the user clicks on to visit). # + id="-YKUCY_izzT8" colab_type="code" outputId="573f83a1-6637-45ef-b7bb-08516496117d" colab={"base_uri": "https://localhost:8080/", "height": 424} webgraph_parquet = pq.read_table('parquet/webgraph') webgraph = webgraph_parquet.to_pandas() webgraph # + [markdown] id="ndZjsWKzUykd" colab_type="text" # ## Domains # # This derivative contains basic information about what's been collected in the crawl. Specifically we can analyze how often pages from each domain appear. # + id="xUebRBMHuoRs" colab_type="code" outputId="6b5aaaa6-3ec5-4b13-c297-49ca78ef46f8" colab={"base_uri": "https://localhost:8080/", "height": 424} domains_parquet = pq.read_table('parquet/domains') domains = domains_parquet.to_pandas() domains # + [markdown] id="3HPwOCNAvqMe" colab_type="text" # # Data Analysis # # Now that we have all of our datasets loaded up, we can begin to work with them! 
# + [markdown] id="J6Pkg0prv3BE" colab_type="text" # ## Counting total files, and unique files # # + [markdown] id="DFX4Gl3wv7bi" colab_type="text" # # #### Count number of rows (how many images are in the web archive collection). # # + id="bn-1v127aKIG" colab_type="code" outputId="e42580f3-257a-4b06-8dd3-d31a81d874ca" colab={"base_uri": "https://localhost:8080/", "height": 182} images.count() # + [markdown] id="38veKiPhwKo4" colab_type="text" # #### How many unique images are in the collection? # # We can see if an image is unique or not by computing an [MD5 hash](https://en.wikipedia.org/wiki/MD5#MD5_hashes) of it. The exact same image might be called `example.jpg` and `foo.jpg` - by computing the hash, we can see that even with different file names, they are actually the same image! # # # # + id="WesM3kQowM5B" colab_type="code" outputId="6bde95fa-c046-4361-88aa-c99c9c2dba24" colab={"base_uri": "https://localhost:8080/", "height": 35} len(images.md5.unique()) # + [markdown] id="ZIXkI0-1wWQf" colab_type="text" # #### What are the top 10 most occurring images in the collection? # # Here we discover which image (or images) occur most frequently. # + id="8Ts03OFyjPIM" colab_type="code" outputId="b2cf0f3b-dee2-44b7-f583-b38f8af83cfa" colab={"base_uri": "https://localhost:8080/", "height": 219} images['md5'].value_counts().head(10) # + [markdown] id="FG7pGZUEwlaI" colab_type="text" # # #### What's the information around all of the occurances of `b798f4ce7359fd815df4bdf76503b295`? # # What, you mean you don't know what `b798f4ce7359fd815df4bdf76503b295` means? Let's find those images in the DataFrame table - we can here see the real file name (`erosion.jpg`) and more importantly, its URL within the web archive. 
#
# + id="msmmm65lkSIK" colab_type="code" outputId="c35e3789-5bed-4c8b-c9a7-d3547bed31f7" colab={"base_uri": "https://localhost:8080/", "height": 424}
images.loc[images['md5'] == 'b798f4ce7359fd815df4bdf76503b295']

# + [markdown] id="kE-8epARIG0-" colab_type="text"
# ### What does `b798f4ce7359fd815df4bdf76503b295` look like?
#
# We can extract the binary from the web archive using our [binary extraction functions](https://github.com/archivesunleashed/aut-docs-new/blob/master/current/image-analysis.md#scala-df).
#
# ```scala
# import io.archivesunleashed._
# import io.archivesunleashed.df._
#
# val df = RecordLoader
#   .loadArchives("example.arc.gz", sc)
#   .extractImageDetailsDF();
#
# df.select($"bytes", $"extension")
#   .saveToDisk("bytes", "/path/to/export/directory/your-preferred-filename-prefix", $"extension")
# ```
#
# **But**, since we don't have access to the WARC files here, just the Parquet derivatives, we can make do by trying to display a live web version of the image or a replay URL. In this case, BANQ's replay service is available at [https://waext.banq.qc.ca](https://waext.banq.qc.ca).

# + id="wB3VqcmgJQM0" colab_type="code" outputId="12a533de-0663-4548-e219-ffcd91a35595" colab={"base_uri": "https://localhost:8080/", "height": 54}
# FIX: the -1 sentinel for "no truncation" is deprecated (and rejected by
# pandas >= 2.0); None is the documented way to disable column truncation.
pd.options.display.max_colwidth = None
one_image = images.loc[images['md5'] == 'b798f4ce7359fd815df4bdf76503b295'].head(1)
one_image['url']

# + [markdown] id="5YV2u_8aLEJS" colab_type="text"
# ![1 pixel Facebook tracker](https://www.facebook.com/tr?id=735575706641503&ev=PageView&noscript=1)

# + [markdown] id="6YfsUGSRt1Ns" colab_type="text"
# Oh. Surprise, surprise. The most popular image is a 1-pixel image that [Facebook uses to track users for conversion](https://developers.facebook.com/docs/facebook-pixel/implementation/conversion-tracking).

# + [markdown] id="GbLLZW2awzCv" colab_type="text"
# #### What are the top 10 most occurring filenames in the collection?
#
# Note that this is of course different than the MD5 results up above. Here we are
# focusing _just_ on filename. So `carte-p.jpg`, for example, might actually be
# referring to different images that happen to share the same name.

# + id="pQaw54ACkwdZ" colab_type="code" outputId="e0ac6eae-1777-4246-a081-fa3db4b8778f" colab={"base_uri": "https://localhost:8080/", "height": 219}
# The ten most frequent file names, regardless of the bytes behind them.
top_filenames = images['filename'].value_counts().head(10)
top_filenames

# + [markdown] id="Z7F3re20BQRI" colab_type="text"
# #### Let's plot it!

# + id="sRvlstfsBWEZ" colab_type="code" outputId="6dac5f5f-91ce-412d-ad97-e6719df231ac" colab={"base_uri": "https://localhost:8080/", "height": 749}
# Bar chart of the filename counts computed above.
filename_axes = top_filenames.plot.bar(figsize=(25,10))
filename_axes.set_title("Top Filenames", fontsize=22)
filename_axes.set_xlabel("Filename", fontsize=20)
filename_axes.set_ylabel("Count", fontsize=20)

# + [markdown] id="BneaN9cgGoly" colab_type="text"
# #### How about a MIME type distribution?
#
# What _kind_ of image files are present? We can discover this by checking their "media type", or [MIME type](https://en.wikipedia.org/wiki/Media_type).

# + id="RDd-J8D-GwDk" colab_type="code" outputId="6d12d61f-543c-4629-a1ab-2f8486eb0545" colab={"base_uri": "https://localhost:8080/", "height": 723}
# Top five Tika-detected media types, plotted the same way.
image_mime_counts = images['mime_type_tika'].value_counts().head(5)
mime_axes = image_mime_counts.plot.bar(figsize=(20,10))
mime_axes.set_title("Images MIME Type Distribution", fontsize=22)
mime_axes.set_xlabel("MIME Type", fontsize=20)
mime_axes.set_ylabel("Count", fontsize=20)

# + [markdown] id="QGHnjGRQPzqV" colab_type="text"
# #### How about the distribution of the top 10 domains?
#
# Here we can see which domains are the most frequent within the web archive.
# + id="C_X_YSD4PyQi" colab_type="code" outputId="b90dabb5-e565-4a53-a084-b309cc1b586b" colab={"base_uri": "https://localhost:8080/", "height": 961} top_domains = domains.sort_values('count', ascending=False).head(10) top_domains_chart = top_domains.plot.bar(x='url', y='count', figsize=(25,13)) top_domains_chart.set_title("Domains Distribution", fontsize=22) top_domains_chart.set_xlabel("Domain", fontsize=20) top_domains_chart.set_ylabel("Count", fontsize=20) # + [markdown] id="VDXDhqCcyyFj" colab_type="text" # ### Top Level Domain Analysis # Now let's create a new column, `tld`, which is based off an existing column, 'Domain'. This example should give you an idea of how you can expand these datasets to do further research and analysis. # # A [top-level domain](https://en.wikipedia.org/wiki/Top-level_domain) refers to the highest domain in an address - i.e. `.ca`, `.com`, `.org`, or yes, even `.pizza`. # # Things get a bit complicated, however, in some national TLDs. While `qc.ca` (the domain for Quebec) isn't really a top-level domain, it has many of the features of one as people can directly register under it. Below, we'll use the command `suffix` to include this. # # > You can learn more about suffixes at https://publicsuffix.org. # # We'll take the `Domain` column and extract the `tld` from it with [`tldextract`](https://github.com/john-kurkowski/tldextract). # # First we'll add the [`tldextract`](https://github.com/john-kurkowski/tldextract) library to the notebook. Then, we'll create the new column. # + id="clPJuQAe5mcg" colab_type="code" colab={} # %%capture # !pip install tldextract # + id="mv7a-MLIx-3f" colab_type="code" outputId="3da05bf2-f338-40c5-ca7d-edee0955c3e8" colab={"base_uri": "https://localhost:8080/", "height": 424} import tldextract domains['tld'] = domains.apply(lambda row: tldextract.extract(row.url).domain, axis=1) domains # + [markdown] id="jdXFS2yu8XYG" colab_type="text" # #### Next, let's count the distict TLDs. 
# # + id="1lViQIU48e-u" colab_type="code" outputId="2298858b-e745-4df5-ead2-ccc3e5e57ea8" colab={"base_uri": "https://localhost:8080/", "height": 237} tld_count = domains['tld'].value_counts() tld_count # + [markdown] id="xm_V_0PGzZut" colab_type="text" # #### Next, we'll plot the TLD count. # # + id="N8yNlOa-zmBD" colab_type="code" outputId="9114c48c-f129-4f0c-f000-7fba1f82068f" colab={"base_uri": "https://localhost:8080/", "height": 718} tld_chart = tld_count.head(20).plot.bar(legend=None, figsize=(25,10)) tld_chart.set_xlabel("TLD", fontsize=20) tld_chart.set_ylabel("Count", fontsize=20) tld_chart.set_title("Top Level Domain Distribution", fontsize=22) # + [markdown] id="DWq4hkhoAXNm" colab_type="text" # ## Examining the Web Graph # + [markdown] id="tdCH8zAPAa2M" colab_type="text" # Remember the hyperlink web graph? Let's look at the web graph columns again. # # # + id="-MFkAOcLAWy5" colab_type="code" outputId="127cebb8-0d12-467b-c1bd-803d5b4bb2b1" colab={"base_uri": "https://localhost:8080/", "height": 667} webgraph # + [markdown] id="BFaebkzWAp-R" colab_type="text" # ### What are the most frequent crawl dates? # # + id="kJNT3vqkAxqj" colab_type="code" outputId="aee09d46-787d-4e7a-d617-21c421de5d3e" colab={"base_uri": "https://localhost:8080/", "height": 763} crawl_dates = webgraph['crawl_date'].value_counts() crawl_dates crawl_dates_chart = crawl_dates.plot.line(figsize=(25,12)) crawl_dates_chart.set_xlabel("Crawl Date", fontsize=20) crawl_dates_chart.set_ylabel("Count", fontsize=20) crawl_dates_chart.set_title("Crawl Date Frequency", fontsize=22)
datathon-nyc/parquet_pandas_stonewall.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Create an empty Ubuntu VM on Azure # Install HAGrid # ``` # $ pip install hagrid # ``` # Run hagrid launch with these arguments: # ``` # $ hagrid launch to azure --image_name=domain_0.7.0 --jupyter --ansible_extras="install=false" # ``` # Whatever you enter into Repo and Branch will be ignored # ![ip_address](img/hagrid_bare_vm.png) # After it is finished you should see this message # ![ip_address](img/hagrid_bare_vm_output.png) # Now provide the `HOST_IP`, Username and Key to the person who is provisioning the system
notebooks/adastra/network-operators/01-network-operators-create-empty-vm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="2KROuTZVuhrp" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="6aEVQQ403kzs" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="P5VpOpSivqgv" # # Customizing what happens in `fit()` # + [markdown] colab_type="text" id="vApNeEfvLLc4" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/keras-team/keras-io/blob/master/tf/customizing_what_happens_in_fit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/keras-team/keras-io/blob/master/guides/customizing_what_happens_in_fit.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/keras-io/tf/customizing_what_happens_in_fit.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" 
/>Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="TihRiHIKeeIz" # ## Introduction # # When you're doing supervised learning, you can use `fit()` and everything works # smoothly. # # When you need to write your own training loop from scratch, you can use the # `GradientTape` and take control of every little detail. # # But what if you need a custom training algorithm, but you still want to benefit from # the convenient features of `fit()`, such as callbacks, built-in distribution support, # or step fusing? # # A core principle of Keras is **progressive disclosure of complexity**. You should # always be able to get into lower-level workflows in a gradual way. You shouldn't fall # off a cliff if the high-level functionality doesn't exactly match your use case. You # should be able to gain more control over the small details while retaing a # commensurate amount of high-level convenience. # # When you need to customize what `fit()` does, you should **override the training step # function of the `Model` class**. This is the function that is called by `fit()` for # every batch of data. You will then be able to call `fit()` as usual -- and it will be # running your own learning algorithm. # # Note that this pattern does not prevent you from building models with the Functional # API. You can do this whether you're building `Sequential` models, Functional API # models, or subclassed models. # # Let's see how that works. # + [markdown] colab_type="text" id="XFRryV6yxq2Z" # ## Setup # Requires TensorFlow 2.2 or later. # + colab_type="code" id="BxGJZEXaWrLM" import tensorflow as tf from tensorflow import keras # + [markdown] colab_type="text" id="1yZO4J3zyOfz" # ## A first simple example # # Let's start from a simple example: # # - We create a new class that subclasses `keras.Model`. # - We just override the method `train_step(self, data)`. # - We return a dictionary mapping metric names (including the loss) to their current # value. 
#
# The input argument `data` is what gets passed to fit as training data:
#
# - If you pass Numpy arrays, by calling `fit(x, y, ...)`, then `data` will be the tuple
# `(x, y)`
# - If you pass a `tf.data.Dataset`, by calling `fit(dataset, ...)`, then `data` will be
# what gets yielded by `dataset` at each batch.
#
# In the body of the `train_step` method, we implement a regular training update,
# similar to what you are already familiar with. Importantly, **we compute the loss via
# `self.compiled_loss`**, which wraps the loss(es) function(s) that were passed to
# `compile()`.
#
# Similarly, we call `self.compiled_metrics.update_state(y, y_pred)` to update the state
# of the metrics that were passed in `compile()`, and we query results from
# `self.metrics` at the end to retrieve their current value.

# + colab_type="code" id="sg0aNp6yuNUs"
class CustomModel(keras.Model):
    """A Model whose per-batch training update is written by hand."""

    def train_step(self, data):
        # `data` is whatever `fit()` yields for this batch; with NumPy
        # inputs that is a `(features, targets)` tuple.
        features, targets = data

        # Record the forward pass so the tape can differentiate the loss.
        with tf.GradientTape() as tape:
            predictions = self(features, training=True)
            # `compiled_loss` wraps the loss function(s) given to `compile()`.
            batch_loss = self.compiled_loss(
                targets, predictions, regularization_losses=self.losses
            )

        # Back-propagate and let the optimizer update every trainable weight.
        weights = self.trainable_variables
        grads = tape.gradient(batch_loss, weights)
        self.optimizer.apply_gradients(zip(grads, weights))

        # `compiled_metrics` wraps the metrics given to `compile()`; the
        # tracked loss is included in `self.metrics`, so reporting every
        # metric's current value also reports the loss.
        self.compiled_metrics.update_state(targets, predictions)
        return {metric.name: metric.result() for metric in self.metrics}


# + [markdown] colab_type="text" id="YEdOFRbXmA4d"
# Let's try this out:

# + colab_type="code" id="1wDUe4ReTaVi"
import numpy as np

# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])

# Just use `fit` as usual
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)

# + [markdown] colab_type="text" id="tQSwBvcGIeZk"
# ## Going lower-level
#
# Naturally, you could just skip passing a loss function in `compile()`, and instead do
# everything *manually* in `train_step`. Likewise for metrics.
Here's a lower-level # example, that only uses `compile()` to configure the optimizer: # + colab_type="code" id="9UnwB6gdESVw" mae_metric = keras.metrics.MeanAbsoluteError(name="mae") loss_tracker = keras.metrics.Mean(name="loss") class CustomModel(keras.Model): def train_step(self, data): x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute our own loss loss = keras.losses.mean_squared_error(y, y_pred) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Compute our own metrics loss_tracker.update_state(loss) mae_metric.update_state(y, y_pred) return {"loss": loss_tracker.result(), "mae": mae_metric.result()} # Construct an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) # We don't passs a loss or metrics here. model.compile(optimizer="adam") # Just use `fit` as usual -- you can use callbacks, etc. x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.fit(x, y, epochs=3) # + [markdown] colab_type="text" id="WN0qnQacU9u2" # ## Supporting `sample_weight` & `class_weight` # # You may have noticed that our first basic example didn't make any mention of sample # weighting. If you want to support the `fit()` arguments `sample_weight` and # `class_weight`, you'd simply do the following: # # - Unpack `sample_weight` from the `data` argument # - Pass it to `compiled_loss` & `compiled_metrics` (of course, you could also just apply # it manually if you don't rely on `compile()` for losses & metrics) # - That's it. That's the list. # + colab_type="code" id="fnMF4QYQFNj1" class CustomModel(keras.Model): def train_step(self, data): # Unpack the data. Its structure depends on your model and # on what you pass to `fit()`. 
if len(data) == 3: x, y, sample_weight = data else: x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute the loss value. # The loss function is configured in `compile()`. loss = self.compiled_loss( y, y_pred, sample_weight=sample_weight, regularization_losses=self.losses, ) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update the metrics. # Metrics are configured in `compile()`. self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight) # Return a dict mapping metric names to current value. # Note that it will include the loss (tracked in self.metrics). return {m.name: m.result() for m in self.metrics} # Construct and compile an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) model.compile(optimizer="adam", loss="mse", metrics=["mae"]) # You can now use sample_weight argument x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) sw = np.random.random((1000, 1)) model.fit(x, y, sample_weight=sw, epochs=3) # + [markdown] colab_type="text" id="tE4yrX22rlL4" # ## Providing your own evaluation step # # What if you want to do the same for calls to `model.evaluate()`? Then you would # override `test_step` in exactly the same way. Here's what it looks like: # + colab_type="code" id="j0uOhTfBjhYX" class CustomModel(keras.Model): def test_step(self, data): # Unpack the data x, y = data # Compute predictions y_pred = self(x, training=False) # Updates the metrics tracking the loss self.compiled_loss(y, y_pred, regularization_losses=self.losses) # Update the metrics. self.compiled_metrics.update_state(y, y_pred) # Return a dict mapping metric names to current value. # Note that it will include the loss (tracked in self.metrics). 
return {m.name: m.result() for m in self.metrics} # Construct an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) model.compile(loss="mse", metrics=["mae"]) # Evaluate with our custom test_step x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.evaluate(x, y) # + [markdown] colab_type="text" id="vaogkBppfg2t" # ## Wrapping up: an end-to-end GAN example # # Let's walk through an end-to-end example that leverages everything you just learned. # # Let's consider: # # - A generator network meant to generate 28x28x1 images. # - A discriminator network meant to classify 28x28x1 images into two classes ("fake" and # "real"). # - One optimizer for each. # - A loss function to train the discriminator. # # + colab_type="code" id="xiE4ZsCtjI9B" from tensorflow.keras import layers # Create the discriminator discriminator = keras.Sequential( [ keras.Input(shape=(28, 28, 1)), layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.GlobalMaxPooling2D(), layers.Dense(1), ], name="discriminator", ) # Create the generator latent_dim = 128 generator = keras.Sequential( [ keras.Input(shape=(latent_dim,)), # We want to generate 128 coefficients to reshape into a 7x7x128 map layers.Dense(7 * 7 * 128), layers.LeakyReLU(alpha=0.2), layers.Reshape((7, 7, 128)), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"), ], name="generator", ) # + [markdown] colab_type="text" id="jyyxuepxgMuF" # Here's a feature-complete GAN class, overriding `compile()` to use its own signature, # and implementing the entire GAN algorithm in 17 lines in `train_step`: # + 
colab_type="code" id="cxFFOFm7xbCM" class GAN(keras.Model): def __init__(self, discriminator, generator, latent_dim): super(GAN, self).__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim def compile(self, d_optimizer, g_optimizer, loss_fn): super(GAN, self).compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn def train_step(self, real_images): if isinstance(real_images, tuple): real_images = real_images[0] # Sample random points in the latent space batch_size = tf.shape(real_images)[0] random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim)) # Decode them to fake images generated_images = self.generator(random_latent_vectors) # Combine them with real images combined_images = tf.concat([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0 ) # Add random noise to the labels - important trick! labels += 0.05 * tf.random.uniform(tf.shape(labels)) # Train the discriminator with tf.GradientTape() as tape: predictions = self.discriminator(combined_images) d_loss = self.loss_fn(labels, predictions) grads = tape.gradient(d_loss, self.discriminator.trainable_weights) self.d_optimizer.apply_gradients( zip(grads, self.discriminator.trainable_weights) ) # Sample random points in the latent space random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim)) # Assemble labels that say "all real images" misleading_labels = tf.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! 
with tf.GradientTape() as tape: predictions = self.discriminator(self.generator(random_latent_vectors)) g_loss = self.loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, self.generator.trainable_weights) self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights)) return {"d_loss": d_loss, "g_loss": g_loss} # + [markdown] colab_type="text" id="wvS2v5pvGM7h" # Let's test-drive it: # + colab_type="code" id="FGTUQysnjlsX" # Prepare the dataset. We use both the training & test MNIST digits. batch_size = 64 (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() all_digits = np.concatenate([x_train, x_test]) all_digits = all_digits.astype("float32") / 255.0 all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) dataset = tf.data.Dataset.from_tensor_slices(all_digits) dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim) gan.compile( d_optimizer=keras.optimizers.Adam(learning_rate=0.0003), g_optimizer=keras.optimizers.Adam(learning_rate=0.0003), loss_fn=keras.losses.BinaryCrossentropy(from_logits=True), ) # To limit execution time, we only train on 100 batches. You can train on # the entire dataset. You will need about 20 epochs to get nice results. gan.fit(dataset.take(100), epochs=1) # + [markdown] colab_type="text" id="mPJp4mErKaq1" # The idea behind deep learning are simple, so why should their implementation be painful?
tf/customizing_what_happens_in_fit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''lot'': conda)' # name: python388jvsc74a57bd02e6347a50883dfa0598d3f478411c8d6a5b9cf8792810af1a6fbd779ad8b1967 # --- # # Correct implementation of Homework 5 problem # # Hereby I'll be presenting a possible correct interpretation of the homework 5 problem # # Instructions # # >Starting from the implementation contained within the notebook `05-pruning.ipynb`, extend the `magnitude_pruning` function to allow for incremental (iterative) pruning. In the current case, if you try pruning one more time, you'll notice that it will not work as there's no way to communicate to the future calls of `magnitude_pruning` to ignore the parameters which have already been pruned. Find a way to enhance the routine s.t. it can effectively prune networks in a sequential fashion (i.e., if we passed an MLP already pruned of 20% of its parameters, we want to prune *another* 20% of parameters). # Hint: make use the mask. # # In the following cells, I will make use of a very simple example which can be quickly used to (a) present the problem of sequential magnitude pruning and (b) disproof many implementation that I have seen during the corrections. import torch # The two following routines create a very simple network composed of: # * a Linear layer (with weight and bias) with 10 incoming and 10 exiting neurons # * a BatchNorm layer (which is composed of two sets of parameters: the running mean and running sd used to track the stats of the incoming batch of data) # # Since the net is defined as a `Sequential`, we can access its parameters by subscripting it and call the proper `.weight` or `.bias` attribute. 
# # The net is defined by setting a manual seed to allow for reproducibility, while the weight of the Linear layer are altered by applying `relu` to set some of its parameters to **exactly** zero. # **Note that we suppose this neutralization is due to normal training and not previous pruning.** # # The mask is just a bunch of ones but for the first row, in which some parameters are randomly set to 0 (i.e. they need to be pruned). # + def get_net(): torch.random.manual_seed(123) net = torch.nn.Sequential( torch.nn.Linear(10,10), torch.nn.BatchNorm1d(10) ) net[0].weight.data = torch.nn.functional.relu(net[0].weight).detach() return net def get_mask(): mask = [ torch.ones((10, 10)), torch.ones((10,)), torch.ones((10,)), torch.ones((10,)) ] mask[0][0] = torch.Tensor([1,0,1,0,1,0,1,1,1,0]) return mask # - # For the sake of visualizing the data, we print the first component of the params and the mask net = get_net() net[0].weight mask = get_mask() mask[0] # We now define the routine for the magnitude pruning, which makes use of the mask (if not None) when deciding which parameters to choose from when creating the distribution of parameters to prune. # Note that, conversely to the in-class example, now the mask **does not operate pruning**. To do so, we define later a function **apply_mask**. 
def magnitude_pruning(net, p, mask=None, params_to_prune=None):
    """Build a pruning mask that zeroes the fraction `p` of the smallest-magnitude
    parameters among the layers whose names match `params_to_prune`.

    If an existing `mask` is given, parameters already pruned by it (mask entry 0)
    are excluded from the magnitude distribution -- this is what makes sequential
    (iterative) pruning work. Note that this routine only *builds* the new mask;
    it does not apply it (see `apply_mask` below).
    """
    # Avoid the mutable-default-argument pitfall; `None` means "prune nothing".
    if params_to_prune is None:
        params_to_prune = []
    flat = []
    for i, (name, par) in enumerate(net.named_parameters()):
        if any([l in name for l in params_to_prune]):
            if mask is None:
                flat.append(par.abs().flatten())
            else:
                # Only consider entries that survived previous pruning rounds.
                flat.append(par[mask[i] != 0].abs().flatten())
    flat = torch.cat(flat, dim=0).sort()[0]
    position = int(p * flat.shape[0])
    thresh = flat[position]
    new_mask = []
    for name, par in net.named_parameters():
        if any([l in name for l in params_to_prune]):
            # `>=` keeps parameters exactly at the threshold (see the note at
            # the end of the notebook: this choice is a convention, not a bug).
            m = torch.where(par.abs() >= thresh, 1, 0)
            new_mask.append(m)
        else:
            # Layers we do not prune keep an all-ones mask.
            new_mask.append(torch.ones_like(par))
    return new_mask

# We recall the routine for calculating the percentage of ones within the mask; then, we define a routine for applying a mask to a given network

# +
def pct_of_ones_in_mask(mask):
    return sum([m.sum().item() for m in mask]) / sum([m.numel() for m in mask])

def apply_mask(net, mask):
    for p, m in zip(net.parameters(), mask):
        p.data *= m
# -

# Let's see the % of ones in the preset mask

pct_of_ones_in_mask(mask)

# First of all, we build a "pristine" mask from the network setting a pruning rate of 50% without considering the preset mask:

net = get_net()
mask = magnitude_pruning(net, .5, params_to_prune=["0"])
# Fixed: the helper defined above is `pct_of_ones_in_mask`;
# `number_of_ones_in_mask` does not exist and raised a NameError.
print(pct_of_ones_in_mask(mask))

# Since we're only pruning the Linear layer, it's expected that the result be more than 50%: we have 130 parameters in the network but we only prune 110 (100 weight + 10 bias), hence, we prune 55/130 parameters => 57.69% of the total weights will survive pruning.

# Now, let us recover the previous preset mask. We first apply it to the network, pruning around 4% of its weights, then re-create the mask in sequential fashion, by adding a 50% prune on top of it.
mask = get_mask()
apply_mask(net, mask)
mask = magnitude_pruning(net, .5, params_to_prune=["0"], mask=mask)
print(pct_of_ones_in_mask(mask))  # fixed name, as above

# Now, the parameters in the network are a bit less than 57.69%, because we have to take into consideration that there was a preemptive application of pruning (albeit with a smaller pruning rate).
#
# You can use this example to test whether your implementation is correct. Note that, if you used the same criteria for determining the pruning threshold:
#
# ```
# position = int(p * flat.shape[0])
# thresh = flat[position]
# ```
#
# and populating the mask with `>=` instead of `>` (still correct, it's not an error!), the results should be *exactly* the same as far as % of ones.
#
# To reuse this example, plug your version of `magnitude_pruning`, adapt this notebook considering whether or not you apply the mask within `magnitude_pruning`, adjust the order of the arguments, and see if it works correctly.
homeworks/Homework_05_zullich.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework - NumPy - Pandas (15 pts)
#
# <font color=red>!!! Do NOT use for loops for array manipulations. ONLY use numpy.</font>

# 1. (3 pts) Generate 100 random two-dimensional (x,y) points within a unit square (i.e. x and y randomly chosen between 0 and 1) and assign them to the variable **points**. Make sure to store the points as a matrix where the 1st column is the x values and the 2nd column is the y values (i.e. each row is a point). Next shift and scale each point so that its x and y values fall between 5 and 10. The variable **points** should refer to the shifted and scaled values. Print the first five points to verify.

import numpy as np
import pandas as pd
import math
from sklearn import datasets

# Values in [0, 1) scaled by 5 and shifted by 5 land in [5, 10).
points = np.random.random((100,2))
points = points * 5 + 5
print(points[0:5, :])

# 2. (3 pts) In a single operation (do NOT do this in two separate steps), multiply all x values by 2 and divide all y values by 10. Make sure that the variable **points** reflects the changed values. Print the first five points to verify.

# Broadcasting applies [2, 0.1] column-wise in a single operation.
multiply = np.array([2,.1])
points = points * multiply
print(points[0:5, :])

# 3. (3 pts) Use numpy.linspace to generate 101 equally spaced $x$ values between and including -10 and 10. For each $x$ value, evaluate $y(x) = x^2 + sin(x)$. Store your results in the variables **x** and **y**.

x = np.linspace(-10, 10, num = 101)
# Fixed: the original filled `y` with a Python for loop and math.sin, which the
# assignment explicitly forbids ("Do NOT use for loops ... ONLY use numpy").
# NumPy evaluates the whole expression element-wise in one vectorized step.
y = x ** 2 + np.sin(x)

# This cell will plot y vs x.
# You don't have to do anything here,
# it's just so you can visualize your result.
import matplotlib.pyplot as plt
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y");

# 4.
(3 pts) Put the **x** and **y** arrays from #3 above into a pandas dataframe where the 1st column is the x values and the 2nd column is the y values. Set the column titles to "x" and "y". Use the *head* and *tail* functions to display the first few and last few points.

combined = np.column_stack((x,y))  # row i is the point (x[i], y[i])
features = "x y".split()  # -> ["x", "y"] column labels
df = pd.DataFrame(combined, columns = features)
print(df.head(4))
print(df.tail(4))

# 5. (3 pts) Sort the points in the dataframe from #4 above by increasing y value. Make sure your original dataframe variable reflects the sorted points. Verify by displaying the first three, middle three and last three points.

# Sort ascending by the "y" column; inplace=True mutates df itself.
df.sort_values(by=['y'], inplace=True)
print(df.head(3))
print(df.tail(3))
print(df.iloc[49:52])  # middle three of the 101 rows (positions 49-51 after sorting)
homework/homework-numpy-pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="3tID0V3ccEmq" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1600860847951, "user_tz": 180, "elapsed": 2315, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} import pandas as pd # Usando o data frame import numpy as np # Calculo com matrizes import scipy import matplotlib.pyplot as plt from scipy.optimize import curve_fit import random r = random.Random() r.seed() # + [markdown] id="SeAPmLuevRht" colab_type="text" # # Aqui vamos fazer uma regressão usando o Pandas/NuPy e scipy.# # + id="bSBBuo8GnJwf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1600860847959, "user_tz": 180, "elapsed": 2247, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="2c345deb-7f67-463f-8d38-edcb399eab40" # Carrega idade e salário (Varíáveis a serem analisadas) idade = [18,25,57,45,37,40,24,33] salario = [x*15.37 + r.random()*3 + 12 for x in idade] print (idade) print (salario) xData = np.array(idade) yData = np.array(salario) # + id="flvD44Swov1_" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1600860847977, "user_tz": 180, "elapsed": 2184, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} # define uma função a ser otimizada def equacaoLinear(x, a, b): return a * x + b # + id="toi4bf4eo8ym" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1600860847987, 
"user_tz": 180, "elapsed": 2141, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} # Gera parâmetros iniciais para o otimizador parametrosIniciais = np.array([1.0, 1.0]) # + id="KzSjuuS6pLyT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} executionInfo={"status": "ok", "timestamp": 1600860847994, "user_tz": 180, "elapsed": 2097, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="833d5a87-d812-4b07-d48b-a66b7db25c4c" #realiza a otimização através do erro médio quadrado (MSE) parametrosOtimizados, pcov = curve_fit(equacaoLinear, xData, yData, parametrosIniciais) # parametrosOtimizados, => Parâmetros ajustados da curva # pcov => Covariância dos parâmetros print(parametrosOtimizados) print(pcov) # + id="SZF-q3d-p90R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1600860848001, "user_tz": 180, "elapsed": 2046, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="440f5e29-66b0-4f83-f07c-6b3c7ddf99c2" # Modelo encontrado, quero os pontos que ele definiu yDataModelo = equacaoLinear(xData, *parametrosOtimizados) print(yDataModelo) # + id="WtOurwPDo-OU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1600860848005, "user_tz": 180, "elapsed": 2018, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="e92f42b0-3341-49d0-b836-9aa2d647fd57" # Encontra o erro absoluto erroAbs = yDataModelo - 
yData print(erroAbs) # + id="YT8uaCKEqj5F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} executionInfo={"status": "ok", "timestamp": 1600860848009, "user_tz": 180, "elapsed": 1988, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="19ba7b04-8429-4a73-d207-cc36cf17b9a3" #Calcula o erro quadrado de cada medida SE = np.square(erroAbs) print("Square Erros : ",SE) MSE = np.mean(SE) print("Média dos Erros ao Quadrado : ",MSE) # + id="azReFNp4okHP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} executionInfo={"status": "ok", "timestamp": 1600860848014, "user_tz": 180, "elapsed": 1959, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="6546f4f0-766c-44c3-e159-0fab1739228f" # Calcula o R^2 ou coeficiente de determinação RSquare = 1.0 - (np.var(erroAbs) / np.var(yData)) print("coeficiente de determinação (R^2) = ", RSquare) # + id="iApQYeIBtCsu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} executionInfo={"status": "ok", "timestamp": 1600860848020, "user_tz": 180, "elapsed": 1928, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="62df68a3-99b1-489b-ed81-0ce01b203f7c" #Imprime os coeficientes print(f"Coeficientes encontrados : {parametrosOtimizados[0]} * X + {parametrosOtimizados[1]}" ) # + id="c1Y6u03VtvSx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 401} executionInfo={"status": "ok", "timestamp": 1600860849550, "user_tz": 180, "elapsed": 3431, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="819ecbc9-3f10-4ab0-b31b-0e016146c567" #Imprime os pontos e a curva f = plt.figure(figsize=(4,4), dpi=100) axes = f.add_subplot(111) ## Plot somente dos dados para ajustar axes.plot(xData, yData, 'ro') xModelo = np.linspace(min(xData), max(xData)) yModelo = equacaoLinear(xModelo, *parametrosOtimizados) axes.plot(xModelo, yModelo) plt.xlabel("idade") plt.ylabel("salário anual (R$)") plt.plot() # + [markdown] id="5NXGCARkwGl3" colab_type="text" # ##Gráfico de resíduos # + id="HFPr6JcgtzAC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 401} executionInfo={"status": "ok", "timestamp": 1600860849555, "user_tz": 180, "elapsed": 3411, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="d07e1e42-c01d-48af-ca9f-d8bf72192886" diff = yData - ( xData * parametrosOtimizados[0] + parametrosOtimizados[1] ) #Imprime os pontos e a curva f = plt.figure(figsize=(4,4), dpi=100) axes = f.add_subplot(111) ## Plot somente dos dados para ajustar axes.plot(xData, diff, 'ro') plt.xlabel("idade") plt.ylabel("salário anual (R$)") plt.plot() # + [markdown] id="5HN1e2twwPFR" colab_type="text" # ## Como ficaria a mesma implementação usando o Sklearn # + id="Z7OAU5c1wMng" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1600860849565, "user_tz": 180, "elapsed": 3398, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} # Importando as bibliotecas from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score # + id="NQ_hKRiEws7b" colab_type="code" colab={} executionInfo={"status": "ok", 
"timestamp": 1600860849568, "user_tz": 180, "elapsed": 3378, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} reg = LinearRegression() x_ModeloSkLearn = xData.reshape((-1,1)) regressao = reg.fit(x_ModeloSkLearn, yData) # + id="syqNz0Q_xMGa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} executionInfo={"status": "ok", "timestamp": 1600860849573, "user_tz": 180, "elapsed": 3363, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="e55f2217-dc09-4d10-ddae-724d17939b03" previsao = reg.predict(x_ModeloSkLearn) MSE = mean_squared_error(yData, previsao) print('MSE', MSE) # + id="672O4HFZxXeX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} executionInfo={"status": "ok", "timestamp": 1600860849576, "user_tz": 180, "elapsed": 3344, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="cbed77e7-ba02-46e0-d759-75a356c74390" #O resultado final para comparacao print(f"Y = {reg.coef_}*X + {reg.intercept_}") # + id="GjWzTFJ1x6gf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} executionInfo={"status": "ok", "timestamp": 1600860849582, "user_tz": 180, "elapsed": 3326, "user": {"displayName": "<NAME>\u00fajo", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjO1DWH5Tatw2pMcXSg4xJV4DgvAZh-cRUmnSS6=s64", "userId": "13381797195407462590"}} outputId="c8a6a62f-b6af-404e-885f-f5e10b56dbff" # Calculando o exibindo o R^2 (Coef. Determinacao) R_2 = r2_score(yData, previsao) print("R^2 = ", R_2)
Mod1/implementacao_regressao_linear.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import json d = {} with open("vals.json") as f: d = dict(x.rstrip().split(None, 1) for x in f) vals = pd.read_excel("./Lea.xlsx") with open("vals.json", "w") as text_file: text_file.write(json.dumps(vals.to_dict(orient = "list"))) json.dumps(vals.to_dict(orient = "list"))
.ipynb_checkpoints/JSONConvert-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/IEEESBITBA/Curso-Python/blob/master/Clase_2_Curso_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ghBKrYxhuQTB" colab_type="text" # #Tipos de datos # Cuando se desarrolla un programa es usual la necesidad de trabajar con información; como fue visto en la clase anterior. # Es natural pensar que una computadora debe saber cómo es la información que tiene almacenada. ¿Es un número? ¿Es una letra? ¿Es una palabra? ¿Es una lista de cosas? # # <img src="http://tensor-programming.com/wp-content/uploads/2016/09/variables-788x469.jpg" alt = "Wordcloud con tipos de datos comunes" height = 200 title="Objects y structures son un conjunto de subtipos"> # # # En esta ocasión vamos a profundizar sobre un aspecto en particular de dicha información el cual es llamado su "tipo". # # Distintos lenguajes de programación trabajan de diversas formas este aspecto de la información. En Python en particular, no se debe aclarar explicitamente cuál es el tipo de dato de una variable: el lenguaje de programación nos entiende y asume cuál debe ser dicho tipo. Por esta razón se suele decir que Python es un lenguaje de tipado dinámico. 
# # A continuación mostraremos ejemplos con el uso de distintos tipos de dato en Python # # + [markdown] id="D0mAa2X5gple" colab_type="text" # ## Tipos numericos # + [markdown] id="aWRzjdqUyy26" colab_type="text" # ### int # Es el tipo de dato más básico, en donde puede guardarse numeros enteros # # + id="8d5ZJsBTxIYm" colab_type="code" colab={} dias = 5 # + [markdown] id="Y-OIPTPOvaqg" colab_type="text" # A veces operando con datos enteros podemos obtener números racionales, por ejemplo: # + id="c_WwVyZYvn7x" colab_type="code" colab={} x = 5 y = 2 print(x/y) # + [markdown] id="wl9jL2a7vng2" colab_type="text" # Para evitar esto podemos usar el operador **división entera: //**, que divide el cociente entero entre los números. # # + id="q8LCevAueZvH" colab_type="code" colab={} x = 5 y = 2 z = x//2 print(z) # + [markdown] id="Xyy2vaK0ecR2" colab_type="text" # # Si queremos saber el resto de la división entera, usamos el operador **módulo %**. Su definición matematica es la siguiente # # $$ \!\!\!\!\!\mod(x,y) = x - \left\lfloor\frac{x}{y} \right\rfloor \cdot y $$ # # En muchos lenguajes de programación se indica con: $ x\%y $ # + id="YsRbDPj8wRZt" colab_type="code" colab={} x = 5 y = 2 print(x // y) print(x % y) print(x // y, "*", y, "+", x%y,"=", x) # + [markdown] id="NnVX8c63UEFN" colab_type="text" # Hacer que el siguiente programa solo imprima multiplos de 3 # + id="YReBj2JdUKQz" colab_type="code" colab={} for i in range(20): print(i) # + [markdown] id="tva8InGGxJas" colab_type="text" # ### float # Tipo de dato utilizado para guardar números reales # # + id="NsxOtoLWu0zi" colab_type="code" colab={} pi = 3.14 x = 3. # Se la indica que es un numero real con el . z = 3 # Este será un número entero print(x) print(z) # + [markdown] id="LjAnxLyjyfLA" colab_type="text" # Si queremos saber la parte entera de un float, podemos simplemente convertirlo a int. # A la acción de convertir una variable de un tipo a otro diferente se la conoce como **cast**. 
Esto hará la siguiente operacion: # # $$x \leftarrow \lfloor y \rfloor $$ # + id="xY1euahoyevG" colab_type="code" colab={} y = 3.15 x = int(y) print(x) # + [markdown] id="4sR5HbHxzMRW" colab_type="text" # Es importante tener en cuenta que los float no pueden ser infinitamente precisos. Cada variable que usamos se guarda en memoria, y por lo tanto, ¡para infinita precisión necesitaríamos infinita memoria! # # Por esto suele ser preferible, cuando trabajamos con floats, realizar comparaciones del tipo **<** o **>**, en lugar de **==** o **!=** porque pueden surgir problemas. # # ¿Cuál es el problema de estos programas? # + id="MlR-LD7NzKys" colab_type="code" colab={} # No lo vamos a correr porque rompe la compu de la facu, pero si quieren probar # nunca va a terminar. Usar con cuidado! var = 0.11 while var is not 0: print(var) var -= 0.01 # + id="TDRC5TGT6Pqf" colab_type="code" colab={} a = 9999999999999999 b = 0.00000000000001 c = a + b print(c-b == a) # + [markdown] id="wejH0B1Zy8LG" colab_type="text" # ## boolean # Es un tipo de dato que puede asumir dos valores True (Verdadero) o False (Falso) # + id="6zF4f_S0zfel" colab_type="code" colab={} llueve = False soleado = True # + [markdown] id="6Ej-A76P76FX" colab_type="text" # Ya veníamos usando este tipo de datos implícitamente: las comparaciones devuelven booleanos. # # Prácticamente cualquier dato puede castearse (convertirse) a boolean. Para los números, 0 es false y cualquier otro es true. Para otros datos, en general son false si están "vacíos" y true en los demás casos. # + id="RTFoI7Cr88g2" colab_type="code" colab={} print(bool(4)) print(bool(0)) print(bool("hola")) print(bool("")) print(int(True)) print(int(False)) palabraTrue = str(True) palabraFalse = str(False) print(palabraTrue) print(palabraFalse) # + [markdown] id="TdpOoGXVx0Cy" colab_type="text" # ##string # Tipo de dato utilizado para guardar letras, palabras, oraciones, texto, etc. 
# # + id="tlhEoTyRyeR9" colab_type="code" colab={} nombre = "Juan" # + [markdown] id="rtkix74k1QEM" colab_type="text" # ¡También podemos castear strings a números y viceversa! Así podemos operar con ellos. # + id="BZsXI7P610CL" colab_type="code" colab={} nombre = "3" numero = 3 print(nombre == numero) print(nombre == str(numero)) print(int(nombre) * 4) # + [markdown] id="ZKMPjsOO4_yl" colab_type="text" # ¿Es correcto este código? # + id="PL7B-BAX5JGh" colab_type="code" colab={} print(nombre * 4) print("hola" * 4) # + [markdown] id="D9ohvNTqhmoD" colab_type="text" # ### Operaciones con strings # # Con los strings podemos realizar muchas operaciones, por ejemplo: # # - **x+y**: En esta operacion se agrega al final de la string x el contenido de y. Ej: # # + id="mwYffWASkWAt" colab_type="code" colab={} x= 'ho' y= 'la' x+=y # Recuerden que x+=y es x=x+y print(x) # + [markdown] id="5ytFOjAhkWYR" colab_type="text" # - **startswith**( $algo$ ): Le preguntamos a la string si empieza con cierto texto. Ej: # # + id="b-yjwlABkc22" colab_type="code" colab={} print(x.startswith('ho')) print(x.startswith('x')) # + [markdown] id="neyr-9WckncT" colab_type="text" # - **endswith**( $algo$ ): Le preguntamos a la string si termina con cierto texto. Ej: # + id="eOpZjrHlkvg6" colab_type="code" colab={} print(x.endswith('a')) print(x.endswith('ola')) print(x.endswith('z')) # + [markdown] id="L_4r67VRUpNC" colab_type="text" # Hacer un programa que te permita ingresar nombre y apellido y lo guarde en la variable nombreYapellido. # + id="5rBruUXJUqo0" colab_type="code" colab={} # + [markdown] id="3tipHbU_OHnQ" colab_type="text" # #Estructuras de datos # # Utilizando variables de los distintos tipos de datos como los mostrados anteriormente se puede armar programas con una complejidad notable. Sin embargo, hay un límite en la versatilidad de los programas si solo utilizamos los tipos de datos que vimos hasta ahora. 
# # <img src="http://www.digitalexpert.co/images/courses/data-structure.png" alt = "Visualización básica de una data structure" height = 280 title="Las estructuras de datos son un concepto muy importante para cualquier lenguaje de programacion"> # # # Se suele llamar "estructura de datos" a cierta clase de tipos de datos más complejos, los cuales nos permiten organizar la información de manera más efectiva. # # En particular, nos permiten agrupar datos (cada uno de ellos con su tipo), e interactuar con ellos de la forma más apropiada. # # A continuación mostraremos dos de las estructuras de datos más sencillas (¡pero útiles!) que Python nos ofrece. # + [markdown] id="0VoVNCBaPdoF" colab_type="text" # ## Listas # # + [markdown] id="MfnKbbeBPjpJ" colab_type="text" # Es una estructura de datos muy sencilla: nos permite almacenar elementos de manera secuencial, los cuales pueden ser accedidos mediante el conocimiento de su posición en la lista. Estas las creeamos de la siguiente manera: # # > a = **[** $ a_{0},a_{1},a_{2},\dots $ **]** # # A continuación se mostrara un ejemplo sencillo con una lista # + id="K23GjgqMQB9-" colab_type="code" colab={} a = [ 13, 40, 10, 30 ] print(a) # + [markdown] id="UlcBp_8UvehI" colab_type="text" # Tambien podemos crear una lista del tamaño que queramos de la siguiente manera: # # > a = **[** (operacion con x) **for** x **in** **range**(10) **]** # + id="hiu1MacNvyQd" colab_type="code" colab={} a = [x*2 for x in range(15)] print(a) b = [i for i in "hola"] print(b) # + [markdown] id="BYiMd8IzlhNO" colab_type="text" # Para acceder a un elemento de una lista se utiliza lista**[**posicion**]**. # # Tambien podemos modificar el elemento asignandole un valor. 
# # Nota: Para acceder al ultimo elemento podemos acceder con -1 # + id="hH1f4B8fl1vG" colab_type="code" colab={} a = [x+2 for x in [2,3,4,5]] print(a) print(a[0]) print(a[2]) print(a[-1]) a[2] = 0 print(a) # + [markdown] id="njU7dNS8l9FL" colab_type="text" # Notar que, a diferencia de lo que diría el sentido común, el primer elemento de la lista se accede con la posición 0. Esto sucede en muchos lenguajes de programación hoy en día y es un hecho muy aceptado en el ámbito. # + [markdown] id="QSB8OdyDw1i7" colab_type="text" # ### Operaciones sobre una lista # Podemos realizar multiples operaciones sobre una lista: # # - **a+b**: Al igual que los strings se agrega el contenido de **b** al final de **a**. Ej: # + id="dSYKydfFxTSr" colab_type="code" colab={} a = [1,2] b = [3,4] z = a+b print(z) # + [markdown] id="4hht8kbW3Qvv" colab_type="text" # - **append**($algo$): permite agregar un elemento al final de la lista. # + id="vSgGzYcINGRv" colab_type="code" colab={} v = [50,'hola',150] print(v) v.append(200) # agrego al final de la lista un elemento que vale 200 (tipo int) v.append("hola") print(v) # + [markdown] id="kZjHeYw3xpu7" colab_type="text" # - **pop**(): Permite remover el ultimo elemento de la lista. # + id="UDWAtoERNfCw" colab_type="code" colab={} v = [1,'1',"a",'c'] v.pop() #saco el ultimo elemento print(v) # + [markdown] id="xuJGzxm_30Vu" colab_type="text" # - **remove**($valor$): Remueve el primer elemento que sea igual al valor. # + id="rHDPAGdHNwlH" colab_type="code" colab={} v = [1,2,1,2] v.remove(2) # elimino el primer valor que cumpla print(v) v.remove(2) # elimino el primer valor que cumpla print(v) # + [markdown] id="5fLZ42t4zKbG" colab_type="text" # - **del** lista**[**posicion**]**: Para eliminar un elemento en una posición determinada. 
# + id="yTvsT5UazWN0" colab_type="code" colab={} a = ["hola","como","estas"] del a[1] print(a) # + [markdown] id="JBxjC81WznRe" colab_type="text" # - $algo$ **in** lista: Muchas veces es necesario saber si un elemento esta dentro de una lista, para ello utilizamos esta estructura. Ej: # # + id="mnrOeoecz4mI" colab_type="code" colab={} x = [1,2,3,4] if 4 in x: print(str(4)+' esta en la lista') if 6 not in x: print(6,'no esta en la lista') if 5 in x: print(5,'esta en la lista') else: print(5,'no esta en la lista') # + [markdown] id="XDGgUB3opnot" colab_type="text" # ### for sobre una lista # Ahora cobra mas sentido la estructura for que vimos la clase pasada ya que puede ir avanzando sobre todos los elementos de una lista # + id="UNFJKu_YpwmU" colab_type="code" colab={} y = [1,10,100,1000] for x in y: print(x) # + [markdown] id="ZXLYWjwqVJuc" colab_type="text" # ### Ejercicios # Crear una lista que se inicialice con los numeros pares hasta el 50 # + id="YC8rVjYOVSO0" colab_type="code" colab={} # + [markdown] id="ple-OvO2QdeD" colab_type="text" # #### Challenge de listas: ¿Cómo podría hacer una matriz? # + id="Mh25rvel6BLJ" colab_type="code" colab={} # + [markdown] id="unAFRKQ24-ym" colab_type="text" # ### Nota final # Es importante observar que en las listas los elementos se almacenan en un orden bien definido, es decir: siempre hay un elemento que está antes y otro que está después. Esto no será siempre cierto con otras estructuras de datos. # # Otro detalle es que si bien en este ejemplo utilizamos elementos de tipo de dato int, es posible utilizar cualquier tipo de dato para los elementos cuando se trabaja con una lista, o con cualquier estructura de datos. # # + [markdown] id="LWdgjC8mSxuE" colab_type="text" # ## Diccionarios # + [markdown] id="KhV4CUvITVmc" colab_type="text" # Un diccionario es otra estructura de datos muy útil y muy utilizada cotidianamente. La analogía directa que se suele hacer es con un diccionario físico. 
Un diccionario (en el sentido físico) contiene una gran cantidad de información organizada por palabras y contenido asociado a ellas. Más precisamente, cada una ordenadas alfabeticamente tiene asociada una información que describe en profundiad su significado. # # <img src="http://www.eslstation.net/ESL310L/dictionary.bk.gif " height = 200 alt = "Un diccionario, literalmente" title="No les puedo asegurar que la imagen sea una buena analogia de lo que es un diccionario en Python..."> # # # En programación los diccionarios no son muy distintos. Un "diccionario" en este contexto es una estructura de datos cuya información esta organizada igual que en un diccionario físico; es decir, cada bloque de información, es decir, cada elemento, tiene asociada una palabra, la cual se suele denominar **key**. Mediante la **key** se puede acceder a dicha informacion, la cual se suele denominar **contenido**. El par **key,contenido** suele llamarse **elemento**. # # Es muy importante notar que no pueden existir dos elementos con igual clave, estos serían indistinguibles. # # La clave suele ser información con tipo de dato **string** (aunque no unicamente), mientras que el contenido puede tener cualquier tipo de dato, esto será decisión de ustedes. # # Estos se crean utilizando la siguiente estructura: # # > x = **{** $k_{0}$**:**$c_{0}$**, $k_{1}$**:**$c_{1}$, $\dots$}** # Noten el : que divide el key del valor y que se usa { } # # Comenzemos por crear un diccionario con la descripción de las palabras: # + id="x_M_vy5fUtyd" colab_type="code" colab={} diccionario = { "trueno": "Ruido muy fuerte que sigue al rayo durante una tempestad, producido por la expansión del aire al paso de la descarga eléctrica.", "rayo": "Chispa eléctrica de gran intensidad producida por la descarga entre dos nubes o entre una nube y la tierra." 
} # + [markdown] id="L4qzUGar266U" colab_type="text" # Para acceder a los datos de un diccionario se utiliza la misma sintaxis que las listas pero utilizando la key a la que queremos acceder. Ej: # + id="X4NcAVrb3IOa" colab_type="code" colab={} print(diccionario['trueno']) # + [markdown] id="Abnxh6zsuJp9" colab_type="text" # Tambien podemos usar los diccionarios para acceder de forma sencilla a datos. Creemos una base de datos donde se guarda de la siguiente manera: # # > **key**=Legajo **contenido**=Nombre # + id="FLfNRSbXVxOr" colab_type="code" colab={} database = { 50001:"<NAME>", 50002:"<NAME>" } print("Nombre completo del legajo",50002,": ",database[50002]) # + [markdown] id="XfBwbaJi2Epw" colab_type="text" # ### Operaciones con diccionarios # - $key$ **in** diccionario: Nos permite saber si esa key se encuentra en el diccionario. Ej: # + id="IXfr9AU0JMMc" colab_type="code" colab={} database = { 50001:"<NAME>", 50002:"<NAME>" } if 50001 in database: print("la key 50001 se encuentra en el diccionario") if 123 not in database: print("la key 123 no se encuentra en el diccionario") # + [markdown] id="ved0xALF2ffS" colab_type="text" # - **for** key **in** diccionario: Nos permite iterar por todas las keys del diccionario. Ej: # + id="Vrf_s_F1J7uA" colab_type="code" colab={} for key in database: print('La key',key,'tiene asociado el valor',database[key]) # + [markdown] id="xHAzhLywGEvB" colab_type="text" # - **.items**(): nos devuelve la lista de llaves y valores contenidas por el diccionario. Ej: # + id="e58nY61-J_0D" colab_type="code" colab={} for key,value in database.items(): print("key:",key," value: ",value) # + [markdown] id="eK4Lm4w9WHfk" colab_type="text" # ### Ejericicios # Realizar un programa que te pida el numero de legajo y el nombre completo y lo guarde en un diccionario. 
# # Tip: Usar dos celdas de codigo # + id="2dcL9YmnWfbv" colab_type="code" colab={} # + id="CoPU3LS1WfJq" colab_type="code" colab={} # + [markdown] id="EbcY0DGr9mwE" colab_type="text" # ### Nota final # Es importante ver que un diccionario tiene un orden al igual que las listas; por otro lado; también aceptan elementos repetidos, siempre que tengan distintas claves todo marchará correctamente. # # En la proxima sección veremos un tipo de datos que ya no organizará la información de manera tal que exista un orden establecido; en algunos casos es provechoso que la información se estructure de manera tal que no exista un orden en los elementos # + [markdown] id="otim_uNIDYcm" colab_type="text" # ## Sets # Un set es una estructura de datos ya más avanzada que las anteriores, la cual nos permite almacenar un grupo de elementos cuyo orden no es relevante. Lo único que tiene importancia cuando utilizamos un **set** es qué elemento esta y qué elemento no. # # <img src="https://files.realpython.com/media/t.8b7abb515ae8.png" height = 200 alt = "Representación de Set usando diagrama Venn" title= "A&B"> # # # Un "set" no admite repetidos; ya que por su funcionamiento interno no tiene la capacidad de "darse cuenta" cuando un elemento se encuentra más de una vez; tan solo puede saber que elementos estan y que elementos no. # A simple vista parecería que es muy limitado entonces un "set" ya que no esta ordenado y no acepta repetidos; no obstante, para algunos tipos de operaciones que programarlas en listas o diccionarios sería muy tedioso, con un **set** son muy prácticas. 
# + [markdown] id="vwXNR2eLMEHH" colab_type="text" # ### Operaciones con sets # - **|**: Es la operacion de $A \cup B$ # + id="pGzhs39jEhPl" colab_type="code" colab={} x= {1,2,3,4,7,7,7,7,7,7} print("Set x=",x) y= {1,2,10} z = x or y #union entre x e y z = x | y print(z) # + [markdown] id="d51LnJdhMgxr" colab_type="text" # # - **&**: Es la operacion de $A \cap B$ # + id="7pXUUZBMMunZ" colab_type="code" colab={} x= {1,2,3,4,7,7,7,7,7,7} y= {1,2,10} w = x & y #intersección entre x e y print(w) # + [markdown] id="i-jBKc8rQz2I" colab_type="text" # - **A-B**: Quita los elementos de B de A. El equivalente logico es $ A\cap \neg B$ # + id="Y-ERvaLKRFbS" colab_type="code" colab={} x= {1,2,3,4,7,7,7,7,7,7} y= {1,2,10} z = x-y print(z) # + [markdown] id="bD9Y4BIeQOY2" colab_type="text" # - **.remove**($valor$): Remueve el valor del set. Ej: # + id="tLd3WIpxGT-o" colab_type="code" colab={} x= {1,2,3,4,7} x.remove(1) print(x) # + [markdown] id="B7B3zozkQZKe" colab_type="text" # - **.add**($valor$): Agrega el valor al set. Ej: # + id="04Qm70ySHlEn" colab_type="code" colab={} x= {1,2,3,4,7} x.add("hola") print(x) # + [markdown] id="GTYqk43YWl_3" colab_type="text" # ### Ejercicios # Tenemos varios sets que contienen las personas que les gustan un cierto sabor de helado: # # > vainilla = {"Juan","Marina","Tomas","Paula"} # # > chocolate = {"Pedro","Paula","Marina"} # # > dulceDeLeche = {"Juan","Julian","Pedro","Marina"} # # Responder usando operaciones de sets: # # - Hay alguna persona que le gusten todos los gustos? # # - Hay alguna persona que le gusten la vainilla y no el dulce de leche? # # - Cuantas personas distintas tenemos? 
# + id="g3blObRMYI1O" colab_type="code" colab={} # + [markdown] id="hpW3cNya1ESA" colab_type="text" # # Ejercitación Integrada # $\newcommand{\dif}{\bigstar}$$\newcommand{\facil}{\color{\green}{\dif}}$ $\newcommand{\pieceofcake}{\color{\cyan}{\dif}}$$\newcommand{\medio}{\color{\yellow}{\dif\dif}}$$\newcommand{\media}{\medio}$$\newcommand{\normal}{\medio}$ $\newcommand{\dificil}{\color{\orange}{\dif\dif\dif}}$ $\newcommand{\imposible}{\color{\red}{\dif\dif\dif\dif}}$ # + [markdown] id="dz1uvBrZlIHA" colab_type="text" # ### $\facil$ Call me $\frac{\partial}{\partial x}$, or $\mathrm{diff}$ for short # Una operacion muy comun al manejar datos es la derivada # # $$ \frac{d }{dt} (\mathrm{Datos}) $$ # # Objetivo: Escribir una funcion que tome una lista de $n$ numeros y devuelva la *derivada discreta* de la lista de tamano $n-1$. # # Tips # * $\mathrm{derivada}[i]=x[i+1]-x[i]$ # # + id="NwAlHXUVrc7M" colab_type="code" colab={} # + [markdown] id="3YitqSp9Z3sD" colab_type="text" # ###$\normal$ Las naranjas de Miguel # Miguel vive en un pueblo frutero con su hermana en el valle de Oz. Todos los días le llega un pedido recien cosechado de bananas y naranjas. Como son abundantes, suele darle 2 bananas a su hermana y 1 naranja. # # Miguel ahora quiere modernizarse y te viene a pedir si sabrías como escribir un programa que lea el código del pedido y *devuelva la cantidad de bananas y naranjas que debe repartir* teniendo en cuenta que Miguel **siempre** quiere quedarse con por lo menos una naranja. # # `` # ejemploCodigoDePedido = "1111100000000" # `` # # Cada 0 representa una naranja y cada 1 representa una banana. 
# # **Tips** # * La función `list()` toma un string y lo convierte a una lista # # ``` # P=list(ejemploCodigoDePedido) # >> P = ['1', '1', '1', '1', '1', '0', '0', '0', '0', '0', '0', '0', '0'] # ``` # + id="81Kzws-ka5L9" colab_type="code" colab={} # + [markdown] id="0CqTiYpZG-AO" colab_type="text" # ###$\normal$ Opciones y Menús # Escribir un programa que tome una lista y pida al usuario que seleccione un item de la lista y otorgarle un puntaje entre 1 y 5. Finalmente el programa debería imprimir la elección del usuario y el puntaje otorgado. # # **Ejemplo de lista input:** # # `` # opciones = ["Bundesliga", "La Liga", "Liga BBVA","Premiere League"] # `` # # **Interaccion:** # # ``` # Ingresar número de la opción deseada. # 0 . Bundesliga # 1 . La Liga # 2 . Liga BBVA # 3 . Premiere League # >>>1 # Cuantos puntos le otorga a: La Liga # >>>3 # Se guardó su respuesta como: La Liga con 3 puntos. # ``` # + id="3fotZ5anaVs6" colab_type="code" colab={} # + id="EOKTzse7Zokj" colab_type="code" colab={} # + [markdown] id="58bbfFzqurNj" colab_type="text" # ### $\dificil$ Dr. Chaos, el malevolo semiótico # # "Chaos es caos en inglés" te diría Dr. Chaos, charlando con una taza de té Chai en la mano. En verdad no es tán malo como su nombre lo hace aparentar... si es que tenes un buen manejo de los idiomas. # # Dr. Chaos esta armando un diccionario. Este diccionario tiene la particularidad de no tener definiciones; el diccionario de Dr. Chaos define *una palabra como otra*. Dr. Chaos quiere comenzar a traducir la literatura de todo el mundo usando el diccionario y ha venido a ti, el *Number One* programador de Python. # # **Objetivo:** Cambiar las palabras de una oración usando el diccionario de Dr. Chaos e imprimir la nueva oración en el lenguaje unificado. # # *Ejemplo:* # # `` # diccionario = {"hola":"你好","como":"how","estás":"estáis"} # `` # # `` # oracion = "hola, como estás?" # `` # # # *OUTPUT: * # `` # "你好, how estáis?" 
# `` # # *Ejemplo 2:* # # `` # diccionario = {"ve":"regards","bien":"bom","se":"it"} # `` # # `` # oracion = "se ve bien!" # `` # # **Tips:** # # * El programa debería tratar los símbolos de interrogación, exclamación, los puntos y comas como `whitespace`, es decir, espacio en blanco. # # * Suponer que las letras son todas minusculas. # # + id="kDvIsxZCxawO" colab_type="code" colab={} # + [markdown] id="2a0QjbS3QrlX" colab_type="text" # ###$\imposible$ Quiero Retruco # El [Truco](https://es.wikipedia.org/wiki/Truco_argentino) es un juego de cartas muy popular en Argentina. Se suele jugar con naipes españoles de 40 cartas, las cuales tienen 4 palos (basto, oro, espada y copa) y 10 números, 1,2,3,4,5,6,7,10,11 y 12. # Si bien en esta ocasión no vamos a programar un juego de truco, si, vamos a resolver uno de los problemas más usuales que surgen cuando jugamos el cual es definir cual carta gana y cual carta pierde cuando hay un duelo entre dos cartas. # # <img src="https://steemitimages.com/640x0/http://k46.kn3.net/taringa/B/D/9/7/2/4/KevinTrimboli/338.png" height=300 alt ="Esquema de hierarquia de cartas para el juego truco argentino" title="Un palo le gana a 7 espadas y ambos pierden ante una espada envainada? What?"> # # En la imagen podemos observar en orden de importancia de las cartas de izquierda a derecha. El 1 de espada es la más importante (y por lo tanto **siempre** gana) mientras que los 4s son las cartas de menor importancia (casi siempre pierden). Las cartas en la misma columna empatan si se enfrentan. # # - Programar una función con dos inputs tipo string **carta A** y **carta B** que retorne la carta ganadora (tipo string), o "empate" en caso de que lo haya. 
Ejemplos de como debería funcionar # # ``` # dueloDeCartas("1 de espada", "1 de basto") # >>> 1 de basto # dueloDeCartas("7 de oro", "5 de oro") # >>> 7 de oro # dueloDeCartas("11 de copa", "11 de espada") # >>> empate # ``` # # Hint: usar un diccionario donde la **clave** sea el nombre de la carta, y su **contenido** su importancia (un tipo **int**). Aprovechen la instrucción for para evitar tener que cargar todas las cartas una por una # # - A veces se suele jugar al truco con más de dos jugadores. Podría ocurrir duelos en los que participan $n$ cartas. Programar una funcion cuyo input sea una lista de strings con todas las cartas y retorne la ganadora. (En caso de empate que retorne alguna de las ganadoras, o un arreglo con las ganadoras) # Ejemplos de como podria funcionar funcionar # ``` # dueloDeCartas(["7 de basto","7 de espada","12 de espada", "4 de espada"]) # >>> "7 de espada" # dueloDeCartas(["4 de espada","7 de basto","7 de copa", "5 de copa"]) #también podría haber dado 7 de basto # >>> "7 de copa" # ``` # + id="ZCDzrXiLYrOP" colab_type="code" colab={}
Clase_2_Curso_Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # To Calculate the Perimeter Of shape with N Number of Sides More than 0

# Ask how many sides the shape has, prompt for each side's length in turn,
# then report the perimeter (the sum of all collected lengths).
No_of_sides = int(input('Please Define the number of Sides you have :'))
sides_length = [int(input('please enter the side no ' + str(i + 1) + ':'))
                for i in range(No_of_sides)]
print('the Perimeter Of shape {} Side is {} '.format(No_of_sides, sum(sides_length)))
Perimeter of rectangle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Euler Problem 148
# =================
#
#
# We can easily verify that none of the entries in the first seven rows of
# Pascal's triangle are divisible by 7:
#
# However, if we check the first one hundred rows, we will find that only 2361
# of the 5050 entries are not divisible by 7.
#
# Find the number of entries which are not divisible by 7 in the first one
# billion (10^9) rows of Pascal's triangle.

# +
def f(n, p=7):
    """Count the entries in row ``n`` of Pascal's triangle that are NOT
    divisible by the prime ``p`` (default 7, as in the problem).

    By Lucas' theorem, C(n, k) is not divisible by p exactly when every
    base-p digit of k is <= the corresponding base-p digit of n, so the
    count is the product of (digit + 1) over the base-p digits of n.
    """
    if n == 0:
        return 1
    return (1 + (n % p)) * f(n // p, p)


def F(n, p=7):
    """Count the entries not divisible by the prime ``p`` in the first
    ``n`` rows (rows 0 .. n-1) of Pascal's triangle.

    Write n = q*p + r.  The q complete blocks of p rows contribute
    T(p) * F(q) entries, where T(p) = p*(p+1)//2 is the count for one full
    block of rows, and the r leftover rows contribute T(r) * f(q),
    i.e. r*(r+1)//2 * f(q).  (With p = 7, T(7) = 28 as in the original.)
    """
    if n == 0:
        return 0
    r = n % p
    return p * (p + 1) // 2 * F(n // p, p) + r * (r + 1) // 2 * f(n // p, p)


print(F(10**9))
# -
Euler 148 - Exploring Pascal's triangle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="sBNUj9ZOzfvi" # # GNN Implementation # # - Name: <NAME> # - SRN: PES1UG19CS019 # - VI Semester 'A' Section # - Date: 27-04-2022 # + id="9K4RPvyWhZVu" import sys if 'google.colab' in sys.modules: # %pip install -q stellargraph[demos]==1.2.1 # + id="E-BsytH0iHy7" import pandas as pd import os # + id="n2gc643xiNan" import stellargraph as sg from stellargraph.mapper import FullBatchNodeGenerator from stellargraph.layer import GCN # + id="8l3EHQ_0icb9" from tensorflow.keras import layers, optimizers, losses, metrics, Model from sklearn import preprocessing, model_selection from IPython.display import display, HTML import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/", "height": 51} id="1d7f4zipi89m" outputId="5807e0a7-b4a3-48ae-c534-046ba3ea118c" dataset=sg.datasets.Cora() display(HTML(dataset.description)) G, node_subjects = dataset.load() # + colab={"base_uri": "https://localhost:8080/"} id="eE8BnjRmjaWW" outputId="a6c1b86f-2136-4f3f-a6e4-72e10d16a935" print(G.info()) # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="XaCmIFkQjt-I" outputId="e016fc8e-09c9-40f8-e16d-53bf2aaccf3c" node_subjects.value_counts().to_frame() # + id="eEaOW45fj-pE" train_subjects, test_subjects = model_selection.train_test_split(node_subjects, train_size=140, test_size=None, stratify=node_subjects) val_subjects, test_subjects = model_selection.train_test_split(test_subjects, train_size=500, test_size=None, stratify=test_subjects) # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="TcQLa4SNlpW2" outputId="fcc81be8-56ea-4242-9128-3ee347554c45" train_subjects.value_counts().to_frame() # + id="F2YcuQdwl12t" target_encoding=preprocessing.LabelBinarizer() # + id="X4hng1CBmK1J" 
# One-hot encode the targets; the encoder is fitted on the training labels
# only and reused unchanged for validation and test.
train_targets=target_encoding.fit_transform(train_subjects)
val_targets=target_encoding.transform(val_subjects)
test_targets=target_encoding.transform(test_subjects)

# + colab={"base_uri": "https://localhost:8080/"} id="9l1fDgy2moD6" outputId="64a2e58a-edaa-4c57-8f49-82079b93b7b8"
# Full-batch generator: feeds the whole graph (features + adjacency,
# preprocessed for GCN) to the model at once.
# NOTE(review): the FullBatchGenerator import is not used below.
from stellargraph.mapper.full_batch_generators import FullBatchGenerator
generator = FullBatchNodeGenerator(G, method="gcn")

# + id="PL0Kp49tnGCx"
train_gen=generator.flow(train_subjects.index, train_targets)

# + id="dARsKC-Cnc7H"
# Two GCN layers of 16 units each, ReLU activations, 50% dropout.
gcn=GCN(layer_sizes=[16,16], activations=['relu', 'relu'], generator=generator, dropout=0.5)

# + colab={"base_uri": "https://localhost:8080/"} id="me9C8wVpnzGG" outputId="3020967f-7908-4871-fae4-778a950b6600"
# Keras input/output tensors of the GCN; x_out carries the node embeddings.
x_inp, x_out = gcn.in_out_tensors()
x_out

# + id="WvDS1hKuokr3"
# Softmax classification head over the one-hot subject classes.
predictions=layers.Dense(units=train_targets.shape[1], activation="softmax")(x_out)

# + colab={"base_uri": "https://localhost:8080/"} id="0pnHfFAGowQp" outputId="8df75cfc-5767-48c8-9fab-e5f1a20714cc"
# NOTE(review): `lr` is the legacy alias of `learning_rate` in
# tf.keras.optimizers.Adam and is removed in newer TensorFlow releases.
model=Model(inputs=x_inp, outputs=predictions)
model.compile(optimizer=optimizers.Adam(lr=0.01), loss=losses.categorical_crossentropy, metrics=["acc"])

# + id="2PH9s5MYpVwF"
val_gen = generator.flow(val_subjects.index, val_targets)

# + id="ofR04WyWpdUh"
from tensorflow.keras.callbacks import EarlyStopping

# + id="vF4xpZoPpudK"
# Stop training once validation accuracy has not improved for 50 epochs,
# restoring the best weights seen so far.
os_callback = EarlyStopping(monitor="val_acc", patience=50, restore_best_weights=True)

# + colab={"base_uri": "https://localhost:8080/"} id="JDy63d67p-lT" outputId="5486b108-b844-44ca-db76-89f99f2a6b39"
# shuffle=False: training is full-batch, so there is nothing to shuffle.
history = model.fit(train_gen, epochs=200, validation_data=val_gen, verbose=2, shuffle=False, callbacks=[os_callback])

# + colab={"base_uri": "https://localhost:8080/", "height": 585} id="Rzp8ge_qsvSB" outputId="e4f77009-9d31-4e2f-ffe0-02258c84c992"
sg.utils.plot_history(history)

# + id="drMynHt2sv_S"
test_gen=generator.flow(test_subjects.index, test_targets)

# + id="yU_LCVaGsyJU"
# Generator over every node in the graph, used for whole-graph prediction.
all_nodes=node_subjects.index
all_gen=generator.flow(all_nodes)
all_predictions=model.predict(all_gen) # + id="iSO50jBHszlX" node_predictions=target_encoding.inverse_transform(all_predictions.squeeze()) # + colab={"base_uri": "https://localhost:8080/", "height": 676} id="Vgelh3Xts1Uh" outputId="97f8e840-6d99-475a-c1ea-497ed2794109" df=pd.DataFrame({"Predicted":node_predictions, "True":node_subjects}) df.head(20) # + colab={"base_uri": "https://localhost:8080/"} id="6m1TWxSBtC6v" outputId="a40268df-fb62-4e9f-e0e4-7fd2eafeda54" embedding_model=Model(inputs=x_inp, outputs=x_out) emb=embedding_model.predict(all_gen) emb.shape # + colab={"base_uri": "https://localhost:8080/"} id="ip3ed6UEv21g" outputId="7b68a775-3b1d-4963-a93c-ad710fd341c7" from sklearn.decomposition import PCA from sklearn.manifold import TSNE X=emb.squeeze(0) X.shape # + colab={"base_uri": "https://localhost:8080/"} id="h20mOTtGwcIG" outputId="1e5edfbc-6d64-4bb9-d71b-de43d4c2b336" transform = TSNE trans=transform(n_components=2) X_reduced=trans.fit_transform(X) X_reduced.shape # + colab={"base_uri": "https://localhost:8080/", "height": 484} id="5R1uiLc0xwRV" outputId="b90ce596-665f-4a71-df0b-ce714a7aa07b" fig, ax = plt.subplots(figsize=(7, 7)) ax.scatter( X_reduced[:, 0], X_reduced[:, 1], c=node_subjects.astype("category").cat.codes, cmap="jet", alpha=0.7, ) ax.set( aspect="equal", xlabel="$X_1$", ylabel="$X_2$", title=f"{transform.__name__} visualization of GCN embeddings for cora dataset", ) # + id="IeWUVX4iy2dA"
Topics of Deep Learning Lab/Lab-3/GNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ 
# \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
# $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
# $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
# $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
# $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $

# <font style="font-size:28px;" align="left"><b> <font color="blue"> Solutions for </font>Phase Kickback </b></font>
# <br>
# _prepared by <NAME>_
# <br><br>

# <a id="task1"></a>
# <h3> Task 1</h3>
#
# Create a quantum circuit with two qubits, say $ q[1] $ and $ q[0] $ in the reading order of Qiskit.
#
# We start in quantum state $ \ket{01} $:
# - set the state of $ q[1] $ to $ \ket{0} $, and
# - set the state of $ q[0] $ to $ \ket{1} $.
#
# Apply Hadamard to both qubits.
#
# Apply CNOT operator, where the controller qubit is $ q[1] $ and the target qubit is $ q[0] $.
#
# Apply Hadamard to both qubits.
#
# Measure the outcomes.

# <h3> Solution </h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

q = QuantumRegister(2,"q") # quantum register with 2 qubits
c = ClassicalRegister(2,"c") # classical register with 2 bits
qc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers

# the up qubit is in |0>

# set the down qubit to |1>
qc.x(q[0]) # apply x-gate (NOT operator)
qc.barrier()

# apply Hadamard to both qubits.
qc.h(q[0])
qc.h(q[1])

# apply CNOT operator, where the controller qubit is the up qubit and the target qubit is the down qubit.
# Phase kickback: the target q[0] is in |->, so the CNOT leaves it
# unchanged and instead flips the sign of the controller's |1> component.
qc.cx(1,0)

# apply Hadamard to both qubits.
qc.h(q[0])
qc.h(q[1])

# measure both qubits
qc.measure(q,c)

# draw the circuit in Qiskit reading order
display(qc.draw(output='mpl',reverse_bits=True))

# execute the circuit 100 times in the local simulator
# NOTE: due to the kickback, every shot should read '11'.
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
counts = job.result().get_counts(qc)
print(counts)
# -

# <a id="task2"></a>
# <h3> Task 2 </h3>
#
# Create a circuit with 7 qubits, say $ q[6],\ldots,q[0] $ in the reading order of Qiskit.
#
# Set the states of the top six qubits to $ \ket{0} $.
#
# Set the state of the bottom qubit to $ \ket{1} $.
#
# Apply Hadamard operators to all qubits.
#
# Apply CNOT operator ($q[1]$,$q[0]$)
# <br>
# Apply CNOT operator ($q[4]$,$q[0]$)
# <br>
# Apply CNOT operator ($q[5]$,$q[0]$)
#
# Apply Hadamard operators to all qubits.
#
# Measure all qubits.
#
# For each CNOT operator, is there a phase-kickback effect?

# <h3> Solution </h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# Create a circuit with 7 qubits.
q = QuantumRegister(7,"q") # quantum register with 7 qubits
c = ClassicalRegister(7) # classical register with 7 bits
qc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers

# the top six qubits are already in |0>

# set the bottom qubit to |1>
qc.x(0) # apply x-gate (NOT operator)

# define a barrier
qc.barrier()

# apply Hadamard to all qubits.
for i in range(7):
    qc.h(q[i])

# define a barrier
qc.barrier()

# apply CNOT operator (q[1],q[0])
# apply CNOT operator (q[4],q[0])
# apply CNOT operator (q[5],q[0])
# Each CNOT targets q[0], which is in |->, so each one kicks a phase back
# to its controller: q[1], q[4] and q[5] all experience the kickback.
qc.cx(q[1],q[0])
qc.cx(q[4],q[0])
qc.cx(q[5],q[0])

# define a barrier
qc.barrier()

# apply Hadamard to all qubits.
for i in range(7):
    qc.h(q[i])

# define a barrier
qc.barrier()

# measure all qubits
qc.measure(q,c)

# draw the circuit in Qiskit reading order
display(qc.draw(output='mpl',reverse_bits=True))

# execute the circuit 100 times in the local simulator
# NOTE: every shot should read '0110011' in Qiskit's c[6]..c[0] order --
# q[5], q[4], q[1] (kicked controllers) and q[0] measure 1, the rest 0.
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
counts = job.result().get_counts(qc)
print(counts)
quantum-with-qiskit/Q64_Phase_Kickback_Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from matplotlib import cm
import matplotlib.pylab as plt
import matplotlib.pyplot as mp
import numpy as np

plt.style.use("seaborn-dark")
# print(plt.style.available)
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns

sns.set()
# import matplotlib.pyplot as plt

# Loss values pre-computed on a (len(grid) x len(grid)) mesh of weight-space
# perturbations along two random directions.
loss_matrix = np.load("loss_matrix_DGM_li_0.2.npy")

step_size = 0.02
grid = np.arange(-0.2, 0.2 + step_size, step_size)

# + 3-D surface plot of the loss landscape
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(projection='3d')
mp.gcf().set_facecolor(np.ones(3) * 240/255)

# make data
xx = np.arange(-0.2, 0.2 + step_size, step_size)
yy = np.arange(-0.2, 0.2 + step_size, step_size)
x, y = np.meshgrid(xx, yy)

# The original code filled z with a double loop over grid, using np.argwhere
# to recover each index -- i.e. z[i, j] = loss_matrix[i, j] one element at a
# time (an O(n^3) identity copy).  A direct copy of the sampled window is
# equivalent and O(n^2).
z = np.array(loss_matrix[:len(grid), :len(grid)], dtype=float)

# plot the surface
ax.plot_surface(x, y, z, cmap = 'rainbow', label = 'loss lanscape', linewidth = 0.5)
plt.title('Loss landscape of DGM')
ax.set_xlabel('Direction 1', size = 12)
ax.set_ylabel('Direction 2', size = 12)
ax.set_zlabel('Loss', size = 12)
plt.savefig("Loss_landscape3D_of_DGM_ResNet_li0.2.eps", dpi = 120)
plt.show()
# -

# + 2-D filled-contour view of the same landscape
# set figure:
length = 10
width = 6
mp.figure(figsize = (10, 6))

# fill color
# mp.contourf(x, y, z, 100, cmap = mp.cm.hot)
cset = mp.contourf(x, y, z, 100, cmap = mp.cm.rainbow)
# or plt.contourf(X,Y,Z, cmap = 'rainbow')
contour = mp.contour(x, y, z, 8, colors = 'k')

# label
mp.clabel(contour, fontsize = 12, colors = 'k')
mp.xlabel('direction 1')
mp.ylabel('direction 2')

# show bar
mp.colorbar(cset)
plt.savefig("Loss_landscape2D_of_DGM_ResNet_li0.2.eps", dpi = 120)
mp.show()
# -
code/Results1D/visualization loss landscape/roughness index and landscape/large internal/DGM_li_0.2_loss_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Commented out IPython magic to ensure Python compatibility. # # %cd /content/ease/src/nltk import nltk # Commented out IPython magic to ensure Python compatibility. # # %cd /content/ease/ease ## IMPORTS ## from essay_set import EssaySet from feature_extractor import FeatureExtractor from predictor_set import PredictorSet from predictor_extractor import PredictorExtractor from sklearn.svm import SVR import pickle import pandas as pd import csv import numpy as np from sklearn.metrics import confusion_matrix import nltk #nltk.download('punkt') #nltk.download('averaged_perceptron_tagger') # The following 3 functions have been taken from <NAME>'s github repository # https://github.com/benhamner/Metrics def Cmatrix(rater_a, rater_b, min_rating=None, max_rating=None): """ Returns the confusion matrix between rater's ratings """ assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating = max(rater_a + rater_b) num_ratings = int(max_rating - min_rating + 1) conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)] for a, b in zip(rater_a, rater_b): conf_mat[a - min_rating][b - min_rating] += 1 return conf_mat def histogram(ratings, min_rating=None, max_rating=None): """ Returns the counts of each type of rating that a rater made """ if min_rating is None: min_rating = min(ratings) if max_rating is None: max_rating = max(ratings) num_ratings = int(max_rating - min_rating + 1) hist_ratings = [0 for x in range(num_ratings)] for r in ratings: hist_ratings[r - min_rating] += 1 return hist_ratings def quadratic_weighted_kappa(y, y_pred): """ Calculates the quadratic weighted kappa axquadratic_weighted_kappa calculates the quadratic weighted kappa value, which is a measure 
of inter-rater agreement between two raters that provide discrete numeric ratings. Potential values range from -1 (representing complete disagreement) to 1 (representing complete agreement). A kappa value of 0 is expected if all agreement is due to chance. quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b each correspond to a list of integer ratings. These lists must have the same length. The ratings should be integers, and it is assumed that they contain the complete range of possible ratings. quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating is the minimum possible rating, and max_rating is the maximum possible rating """ rater_a = y rater_b = y_pred min_rating=None max_rating=None rater_a = np.array(rater_a, dtype=int) rater_b = np.array(rater_b, dtype=int) assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(min(rater_a), min(rater_b)) if max_rating is None: max_rating = max(max(rater_a), max(rater_b)) conf_mat = Cmatrix(rater_a, rater_b, min_rating, max_rating) num_ratings = len(conf_mat) num_scored_items = float(len(rater_a)) hist_rater_a = histogram(rater_a, min_rating, max_rating) hist_rater_b = histogram(rater_b, min_rating, max_rating) numerator = 0.0 denominator = 0.0 for i in range(num_ratings): for j in range(num_ratings): expected_count = (hist_rater_a[i] * hist_rater_b[j] / num_scored_items) d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0) numerator += d * conf_mat[i][j] / num_scored_items denominator += d * expected_count / num_scored_items return (1.0 - numerator / denominator) train_set = pd.read_csv('/home/mehar/github/ease/ease/train_adv/prompt8/nocontractions_8_train_valid.csv', sep=',') x = train_set.to_numpy() tester = x.tolist() print(len(tester)) essaylist = [] scorelist = [] for i in range(0, len(tester)): z = tester[i] # print(z) # y = z[0].split(', ', 1) # # #print(y) scorelist.append(float(z[0])) essaylist.append(z[1]) train = EssaySet() print("Done1") for i in range(0, 
len(essaylist)): train.add_essay(essaylist[i], scorelist[i]) print("Done2") features=FeatureExtractor() features.initialize_dictionaries(train) X = features.gen_feats(train) print("features train", X) print("Done3") # + ## TESTING FEATURES ## test_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay8/fold_0/test.txt', sep='\t') x = test_set.to_numpy() tester = x.tolist() test_scorelist = [] test_essaylist = [] for i in range(0, len(tester)): z = tester[i] y = z[0].split(', ', 1) test_scorelist.append(float(y[0])) test_essaylist.append(y[1]) count = 0 test = EssaySet(essaytype="test") for i in range(0, len(test_essaylist)): test.add_essay(test_essaylist[i], test_scorelist[i]) Y = features.gen_feats(test) ## SCALING scaled_train = [] for i in range(0, len(scorelist)): scaled_train.append(float((np.clip((scorelist[i]), a_min=0, a_max=60)/60))) ## TRAINING & PREDICTING clf = SVR(C=1, gamma=0.00001,kernel='rbf') clf.fit(X, scaled_train) final = clf.predict(Y) ## INVERSE_SCALING finals = (np.clip(final,a_min=0,a_max=1)*60) finals_list = finals.tolist() ## QWK Score print("QWK", quadratic_weighted_kappa(test_scorelist,finals)) df_pred = pd.DataFrame(finals_list) df_org = pd.DataFrame(test_scorelist) # print(df_pred.head(5)) # print("org", df_org.head(5)) df_org["diff"] = abs(df_org[0] - df_pred[0]) mean_diff = df_org["diff"].mean() print(""+"{:.3f}".format(mean_diff)) df_pred.to_csv('adv_test_scores/prompt8/NoContractionsTest8.csv', index = False, header=None) # - # ### No Change # + train_set = pd.read_csv('/home/mehar/github/ease/ease/train_adv/prompt8/all_nochange_8_train_valid.csv', sep=',') x = train_set.to_numpy() tester = x.tolist() print(len(tester)) essaylist = [] scorelist = [] for i in range(0, len(tester)): z = tester[i] # print(z) # y = z[0].split(', ', 1) # # #print(y) scorelist.append(float(z[0])) essaylist.append(z[1]) train = EssaySet() print("Done1") for i in range(0, len(essaylist)): train.add_essay(essaylist[i], scorelist[i]) print("Done2") 
features=FeatureExtractor() features.initialize_dictionaries(train) X = features.gen_feats(train) print("features train", X) print("Done3") test_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay8/fold_0/test.txt', sep='\t') x = test_set.to_numpy() tester = x.tolist() test_scorelist = [] test_essaylist = [] for i in range(0, len(tester)): z = tester[i] y = z[0].split(', ', 1) test_scorelist.append(float(y[0])) test_essaylist.append(y[1]) count = 0 test = EssaySet(essaytype="test") for i in range(0, len(test_essaylist)): test.add_essay(test_essaylist[i], test_scorelist[i]) Y = features.gen_feats(test) ## SCALING scaled_train = [] for i in range(0, len(scorelist)): scaled_train.append(float((np.clip((scorelist[i]), a_min=0, a_max=60)/60))) ## TRAINING & PREDICTING clf = SVR(C=1, gamma=0.00001,kernel='rbf') clf.fit(X, scaled_train) final = clf.predict(Y) ## INVERSE_SCALING finals = (np.clip(final,a_min=0,a_max=1)*60) finals_list = finals.tolist() ## QWK Score print("QWK", quadratic_weighted_kappa(test_scorelist,finals)) df_pred = pd.DataFrame(finals_list) df_org = pd.DataFrame(test_scorelist) # print(df_pred.head(5)) # print("org", df_org.head(5)) df_org["diff"] = abs(df_org[0] - df_pred[0]) mean_diff = df_org["diff"].mean() print(""+"{:.3f}".format(mean_diff)) df_pred.to_csv('adv_test_scores/prompt8/ContractionsAndSynonymsTest8.csv', index = False, header=None) # - # # mixture - Disfluency, Incorrect Grammar, Shuffle # + train_set = pd.read_csv('/home/mehar/github/ease/ease/train_adv/prompt8/mixture_all_8_train_valid_reduce.csv', sep=',') x = train_set.to_numpy() tester = x.tolist() print(len(tester)) essaylist = [] scorelist = [] for i in range(0, len(tester)): z = tester[i] # print(z) # y = z[0].split(', ', 1) # # #print(y) scorelist.append(float(z[0])) essaylist.append(z[1]) train = EssaySet() print("Done1") for i in range(0, len(essaylist)): train.add_essay(essaylist[i], scorelist[i]) print("Done2") features=FeatureExtractor() 
features.initialize_dictionaries(train) X = features.gen_feats(train) print("features train", X) print("Done3") test_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay8/fold_0/test.txt', sep='\t') x = test_set.to_numpy() tester = x.tolist() test_scorelist = [] test_essaylist = [] for i in range(0, len(tester)): z = tester[i] y = z[0].split(', ', 1) test_scorelist.append(float(y[0])) test_essaylist.append(y[1]) count = 0 test = EssaySet(essaytype="test") for i in range(0, len(test_essaylist)): test.add_essay(test_essaylist[i], test_scorelist[i]) Y = features.gen_feats(test) ## SCALING scaled_train = [] for i in range(0, len(scorelist)): scaled_train.append(float((np.clip((scorelist[i]), a_min=0, a_max=60)/60))) ## TRAINING & PREDICTING clf = SVR(C=1, gamma=0.00001,kernel='rbf') clf.fit(X, scaled_train) final = clf.predict(Y) ## INVERSE_SCALING finals = (np.clip(final,a_min=0,a_max=1)*60) finals_list = finals.tolist() ## QWK Score print("QWK", quadratic_weighted_kappa(test_scorelist,finals)) df_pred = pd.DataFrame(finals_list) df_org = pd.DataFrame(test_scorelist) # print(df_pred.head(5)) # print("org", df_org.head(5)) df_org["diff"] = abs(df_org[0] - df_pred[0]) mean_diff = df_org["diff"].mean() print(""+"{:.3f}".format(mean_diff)) df_pred.to_csv('adv_test_scores/prompt8/MixtureTest8.csv', index = False, header=None) # - # # mixture - Incorrect Grammar, Shuffle; No Disfluency # + train_set = pd.read_csv('/home/mehar/github/ease/ease/train_adv/prompt8/noDisfluency_8_train_valid_reduce.csv', sep=',') x = train_set.to_numpy() tester = x.tolist() print(len(tester)) essaylist = [] scorelist = [] for i in range(0, len(tester)): z = tester[i] # print(z) # y = z[0].split(', ', 1) # # #print(y) scorelist.append(float(z[0])) essaylist.append(z[1]) train = EssaySet() print("Done1") for i in range(0, len(essaylist)): train.add_essay(essaylist[i], scorelist[i]) print("Done2") features=FeatureExtractor() features.initialize_dictionaries(train) X = features.gen_feats(train) 
print("features train", X)
print("Done3")

# Held-out test fold: each row is "score, essay" in a single tab-delimited column.
test_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay8/fold_0/test.txt', sep='\t')
x = test_set.to_numpy()
tester = x.tolist()
test_scorelist = []
test_essaylist = []
for i in range(0, len(tester)):
    z = tester[i]
    y = z[0].split(', ', 1)
    test_scorelist.append(float(y[0]))
    test_essaylist.append(y[1])

count = 0
test = EssaySet(essaytype="test")
for i in range(0, len(test_essaylist)):
    test.add_essay(test_essaylist[i], test_scorelist[i])
Y = features.gen_feats(test)

## SCALING -- map raw scores [0, 60] into [0, 1] for the SVR
scaled_train = []
for i in range(0, len(scorelist)):
    scaled_train.append(float((np.clip((scorelist[i]), a_min=0, a_max=60)/60)))

## TRAINING & PREDICTING
clf = SVR(C=1, gamma=0.00001, kernel='rbf')
clf.fit(X, scaled_train)
final = clf.predict(Y)

## INVERSE_SCALING
# BUG FIX: this cell multiplied by 0 instead of 60, which zeroed every
# prediction (and made the QWK/mean-diff below meaningless).  Every sibling
# cell in this notebook uses * 60 to undo the /60 scaling applied above.
finals = (np.clip(final, a_min=0, a_max=1)*60)
finals_list = finals.tolist()

## QWK Score
print("QWK", quadratic_weighted_kappa(test_scorelist, finals))
df_pred = pd.DataFrame(finals_list)
df_org = pd.DataFrame(test_scorelist)
# print(df_pred.head(5))
# print("org", df_org.head(5))
df_org["diff"] = abs(df_org[0] - df_pred[0])
mean_diff = df_org["diff"].mean()
print(""+"{:.3f}".format(mean_diff))
df_pred.to_csv('adv_test_scores/prompt8/MixtureNoDisfluencyTest8.csv', index = False, header=None)
# -

# # mixture - Shuffle; No grammar, disfluency

# +
# Same pipeline, trained on the shuffle-only adversarial training set.
train_set = pd.read_csv('/home/mehar/github/ease/ease/train_adv/prompt8/noDisfluency&grammar_8_train_valid_reduce.csv', sep=',')
x = train_set.to_numpy()
tester = x.tolist()
print(len(tester))

essaylist = []
scorelist = []
for i in range(0, len(tester)):
    z = tester[i]
    # print(z)
    # y = z[0].split(', ', 1)
    # #print(y)
    scorelist.append(float(z[0]))
    essaylist.append(z[1])

train = EssaySet()
print("Done1")
for i in range(0, len(essaylist)):
    train.add_essay(essaylist[i], scorelist[i])
print("Done2")

features = FeatureExtractor()
features.initialize_dictionaries(train)
X = features.gen_feats(train)
print("features train", X)
print("Done3")

test_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay8/fold_0/test.txt', sep='\t')
x = test_set.to_numpy()
tester = x.tolist()
test_scorelist = []
test_essaylist = []
for i in range(0, len(tester)):
    z = tester[i]
    y = z[0].split(', ', 1)
    test_scorelist.append(float(y[0]))
    test_essaylist.append(y[1])

count = 0
test = EssaySet(essaytype="test")
for i in range(0, len(test_essaylist)):
    test.add_essay(test_essaylist[i], test_scorelist[i])
Y = features.gen_feats(test)

## SCALING
scaled_train = []
for i in range(0, len(scorelist)):
    scaled_train.append(float((np.clip((scorelist[i]), a_min=0, a_max=60)/60)))

## TRAINING & PREDICTING
clf = SVR(C=1, gamma=0.00001, kernel='rbf')
clf.fit(X, scaled_train)
final = clf.predict(Y)

## INVERSE_SCALING
finals = (np.clip(final, a_min=0, a_max=1)*60)
finals_list = finals.tolist()

## QWK Score
print("QWK", quadratic_weighted_kappa(test_scorelist, finals))
df_pred = pd.DataFrame(finals_list)
df_org = pd.DataFrame(test_scorelist)
# print(df_pred.head(5))
# print("org", df_org.head(5))
df_org["diff"] = abs(df_org[0] - df_pred[0])
mean_diff = df_org["diff"].mean()
print(""+"{:.3f}".format(mean_diff))
df_pred.to_csv('adv_test_scores/prompt8/MixtureNoDisfluencyNoGrammarTest8.csv', index = False, header=None)
# -

finals
Model2-EASE/ease/Adv Samples in Training Data, test scores, adv testcases scores.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # How to use pythreejs to plot a superellipsoid

# A superellipsoid is given by a parametric function and the equation is very similar to an ellipse equation. We only have different exponents which give us different shapes. For more information: https://en.wikipedia.org/wiki/Superellipsoid.
#
# The idea of this example is to construct the mesh of the square $[0, 1]\times[0,1]$ and to do a projection of these points on the superellipse which is the 2D shape and then to do a spherical product to have the 3D shape.

# +
import numpy as np

n = 10  # number of discretisation points for the square in each direction

# Boundary of the square [-1, 1]^2, traced edge by edge (corner points are
# shared between edges, hence the n-2 / n-1 segment lengths).
x_box = np.concatenate((np.linspace(-1, 1., n),
                        np.ones(n-2),
                        np.linspace(1, -1., n),
                        -np.ones(n-2)))
y_box = np.concatenate((-np.ones(n-1),
                        np.linspace(-1, 1., n),
                        np.ones(n-2),
                        np.linspace(1, -1., n-1, endpoint=False)))

nx_box = x_box.size
# One 3-D coordinate per pair of boundary points; filled in by superellipsoid().
coords = np.empty((nx_box**2, 3))


def superellipse(rx, ry, m):
    """Project the unit-square boundary (x_box, y_box) onto a superellipse.

    Parameters
    ----------
    rx : the radius in the x direction
    ry : the radius in the y direction
    m : the exponent of the superellipse

    Output
    ------
    the coordinates of the superellipse (two arrays: x and y components)
    """
    # Shrink factor pulls each square-boundary point onto the superellipse.
    exponent = 2. / m
    shrink_for_x = (1. - .5 * np.abs(y_box) ** exponent) ** (m / 2.)
    shrink_for_y = (1. - .5 * np.abs(x_box) ** exponent) ** (m / 2.)
    return x_box * rx * shrink_for_x, y_box * ry * shrink_for_y
# -

def superellipsoid(rx, ry, rz, m1, m2):
    """ superellipsoid formula with the spherical product
    of two superellipse and update of the global coords array

    Parameters
    ----------
    rx : the radius in the x direction
    ry : the radius in the y direction
    rz : the radius in the z direction
    m1 : the exponent of the first superellipse
    m2 : the exponent of the second superellipse
    """
    # Two unit-radius superellipse profiles; the radii rx/ry/rz are applied
    # afterwards when filling coords.
    gx, gy = superellipse(1, 1, m2)
    hx, hy = superellipse(1, 1, m1)
    # Spherical product of the two profiles: outer products, flattened into
    # the module-level coords array (mutated in place, not returned).
    coords[:, 0] = rx*(gx[np.newaxis, :]*hx[:, np.newaxis]).flatten()
    coords[:, 1] = ry*(gx[np.newaxis, :]*hy[:, np.newaxis]).flatten()
    coords[:, 2] = rz*(gy[np.newaxis, :]*np.ones(hx.size)[:, np.newaxis]).flatten()

# +
# superellipsoid parameters
rx = ry = rz = 1.
m1 = m2 = 1.

superellipsoid(rx, ry, rz, m1, m2)
# -

# We construct the triangulation by using the ConvexHull function in scipy.

# +
import scipy.spatial as spatial

cvx = spatial.ConvexHull(coords)

# +
from pythreejs import *
from IPython.display import display

# Triangulated superellipsoid rendered as a green wireframe mesh.
surf_g = PlainGeometry(vertices=coords.tolist(), faces=cvx.simplices.tolist())
surf = Mesh(geometry=surf_g, material=BasicMaterial(color='green', wireframe=True))
scene = Scene(children=[surf, AmbientLight(color='#777777')])
c = PerspectiveCamera(position=[2, 2, 3], up=[0, 0, 1],
                      children=[DirectionalLight(color='white', position=[3, 5, 1], intensity=0.6)])
renderer = Renderer(camera=c, scene=scene, controls=[OrbitControls(controlling=c)])
display(renderer)

# +
from ipywidgets import FloatSlider, HBox, VBox

# Vertical sliders for the two superellipse exponents.
m1_slider, m2_slider = (FloatSlider(description='m1', min=0.01, max=4.0,
                                    step=0.01, value=m1,
                                    continuous_update=False, orientation='vertical'),
                        FloatSlider(description='m2', min=0.01, max=4.0,
                                    step=0.01, value=m2,
                                    continuous_update=False, orientation='vertical'))
# -

# Horizontal sliders for the three radii.
rx_slider, ry_slider, rz_slider = (FloatSlider(description='rx', min=0.01, max=10.0,
                                               step=0.01, value=rx,
                                               continuous_update=False,
                                               orientation='horizontal'),
                                   FloatSlider(description='ry', min=0.01, max=10.0,
                                               step=0.01, value=ry,
                                               continuous_update=False,
                                               orientation='horizontal'),
                                   FloatSlider(description='rz', min=0.01, max=10.0,
                                               step=0.01, value=rz,
                                               continuous_update=False,
                                               orientation='horizontal'))

# +
def update(change):
    # Recompute the global coords for the current slider values and push the
    # new vertices into the pythreejs geometry, so the renderer updates live.
    superellipsoid(rx_slider.value, ry_slider.value, rz_slider.value,
                   m1_slider.value, m2_slider.value)
    surf_g.vertices = coords.tolist()

m1_slider.observe(update, names=['value'])
m2_slider.observe(update, names=['value'])
rx_slider.observe(update, names=['value'])
ry_slider.observe(update, names=['value'])
rz_slider.observe(update, names=['value'])
# -

VBox([HBox([renderer, m1_slider, m2_slider]), rx_slider, ry_slider, rz_slider])
examples/superellipsoid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # 创建带箭头的supercell nodes, edges = [], [] # ## 创建 nodes 和 edges # ### top 位相关 from catplot.grid_components.nodes import Node2D from catplot.grid_components.edges import Edge2D top = Node2D([0.0, 0.0], size=800, color="#2A6A9C") t1 = Node2D([0.0, 1.0]) t2 = Node2D([1.0, 0.0]) nodes.append(top) e1 = Edge2D(top, t1, width=8) e2 = Edge2D(top, t2, width=8) edges.extend([e1, e2]) # ### bridge 相关 bridge1 = Node2D([0.0, 0.5], style="s", size=600, color="#5A5A5A", alpha=0.6) bridge2 = Node2D([0.5, 0.0], style="s", size=600, color="#5A5A5A", alpha=0.6) b1 = bridge1.clone([0.5, 0.5]) b2 = bridge2.clone([0.5, 0.5]) nodes.extend([bridge1, bridge2]) e1 = Edge2D(bridge1, b1) e2 = Edge2D(bridge1, bridge2) e3 = Edge2D(bridge2, b2) e4 = Edge2D(b1, b2) edges.extend([e1, e2, e3, e4]) # ### hollow 位相关 h = Node2D([0.5, 0.5], style="h", size=700, color="#5A5A5A", alpha=0.3) nodes.append(h) # ### 创建箭头 from catplot.grid_components.edges import Arrow2D top_bri_1 = Arrow2D(top, bridge1, alpha=0.6, color="#ffffff", zorder=3) top_bri_2 = Arrow2D(top, bridge2, alpha=0.6, color="#ffffff", zorder=3) top_hollow = Arrow2D(top, h, alpha=0.6, color="#000000", zorder=3) arrows = [top_bri_1, top_bri_2, top_hollow] # ## 绘制 from catplot.grid_components.grid_canvas import Grid2DCanvas canvas = Grid2DCanvas() # ## 创建supercell from catplot.grid_components.supercell import SuperCell2D supercell = SuperCell2D(nodes, edges, arrows) canvas.add_supercell(supercell) canvas.draw() canvas.figure # ## 扩展supercell expanded_supercell = supercell.expand(4, 4) canvas_big = Grid2DCanvas(figsize=(30, 20), dpi=60) canvas_big.add_supercell(expanded_supercell) canvas_big.draw() canvas_big.figure
examples/grid_2d_examples/supercell_with_arrows.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tensorflow MNIST Model # # * Wrap a Tensorflow MNIST python model for use as a prediction microservice in seldon-core # # * Run locally on Docker to test # * Deploy on seldon-core running on minikube # # ## Dependencies # # * [Helm](https://github.com/kubernetes/helm) # * [Minikube](https://github.com/kubernetes/minikube) # * [S2I](https://github.com/openshift/source-to-image) # # ```bash # pip install seldon-core # ``` # # ## Train locally # # + from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot = True) import tensorflow as tf if __name__ == '__main__': x = tf.placeholder(tf.float32, [None,784], name="x") W = tf.Variable(tf.zeros([784,10])) b = tf.Variable(tf.zeros([10])) y = tf.nn.softmax(tf.matmul(x,W) + b, name="y") y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) init = tf.initialize_all_variables() sess = tf.Session() sess.run(init) for i in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels})) saver = tf.train.Saver() saver.save(sess, "model/deep_mnist_model") # - # Wrap model using s2i # !s2i build . 
seldonio/seldon-core-s2i-python37:0.13 deep-mnist:0.1 # !docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1 # Send some random features that conform to the contract # !seldon-core-tester contract.json 0.0.0.0 5000 -p # !docker rm mnist_predictor --force # ## Test using Minikube # # **Due to a [minikube/s2i issue](https://github.com/SeldonIO/seldon-core/issues/253) you will need [s2i >= 1.1.13](https://github.com/openshift/source-to-image/releases/tag/v1.1.13)** # !minikube start --memory 4096 # !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default # !helm init # !kubectl rollout status deploy/tiller-deploy -n kube-system # !helm install ../../../helm-charts/seldon-core-operator --name seldon-core --set usageMetrics.enabled=true --namespace seldon-system # !kubectl rollout status deploy/seldon-controller-manager -n seldon-system # ## Setup Ingress # Please note: There are reported gRPC issues with ambassador (see https://github.com/SeldonIO/seldon-core/issues/473). # !helm install stable/ambassador --name ambassador --set crds.keep=false # !kubectl rollout status deployment.apps/ambassador # ## Wrap Model and Test # !eval $(minikube docker-env) && s2i build . seldonio/seldon-core-s2i-python37:0.13 deep-mnist:0.1 # !kubectl create -f deep_mnist.json # !kubectl rollout status deploy/deep-mnist-single-model-8969cc0 # !seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \ # deep-mnist --namespace default -p # !minikube delete
examples/models/deep_mnist/deep_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a href='https://www.learntocodeonline.com/'><img src="../IMGs/learn-to-code-online.png"></a>

# Let's talk about using files in your code!
#
# There are basic methods and functions to manipulate files with. We'll cover the basics here.
#
# # Opening Files
#
# Before you can read or write to a file, you must first **open** it: `open()`
#
# This will create a **file** object.
#
# ```python
# file object = open(file_name [, access_mode][, buffering])
# ```
#
# **file_name**
# A string value that contains the name of the file that you want to access. (It can include the full path on your local system as well.)
#
# **access_mode**
# Determines the mode in which to open the file. This is an _optional_ parameter, and the default access mode is read (`r`).
#
# | Modes | Description |
# |----------------- | --------------------------------------------- |
# | r | Opens a file for **reading only**. The file pointer is placed at the beginning of the file. _(default mode)_ |
# | r+ | Opens a file for **both reading & writing**. File pointer is placed at the beginning of the file. |
# | w | Opens a file for **writing only** ... Overwrites if the file exists, otherwise creates a new file. |
# | w+ | Opens a file for **both writing & reading** ... Overwrites if the file exists, otherwise creates a new file for writing and reading. |
# | a | Opens a file for **appending** with the pointer at the end of the file, if it exists. If file does not exist, it creates a new file for writing. |
# | a+ | Opens a file for **both appending and reading** with the pointer at the end, if the file exists. If file does not exist, a new one is created for reading/writing. |
#
# **buffering**
# - If set to 0, then no buffering takes place.
# - If set to 1, line buffering is performed while accessing a file. # - If > 1, buffering is performed with the indicated buffering size. # - If < 0, buffer size is the system default. # # Closing Files # # `fileObject.close()` # # Ensure that before you close your file that you have saved to it what you need ... because when you call it? Here's what happens: # # - It flushes any unwritten information. # - It closes the file object (no more reading/writing can be done). # - It is best practice (and efficient coding) to use the `close()` method to close a file. # # [This Stack Overflow response](https://stackoverflow.com/a/17459915/10474024) provides some great additional details. # # ## Examples # # Here are a couple of examples. # # ### Example - Vulnerable Code # # This code is vulnerable because without the 3rd line, you could run into file access issues. # # <div class="alert alert-warning"> # # ```python # fo = open('foo.txt', 'wb') # print("Name of the file: {}".format(fo.name)) # fo.close() # ``` # </div> # ### Example - Best Practice # # Why do you think [**with()**](https://docs.python.org/3/reference/compound_stmts.html#with) is best practice? (Hint is in the Stack Overflow link above!) # # _Calling `file_obj.write()` without using the with keyword or calling `file_obj.close()` might result in the arguments of `file_obj.write()` not being completely written to the disk, even if the program exits successfully._ # # <div class="alert alert-warning"> # # ```python # with open('foo.txt', 'w+') as file_obj: # file_obj.write('some stuff') # ``` # </div> # <div class="alert alert-success"> # Be sure to check your file! Did it change? # </div> # ## Additional Best Practice Suggestions # # If you're not using the **with** statement, you should be using try/except/finally block. # # The **close()** method should be in the finally block of your try-except. 
# # Learn more about file object methods [here](https://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects). # # Reading Files # # `fileobject.read([count])` # # This function of the file object allows you to read a string from an open file. There is an optional byte **count** where if empty usually reads until the end of the file. # # <div class="alert alert-success"> # <b>Try this!</b> # # ```python # fo = open('foo.txt', 'r+') # fo.seek(2) # str_info = fo.read(8) # print("Read string is: {}".format(str_info)) # fo.close() # ``` # </div> # # Writing To Files # # `fileObject.write(some_str)` # # This **[write()](https://docs.python.org/3/library/io.html#io.BufferedIOBase.write)** function writes any string to an open file. # # It can be binary OR text - it just depends on how it's opened: # - `rb` for binary # - `rt` for text # Keep in mind that this does NOT add a new line character. You need to add it in. # # <div class="alert alert-success"> # <b>Try this!</b> # # ```python # fo = open('foo.txt', 'w') # fo.write("Python is a great language.\nYeah it's great!\n") # fo.close() # ``` # </div> # # File Positions # # When you open a file, you have the ability to determine and/or move to different locations in your file. # # `tell()` - this provides the current position in the file. The next read/write will occur at that many bytes from the beginning of your file. # # `seek(offset[, from])` - this changes the current file position. # # **offset** is an argument that indicates the number of bytes to be moved # # # **from** is an argument that specifies the reference position from where the bytes are to be moved # # # - If **from** is set to 0, then the reference position is at the beginning of the file. # - If **from** is set to 1, then it uses the current position as the reference point. # - If **from** is set to 2, then the end of the file is the reference point. 
# <div class="alert alert-success"> # <b>Try this!</b> # # ```python # # open a file # fo = open('foo.txt', 'r+') # temp_str = fo.read(10) # print("Read string is: {}".format(temp_str)) # # # check current position # pos = fo.tell() # print('Current file position: {}'.format(pos)) # # # Reposition pointer at the beginning again # pos = fo.seek(0, 0) # temp_str = fo.read(10) # print("Again read string is: {}".format(temp_str)) # # close opened file # fo.close() # ``` # </div> # # Renaming & Deleting Files # # In order to do this, you need to import the **os** module. This module provides methods to perform file processing operations. # # ## Renaming Files # # [`rename()`](https://docs.python.org/3/library/os.html#os.rename) requires two arguments: `os.rename(curr_file_name, new_file_name)` # # ```python # import os # os.rename('test1.txt', 'test2.txt') # ``` # # ## Removing Files # # [`remove()`](https://docs.python.org/3/library/os.html#os.remove) deletes files by supplying the name to be deleted: `os.remove(file_name)` # # ```python # import os # os.remove('text2.txt') # ```
Week_3/Python_Basics_17_-_Input_&_Output.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import os.path
import csv
from itertools import islice

# +
path = 'Tidal-Predictions-Annual.txt'
header_lines = 20


def load_tidal_predictions(path, header_lines):
    """Load tidal predictions from an annual NOAA tide-prediction text file.

    The user must specify the number of header lines in the document
    (for the 2016 file, this number is 20).

    Parameters
    ----------
    path : str
        Path to the tab-delimited annual predictions file.
    header_lines : int
        Number of leading header lines to skip.

    Returns
    -------
    dict
        Maps each date string (column 0) to a list of
        ``[time, elevation_cm, 'H'/'L']`` entries taken from
        columns 2, 5 and 7 of each row.
    """
    tidal_predictions = {}
    with open(path, 'r') as f:
        reader = csv.reader(f, delimiter='\t')
        for _ in range(header_lines):
            next(reader)
        for row in reader:
            if not row:
                continue  # tolerate blank/trailing lines
            # Group row by the date in column 0.  The previous pairwise
            # implementation consumed rows two at a time, which mis-grouped
            # days with an odd number of tides, could raise StopIteration on
            # an odd total row count, and silently dropped the final day's
            # tides (they were never written into the dictionary).
            tidal_predictions.setdefault(row[0], []).append(
                [row[2], float(row[5]), row[7]]
            )
    return tidal_predictions
# -

if __name__ == '__main__':
    # Notebook scratch cells, guarded so the module is importable even when
    # the data file is absent.  The original scratch statements referencing
    # the function-local names date1/date2 at module level raised NameError
    # and were removed.
    if os.path.exists(path):
        print(load_tidal_predictions(path, header_lines))
    a = [1, 2, 3]
    b = [4, 5, 6]
    c = [a, b]
    c.append(a)
    print(c)
ARTEMIS/.ipynb_checkpoints/tidal-predictions-test-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Benchmark notebook: solves randomly generated dense linear programs with
# CPLEX (classical reference), D-Wave's Leap hybrid sampler and a D-Wave
# quantum sampler, writing per-problem results and logs into RES_DIR.

# +
from collections import OrderedDict
import time
import os
import logging

from qiskit.optimization.algorithms import CplexOptimizer
from qiskit.optimization.algorithms.optimization_algorithm import OptimizationResultStatus

from dwave.system import DWaveCliqueSampler, LeapHybridSampler

from random_lp.lp_random_gen import create_models
from utilities.helpers import create_dwave_meo
# -

# Input/output directories are date-stamped so reruns on different days do
# not overwrite earlier results.
TEST_DIR = 'TEST_DATA' + "/" + time.strftime("%d_%m_%Y") + "/DENSE/"
RES_DIR = 'RESULTS' + "/" + time.strftime("%d_%m_%Y") + "/DENSE/"
os.makedirs(RES_DIR, exist_ok=True)

# Bare expression: notebook display cell showing the input directory.
TEST_DIR

# Root logger; a per-problem file handler is attached inside the loop below.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Load the previously generated quadratic programs from TEST_DIR.
qps = create_models(TEST_DIR)

# init Optimizers
cplex = CplexOptimizer()                        # classical reference solver
hybrid = create_dwave_meo(LeapHybridSampler())  # Leap hybrid (cloud) sampler
clique = create_dwave_meo(DWaveCliqueSampler()) # direct QPU with clique embedding
dwave_auto = create_dwave_meo()                 # default sampler selection

# +
# NOTE(review): `results` is never written to below — presumably vestigial.
results = OrderedDict()
for qp_name, qp in qps.items() :
    print(qp_name)
    print("number of qubits: ", qp.qubo.get_num_vars())
    # Mirror all log output for this problem into its own .log file.
    output_file_handler = logging.FileHandler(filename=RES_DIR + qp.name + ".log")
    logger.addHandler(output_file_handler)
    # Fall back to the auto-selected sampler when the problem exceeds the
    # largest clique the QPU topology can embed directly.
    if qp.qubo.get_num_vars() > clique.min_eigen_solver.sampler.largest_clique_size:
        dwave = dwave_auto
    else:
        dwave = clique
    with open(RES_DIR + qp.name + '.res', 'w') as file:
        file.write(str("Start " + qp.name + "\n " + str(qp.qubo.get_num_vars()) + " qubits needed"))
        file.write("\n Leap: \n")
        logger.info("\n Leap: \n ")
        # Solve classically first so both quantum results can be compared
        # against the CPLEX optimum.
        res_classic = cplex.solve(qp)
        res_hybrid = hybrid.solve(qp)
        logger.info(str(res_hybrid.min_eigen_solver_result.sampleset.info))
        if res_hybrid.status != OptimizationResultStatus.SUCCESS:
            file.write("No solution found with DWave Hybrid Sampler Leap.")
            file.write("\n CPLEX:\n")
            file.write(str(res_classic))
            # NOTE(review): `break` aborts the WHOLE benchmark loop on the
            # first Leap failure, skipping all remaining problems — confirm
            # this is intended rather than `continue`.
            break
        else:
            print("Leap successful!")
            if res_hybrid.fval == res_classic.fval:
                file.write("\n Leap found optimal solution\n")
            else:
                print("\n optimal value Leap "+str(res_hybrid.fval) + " , cplex:"+ str(res_classic.fval))
            file.write("\n Leap: \n")
            file.write(str(res_hybrid))
            file.write("\n CPLEX:\n")
            file.write(str(res_classic))
        file.write("\n DWave Quantum: \n")
        logger.info("\n DWave Quantum: \n ")
        # The QPU call can fail for many reasons (embedding, connectivity,
        # service errors); record the exception instead of aborting the run.
        try:
            res_quantum = dwave.solve(qp)
            problem_id = str(res_quantum.min_eigen_solver_result.sampleset.info['problem_id'])
            logger.info("\n problem id " + problem_id)
            file.write("\n problem id: " + problem_id)
            if res_quantum.status != OptimizationResultStatus.SUCCESS:
                print("Dwave quantum solver found no solution.")
                file.write("\n No solution found with DWave Quantum Solver \n")
            else:
                print("Dwave Quantum successful!")
                if res_quantum.fval == res_classic.fval:
                    file.write("\n DWave quantum solver found optimal solution\n")
                else:
                    print("\n optimal value Dwave quantum "+str(res_quantum.fval) + " , cplex:"+ str(res_classic.fval))
                file.write("\n DWave quantum solver:\n")
                file.write(str(res_quantum))
                file.write("\n CPLEX:\n")
                file.write(str(res_classic))
        except Exception as ex:
            print(qp_name, " ", type(ex).__name__, " : ", ex)
            file.write("\n DWave Solver produced an exception:\n")
            file.write(str(ex))
    # Detach this problem's log file so the next iteration logs separately.
    logger.removeHandler(output_file_handler)
# see Class SubstituteVariables in https://qiskit.org/documentation/_modules/qiskit/optimization/problems/quadratic_program.html#QuadraticProgram
# for logger warning constraint A0_leb0 is infeasible due to substitution
# Afterwards a check for feasability of substituted solution is done to set OptimizationResultStatus
# -

# Notebook display cell: inspect the last hybrid run's sampleset.
res_hybrid.min_eigen_solver_result.sampleset
comparison/Ocean/LinearProgramming/DWave_Qiskit_Plugin_Test/RandomLP/DWave_LP_Dense_Bin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Improvements to Consistent Hashing
#
# Normally, [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing) is a little expensive, because each node needs the whole set of keys to know which subset it should be working with.
#
# But with a little ingenuity in key design, we can enable a pattern that allows each node to only query the work it needs to do!
#
# ## How Consistent Hashing Works
#
# Consistent hashing works by effectively splitting up a ring into multiple parts, and assigning each node a (more or less) equal share.
#
# It does this by having each node put the same number of dots on a circle:

# +
import math
from collections import namedtuple  # was missing: namedtuple was used before any import

# A marker on the ring: its angular position and the node that placed it.
PointNode = namedtuple("PointNode", ["point", "node"])

POINTS_BY_NODE = [
    PointNode(0, "a"),
    PointNode(math.pi / 2, "b"),
    PointNode(math.pi, "c"),
    PointNode(math.pi * 3 / 2, "d"),
]
# -

# Effectively enabling buckets in between the points. In the example above, a
# point is owned by the first marker at or after it on the ring, wrapping
# around past the last marker back to the first:

# +
import bisect


def get_node_for_point(node_by_point, point):
    """
    given the node_by_point (a sorted list of PointNode), return the node
    that the point belongs to: the node of the first marker at or after
    the point, wrapping around the ring when past the last marker.
    """
    probe = PointNode(point, "_")
    index = bisect.bisect_right(node_by_point, probe)
    if index == len(node_by_point):
        # Past the last marker: wrap around to the FIRST marker's node.
        # (The original used index = -1, i.e. the last marker's node, which
        # contradicted get_ranges_by_node below, where the segment after the
        # last marker belongs to the first marker's node.)
        index = 0
    return node_by_point[index].node


get_node_for_point(POINTS_BY_NODE, math.pi * 7 / 4)
# -

# We can construct our own ring from any arbitrary set of nodes, as long as we have a way to uniquely name one node versus another:

# +
import bisect
import hashlib
import math
import pprint

from collections import namedtuple

LENGTH = 2 * math.pi  # circumference of the ring

PointNode = namedtuple("PointNode", ["point", "node"])


def _stable_hash(value):
    """Deterministic 128-bit integer hash of str(value).

    The builtin hash() is randomized per process (PYTHONHASHSEED), so two
    processes would place the same node at different ring positions —
    defeating the whole point of *consistent* hashing.  md5 is stable
    everywhere (and is used here for distribution, not security).
    """
    return int(hashlib.md5(str(value).encode("utf-8")).hexdigest(), 16)


def _calculate_point_for_node(node, point_num):
    """ return back the point for the node, between 0 and 2 * PI """
    return _stable_hash(node + str(point_num)) % LENGTH


def points_for_node(node, num_points):
    """Ring positions for one node's num_points markers."""
    return [_calculate_point_for_node(node, i) for i in range(num_points)]


def get_node_by_point(node_names, num_points):
    """ return a list of PointNode (point, node), ordered by point """
    point_by_node = [
        PointNode(p, n)
        for n in node_names
        for p in points_for_node(n, num_points)
    ]
    point_by_node.sort()
    return point_by_node


node_by_point = get_node_by_point(["a", "b", "c", "d"], 4)
get_node_for_point(node_by_point, 2)
# -

# ## Bucketing the Points without all the keys
#
# Normally, consistent hashing requires the one executing the algorithm to be aware of two sets of data:
#
# 1. the identifiers of all the nodes in the cluster
# 2. the set of keys to assign.
#
# This is because the standard algorithm runs through the list of all keys, and assigns them:

# +
def assign_nodes(node_by_point, items):
    """Bucket every item under the node that owns its ring position."""
    key_by_bucket = {}
    for item in items:
        position = _stable_hash(item) % LENGTH
        owner = get_node_for_point(node_by_point, position)
        key_by_bucket.setdefault(owner, []).append(item)
    return key_by_bucket


items = list(range(40))
assign_nodes(node_by_point, items)
# -

# (note the lack of even distribution here: as a pseudorandom algorithm, you will end up with some minor uneven distribution. We'll talk about that later.)
#
# But getting all keys can be inefficient for larger data sets. What happens when we want to consistently hash against a data set of 1 million points?
#
# Consistent hashing requires every node to have the full set of keys. But what if each node could just query for the data that's important to it?
#
# There is a way to know what those are. Given all the nodes, we can calculate which ranges each node is responsible for:

# +
def get_ranges_by_node(node_by_point):
    """
    return a Dict[node, List[Tuple[lower_bound, upper_bound]]] for the
    raw nodes by point
    """
    range_by_node = {}
    previous_point = 0
    for point, node in node_by_point:
        # The segment ending at this marker belongs to this marker's node.
        range_by_node.setdefault(node, []).append((previous_point, point))
        previous_point = point
    # we close the loop with one last range to the end of the ring, which
    # wraps around to the first marker's node
    first_node = node_by_point[0].node
    range_by_node[first_node].append((previous_point, LENGTH))
    return range_by_node


get_ranges_by_node(node_by_point)
# -

# Now we have the ranges this node is responsible for. Now we just need a database that knows how to query these ranges.
#
# We can accomplish this by storing the range value in the database itself, and index against that:

# +
import bisect
import random
import string


def _calculate_point(value):
    """Ring position of an arbitrary value (stable across processes)."""
    return _stable_hash(value) % LENGTH


def _random_string():
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))


VALUES = [_random_string() for _ in range(100)]
DATABASE = {_calculate_point(v): v for v in VALUES}
INDEX = sorted(DATABASE.keys())


def query_database(index, database, bounds):
    """Return all stored values whose position lies strictly inside bounds."""
    lower, upper = bounds
    lower_index = bisect.bisect_right(index, lower)
    upper_index = bisect.bisect_left(index, upper)
    return [database[index[i]] for i in range(lower_index, upper_index)]


query_database(INDEX, DATABASE, (0.5, 0.6))
# -

# At that point, we can pinpoint and query the specific values that are relevant to our node. We can accomplish this with just the information about the nodes themselves:

# +
def query_values_for_node(node_by_point, index, database, node):
    """Fetch only the values that fall inside the given node's ranges."""
    range_by_node = get_ranges_by_node(node_by_point)
    values = []
    for bounds in range_by_node[node]:
        values += query_database(index, database, bounds)
    return values


query_values_for_node(node_by_point, INDEX, DATABASE, "a")
# -

# There's additional performance benefits that can come from storing the index as its position on the ring. If your database ensures data locality using the same key (such as DynamoDB's shard key), you can gain the advantage of all of your keys living close to each other on disk. This can make the reads for each node's items even faster.
#
# ## Bucketing Values Evenly
#
# As you may have noted earlier, the buckets themselves are not always even. That depends entirely on the distribution of points: for a random distribution, and a high enough number, we will have an extremely high likelihood of bucketing evenly.
#
# So how many points is enough? With the approach explained above, it's important to keep the point count low: the lower, the fewer queries that have to be made on the database, and the more performant the query on the database.
Consistent Hashing ++.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Research on car sales ads # # You're an analyst at Crankshaft List. Hundreds of free advertisements for vehicles are published on your site every day. # # You need to study data collected over the last few years and determine which factors influence the price of a vehicle. # ### Step 1. Open the data file and study the general information. import pandas as pd df = pd.read_csv('/datasets/vehicles_us.csv') print(df.head()) print(df.tail()) print(df.info()) print(df.describe()) print(len(df)) print(df.sample(10)) # + # checking for missing values at random: model_year_by_type = df.groupby('type').agg({'model_year': lambda x: x.isnull().sum()}) df_type = df.groupby('type').count() model_year_missing_v = model_year_by_type / df_type print(round(model_year_missing_v.dropna(axis=1)*100, 2).astype(str)+'%') model_odometer_v = df.groupby('condition').agg({'odometer': lambda x: x.isnull().sum()}) df_condition = df.groupby('condition').count() odometer_missing_v = model_odometer_v / df_condition print(round(odometer_missing_v.dropna(axis=1) * 100, 2).astype(str)+'%') df_paint_color = df.groupby('type').agg({'paint_color': lambda x: x.isnull().sum()}) # - # In order to check whether the data is missing at random or not I did calculate the percentage of missing value for model year from total model years values grouped by type and odometer missing values from the total odometer values grouped by condition, based on that calculation I can conclude that that the data is missing at random at this columns. 
# ### Conclusion # After checking the data I found out that there is missing values at the 'model_year', 'cylinders', 'odometer', 'paint_color' and 'is_4wd' columns that need to be treated but first of all I wanna get rid of all duplicates and work with up to date data. # I also assumed that there may be same ads that were placed more than once but with different dates and left only the latest ads that were published. # ### Step 2. Data preprocessing # + dup = df.duplicated().sum() print(dup) # there is no duplicates #I assumed that there may be same ads that were placed more than once but with different dates: duplicate = df[df.duplicated(['price', 'odometer', 'model', 'model_year', 'paint_color', 'is_4wd', 'type', 'fuel', 'cylinders'])] print((duplicate).sort_values(['price', 'odometer', 'model', 'model_year', 'paint_color', 'is_4wd', 'type', 'fuel', 'cylinders'])) #I left only the latest ads that were published: df = df.sort_values('date_posted', ascending = False).drop_duplicates(['price', 'odometer', 'model', 'model_year', 'paint_color', 'is_4wd', 'type', 'fuel', 'cylinders'],keep = 'first') #dealing with missing values: print(df[df['model_year'].isnull()].count()) df['model_year'] = df['model_year'].fillna(df.groupby('model')['model_year'].transform('median')) print(df[df['cylinders'].isnull()].count()) df['cylinders'] = df.groupby(['model', 'model_year'])['cylinders'].fillna(method='ffill') print(df[df['odometer'].isnull()].count()) df['odometer'] = df['odometer'].fillna(df.groupby('model_year')['odometer'].transform('median')) print(df[df['paint_color'].isna()].count()) df['paint_color'] = df['paint_color'].fillna(value = 'Not reported') print(df[df['is_4wd'].isnull()].count()) # There is only values = 1, so it's reasonable to assume that the missing values = 0 df['is_4wd'] = df['is_4wd'].fillna(value = 0) # Fill the missing values with 0 df['is_4wd'] = df['is_4wd'].astype(int) # Integer type more appropriate df.info() #there is still 256 'cylinders' 
missing values, it's less than 1% so I decided to get rid of them: df = df.dropna() df # - # After checking missing values at 'is_4wd' column # Under the 'is_4wd' column there is only values = 1, so it's reasonable to assume that the missing values = 0, I think just because the intention of empty cell was that w4d is equal to zero. # Same logic I can assume for odometer missing values of the new cars. I will let know those who are responsible for data collection to fill the missing values with zeros in order to avoid this in future.I filled missing values with median value calculation by each missing value type, based on that median value is the more common value on the market. # ### Step 3. Make calculations and add them to the table # + df['date_posted'] = pd.to_datetime(df['date_posted']) df['day_of_the_week'] = df['date_posted'].dt.weekday df['month'] = df['date_posted'].dt.month df['year'] = df['date_posted'].dt.year df["vehicle's_age"] = df['year'] - df['model_year'] for i in df['odometer']: if i > 0 : df['average_mileage'] = df['odometer'] / df["vehicle's_age"] else: df['average_mileage'] = 0 df['average_mileage'] = df['average_mileage'].round(decimals = 3) print(df[(df['condition'] == 'new')&(df['odometer'] > 1000)]) #there is vehicles that uder new condition but they drived more then 1000 km df.loc[(df['condition'] == 'new')&(df['odometer'] > 1000),'condition']='like new' def numeric_scale(condition): if condition == 'new' : return 5 elif condition == 'like new': return 4 elif condition == 'excellent': return 3 elif condition == 'good': return 2 elif condition == 'fair': return 1 elif condition == 'salvage': return 0 df['condition_numeric_scale'] = df['condition'].apply(numeric_scale) df['condition_numeric_scale'] = df['condition_numeric_scale'].astype(int) # Integer type more appropriate print(df.info()) print(df) # - # ### Step 4. 
Carry out exploratory data analysis # + # Let's study the outliers: import matplotlib.pyplot as plt columns_name = ['price', "vehicle's_age", 'odometer', 'cylinders', 'condition_numeric_scale'] for name in columns_name: Q1 = df[name].quantile(0.25) Q3 = df[name].quantile(0.75) df.boxplot(column = name) plt.hlines(y=[Q1, Q3], xmin=0.9, xmax=1.1, color='red') plt.show() # In order determine which factors influence the price of a vehicle I need to ges rid of outliers: for name in columns_name: Q1 = df[name].quantile(0.25) Q3 = df[name].quantile(0.75) IQR = Q3 - Q1 if name == 'price': df_out = df[(df[name] > Q1) & (df[name] < Q3 + 1.5 * IQR)] # there is a sharp spike around 0 price which make no sense if name == "vehicle's_age": df_out = df[df[name] < Q3 + 1.5 * IQR] # it's legitimate that there are also new cars for sale if name == 'odometer': df_out = df[df[name] < Q3 + 1.5 * IQR] # new cars usually has 0 mileage if name == 'cylinders': df_out = df[(df[name] > Q1 - IQR * 1.5) & (df['cylinders'] < Q3 + 1.5 * IQR)] if name == 'condition_numeric_scale': df_out = df[(df[name] > Q1 - IQR * 1.5) & (df['condition_numeric_scale'] < Q3 + 1.5 * IQR)] # Comparing original data with without outliers data: import matplotlib.pyplot as plt bins = 30 for name in columns_name: df.hist(column = name, bins = bins); plt.xlabel('original data') plt.ylabel('amount of ads'); df_out.hist(column = name, bins = bins); plt.xlabel('without outliners') plt.ylabel('amount of ads'); plt.show() df_out.hist(column = 'days_listed', bins = 30) plt.show() median_ad = df_out['days_listed'].median() mean_ad = df_out['days_listed'].mean() print('The median time advertisements were displayed for {:.0f}'.format(median_ad), 'days') print() print('The mean time advertisements were displayed for {:.0f}'.format(mean_ad), 'days') print() Q1 = df['days_listed'].quantile(0.25) Q3 = df['days_listed'].quantile(0.75) print('Typical lifetime of an ad between {:.0f}'.format(Q1), 'and {:.0f}'.format(Q3), 'days') # I 
took a IQR as typical life time of an ad quickly_ads = df_out[df_out['days_listed'] < Q1] # all the ads that removed quickly long_ads = df_out[df_out['days_listed'] > Q3] # all the ads that listed for abnormally long time print() for name in columns_name: print('the correlation of "quick ads"',name,'on days_listed ads :{:.3f}'.format(quickly_ads[name].corr(quickly_ads['days_listed']))) print('the correlation of "long ads"',name,'on days_listed ads :{:.3f}'.format(long_ads[name].corr(long_ads['days_listed']))) # There is no dependency between the quantitative columns in columns_name and days_listed. # - # After checking the variables distributions I pay attention to that there is a sharp spike around 0 on the graf price which make no sense, in order to know wich factor influence the price of vehicles # I determined the typical price based on IQR method, based on that method I got rid from other outliers as well, for example vehicle's age which is really old and much above the typical market age does not reflect the influence of the age on average market price and needs to be excluded. The outliers affect the average and don't let to see the real common of things, after I got rid of them you can see the effect on the graphs which is show more realistic influence of price factors. 
# + # I will plot scatter for the categorical columns and lets see: cat_columns =['fuel', 'transmission', 'type', 'paint_color'] for name in cat_columns: plt.scatter(df[name].values, df['days_listed'], color="blue", alpha=0.5) plt.gcf().set_size_inches((12, 9)) plt.show() # everything becomes very clear as we look at the graphs for example the shortest ads have bus type # and as more rare the color of a car then shorter the time of an ad (orange and purple for example) #In order to nalyze the number of ads and the average price for each type of vehicle: ads_by_type = df_out.pivot_table(index ='type', values = 'price', aggfunc = 'count').sort_values('price') ads_by_type.plot(kind = 'bar', figsize=(12, 9)) print(ads_by_type) num_of_ads_by_type = df_out.pivot_table(index ='type', values = 'model', aggfunc = 'count').sort_values('model') num_of_ads_by_type.plot(kind = 'bar', figsize=(12, 9)) # two types with the greatest number of ads are truck and SUV and apparently are most expensive. # + print('SUV price depends:') df_SUV = df_out[(df_out['type'] == 'SUV')] price_SUV_corr = df_SUV[['price', "vehicle's_age", 'odometer', 'condition_numeric_scale']] pd.plotting.scatter_matrix(price_SUV_corr, figsize=(9, 9)) print(price_SUV_corr.corr()) print() print('truck price depends:') print() df_truck = df_out[(df_out['type'] == 'truck')] price_truck_corr = df_truck[['price', "vehicle's_age", 'odometer', 'condition_numeric_scale']] pd.plotting.scatter_matrix(price_truck_corr, figsize=(9, 9)) print(price_truck_corr.corr()) # + import seaborn as sns # In order to filter categorical variables that have at least 50 ads I sorted them in ascending order print(df_SUV.groupby('transmission').count().sort_values('paint_color')) print(df_SUV.groupby('paint_color').count().sort_values('transmission')) # In need to exclude Not reported, purple and yellow colors Not = pd.Series(['purple', 'yellow','Not reported']) df_SUV = df_SUV.loc[~df_SUV['paint_color'].isin(Not)] print() 
print(df_SUV['paint_color'].value_counts()) sns.boxplot( x = df_SUV['paint_color'], y = df_SUV['price']) # - sns.boxplot(x = df_SUV['transmission'], y = df_SUV['price']) # + # In order to filter categorical variables that have at least 50 ads I sorted them in ascending order print(df_truck.groupby('paint_color').count().sort_values('transmission')) # In need to exclude Not reported, purple, orange, yellow colors Not = pd.Series(['purple', 'yellow', 'orange','Not reported']) df_track = df_truck.loc[~df_truck['paint_color'].isin(Not)] print() print(df_truck['paint_color'].value_counts()) sns.boxplot( x = df_truck['paint_color'], y = df_truck['price']) # + print(df_truck.groupby('transmission').count().sort_values('paint_color')) # There is less then 50 ads for 'other' under 'transmission' column for track category so I need to ged rid of them: df_truck = df_truck.loc[df_truck['transmission'] != 'other'] sns.boxplot( x = df_truck['transmission'], y = df_truck['price']) # - # # # ### Step 5. Overall conclusion # My first conclusion is that vehicle's type popularity affect its price, more popular car, is more expensive. After detecting two most popular cars: truck and SUV, I found out that color factor has impact on price, it's seems that purple color is the most "expensive" one but after deeper examination its tern out that purple is an outlier and different color has more effect for different type, for SUV it's an orange and for truck its black, the orange color can be explaine by its rarity and if we exclude it, there is black "expensive" color for SUV too and also wite, which are two most popular and expensive colors. # The automatic transmission has much more impact on truck price then on SUV's price. SUV's manual transmission has just slightly more effect on it's price than automatic. 
Two more factors, vehicle's_age and odometer has logical and obvious effect: the more new the vehicle and has less mileage the more expensive it is, when truck's age has more effect on price than SUV's. # The condition scale surprisingly has much less effect than was expected by me, I can explain it by how reliable is "excellent" condition is, or by how hard to tell what dose "like new" really mean. The main conclusion is that different vehicle types has different price factor impact, but one factor in common is popularity the more the factor is popular the price is more expensive. Thank you for reading! # ### Project completion checklist # # Mark the completed tasks with 'x'. Then press Shift+Enter. # - [x] file opened # - [x] files explored (first rows printed, info() method) # - [x] missing values determined # - [x] missing values filled in # - [x] clarification of the discovered missing values provided # - [x] data types converted # - [x] explanation of which columns had the data types changed and why # - [x] calculated and added to the table: day of the week, month, and year the ad was placed # - [x] calculated and added to the table: the vehicle's age (in years) when the ad was placed # - [x] calculated and added to the table: the vehicle's average mileage per year # - [x] the following parameters investigated: price, vehicle's age when the ad was placed, mileage, number of cylinders, and condition # - [x] histograms for each parameter created # - [x] task completed: "Determine the upper limits of outliers, remove the outliers and store them in a separate DataFrame, and continue your work with the filtered data." # - [x] task completed: "Use the filtered data to plot new histograms. Compare them with the earlier histograms (the ones that included outliers). Draw conclusions for each histogram." # - [x] task completed: "Study how many days advertisements were displayed (days_listed). Plot a histogram. Calculate the mean and median. 
Describe the typical lifetime of an ad. Determine when ads were removed quickly, and when they were listed for an abnormally long time. " # - [x] task completed: "Analyze the number of ads and the average price for each type of vehicle. Plot a graph showing the dependence of the number of ads on the vehicle type. Select the two types with the greatest number of ads. " # - [x] task completed: "What factors impact the price most? Take each of the popular types you detected at the previous stage and study whether the price depends on age, mileage, condition, transmission type, and color. For categorical variables (transmission type and color), plot box-and-whisker charts, and create scatterplots for the rest. When analyzing categorical variables, note that the categories must have at least 50 ads; otherwise, their parameters won't be valid for analysis. " # - [x] each stage has a conclusion # - [x] overall conclusion drawn # <div style="border:solid black 2px; padding: 20px"> # <h3>Checklist</h3> # # Let's make your project truly awesome and work a little bit more on the following issues: # # 1. **Move the text from code cells to markdown cells.** # 2. **Add some conclusions to step 2.** # 3. **Think about the way to calculate average_mileage one more time.** # 4. **Comment variables distributions in step 4.** # 5. **Fix the code.** # # Looking forward to seeing your perfect project next time! Be free to ask whatever questions you have. Good luck 🙂 # </div> # - I did add print(df.sample(10)) in order to check if there is missing data at random. # - Under the 'is_4wd' column there is only values = 1, so it's reasonable to assume that the missing values = 0, I think just because the intention of empty cell was that w4d is equal to zero. # - I added some logic explonation to step 1 conclusion # - I did change the an average mileage calculation and I assume that a new car has an average mileage equal to odometer value for obvious reason ;) # - the bag code was fixed. 
# - Thank you for your review, I need to get to the next project, otherwise I am out of time.

# <div style="border:solid black 2px; padding: 20px">
# <h3>Checklist v2</h3>
#
# Let's work a bit more on the project:
#
# 1. **Add some conclusions to step 2.** We should check whether the data is missing at random or not, think of some reasons for missing data, and explain the logic behind choosing some particular method of value imputation.
# 2. **Comment variables distributions in step 4.**
# 3. **Fix the code.**
#
# Looking forward to seeing your project! Don't be nervous and feel free to ask questions. Good luck 🙂
# </div>

# - The conclusions to step 2 were added in an additional cell
# - The comments were added
# - I fixed the code
#
# - Polina, it's just my second project and I have no coding background, so p
Car_sales_ads_research/car_sales_ads_research.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Collects IceCube neutrino-alert metadata (positions, arrival times, 90%
# error radii) from the GCN notice summary pages and merges it with the
# observed/unobserved ASAS-SN alert samples into one CSV.

# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os, requests
from astropy.time import Time
from astropy.table import Table
from astropy.coordinates import SkyCoord
from style import output_folder, big_fontsize, base_width, base_height, dpi
import seaborn as sns
import json
# NOTE(review): duplicate import — Time is already imported above.
from astropy.time import Time

# +
# GCN notice summary pages, one per alert stream.
notice_summary_urls = {
    'BRONZE_GOLD': "https://gcn.gsfc.nasa.gov/amon_icecube_gold_bronze_events.html",
    'EHE': "https://gcn.gsfc.nasa.gov/amon_ehe_events.html",
    'HESE': "https://gcn.gsfc.nasa.gov/amon_hese_events.html"
}


def get_summary_table(k, renew=False):
    """Return the GCN notice summary table for alert class `k` as a DataFrame.

    Downloads and caches the HTML table under data/; pass renew=True to
    force a fresh download.
    """
    fn = f"data/gcn_notice_summary_{k}.html"
    if not os.path.isfile(fn) or renew:
        _t = pd.read_html(requests.get(notice_summary_urls[k]).text)[0]
        _t.to_html(fn)
    else:
        _t = pd.read_html(fn, index_col=0)[0]
    return _t
# -

def parse_notice_info(ic_name, alert_class, verbose=True):
    """Look up the first GCN notice for one IceCube alert.

    Returns a 4-tuple (arrival_time_string, ra_deg, dec_deg, error90_arcmin),
    or (None, None, None, None) for alerts selected offline (no notice).
    Matching is by the date encoded in the alert name (e.g. 'IC201021A' ->
    '201021'), restricted to revision 0 where a 'Rev' column exists.
    """
    # These alerts were selected offline and never had a GCN notice.
    if ic_name in ['IC210503A', 'IC200107A', 'IC210717A']:
        if verbose:
            print(f"{ic_name}: No notice becasue selected offline")
        return None, None, None, None
    # GOLD and BRONZE alerts share a single summary page with different
    # column headers (units included) than the EHE/HESE pages.
    if ('BRONZE' in alert_class) or ('GOLD' in alert_class):
        _alert_class = 'BRONZE_GOLD'
    else:
        _alert_class = alert_class
    _pos_ext = ' [deg]' if _alert_class == 'BRONZE_GOLD' else ''
    _error_ext = '90 [arcmin]' if _alert_class == 'BRONZE_GOLD' else ''
    summary_table = get_summary_table(_alert_class)
    # Alert names encode the event date: strip the 'IC' prefix and the
    # trailing A/B letter, then compare against the table's Date column.
    _date_from_name = ic_name.split('IC')[-1][:-1]
    _dates_in_table = summary_table['EVENT', 'Date'].apply(lambda x: x.replace('/', ''))
    _mask = _dates_in_table == _date_from_name
    if 'Rev' in summary_table['EVENT'].columns:
        # Keep only the initial (revision 0) notice.
        _mask = _mask & (summary_table['EVENT', 'Rev'] == 0)
    _selected = summary_table[_mask]
    if len(_selected) != 1:
        if 'IC160427A' in ic_name:
            # NOTE(review): the message says "third notice" but _ind = 1
            # selects the SECOND row — confirm which is intended.
            if verbose:
                print(f'{ic_name}: selecting the third notice of {len(_selected)}')
            _ind = 1
        elif len(_selected) == 2:
            # Two notices share the date: decide whether they are two
            # distinct alerts on the same day or a refined re-issue.
            if verbose:
                print(f"{ic_name}: Two matching dates.")
            _ras = _selected["OBSERVATION"][f"RA{_pos_ext}"]
            _decs = _selected["OBSERVATION"][f"Dec{_pos_ext}"]
            _coords = SkyCoord(_ras, _decs, unit='deg')
            _sep = _coords[0].separation(_coords[1]).deg
            if verbose:
                print(f"\t{_sep:.2f} degrees apart")
            if _sep > 1:
                # Well separated -> genuinely two alerts that day; the 'A'
                # alert is the earlier one, 'B' the later.
                if verbose:
                    print(f"\tassuming it's two alerts at the same day")
                # NOTE(review): `dates` is computed but never used.
                dates = [d.replace('/', '-') for d in _selected['EVENT', 'Date']]
                tstrings = np.array([f"20{_s['EVENT', 'Date'].replace('/','-')}T{_s['EVENT', 'Time UT']}" for _, _s in _selected.iterrows()])
                times = Time(tstrings)
                _ind = np.argmin(times) if ic_name.endswith('A') else np.argmax(times) if ic_name.endswith('B') else None
            else:
                # Nearly coincident -> second notice is a refinement.
                if verbose:
                    print(f"\tassuming second notice is refined info from circular. choosing first one")
                _ind = 0
        else:
            raise Exception(f"More than one entry for {ic_name}: {_selected}")
    else:
        _ind = 0
    _selected = _selected.iloc[_ind]
    _date = _selected["EVENT"]["Date"].replace("/", "-")
    _obstime = _selected["EVENT"]["Time UT"]
    _ra = _selected["OBSERVATION"][f"RA{_pos_ext}"]
    _dec = _selected["OBSERVATION"][f"Dec{_pos_ext}"]
    _error90 = _selected["OBSERVATION"][f"Error{_error_ext}"]
    # Table dates are two-digit years; prepend the century.
    _arrivaltime = f"20{_date} {_obstime}"
    return _arrivaltime, _ra, _dec, _error90

# +
# Merge the observed and unobserved ASAS-SN alert samples.
obs = pd.read_csv("data/nu_alerts_observed.csv", skiprows=[0, 1, 2])
obs = obs[~np.isnan(obs["RA"])]
non = pd.read_csv("data/nu_alerts_unobserved.csv", skiprows=[0, 1], usecols=range(11))
comb = pd.concat([obs, non], ignore_index=True)

# Splitting the EHE and HESE info into two rows
# (IC160731A was issued under both alert streams).
m = comb['Event'] == 'IC160731A'
comb.loc[m, 'Class'] = 'EHE'
to_append = comb.loc[m].copy()
to_append['Class'] = 'HESE'
# NOTE(review): DataFrame.append is deprecated in modern pandas
# (pd.concat is the replacement) — left unchanged here.
comb = comb.append(to_append)

# Columns to be filled from the GCN notices.
new_cols = ['arrival time [UT]', 'initial RA', 'initial Dec', 'initial Error90 [arcmin]']
for c in new_cols:
    comb[c] = np.nan

# Fill the notice info per (Event, Class) pair.
for j, (i, row) in enumerate(comb.iterrows()):
    m = (comb.Event == row.Event) & (comb.Class == row.Class)
    comb.loc[m, new_cols] = parse_notice_info(row['Event'], row['Class'])

# Notebook display cell.
comb
# -

# Flag retracted alerts (the rejection-reason text appears with two spellings).
comb['retracted'] = (comb['Rejection reason'] == 'Alert retraction') | (comb['Rejection reason'] == 'Alert Retraction')

keep_cols = ['Event', 'Class', 'RA', 'RA Unc (rectangle)', 'Dec', 'Dec Unc (rectangle)', 'arrival time [UT]', 'initial RA', 'initial Dec', 'initial Error90 [arcmin]', 'retracted']

out = comb[keep_cols].sort_values('Event')

# Notebook display cell.
out

out.to_csv('data/ASASSN_sample_paper_IceCube_info.csv')
notebooks/alerts_for_asassn_paper.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mralamdari/Machine-Learning-Adventure/blob/main/h3_Titanic_Survival_Prediction__clf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="vwc8n5XYPO8_"
# #Kaggle Titanic Survival Prediction Project
# [Source](https://www.kaggle.com/c/titanic/data)

# + id="fHg4EUHYWtjg" colab={"base_uri": "https://localhost:8080/"} outputId="2eb0c8be-9276-43e3-9d27-b148714723ef"
import os
import xgboost
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn import tree
from sklearn import metrics
from sklearn import impute
from sklearn import decomposition
from sklearn import ensemble
from sklearn import neighbors
from sklearn import naive_bayes
from sklearn import linear_model
from sklearn import preprocessing
from sklearn import model_selection
from sklearn import feature_selection
# Fetch the author's data-preprocessing helper module (used as `dpp` below).
# !wget https://raw.githubusercontent.com/rwrdvsqaesdfh32s/datapreprocessing/main/esi_datapreproccessing.py
import esi_datapreproccessing as dpp

# + [markdown] id="-ntft8K6YCle"
# #Step 1: Read Data and Understand It

# + id="9GC03wALO6Mo" colab={"base_uri": "https://localhost:8080/"} outputId="285afb63-b8aa-4e89-b5ae-8fe4d41913f9"
# Download the competition data via the Kaggle CLI; credentials are read
# from kaggle.json in the configured directory.
os.environ["KAGGLE_CONFIG_DIR"] = '/content/drive/MyDrive/Colab Notebooks/'
# !kaggle competitions download -c titanic

# + id="tvHDv_y-XY6E"
# train.csv: labelled training set; test.csv: unlabelled evaluation set;
# gender_submission.csv: Kaggle's sample submission (used later as pseudo-labels).
train_data = pd.read_csv("train.csv")
test_data = pd.read_csv('test.csv')
sub_data = pd.read_csv('gender_submission.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="HVgpYqz9YAB0" outputId="780316c5-12fe-4701-9f9c-5acf81700846"
train_data

# + id="EZQpH8paZaZm" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="fa017c4e-7137-4349-d7b9-f88afadcdeac"
# Column-by-column description of the dataset (author's notes).
"""
PassengerId: (int) : index + 1, Not important at all.
Survived: (int): (Target) , Very Important.
Pclass: (int):(CATEGORICAL), Ticket Class , Important 1st = Upper 2nd = Middle 3rd = Lower,
Name: (str): Not important at all.
Sex: (str): (CATEGORICAL), important.
Age: (float): important
SibSp: (int): (CATEGORICAL), number("#") of siblings / spouses aboard the Titanic , important. Sibling = brother, sister, stepbrother, stepsister Spouse = husband, wife (mistresses and fiancés were ignored)
Parch: (int): (CATEGORICAL), number('#') of parents / children aboard the Titanic , important. Parent = mother, father Child = daughter, son, stepdaughter, stepson Some children travelled only with a nanny, therefore parch=0 for them.
Ticket: (str): maybe Not important at all.
Fare: (float): Important
Cabin: (str): (CATEGORICAL), Cabin number maybe important.
Embarked: (str): (CATEGORICAL), Port of Embarkation (بندر سوار شدن) important.
"""

# + [markdown] id="M0J5febubX7D"
# #Step 2: Data Cleaning and Prepations

# + colab={"base_uri": "https://localhost:8080/"} id="ujzwCsT8YBvs" outputId="d2a02279-d2c7-473c-bc59-412c7d937ed2"
test_data.columns
sub_data.columns
train_data.columns, len(train_data)

# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="bqahHd59Yhh9" outputId="47977aef-2e62-4c67-8b28-5f7d670ae594"
# Embarked has only 2 missing values, so replace them with the most common port ('S').
train_data["Embarked"].fillna('S', inplace=True) train_data[train_data.duplicated()] # + colab={"base_uri": "https://localhost:8080/", "height": 421} id="Pw0eBm6Ek6up" outputId="83d436ec-55c4-4403-8ee7-9e9fa41c7c1c" null_df = pd.DataFrame([np.int64(np.sum(train_data.isnull())), round(np.sum(train_data.isnull()) * 100 / len(train_data), 2)]).T null_df.index = train_data.columns null_df.columns = ['count', 'percent'] null_df # + id="BWkKy07adxoO" # X.Age.describe() train_data['Age'].fillna(np.int64(train_data.Age.mean()), inplace=True) # + id="hxXftDSjbXmN" # Cabin's 77% of the data is null, so it is useless train_data.drop(columns=["PassengerId", "Name", "Ticket", 'Cabin'], inplace=True) test_data.drop(columns=["Name", "Ticket", 'Cabin'], inplace=True) # + id="zsNArmUEfAW6" X = train_data.drop(columns=['Survived']) Y = train_data.Survived # + colab={"base_uri": "https://localhost:8080/"} id="t5ztmsmWUh1b" outputId="488e567e-6300-45ff-8166-d51036e88c96" one_x = X.copy() le_x = X.copy() def oneHotEncoder_function(df, c): one = preprocessing.OneHotEncoder(sparse=False, dtype=np.int64) temp = one.fit_transform(pd.DataFrame(df[c])) df = pd.concat([df, pd.DataFrame(temp, columns=one.categories_[0])], axis=1) df.drop(columns=[c], inplace=True) return df def labelEncoder_function(df): """ Sex ['female'=0, 'male'=1] Embarked ['C'=0, 'Q'=1, 'S'=2] """ le = preprocessing.LabelEncoder() for c in ['Sex', "Embarked"]: df[c] = le.fit_transform(df[c]) print(c, le.classes_) return df one_x = oneHotEncoder_function(one_x, 'Sex') one_x = oneHotEncoder_function(one_x, 'Embarked') le_x = labelEncoder_function(le_x) # + [markdown] id="qfKB9dZT3o5S" # #Step 3: Data Visualizing # + [markdown] id="KcI45_fq3y1C" # ##Target Visualization # + colab={"base_uri": "https://localhost:8080/"} id="VSPZSd5Q3oiF" outputId="9ad0b04d-c722-4048-fdcc-cb9562b87539" Y.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 386} id="QwP5-M0YhixW" outputId="212b2252-47fa-4a6e-bb8c-c52a4e1d0111" 
# Distribution of the target variable.
sns.displot(Y)

# + colab={"base_uri": "https://localhost:8080/"} id="xQlshKtu6IRp" outputId="f196bd91-f3c1-496b-d12e-80bf8e817af0"
# Class balance of the target, in percent.
y_1 = 100 * sum(Y>=1) / len(Y)
y_0 = 100 - y_1
print(f"""The Target contains % {y_1} one value and % {y_0} zero value""")
# So a little more than half of the people on the Titanic are in the zero class.

# + [markdown] id="lw7IHXhA9KP8"
# ## Features Data Visualization

# + id="IurzWOAF68Yg"
# Columns treated as categorical in the plots below.
categorical_data = ["Sex", 'Embarked', "Pclass", "SibSp", "Parch"]
le_x

# + id="5DB5lU2tDWID" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="324a0ccd-c877-406c-9b49-67e0b036d1d8"
dpp.clf_countplot(X, Y)

# + id="F1pRRJO_MfiY" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="98b6664f-4460-4934-df39-3dc05a53b7a0"
dpp.clf_boxplot(X, Y)

# + [markdown] id="I-TzGyS5W2Hx"
# # Step 4: Data Preprocessing

# + id="ExVehJHyVMcw"
# Feature matrices with the target appended, for the correlation plots.
le_data = pd.concat([le_x, Y], axis=1)
one_data = pd.concat([one_x, Y], axis=1)

# + colab={"base_uri": "https://localhost:8080/", "height": 537} id="AXfD4uMpW1M7" outputId="3a40ba17-cb13-48cc-9339-5625b8c08dfe"
dpp.heatmap(one_data)

# + id="NLRZ5-q3bkRs"
# Drop one dummy per categorical to avoid perfectly collinear columns.
one_x.drop(columns=['male', 'Q', 'S'], inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 537} id="phsRH3sIb-wG" outputId="5ec7e2d2-0452-4a1b-f085-95c6ca2360b9"
# NOTE(review): one_data was built before the drop above and is not rebuilt,
# so this heatmap shows the same frame as the previous one -- confirm whether
# it should be recomputed from the reduced one_x.
dpp.heatmap(one_data)

# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="BVflLZGVcL6K" outputId="e10728cd-c17e-4b09-bdef-cf6e39813c00"
dpp.importance_plot(le_x, Y)

# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="uB0--gZxdjGv" outputId="aea42dbb-e6cd-415f-b104-1f8716bc248f"
dpp.importance_plot(one_x, Y)

# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="DwitcNBofGbz" outputId="1f3fbdf4-9998-4965-dda6-7171ef61742f"
dpp.multicollinearity(le_data)

# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="6X-r4VTXfYwW" outputId="e4427bc6-e609-4dd1-f308-6ab457a642f8"
dpp.multicollinearity(one_data)

# + id="Vgn2LR85faJA"
# Three alternative scalings for each of the two feature encodings.
standard_scaler = preprocessing.StandardScaler()
minmax_scaler = preprocessing.MinMaxScaler()
normalizer = preprocessing.Normalizer()

le_x_standard = standard_scaler.fit_transform(le_x)
le_x_minmax = minmax_scaler.fit_transform(le_x)
le_x_normalizer = normalizer.fit_transform(le_x)

one_x_standard = standard_scaler.fit_transform(one_x)
one_x_minmax = minmax_scaler.fit_transform(one_x)
one_x_normalizer = normalizer.fit_transform(one_x)

# + id="Fjb-4dlXzA0I"
np.random.seed(34)
# 80/20 split for every encoding/scaling combination; the fixed random_state
# makes all splits use the same row shuffle.
train_xl, test_xl, train_yl, test_yl = model_selection.train_test_split(le_x, Y, test_size=0.2, random_state=52)
train_xo, test_xo, train_yo, test_yo = model_selection.train_test_split(one_x, Y, test_size=0.2, random_state=52)
train_xos, test_xos, train_yos, test_yos = model_selection.train_test_split(one_x_standard, Y, test_size=0.2, random_state=52)
train_xom, test_xom, train_yom, test_yom = model_selection.train_test_split(one_x_minmax, Y, test_size=0.2, random_state=52)
train_xon, test_xon, train_yon, test_yon = model_selection.train_test_split(one_x_normalizer, Y, test_size=0.2, random_state=52)
train_xls, test_xls, train_yls, test_yls = model_selection.train_test_split(le_x_standard, Y, test_size=0.2, random_state=52)
train_xlm, test_xlm, train_ylm, test_ylm = model_selection.train_test_split(le_x_minmax, Y, test_size=0.2, random_state=52)
train_xln, test_xln, train_yln, test_yln = model_selection.train_test_split(le_x_normalizer, Y, test_size=0.2, random_state=52)

# + [markdown] id="6nOHa50j2HCk"
# # Step 5: Model Selection
#
# at first with the Pure data

# + id="R5F2rcpfHO6j"
# Active split for the model cells below; swap in one of the commented
# alternatives to compare encodings/scalings.
x_train, x_test, y_train, test_y = train_xo, test_xo, train_yo, test_yo
# train_xl, test_xl, train_yl, test_yl
# train_xos, test_xos, train_yos, test_yos
# train_xom, test_xom, train_yom, test_yom
# train_xon, test_xon, train_yon, test_yon
# train_xls, test_xls, train_yls, test_yls
# train_xlm, test_xlm, train_ylm, test_ylm
# train_xln, test_xln, train_yln, test_yln
# + id="rUNT4Jop2GY8" outputId="7af464be-dda6-48b7-ae1a-ed4162ae7016"
# Baseline sweep: fit each candidate classifier on the active split and
# record its hold-out accuracy.
model1 = linear_model.LogisticRegressionCV()
model1.fit(x_train, y_train)
pred1 = model1.predict(x_test)
acc_1 = metrics.accuracy_score(test_y, pred1)
# model_selection.cross_val_score()
acc_1

# + id="ca9NthSA12k1" outputId="d32e1c4f-19be-44e4-ed6c-d11fcc88d901"
model2 = linear_model.RidgeClassifierCV()
model2.fit(x_train, y_train)
pred2 = model2.predict(x_test)
acc_2 = metrics.accuracy_score(test_y, pred2)
acc_2

# + id="LabE7bRzBPBn" outputId="656cc3e2-10de-4796-93db-094953e58453"
model3 = linear_model.SGDClassifier()
model3.fit(x_train, y_train)
pred3 = model3.predict(x_test)
acc_3 = metrics.accuracy_score(test_y, pred3)
acc_3

# + id="f-z4yehdBbJS" outputId="c7571517-4299-45b2-ea8c-1866f96b7e37"
model4 = neighbors.KNeighborsClassifier()
model4.fit(x_train, y_train)
pred4 = model4.predict(x_test)
acc_4 = metrics.accuracy_score(test_y, pred4)
acc_4

# + id="_XfKd7lJBq7b" outputId="17a58507-3055-4dbe-bac6-2384016c17aa"
model5 = svm.LinearSVC()
model5.fit(x_train, y_train)
pred5 = model5.predict(x_test)
acc_5 = metrics.accuracy_score(test_y, pred5)
acc_5

# + id="O4I29nTOCG_O" outputId="d97382a0-b02e-4542-c8ea-8f59b477e68d"
model6 = svm.SVC()
model6.fit(x_train, y_train)
pred6 = model6.predict(x_test)
acc_6 = metrics.accuracy_score(test_y, pred6)
acc_6

# + id="f0cEOJYJCW7B" outputId="fddff6e3-6592-4c4c-e6a6-af3513bd9b7f"
model7 = svm.NuSVC()
model7.fit(x_train, y_train)
pred7 = model7.predict(x_test)
acc_7 = metrics.accuracy_score(test_y, pred7)
acc_7

# + id="OLeRghXPCgqp" outputId="06257d09-b110-433d-c835-0e872446a021"
model8 = tree.DecisionTreeClassifier()
model8.fit(x_train, y_train)
pred8 = model8.predict(x_test)
acc_8 = metrics.accuracy_score(test_y, pred8)
acc_8

# + id="nu-Rba-lCs5m" outputId="a08ae575-dd69-447e-f44d-e315ed733458"
model9 = tree.ExtraTreeClassifier()
model9.fit(x_train, y_train)
pred9 = model9.predict(x_test)
acc_9 = metrics.accuracy_score(test_y, pred9)
acc_9

# + id="SGBf8vPECzpt" outputId="98c1c3e4-e719-4659-f4aa-4580581404f5"
model10 = ensemble.AdaBoostClassifier()
model10.fit(x_train, y_train)
pred10 = model10.predict(x_test)
acc_10 = metrics.accuracy_score(test_y, pred10)
acc_10

# + id="dYmskojqC8Pp" outputId="b37829c2-bd3e-4490-89b5-842f45fd344d"
model11 = ensemble.BaggingClassifier()
model11.fit(x_train, y_train)
pred11 = model11.predict(x_test)
acc_11 = metrics.accuracy_score(test_y, pred11)
acc_11

# + id="UgQ6IwJHDMZJ" outputId="9bed2b6c-d3c0-48d9-9a1a-c36c4d631415"
model12 = ensemble.ExtraTreesClassifier()
model12.fit(x_train, y_train)
pred12 = model12.predict(x_test)
acc_12 = metrics.accuracy_score(test_y, pred12)
acc_12

# + id="5zUNKku6DUot" outputId="1d0d1f47-00d7-40ea-f120-9ce84a010aea"
model13 = ensemble.GradientBoostingClassifier()
model13.fit(x_train, y_train)
pred13 = model13.predict(x_test)
acc_13 = metrics.accuracy_score(test_y, pred13)
acc_13

# + id="KCo4HadjDdSA" outputId="3fe82e4d-b0d4-428f-f635-398fe2c9b6fa"
model14 = ensemble.RandomForestClassifier()
model14.fit(x_train, y_train)
pred14 = model14.predict(x_test)
acc_14 = metrics.accuracy_score(test_y, pred14)
acc_14

# + id="4JsD4vRISuAM" outputId="e09bc850-dab4-42af-a277-53d3ae87749c"
model15 = xgboost.XGBClassifier()
model15.fit(x_train, y_train)
pred15 = model15.predict(x_test)
acc_15 = metrics.accuracy_score(test_y, pred15)
acc_15

# + colab={"base_uri": "https://localhost:8080/"}
# + id="GEPcAufqS3-R" outputId="d5b5c6df-7e97-45a8-e5a0-2b9f3aa72e2e"
model16 = xgboost.XGBRFClassifier()
model16.fit(x_train, y_train)
pred16 = model16.predict(x_test)
acc_16 = metrics.accuracy_score(test_y, pred16)
acc_16


# + id="gzGMO2eKDmTx"
def model_selection_func(x_train, y_train):
    """Fit the fixed zoo of 14 sklearn classifiers on (x_train, y_train).

    Returns two dicts keyed 'model_1'..'model_14':
      * acc_dff       -- hold-out accuracy of each model
      * val_score_dff -- mean cross-validation score of each model

    NOTE(review): the hold-out accuracy is computed against the module-level
    x_test/test_y split, not a split derived from the given x_train --
    confirm this is intended when comparing scaled data groups.
    """
    # Fix: the original 14 copy-pasted stanzas had stray trailing commas that
    # wrapped every cross_val_score result in a 1-tuple (np.mean flattened it,
    # so values were unchanged). The loop below removes the duplication.
    constructors = [
        linear_model.LogisticRegressionCV,
        linear_model.RidgeClassifierCV,
        linear_model.SGDClassifier,
        neighbors.KNeighborsClassifier,
        svm.LinearSVC,
        svm.SVC,
        svm.NuSVC,
        tree.DecisionTreeClassifier,
        tree.ExtraTreeClassifier,
        ensemble.AdaBoostClassifier,
        ensemble.BaggingClassifier,
        ensemble.ExtraTreesClassifier,
        ensemble.GradientBoostingClassifier,
        ensemble.RandomForestClassifier,
    ]
    acc_dff = {}
    val_score_dff = {}
    for idx, make_model in enumerate(constructors, start=1):
        clf = make_model()
        clf.fit(x_train, y_train)
        pred = clf.predict(x_test)
        acc_dff[f'model_{idx}'] = metrics.accuracy_score(test_y, pred)
        val_score_dff[f'model_{idx}'] = np.mean(model_selection.cross_val_score(clf, x_train, y_train))
    return acc_dff, val_score_dff


# + colab={"base_uri": "https://localhost:8080/"} id="1M5m-vH1IYDq" outputId="2ee074b1-54f8-41f6-ea3f-3bd1cfb71f7e"
# Run the model zoo on every encoding/scaling of the training data; one
# column per data group, one row per model.
data_groups = [[train_xl, train_yl], [train_xo, train_yo], [train_xos, train_yos],
               [train_xom, train_yom], [train_xon, train_yon], [train_xls, train_yls],
               [train_xlm, train_ylm], [train_xln, train_yln]]
all_acc_df = pd.DataFrame()
all_val_score_df = pd.DataFrame()
for i, g in enumerate(data_groups):
    acc, val_score = model_selection_func(g[0], g[1])
    all_acc_df[i] = list(acc.values())
    all_val_score_df[i] = list(val_score.values())

# + colab={"base_uri": "https://localhost:8080/"} id="ReAeaVahMI9H" outputId="3d78ef49-c29f-451a-eb32-b7768ed0e062"
all_val_score_df
# .describe()
# .mean(axis=1).sort_values(ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="T4HxDyI2ptA8" outputId="4b91e072-cb74-4e12-fdbc-d7381262556e"
all_acc_df
# .mean(axis=1).sort_values(ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="zBSf_b3mSSBn" outputId="2f82f7d4-cbf9-40a1-9a6e-c7e1a845e394"
all_val_score_df.mean(axis=0).sort_values(ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="NsYJ0QERpvWF" outputId="689bf330-fb33-4e28-be10-e7b376906db5"
all_acc_df.mean(axis=0).sort_values(ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="r6g9dtSgiUoS" outputId="f3f20671-3197-40a1-db74-6284c8513439"
# XGBoost variants, evaluated separately from the 14-model sweep.
model15 = xgboost.XGBClassifier()
model15.fit(x_train, y_train)
pred15 = model15.predict(x_test)
acc_15 = metrics.accuracy_score(test_y, pred15)

model16 = xgboost.XGBRFClassifier()
model16.fit(x_train, y_train)
pred16 = model16.predict(x_test)
acc_16 = metrics.accuracy_score(test_y, pred16)
acc_15, acc_16

# + [markdown] id="9K4MuATrHnAj"
# # Step 6: Final Model Selection
# ## Model13: ensemble.GradientBoostingClassifier()
# ## Model15: xgboost.XGBClassifier()

# + id="zigVCh-tIEE0"
m1 = ensemble.GradientBoostingClassifier()
m2 = xgboost.XGBClassifier()
# +
# + colab={"base_uri": "https://localhost:8080/"} id="n9f6g84NrAT6" outputId="9f2b735d-1a54-4c6e-9546-ade52f6ce9e8"
# Compare the two finalists on one split; swap in a commented alternative to
# evaluate a different encoding/scaling.
# x_train, x_test, y_train, y_test = train_xo, test_xo, train_yo, test_yo
# x_train, x_test, y_train, y_test = train_xl, test_xl, train_yl, test_yl
# x_train, x_test, y_train, y_test = train_xos, test_xos, train_yos, test_yos
# x_train, x_test, y_train, y_test = train_xom, test_xom, train_yom, test_yom
# x_train, x_test, y_train, y_test = train_xon, test_xon, train_yon, test_yon
# x_train, x_test, y_train, y_test = train_xls, test_xls, train_yls, test_yls
# x_train, x_test, y_train, y_test = train_xlm, test_xlm, train_ylm, test_ylm
x_train, x_test, y_train, y_test = train_xln, test_xln, train_yln, test_yln

m1.fit(x_train, y_train)
p1 = m1.predict(x_test)
a1 = metrics.accuracy_score(y_test, p1)

m2.fit(x_train, y_train)
p2 = m2.predict(x_test)
a2 = metrics.accuracy_score(y_test, p2)

print(metrics.classification_report(y_test, p1))
print(metrics.classification_report(y_test, p2))
a1, a2

# + [markdown] id="AE0YzAD1P-68"
# # Tuning the final data and final model

# + colab={"base_uri": "https://localhost:8080/"} id="1AoOE648P-Ua" outputId="10b1adaa-fcee-4528-adc6-a80e61b8db61"
# NOTE(review): test_size=0.01 leaves only a handful of test rows, so the
# accuracy below is extremely noisy -- confirm this is intended.
x_train, x_test, y_train, y_test = model_selection.train_test_split(one_x, Y, test_size=0.01)
model = ensemble.GradientBoostingClassifier(learning_rate=0.1, n_estimators=100)
model.fit(x_train, y_train)
pred = model.predict(x_test)
a1 = metrics.accuracy_score(y_test, pred)
print(metrics.classification_report(y_test, pred))
a1

# + id="6fZJ0NSLOP2t"
# (An earlier hand-written grid-search draft was removed here; the real
# GridSearchCV sweep follows.)
# # dpp.model_parameters_chooser(x_train, y_train, model, reg=False, hints=False, score_choice=0, sf_choice=1)
param_grid = {
    'n_estimators': [20, 30, 40, 50, 60, 70, 80, 90, 100],
    'learning_rate': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.001, 0.0001, 0.01, 0.000001],
    "max_features": ["log2", "sqrt"],
    'max_depth': [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
                  22, 23, 24, 25, 26, 27, 28, 29, 30, 50],
}
# , refit = True, verbose = 3, n_jobs=-1, cv= 5
grid = model_selection.GridSearchCV(estimator=model, param_grid=param_grid)
grid.fit(x_train, y_train)
grid.best_params_

# + id="i0z7R-tuQmcE"
# Random restarts over n_estimators; keep the best-scoring fit.
# NOTE(review): 10000 refits is very slow, and the winner is selected on the
# tiny test split above, so this overfits the model selection -- consider CV.
best_score = 0
for i in range(10000):
    clf = ensemble.GradientBoostingClassifier(learning_rate=0.1, n_estimators=np.random.randint(100, 1001))
    clf.fit(x_train, y_train)
    predd = clf.predict(x_test)
    score = metrics.accuracy_score(y_test, predd)
    if score > best_score:
        best_score = score
        model = clf
print(best_score)

# + id="9tjUmFEmVgmt" colab={"base_uri": "https://localhost:8080/"} outputId="e98e152c-6828-45c4-94ae-c985a7eb9831"
import pickle

# pickle.dump(model, open("/content/drive/MyDrive/Colab Notebooks/Heoku_projects/h3_best_model.sav", "wb"))
# Fix: open the pickle inside a context manager so the file handle is closed.
with open("/content/drive/MyDrive/Colab Notebooks/Heoku_projects/h3_best_model.sav", "rb") as _f:
    loadm = pickle.load(_f)
loadm.fit(x_train, y_train)
loadpred = loadm.predict(x_test)
metrics.accuracy_score(y_test, loadpred)
# list(zip(loadpred, y_test))
# pickle.dump(loadm, open("/content/drive/MyDrive/Colab Notebooks/Heoku_projects/h2_all_best_model.sav", "wb"))

# + id="6ZBiR80V_xmN"
# Refit on the full label-encoded training set, then label-encode the test
# features column by column.
# NOTE(review): LabelEncoder is applied to every column, including continuous
# Age/Fare, which re-maps their values -- confirm this matches the encoding
# used for le_x at training time.
loadm.fit(le_x, Y)
le = preprocessing.LabelEncoder()
for l in test_data.columns[1:]:
    test_data[l] = le.fit_transform(test_data[l])

# + colab={"base_uri": "https://localhost:8080/"} id="e7HeF6TtARAR" outputId="d1914c49-e3bd-40a4-f668-3f963b70c89b"
# Score against the sample-submission labels (gender-only baseline).
p = loadm.predict(test_data[test_data.columns[1:]])
metrics.accuracy_score(sub_data.Survived, p)

# + colab={"base_uri": "https://localhost:8080/"} id="Ue6eQAojCNRU" outputId="0c4b65c5-9985-4861-8096-2b059fd59084"
# Single hand-made example prediction.
# inputs = [Pclass, Sex, Age, SibSp, Parch, Fare, Embarked]
# Sex ['female'=0, 'male'=1]; Embarked ['C'=0, 'Q'=1, 'S'=2]
inputs = np.array([2, 1, 23, 43, 4, 4, 1]).reshape(-1, 1)
loadm.predict(inputs.T)

# + id="mgce2Q8LTux0"
# Final fit on train + test combined (test labels taken from the sample
# submission), then optionally persist the model.
test = test_data[test_data.columns[1:]].values
train = le_x.astype(np.int64).values
all_data_x = np.concatenate([train, test])
all_data_y = np.concatenate([Y, sub_data.Survived.values])
loadm.fit(all_data_x, all_data_y)
# pickle.dump(loadm, open("/content/drive/MyDrive/Colab Notebooks/Heoku_projects/h3_best_model.sav", "wb"))
h3_Titanic_Survival_Prediction__clf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import json import pandas as pd # - # # Load beneficiaries = pd.read_csv('data/esif_2014_2020_beneficiaries_lat_lon.csv') beneficiaries.head() beneficiaries = beneficiaries.rename(columns={ 'Coût total du projet £m': 'project_cost', 'Investissement FEDER/FSE £m': 'eu_investment', '% du projet cofinancé par l’UE': 'prop_eu_financed', 'latitude': 'lat', 'longitude': 'lng', 'Bénéficiaire': 'beneficiary', 'Nom du projet': 'project', 'Zone de partenariat économique local': 'economic_zone', 'Résumé du projet': 'summary', 'Date de commencement': 'start_date' }) beneficiaries['start_date'] = pd.to_datetime(beneficiaries.start_date, format='%Y-%m-%d') beneficiaries[['project_cost', 'eu_investment']] = beneficiaries[['project_cost', 'eu_investment']].apply( lambda s: s.str.strip().str.replace(',', '').str.replace(r'(\.)[\d|\.]{3,}\b', '') ).astype(float) beneficiaries['project'] = beneficiaries.project.str.replace('\n', '').str.replace("''", '') beneficiaries[['beneficiary', 'project', 'summary', 'start_date', 'project_cost', 'eu_investment', 'prop_eu_financed', 'lat', 'lng']].head() # # Save beneficiaries.to_pickle('data/beneficiaries.pkl') def make_beneficiaries_data_geo_json(beneficiaries): def make_feature(row): properties = { property: row[property] for property in ['beneficiary', 'project', 'project_cost', 'eu_investment'] } return { 'type': 'Feature', 'geometry': { "type": "Point", "coordinates": [row['lng'], row['lat']] }, 'properties': properties } features = list(beneficiaries.apply(make_feature, axis=1)) return { 'type': 'FeatureCollection', 'features': features } with open('data/beneficiaries.geo.json', 'w') as file: json.dump(make_beneficiaries_data_geo_json(beneficiaries), file, sort_keys=True)
prototype/Transform beneficiaries for heatmap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import os # + data_dir = '../data/processed_filtered' dataset = 'shakespeare' base_filename = 'train.csv' full_path = os.path.join(data_dir, dataset, base_filename) parallel_df = pd.read_csv(full_path, names=['text', 'label']) # - parallel_df def create_prompt(text, label): if label == 0: in_bucket = 'low' out_bucket = 'high' else: in_bucket = 'high' out_bucket = 'low' return f"transfer: {text} | input: {in_bucket} | output: {out_bucket} </s>" parallel_df['prompt_text'] = parallel_df.apply(lambda x : create_prompt(x['text'], x['label']), axis=1) parallel_df[['prompt_text', 'label']].to_csv(os.path.join(data_dir, dataset + '_prompt', base_filename), index=False, header=False)
ml/notebooks/Transfer_Prompt_Converter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: lbnc
#     language: python
#     name: lbnc
# ---

# # Fitting a model to data with MCMC
#
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import plot_helper as plot_helper
import pandas as pd
import emcee  # 2.2.1
import corner
import progressbar
import scipy.optimize as op

# This is a very short introduction to using MCMC for fitting a model to data; see references below for much more detailed examples.

# ### The ground truth
#
# Let us suppose we are interested in some physical process which relates the quantities $x$ and $y$ as
#
# \begin{equation}
# y=y_{max}\frac{x}{x+K},
# \end{equation}
# with true parameter values $y_{max}=1$ and $K=2$.


def model(x, ymax, K):
    """Saturating curve y = ymax * x / (x + K)."""
    return ymax * x / (x + K)


# True parameters and a dense grid on which to draw the noiseless curve.
ymax = 1
K = 2
x = np.linspace(0, 10, 50)
y = model(x, ymax, K)
plot_helper.plot1(x, y, title='Ground truth')

# Suppose we make some observations to measure $y_{max}$ and $K$.

# Draw N random sample positions and perturb the model with Gaussian noise.
N = 9
xobs = (np.random.rand(N)) * 10
yerrtrue = 0.03 * np.random.randn(N)  # normally distributed errors
yobs = model(xobs, ymax, K) + yerrtrue
yerr = yerrtrue * 1  # Our estimated error is not necessarily equal to the true error
plot_helper.plot2(x, y, xobs, yobs, yerr, title='Ground truth+observations')

# We would like to estimate the posterior probability distribution for $y_{max}$ and $K$, given these observations. In other words, we want $P(model|data)$, the probability of our model parameters given the data. Bayes' theorem gives an expression for this quantity:
#
# \begin{equation}
# P(model|data)=\frac{P(data|model)P(model)}{P(data)}
# \end{equation}
#
# Let's unpack this equation.

# + [markdown] heading_collapsed=true
# ### The prior
#
# $P(model)$ is the prior; it is a description of the uncertainty we place on the parameters in our model. For instance, let us assume that our parameters are initially normally distributed:
#
# \begin{align}
# y_{max}&=\mathcal{N}(1,0.2) \\
# K&=\mathcal{N}(2,0.2)
# \end{align}
#
# so that our model becomes
#
# \begin{equation}
# \hat{y}=\mathcal{N}(1,0.2)\frac{x}{x+\mathcal{N}(2,0.2)}.
# \end{equation}
#
# The prior probability of our model given parameters $y_{max}$ and $K$ is
#
# \begin{equation}
# P(model)=\mathcal{N}(y_{max}-1,0.2)\mathcal{N}(K-2,0.2).
# \end{equation}
#
# Typically we express these probablities in terms of log-probabilities so that the terms become additive:
#
# \begin{equation}
# \ln P(model)=\ln\mathcal{N}(y_{max}-1,0.2)+\ln\mathcal{N}(K-2,0.2).
# \end{equation}

# + hidden=true
def prior(x, mu, sigma):
    """Normal pdf N(mu, sigma) -- used here only to plot the priors.

    (This name is reused later for the log-prior over the parameter vector.)
    """
    return 1 / np.sqrt(2 * np.pi * sigma ** 2) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))


# Plot both parameter priors on a common grid.
mu1 = 1
mu2 = 2
sigma = 0.2
xp = np.linspace(0, 3, 100)
y1 = prior(xp, mu1, sigma)
y2 = prior(xp, mu2, sigma)
plot_helper.plot3(xp, y1, xp, y2, title='Prior')

# + [markdown] hidden=true
# ### The likelihood
#
# $P(data|model)$ is known as the likelihood. It's a measure of how likely it is that our model generates the observed data. In order to calculate this term we need a measure of how far our model predictions are from the actual observed data; typically we assume that deviations are due to normally-distributed noise, in which case our likelihood takes the simple form of squared residuals for each of the data points $y_n$ with error $s_n$:
#
# \begin{equation}
# P(data|model)=\prod_n\frac{1}{2\pi s_n^2}\exp\left(-\frac{(y_n-\hat{y}_n)^2}{2s_n^2}\right)
# \end{equation}
#
# The negative log-likelihood is therefore
#
# \begin{equation}
# \ln P(data|model)=-\frac{1}{2}\sum_n \left(\frac{(y_n-\hat{y}_n)^2}{s_n^2}+\ln (2 \pi s_n^2) \right)
# \end{equation}
# -

# ### MCMC
#
# What we want to do is determine the posterior probability distribution $\Pi(model|data)$. From this distribution we can determine probabilities as well as the expectation values of any quantity of interest by integrating. In other words, we would like to generate the probability landscape of likely model parameters, given our observations. In order to do this we must sample the landscape by varying the parameters. MCMC allows us to do this, without having to calculate the third term $P(data)$ in the Bayes formula which is nontrivial. The simplest MCMC algorithm is that of Metropolis:
# #### The Metropolis algorithm
#
# 1) First we start at an initial point for the parameters $y_{max,0}$, $K_0$. We compute the probabilities
#
# \begin{equation}
# P(data|y_{max,0},K_0)P(y_{max,0},K_0).
# \end{equation}
#
# 2) Then we move to a new location $y_{max,1}$, $K_1$. This new location is called the proposal, and it's generated by randomly moving to a new point with probability given by a normal distribution centered around the current location, and a fixed variance (the proposal width).
#
# 3) We calculate the new probabilities
#
# \begin{equation}
# P(data|y_{max,1},K_1)P(y_{max,1},K_1).
# \end{equation}
#
# 4) We then calculate the acceptance ratio:
#
# \begin{equation}
# \alpha=\frac{P(data|y_{max,1},K_1)P(y_{max,1},K_1)}{P(data|y_{max,0},K_0)P(y_{max,0},K_0)}.
# \end{equation}
#
# If $\alpha$ is greater than 1, i.e. the probability at the new point is higher, we accept the new point and move there. If $\alpha$ is smaller than 1, then we accept the move with a probability equal to $\alpha$.
#

# +
def normalprior(param,mu,sigma):
    """Log-density of a normal prior N(mu, sigma) evaluated at param."""
    return np.log( 1.0 / (np.sqrt(2*np.pi)*sigma) ) - 0.5*(param - mu)**2/sigma**2

def like(pos,x,y,yerr):
    """Gaussian log-likelihood of the saturating model ymax*x/(x+K).

    pos  -- parameter vector [ymax, K]
    x, y -- observed data arrays
    yerr -- 1-sigma measurement errors on y
    """
    ymax=pos[0]
    K=pos[1]
    model=ymax*x/(x+K)
    inv_sigma2=1.0/(yerr**2)
    return -0.5*(np.sum((y-model)**2*inv_sigma2-np.log(inv_sigma2)))

def prior(pos):
    """Joint log-prior: ymax ~ N(1, 0.5), K ~ N(2, 0.5).

    NOTE(review): this redefines the earlier `prior(x, mu, sigma)` pdf helper
    from the plotting cell above; rename one of them if both are still needed.
    """
    log_Prymax=normalprior(pos[0],1,0.5)
    log_PrK=normalprior(pos[1],2,0.5)
    return log_Prymax+log_PrK

def norm(pos,width):
    """Symmetric Gaussian proposal centred on the current position."""
    return pos+width*np.random.randn(2)

def metropolis(pos,MC,steps,width):
    """Metropolis sampler: fill MC (steps x 2) with a Markov chain.

    The log-posterior of the *current* position is cached and only refreshed
    when a proposal is accepted (the original recomputed it on every
    iteration).  The random-number sequence, and therefore the chain itself,
    is unchanged.
    """
    oldloglike=like(pos,xobs,yobs,yerr)+prior(pos)
    for i in range(steps):
        proposal=norm(pos,width)
        newloglike=like(proposal,xobs,yobs,yerr)+prior(proposal)
        # Metropolis rule: accept uphill moves always; accept downhill moves
        # with probability exp(newloglike - oldloglike).
        if newloglike>=oldloglike or np.random.rand()<np.exp(newloglike-oldloglike):
            pos=proposal
            oldloglike=newloglike
        MC[i]=pos
    return MC

steps=5000
width=0.1
MC=np.zeros(steps*2).reshape(steps,2)
pos=np.array([1,2])
MC=metropolis(pos,MC,steps,width)
plt.plot(MC[:,0],MC[:,1],'-')
plt.show()
# -

# Our Markov chain samples positions in parameter space, spending proportionately more time in regions of high probability mass. While the Metropolis algorithm is intuitive and instructive it is not the most efficient MCMC algorithm, so for the next part we will apply a more efficient ensemble sampler.
# #### A more efficient algorithm: Goodman and Weare affine-invariant ensemble samplers

# +
def lnlike(theta,x,y,yerr):
    """Gaussian log-likelihood of the model ymax*x/(x+K) for theta=(ymax, K)."""
    ymax,K=theta
    model=ymax*x/(x+K)
    inv_sigma2=1.0/(yerr**2)
    return -0.5*(np.sum((y-model)**2*inv_sigma2-np.log(inv_sigma2)))

def lnprior(theta):
    """Log-prior: positivity cut, then ymax ~ N(1, 0.5) and K ~ N(2, 0.5).

    Reuses the `normalprior` helper defined in the Metropolis cell above
    instead of repeating the normal log-density inline.
    """
    ymax,K=theta
    if not (0<ymax and 0<K):
        return -np.inf # Hard-cutoff for positive value constraint
    return normalprior(ymax,1,0.5)+normalprior(K,2,0.5)

def lnprob(theta, x, y, yerr):
    """Log-posterior (up to a constant): lnprior + lnlike, -inf outside support."""
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)
# -

ndim,nwalkers,threads,iterations,tburn=2,20,8,1000,200
labels=["$y_{max}$","$K$"]
parametertruths=[1,2]

# NOTE(review): both walker coordinates are initialised near 1, although the
# K truth (and prior mean) is 2 -- confirm this is the intended start ball.
pos=[np.array([ 1*(1+0.05*np.random.randn()),
                1*(1+0.05*np.random.randn())]) for i in range(nwalkers)]

sampler=emcee.EnsembleSampler(nwalkers,ndim,lnprob,a=2,args=(xobs,yobs,yerr),threads=threads)

# + code_folding=[0]
### Start MCMC
bar=progressbar.ProgressBar(max_value=iterations)
for i, result in enumerate(sampler.sample(pos, iterations=iterations)):
    bar.update(i)
### Finish MCMC

samples=sampler.chain[:,:,:].reshape((-1,ndim)) # shape = (nsteps, ndim)

df=pd.DataFrame(samples)
df.to_csv(path_or_buf='samplesout_.csv',sep=',')
df1=pd.read_csv('samplesout_.csv',delimiter=',')

# Put dataframe into array; columns = parameters.  Column 0 of the CSV is the
# written index, hence the 1: slice (vectorised replacement for the original
# per-column copy loop).
data=df1.iloc[:,1:].to_numpy(dtype=float)
data2=np.zeros((df1.shape[0]-tburn*nwalkers)*(df1.shape[1]-1)).reshape((df1.shape[0]-(tburn*nwalkers)),(df1.shape[1]-1))
# The flattened chain in df1 is walker-major: walker j occupies rows
# iterations*(j-1) .. iterations*j.  For each parameter column i, copy each
# walker's post-burn-in rows (skipping the first `tburn` samples) into the
# matching contiguous slice of data2.  Column 0 of df1 is the CSV index,
# hence the i+1 offset.
for i in range(0,int(df1.shape[1]-1)):
    for j in range(1,nwalkers+1):
        data2[(iterations-tburn)*(j-1):(iterations-tburn)*(j),i]=np.array(df1.iloc[iterations*j-iterations+tburn:iterations*j,i+1])

# Burn-in-stripped samples used for all posterior plots below.
samplesnoburn=data2

#plot_helper.plottraces(samples,labels,parametertruths,nwalkers,iterations,1)

# Corner (pairwise marginal) plot of the posterior with 16/50/84% quantiles.
fig=corner.corner(samplesnoburn, labels=labels,truths=parametertruths,quantiles=[0.16, 0.5, 0.84],show_titles=True, title_fmt='.2e', title_kwargs={"fontsize": 10},verbose=False)
fig.savefig("triangle.pdf")
# -

# Posterior parameter distributions overlaid on the priors.
plot_helper.plot4(xp,y1,xp,y2,samplesnoburn,title='Posterior')

# Posterior-predictive curves overlaid on the data.
plot_helper.plot5(x,y,xobs,yobs,yerr,samplesnoburn,xlabel='x',ylabel='y',legend=False,title=False)

# **References**
#
# * MacKay 2003 http://www.inference.org.uk/itprnn/book.html - the bible for MCMC and inferential methods in general
#
# * Goodman and Weare 2010 https://projecteuclid.org/euclid.camcos/1513731992 - original paper describing affine-invariant ensemble sampling
#
# * emcee http://dfm.io/emcee/current/user/line/ - Python implementation of the Goodman and Weare algorithm
#
# * Fitting a model to data https://arxiv.org/abs/1008.4686 - excellent tutorial on how to 'properly' fit your data
#
# * Hamiltonian Monte Carlo https://arxiv.org/abs/1701.02434 - a more efficient MCMC algorithm, as implemented in Stan (http://mc-stan.org)
#
# * Another nice online tutorial http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/

# # Fit to tellurium ODE model

import tellurium as te
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import plot_helper as plot_helper
import pandas as pd
import emcee
import corner
import progressbar

# Here is a more sophisticated example similar to what you might encounter in the lab.
Suppose we have a dynamical system # + code_folding=[] def MM_genmodel(): ''' Michaelis-Menten enzyme model ''' rr = te.loada(''' J1: E+S->ES ; k1*E*S-k2*ES ; J2: ES->E+S+P ; k3*ES ; k1=0; k2=0; k3=0; ''') return(rr) def simulatemodel(rr,tmax,nsteps,paramdict): for j in rr.model.getGlobalParameterIds(): rr[j]=paramdict[j] # set parameters for j in rr.model.getFloatingSpeciesIds(): rr[j]=paramdict[j] # set concentrations out=rr.simulate(0,tmax,points=nsteps) return(out,rr) tmax=20 nsteps=51 keys=['k1','k2','k3','E','S','ES','P'] params=[1,1,1,1,10,0,0] paramdict=dict(zip(keys,params)) # Generate model rr=MM_genmodel() # Simulate model out,_=simulatemodel(rr,tmax,nsteps,paramdict) rr.plot() # - # Let's do a titration experiment and MCMC to extract kinetic parameters for this enzyme. # + code_folding=[0] np.random.seed(42) def titration_expt(titration,k1,k2,k3,tmax,nsteps,rr): Parr=np.zeros((nsteps,len(titration))) for j in range(len(titration)): keys=['k1','k2','k3','E','S','ES','P'] params=[k1,k2,k3,1,titration[j],0,0] paramdict=dict(zip(keys,params)) out,_=simulatemodel(rr,tmax,nsteps,paramdict) Parr[:,j]=out[:,4] return Parr rr=MM_genmodel() tmax=20 nsteps=51 Parr=titration_expt([0,5,10,15,20],1,10,1,tmax,nsteps,rr) Parr+=0.2*np.random.randn(Parr.shape[0],Parr.shape[1])*Parr+0.0001*np.random.randn(Parr.shape[0],Parr.shape[1]) # Add noise plt.plot(Parr,'o') ; plt.show() # + code_folding=[35] # Define MCMC functions def normalprior(param,mu,sigma): return np.log( 1.0 / (np.sqrt(2*np.pi)*sigma) ) - 0.5*(param - mu)**2/sigma**2 def lnlike(theta,inputs): k1,k2,k3=theta # DATA y=inputs['y'] yerr=inputs['yerr'] # MODEL INPUTS tmax=inputs['tmax'] nsteps=inputs['nsteps'] titration=inputs['titration'] rr=inputs['model'] ymodel=titration_expt(titration,k1,k2,k3,tmax,nsteps,rr) inv_sigma2=1.0/(yerr**2) return -0.5*(np.sum((y-ymodel)**2*inv_sigma2-np.log(inv_sigma2))) def lnprior(theta): k1,k2,k3=theta if not (0<k1 and 0<k2 and 0<k3) : return -np.inf # Hard-cutoff for positive 
value constraint log_PRs=[normalprior(k1,5,10), normalprior(k2,10,10), normalprior(k3,1,0.01)] return np.sum(log_PRs) def lnprob(theta,inputs): lp = lnprior(theta) if not np.isfinite(lp): return -np.inf return lp + lnlike(theta,inputs) def gelman_rubin(chain): ''' Gelman-Rubin diagnostic for one walker across all parameters. This value should tend to 1. ''' ssq=np.var(chain,axis=1,ddof=1) W=np.mean(ssq,axis=0) Tb=np.mean(chain,axis=1) Tbb=np.mean(Tb,axis=0) m=chain.shape[0]*1.0 n=chain.shape[1]*1.0 B=n/(m-1)*np.sum((Tbb-Tb)**2,axis=0) varT=(n-1)/n*W+1/n*B Rhat=np.sqrt(varT/W) return Rhat # + # Load data yobs=Parr yerr=Parr*0.2 # Generate model rr=MM_genmodel() inputkeys=['tmax','nsteps','titration','model','y','yerr'] inputvalues=[20,51,[0,5,10,15,20],rr,yobs,yerr] inputs=dict(zip(inputkeys,inputvalues)) np.random.seed(42) # + code_folding=[] # MLE pos=[ 5, # k1 10, # k2 1 # k3 ] nll= lambda *args: -lnlike(*args) result=op.minimize(nll,pos,method='BFGS', args=(inputs)) paramstrue = result["x"] k1_MLE=paramstrue[0] k2_MLE=paramstrue[1] k3_MLE=paramstrue[2] print(k1_MLE,k2_MLE,k3_MLE) tmax=20 nsteps=51 titration=[0,5,10,15,20] ymodel=titration_expt(titration,k1_MLE,k2_MLE,k3_MLE,tmax,nsteps,rr) plt.plot(yobs,'o') plt.plot(ymodel,'k-',alpha=1) ; plt.show() # + code_folding=[] # Run MCMC ndim,nwalkers,threads,iterations,tburn=3,50,1,3000,1000 labels=["$k_1$","$k_2$","$k_3$"] parametertruths=[1,10,1] pos=[np.array([ k1_MLE*(1+0.05*np.random.randn()), k2_MLE*(1+0.05*np.random.randn()), k3_MLE*(1+0.05*np.random.randn())]) for i in range(nwalkers)] sampler=emcee.EnsembleSampler(nwalkers,ndim,lnprob,a=2,args=([inputs]),threads=threads) ### Start MCMC iterations=iterations bar=progressbar.ProgressBar(max_value=iterations) for i, result in enumerate(sampler.sample(pos, iterations=iterations)): bar.update(i) ### Finish MCMC samples=sampler.chain[:,:,:].reshape((-1,ndim)) # shape = (nsteps, ndim) samplesnoburn=sampler.chain[:,tburn:,:].reshape((-1,ndim)) # shape = (nsteps, 
ndim) df=pd.DataFrame(samples) df.to_csv(path_or_buf='samplesout_MM.csv',sep=',') plot_helper.plottraces(samples,labels,parametertruths,nwalkers,iterations,1) fig=corner.corner(samplesnoburn, labels=labels,truths=parametertruths,quantiles=[0.16, 0.5, 0.84],show_titles=True, title_fmt='.2e', title_kwargs={"fontsize": 10},verbose=False) fig.savefig("triangle_MM.pdf") ### Gelman-Rubin diagnostic # NOT RELIABLE ESTIMATE FOR EMCEE AS WALKERS NOT INDEPENDENT! plt.close("all") figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3. font_options={'size':'12','family':'sans-serif','sans-serif':'Arial'} plt.rc('figure', **figure_options) plt.rc('font', **font_options) chain=sampler.chain[:,tburn:,:] # shape = nwalkers, iterations-tburn, ndim print('Mean acceptance fraction', np.mean(sampler.acceptance_fraction)) print('GR diagnostic for one walker', gelman_rubin(chain)[0]) # Change index to get a different walker chain_length=chain.shape[1] step_sampling=np.arange(int(0.2*chain_length),chain_length,50) rhat=np.array([gelman_rubin(chain[:,:steps,:])[0] for steps in step_sampling]) plt.plot(step_sampling,rhat); ax=plt.gca(); ax.axhline(y=1.1,color='k'); ax.set_title('GR diagnostic'); plt.show() # - # Autocorrelation time analysis. 'c' should be as large as possible (default is 5) tau = np.mean([emcee.autocorr.integrated_time(walker,c=1) for walker in sampler.chain[:,:,:]], axis=0) print('Tau', tau) for k1,k2,k3 in samplesnoburn[np.random.randint(len(samplesnoburn), size=10)]: tmax=20 nsteps=51 titration=[0,5,10,15,20] ymodel=titration_expt(titration,k1,k2,k3,tmax,nsteps,rr) plt.plot(ymodel,'k-',alpha=0.1) plt.plot(yobs,'o'); plt.show()
.ipynb_checkpoints/Fitting a model to data with MCMC-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from chart_ipynb import pie, utils
import random

# Build a single-dataset pie chart of weekly activities.
pie_chart = pie.Pie()

labels = [['Eating', 'Dinner'], ['Drinking', 'Water'], 'Sleeping', ['Designing', 'Graphics'], 'Coding', 'Cycling', 'Running']
colors = ['red', 'blue', 'green', 'cyan', 'pink', 'violet', 'purple']

# One random 0-100 value per label.
data = [round(100 * random.random()) for _ in labels]

# Translucent fills with solid borders in the matching colours.
fills = [utils.color_rgb(c, 0.1) for c in colors]
borders = [utils.color_rgb(c) for c in colors]
pie_chart.add_dataset(labels, data, 'dataset',
                      backgroundColor=fills,
                      borderColor=borders)

pie_chart.set_title('Pie Chart - single dataset')
pie_chart.setup()
pie_chart.debugging_display()

# +
# https://stackoverflow.com/questions/26257268/click-events-on-pie-charts-in-chart-js
def index_callback(index):
    # Echo which slice was clicked, resolved back to its label.
    print("for", index, "label is", labels[index])

pie_chart.js_init("""
    var canvas = element.chart_info.canvas[0];
    var chart = element.chart_info.chart;
    canvas.onclick = function(event) {
        debugger;
        console.log("onclick called" + event);
        var data = chart.getElementAtEvent(event);
        console.log("data=" + data);
        var index = data[0]._index;
        console.log("index = " + index);
        index_callback(index);
    }
""", index_callback=index_callback)
# -

from IPython.display import display
from IPython.display import HTML

# Render ten headings to exercise repeated display() calls.
for n in range(10):
    display(HTML("<h1>hello %s</h1>" % n))
notebooks/pie events experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Spark Image](https://upload.wikimedia.org/wikipedia/commons/thumb/f/f3/Apache_Spark_logo.svg/1200px-Apache_Spark_logo.svg.png) # # Low-Level Unstructured APIs # In this notebook we will discuss the oldest fundamental concept in spark called *RDDs (Resilient distributed # datasets)*.<br> # To truly understand how Spark works, `you must understand the essence of RDDs`. They provide an extremely solid foundation that other abstractions are built upon. Starting with Spark 2.0, Spark users will have fewer needs for directly interacting with RDD, but having a strong mental model of how RDD works is essential. `In a nutshell, Spark revolves around the concept of RDDs`. # ## Introduction to RDDs # An RDD in Spark is simply an immutable distributed collection of objects. Each is split into multiple partitions, which may be computed on different nodes of the cluster.<br> # RDDs are `immutable`, `fault-tolerant`, `parallel data structures` that let users explicitly persist intermediate results `in memory`, control their partitioning to optimize data placement, and `manipulate` them using a rich set of `operators`. # ## Immutable # # RDDs are designed to be immutable, which means you `can’t` specifically `modify a particular row` in the dataset represented by that RDD. You can call one of the available RDD operations to manipulate the rows in the RDD into the way you want, but that operation will `return a new RDD`. The `basic RDD will stay unchanged`, and the new RDD # will contain the data in the way that you want. 
*Spark leverages Immutability to efficiently provide the fault tolerance capability.*

# ## Fault Tolerant
#
# The ability to process multiple datasets in parallel usually requires a cluster of machines to host and execute the computational logic. If one or more machines die due to unexpected circumstances, then what happens to the data in those machines? Spark automatically takes care of handling the failure on behalf of its users by rebuilding the failed portion using the lineage information.

# ## Parallel Data Structures
#
# Suppose you have a huge amount of data and you need to process each and every row of the dataset. One solution would be to iterate over each row and process it one by one. But that would be very slow. So instead we divide the huge chunk of data into smaller chunks of data. Each chunk contains a collection of rows, and all the chunks are processed in parallel. This is where the phrase parallel data structures comes from.

# ## In-Memory Computing
#
# The idea of speeding up the computation of large datasets that reside on disks in a parallelized manner using a cluster of machines was introduced by a MapReduce paper from Google. RDD pushes the speed boundary by introducing a novel idea, which is the ability to do distributed in-memory computation.

# ## RDD Operations
#
# RDDs provide a rich set of commonly needed data processing operations. They include the ability to perform data transformation, filtering, grouping, joining, aggregation, sorting, and counting.<br>
# Each row in a dataset is represented as a Java object, and the structure of this Java object is opaque to Spark. The user of RDD has complete control over how to manipulate this Java object. This flexibility comes with a lot of responsibilities, meaning some of the commonly needed operations, such as computing the average, will have to be handcrafted.
Higher-level abstractions such as the Spark SQL component will provide this functionality out of the box.<br> # # ***The RDD operations are classified into two types: `transformations` and `actions`*** # # | Type | Evaluation | Returned Value | # |--|--|--| # | Transformation | Lazy | Another RDD | # | Action | Eager | Some result or write result to disk | # # Transformation operations are lazily evaluated, meaning Spark will delay the evaluations of the invoked operations until an action is taken. In other words, the transformation operations merely record the specified transformation logic and will apply them at a later point. On the other hand, invoking an action operation will trigger the evaluation of all the transformations that preceded it, and it will either return some result to the driver or write data to a storage system, such as HDFS or the local file system. # ## Initialising Spark # The programming language Python is used for the implementation in this course - for this we use 'pyspark. (PySpark documentation https://spark.apache.org/docs/latest/api/python/) # PySpark is an interface for Apache Spark in Python. It not only allows you to write Spark applications using Python APIs, but also provides the PySpark shell for interactively analyzing your data in a distributed environment. # + # ipmort libraries from pyspark from pyspark import SparkConf, SparkContext # set values for Spark configuration conf = SparkConf().setMaster("local").setAppName("Tutorial") # get (if already running) or create a Spark Context sc = SparkContext.getOrCreate(conf=conf) # - # check (try) if Spark context variable (sc) exists and print information about the Spark context try: sc except NameError: print("Spark context does not context exist. 
Please create Spark context first (run cell above).") else: configurations = sc.getConf().getAll() for item in configurations: print(item) # print link to Spark UI, Version, Master and AppName sc # ## Creating RDDs # **There are two ways to create RDDs:** # # **`The first way to create an RDD is to parallelize an python object, meaning converting it to a distributed dataset that can be operated in parallel.`** # create a list of strings stringList = ["Spark is awesome","Spark is cool"] # covert list of strings into a Spark RDD stringRDD = sc.parallelize(stringList) # output RDD information stringRDD # *One thing to notice is that you are not able to see the output, because of Spark's Lazy evaluation utill you call an action on that RDD.* # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) stringRDD.collect() # *.collect() is an `action` as it name suggests it collects all the rows from each of the partitions in an RDD and brings them over to the driver program.* # **`The second way to create an RDD is to read a dataset from a storage system, which can be a local computer file system, HDFS, Cassandra, Amazon S3, and so on.`** # read text file inro RDD ratings = sc.textFile("data/ml-1m/ratings.dat") # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) and output first 5 rows ratings.collect()[:5] # In this particular example we had 1M rows calling .collect() of it didn't take lot of time but If your RDD contains 100 billion rows, then it is not a good idea to invoke the collect action because the driver program most likely doesn’t have sufficient memory to hold all those rows. As a result, the driver will most likely run into an out-of-memory error and your Spark application or shell will die. This action is typically used once the RDD is filtered down to a smaller size that can fit the memory size of the driver program. 
# take the first 5 elements of the RDD ratings.take(5) # ## Transformations # # Transformations are operations on RDDs that return a new RDD. Transformed RDDs are computed lazily, only when you # use them in an action. # # Following Table describes commonly used transformations. # # <table> # <tbody><tr><th style="width:25%">Transformation</th><th>Meaning</th></tr> # <tr> # <td> <b>map</b>(<i>func</i>) </td> # <td> Return a new distributed dataset formed by passing each element of the source through a function <i>func</i>. </td> # </tr> # <tr> # <td> <b>filter</b>(<i>func</i>) </td> # <td> Return a new dataset formed by selecting those elements of the source on which <i>func</i> returns true. </td> # </tr> # <tr> # <td> <b>flatMap</b>(<i>func</i>) </td> # <td> Similar to map, but each input item can be mapped to 0 or more output items (so <i>func</i> should return a Seq rather than a single item). </td> # </tr> # <tr> # <td> <b>mapPartitions</b>(<i>func</i>) <a name="MapPartLink"></a> </td> # <td> Similar to map, but runs separately on each partition (block) of the RDD, so <i>func</i> must be of type # Iterator&lt;T&gt; =&gt; Iterator&lt;U&gt; when running on an RDD of type T. </td> # </tr> # <tr> # <td> <b>mapPartitionsWithIndex</b>(<i>func</i>) </td> # <td> Similar to mapPartitions, but also provides <i>func</i> with an integer value representing the index of # the partition, so <i>func</i> must be of type (Int, Iterator&lt;T&gt;) =&gt; Iterator&lt;U&gt; when running on an RDD of type T. # </td> # </tr> # <tr> # <td> <b>sample</b>(<i>withReplacement</i>, <i>fraction</i>, <i>seed</i>) </td> # <td> Sample a fraction <i>fraction</i> of the data, with or without replacement, using a given random number generator seed. </td> # </tr> # <tr> # <td> <b>union</b>(<i>otherDataset</i>) </td> # <td> Return a new dataset that contains the union of the elements in the source dataset and the argument. 
</td> # </tr> # <tr> # <td> <b>intersection</b>(<i>otherDataset</i>) </td> # <td> Return a new RDD that contains the intersection of elements in the source dataset and the argument. </td> # </tr> # <tr> # <td> <b>distinct</b>([<i>numPartitions</i>])) </td> # <td> Return a new dataset that contains the distinct elements of the source dataset.</td> # </tr> # <tr> # <td> <b>groupByKey</b>([<i>numPartitions</i>]) <a name="GroupByLink"></a> </td> # <td> When called on a dataset of (K, V) pairs, returns a dataset of (K, Iterable&lt;V&gt;) pairs. <br> # <b>Note:</b> If you are grouping in order to perform an aggregation (such as a sum or # average) over each key, using <code>reduceByKey</code> or <code>aggregateByKey</code> will yield much better # performance. # <br> # <b>Note:</b> By default, the level of parallelism in the output depends on the number of partitions of the parent RDD. # You can pass an optional <code>numPartitions</code> argument to set a different number of tasks. # </td> # </tr> # <tr> # <td> <b>reduceByKey</b>(<i>func</i>, [<i>numPartitions</i>]) <a name="ReduceByLink"></a> </td> # <td> When called on a dataset of (K, V) pairs, returns a dataset of (K, V) pairs where the values for each key are aggregated using the given reduce function <i>func</i>, which must be of type (V,V) =&gt; V. Like in <code>groupByKey</code>, the number of reduce tasks is configurable through an optional second argument. </td> # </tr> # <tr> # <td> <b>aggregateByKey</b>(<i>zeroValue</i>)(<i>seqOp</i>, <i>combOp</i>, [<i>numPartitions</i>]) <a name="AggregateByLink"></a> </td> # <td> When called on a dataset of (K, V) pairs, returns a dataset of (K, U) pairs where the values for each key are aggregated using the given combine functions and a neutral "zero" value. Allows an aggregated value type that is different than the input value type, while avoiding unnecessary allocations. 
Like in <code>groupByKey</code>, the number of reduce tasks is configurable through an optional second argument. </td> # </tr> # <tr> # <td> <b>sortByKey</b>([<i>ascending</i>], [<i>numPartitions</i>]) <a name="SortByLink"></a> </td> # <td> When called on a dataset of (K, V) pairs where K implements Ordered, returns a dataset of (K, V) pairs sorted by keys in ascending or descending order, as specified in the boolean <code>ascending</code> argument.</td> # </tr> # <tr> # <td> <b>join</b>(<i>otherDataset</i>, [<i>numPartitions</i>]) <a name="JoinLink"></a> </td> # <td> When called on datasets of type (K, V) and (K, W), returns a dataset of (K, (V, W)) pairs with all pairs of elements for each key. # Outer joins are supported through <code>leftOuterJoin</code>, <code>rightOuterJoin</code>, and <code>fullOuterJoin</code>. # </td> # </tr> # <tr> # <td> <b>cogroup</b>(<i>otherDataset</i>, [<i>numPartitions</i>]) <a name="CogroupLink"></a> </td> # <td> When called on datasets of type (K, V) and (K, W), returns a dataset of (K, (Iterable&lt;V&gt;, Iterable&lt;W&gt;)) tuples. This operation is also called <code>groupWith</code>. </td> # </tr> # <tr> # <td> <b>cartesian</b>(<i>otherDataset</i>) </td> # <td> When called on datasets of types T and U, returns a dataset of (T, U) pairs (all pairs of elements). </td> # </tr> # <tr> # <td> <b>pipe</b>(<i>command</i>, <i>[envVars]</i>) </td> # <td> Pipe each partition of the RDD through a shell command, e.g. a Perl or bash script. RDD elements are written to the # process's stdin and lines output to its stdout are returned as an RDD of strings. </td> # </tr> # <tr> # <td> <b>coalesce</b>(<i>numPartitions</i>) <a name="CoalesceLink"></a> </td> # <td> Decrease the number of partitions in the RDD to numPartitions. Useful for running operations more efficiently # after filtering down a large dataset. 
</td> # </tr> # <tr> # <td> <b>repartition</b>(<i>numPartitions</i>) </td> # <td> Reshuffle the data in the RDD randomly to create either more or fewer partitions and balance it across them. # This always shuffles all data over the network. <a name="RepartitionLink"></a></td> # </tr> # <tr> # <td> <b>repartitionAndSortWithinPartitions</b>(<i>partitioner</i>) <a name="Repartition2Link"></a></td> # <td> Repartition the RDD according to the given partitioner and, within each resulting partition, # sort records by their keys. This is more efficient than calling <code>repartition</code> and then sorting within # each partition because it can push the sorting down into the shuffle machinery. </td> # </tr> # </tbody></table> # ## Transformation Examples # ### Map transformation # # *Return a new RDD by applying a function to each element of this RDD* # use the already created RDD and convert all letter to uppercase # using the map transformation and the upper function stringRDD_uppercase= stringRDD.map(lambda x: x.upper()) # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) stringRDD_uppercase.collect() # + # implement the function 'alternate_char_upper' which converts every other letter to upper case def alternate_char_upper(text): new_text= [] for i, character in enumerate(text): if i % 2 == 0: new_text.append(character.upper()) else: new_text.append(character) return ''.join(new_text) # use the already created RDD and use the map transformation with the 'alternate_char_upper' function stringRDD_alternate_uppercase= stringRDD.map(alternate_char_upper) # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) stringRDD_alternate_uppercase.collect() # - # ### Flat Map Transfermation # # *Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results* # use the already created RDD and split the string at every ' ' character # using the flatMap transformation and the split function 
flatMap_Split= stringRDD.flatMap(lambda x: x.split(" ")) # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) flatMap_Split.collect() # ### Difference Between Map and FlatMap print("Split using Map transformation:") # use the already created RDD (stringRDD) and split the string at every ' ' character # using the map transformation and the split function map_Split= stringRDD.map(lambda x: x.split(" ")) # retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) map_Split.collect() print("Split using FlatMap transformation:") # the FlatMap tranformation on the RDD (stringRDD) is already defined (see two code cells above) # so we just have to retrieve all the elements of the RDD/DataFrame/Dataset (from all nodes) flatMap_Split.collect() # Since the source RDD contains two strings, the map transformation returns two separate objects (each with separate strings). The flatMap transformation returns only one object with all separated strings from both input objects (strings). # ### Filter Transformation # # *Return a new RDD containing only the elements that satisfy a predicate* # filter all objects from RDD containing 'awesome' awesomeLineRDD = stringRDD.filter(lambda x: "awesome" in x) awesomeLineRDD.collect() # filter all objects from RDD containing 'spark' sparkLineRDD = stringRDD.filter(lambda x: "spark" in x.lower()) sparkLineRDD.collect() # ### Union Transformation # # *Return a new RDD containing all items from two original RDDs. Duplicates are not culled.* # create two new RDDs rdd1 = sc.parallelize([1,2,3,4,5]) rdd2 = sc.parallelize([1,6,7,8]) # create a third RDD with 'union' transformation on RDD1 and RDD2 rdd3 = rdd1.union(rdd2) rdd3.collect() # ### Intersection Transformation # # *Return the intersection of this RDD and another one. 
The output will not contain any duplicate elements, even if the input RDDs did.* # create two new RDDs rdd1 = sc.parallelize(["One", "Two", "Three"]) rdd2 = sc.parallelize(["two","One","threed","One"]) # create a third RDD with 'intersection' transformation on RDD1 and RDD2 rdd3 = rdd1.intersection(rdd2) rdd3.collect() # ### Substract Trsnformation # # *Return each value in `self` that is not contained in `other`.* </br> # (return a new DataFrame containing rows in this DataFrame but not in another DataFrame.)</br> # This is equivalent to EXCEPT DISTINCT in SQL. # + # create a new RDD 'words', use transformation flatMap and map ... all in one line words = sc.parallelize(["The amazing thing about spark \ is that it is very simple to learn"]).flatMap(lambda x: x.split(" ")).map(lambda c: c.lower()) # create a new TDD 'stopWords' stopWords = sc.parallelize(["the", "it", "is", "to", "that", '']) # use substract transformation on words RDD. realWords = words.subtract(stopWords) realWords.collect() # - # ### Distinct Transformation # # *Return a new RDD containing distinct items from the original RDD (omitting all duplicates)* # create new RDD 'duplicateValueRDD' duplicateValueRDD = sc.parallelize(["one", 1,"two", 2, "three", "one", "two", 1, 2]) # use distinct transformation on RDD and collect action - in one line duplicateValueRDD.distinct().collect() # ### Sample Transformation # # *Return a new RDD containing a statistical sample of the original RDD* # + # create a new RDD 'numbers'. The second parameter of the parallelize transformation is optional integer value # and defines the number of partitions the data would be parallelized to. numbers = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 2) # The transformation 'sample' returns a sampled subset of the numbers RDD. # The first parameter (here True) defines 'withReplacement'. The same element can be produced more than # once as the result of sample. 
# The second parameter (here 0.3) defines the fraction of rows to generate. Note that it doesn’t guarantee # to provide the exact number of the fraction of records. numbers.sample(True, 0.3).collect() # - # ### GroupBy Transformation # # *Group the data in the original RDD. Create pairs where the key is the output of a user function, and the value is all items for which the function yields this key.* # create a new RDD 'x' x = sc.parallelize(['John', 'Fred', 'Anna', 'James']) # groupBy all elements by the first letter of each element (which will be used as keys) y = x.groupBy(lambda w: w[0]) # 'loop' through all element of the 'y' RDD and print the objects print([(k, list(v)) for (k, v) in y.collect()]) # ## GroupByKey Transformation # # *Group the values for each key in the original RDD. Create a new pair where the original key corresponds to this collected group of values.* # create new 'x' RDD with key,value pairs x = sc.parallelize([('B',5),('B',4),('A',3),('A',2),('A',1)]) # create RDD 'y' using the groupBy transformation on the keys of RDD 'x' y = x.groupByKey() # print objects of RDD 'x' print(x.collect()) # 'loop' through all element of the 'y' RDD and print the objects print(list((j[0], list(j[1])) for j in y.collect())) # ## MapPartitions Transformation # # *Return a new RDD by applying a function to each partition of this RDD* # create new RDD with two partitions x = sc.parallelize([1,2,3], 2) # define the function 'f' - it is an iterable. 
# The function below sums every value in a partition and additionally yields
# the constant 42, so each partition contributes two output elements
def f(iterator):
    partition_total = sum(iterator)
    yield partition_total
    yield 42

# apply 'f' once per partition via the mapPartitions transformation
y = x.mapPartitions(f)

# glom() groups the elements of each partition into a list
print(x.glom().collect())
print(y.glom().collect())

# ### MapPartitionWithIndex Transformation
#
# *Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition.*

# build an RDD spread over two partitions
x = sc.parallelize([1,2,3], 2)

# generator that pairs each partition's index with the sum of its values
def f(partitionIndex, iterator):
    partition_total = sum(iterator)
    yield (partitionIndex, partition_total)

# apply 'f' per partition, keeping track of the originating partition index
y = x.mapPartitionsWithIndex(f)

# glom() groups the elements of each partition into a list
print(x.glom().collect())
print(y.glom().collect())

# ### Join Transformation
#
# *Return a new RDD containing all pairs of elements having the same key in the original RDDs*
#
# `join(otherRDD, numPartitions=None)`

# first keyed RDD
x = sc.parallelize([("a", 1), ("b", 2)])

# second keyed RDD that shares keys with 'x'
y = sc.parallelize([("a", 3), ("a", 4), ("b", 5)])

# inner-join on the keys of RDDs 'x' and 'y'
z = x.join(y)
print(z.collect())

# ### Coalesce Transformation
#
# *Return a new RDD which is reduced to a smaller number of partitions*
#
# `coalesce(numPartitions, shuffle=False)`

# an RDD distributed over three partitions
x = sc.parallelize([1, 2, 3, 4, 5], 3)

# shrink the RDD down to two partitions with the coalesce transformation
y = x.coalesce(2)
print(x.glom().collect())
print(y.glom().collect())

# ### KeyBy Transformation
#
# *Create a Pair RDD, forming one pair for each item in the original RDD.
The pair’s key is calculated from the value via a user-supplied function.* # # create a new RDD 'x' x = sc.parallelize(['John', 'Fred', 'Anna', 'James']) # use the first letter of each element as the key for the element y = x.keyBy(lambda w: w[0]) print(y.collect()) # ### PartitionBy Transformation # # *Return a new RDD with the specified number of partitions, placing original items into the partition returned by a user supplied function* # # `partitionBy(numPartitions, partitioner=portable_hash)` # # create a RDD with three partition x = sc.parallelize([('J','James'),('F','Fred'),('A','Anna'),('J','John')], 3) # creta a new RDD 'y' with only two partitions and place each item in partition 0 # if the first letter of the item is < 'H'. The item will be placed in partition 1 otherwise. y = x.partitionBy(2, lambda w: 0 if w[0] < 'H' else 1) # glom() flattens elements on the same partition print(x.glom().collect()) print(y.glom().collect()) # ### Zip Transformation # # *Return a new RDD containing pairs whose key is the item in the original RDD, and whose # value is that item’s corresponding element (same partition, same index) in a second RDD* # # `zip(otherRDD)` # create RDD 'x' x = sc.parallelize([1, 2, 3]) # create RDD 'y' using the transformation map on RDD 'x' y = x.map(lambda n:n*n) # create RDD 'z' using the transformation zip on RDDs 'x' and 'y' z = x.zip(y) print(x.collect()) print(y.collect()) print(z.collect()) # ## Actions # <table class="table"> # <tbody><tr><th>Action</th><th>Meaning</th></tr> # <tr> # <td> <b>reduce</b>(<i>func</i>) </td> # <td> Aggregate the elements of the dataset using a function <i>func</i> (which takes two arguments and returns one). The function should be commutative and associative so that it can be computed correctly in parallel. </td> # </tr> # <tr> # <td> <b>collect</b>() </td> # <td> Return all the elements of the dataset as an array at the driver program. 
This is usually useful after a filter or other operation that returns a sufficiently small subset of the data. </td> # </tr> # <tr> # <td> <b>count</b>() </td> # <td> Return the number of elements in the dataset. </td> # </tr> # <tr> # <td> <b>first</b>() </td> # <td> Return the first element of the dataset (similar to take(1)). </td> # </tr> # <tr> # <td> <b>take</b>(<i>n</i>) </td> # <td> Return an array with the first <i>n</i> elements of the dataset. </td> # </tr> # <tr> # <td> <b>takeSample</b>(<i>withReplacement</i>, <i>num</i>, [<i>seed</i>]) </td> # <td> Return an array with a random sample of <i>num</i> elements of the dataset, with or without replacement, optionally pre-specifying a random number generator seed.</td> # </tr> # <tr> # <td> <b>takeOrdered</b>(<i>n</i>, <i>[ordering]</i>) </td> # <td> Return the first <i>n</i> elements of the RDD using either their natural order or a custom comparator. </td> # </tr> # <tr> # <td> <b>saveAsTextFile</b>(<i>path</i>) </td> # <td> Write the elements of the dataset as a text file (or set of text files) in a given directory in the local filesystem, HDFS or any other Hadoop-supported file system. Spark will call toString on each element to convert it to a line of text in the file. </td> # </tr> # <tr> # <td> <b>saveAsSequenceFile</b>(<i>path</i>) <br> (Java and Scala) </td> # <td> Write the elements of the dataset as a Hadoop SequenceFile in a given path in the local filesystem, HDFS or any other Hadoop-supported file system. This is available on RDDs of key-value pairs that implement Hadoop's Writable interface. In Scala, it is also # available on types that are implicitly convertible to Writable (Spark includes conversions for basic types like Int, Double, String, etc). 
</td> # </tr> # <tr> # <td> <b>saveAsObjectFile</b>(<i>path</i>) <br> (Java and Scala) </td> # <td> Write the elements of the dataset in a simple format using Java serialization, which can then be loaded using # <code>SparkContext.objectFile()</code>. </td> # </tr> # <tr> # <td> <b>countByKey</b>() <a name="CountByLink"></a> </td> # <td> Only available on RDDs of type (K, V). Returns a hashmap of (K, Int) pairs with the count of each key. </td> # </tr> # <tr> # <td> <b>foreach</b>(<i>func</i>) </td> # <td> Run a function <i>func</i> on each element of the dataset. This is usually done for side effects such as updating an Accumulator or interacting with external storage systems. # <br><b>Note</b>: modifying variables other than Accumulators outside of the <code>foreach()</code> may result in undefined behavior. See Understanding closures for more details.</td> # </tr> # </tbody></table> # ### GetNumpartitions Action # # *Return the number of partitions in RDD* # create RDD 'x' with two partitions x = sc.parallelize([1,2,3], 2) # get the number of partitions of RDD 'x' - the return value is from type integer y = x.getNumPartitions() # glom() flattens elements on the same partition print(x.glom().collect()) print(y) # ### Collect Action # # *Return all items in the RDD to the driver in a single list* # create RDD 'x' with two partitions x = sc.parallelize([1,2,3], 2) # create list 'y' (no RDD - y is from type 'list') y = x.collect() print(x.glom().collect()) print(y) # ### Count Action # # *Return the number of elements in this RDD.* # create new RDD 'numberRDD' with two partitions numberRDD = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 2) # the action count returns the number of element in the dataset - independent of the number of partitions numberRDD.count() # ### First Action # # *Return the first element in this RDD.* # create new RDD 'numberRDD' with two partitions numberRDD = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 2) # return the first element - the order of the 
elements within the RDD is not effected by the partitioning numberRDD.first() # ### Take Action # # *Take the first num elements of the RDD.* # create new RDD 'numberRDD' with two partitions numberRDD = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 2) # return the first FOUR element - the order of the elements within the RDD is not effected by the partitioning numberRDD.take(4) # ### Reduce Action # # *Aggregate all the elements of the RDD by applying a user function pairwise to elements and partial results, and returns a result to the driver* # create new RDD 'x' x = sc.parallelize([1,2,3,4]) # apply function pairwise (a,b) to elements and return the sum - return type is integer y = x.reduce(lambda a,b: a+b) print(x.collect()) print(y) # ### Aggregate Action # Since RDD’s are partitioned, the aggregate takes full advantage of it by first aggregating elements in each partition and then aggregating results of all partition to get the final result. # # Aggregate all the elements of the RDD by: # - applying a user function to combine elements with user-supplied objects, # - then combining those user-defined results via a second user function, # - and finally returning a result to the driver. # The seqOp operator is used to accumulate the results of each partition and stores the running # accumulated result to data. seqOp = lambda data, item: (data[0] + [item], data[1] + item) # The combOp is used to combine the results of all partitions combOp = lambda d1, d2: (d1[0] + d2[0], d1[1] + d2[1]) # create new RDD 'x' x = sc.parallelize([1,2,3,4]) # aggregate all elements of the RDD y = x.aggregate(([], 0), seqOp, combOp) print(y) # ### Max Action # # *Return the maximum item in the RDD* # create new RDD 'x' x = sc.parallelize([2,4,1]) # return the maximum value from the dataset y = x.max() print(x.collect()) print(y) # ## Stop The Spark Session # stop the underlying SparkContext. 
# Guard against the SparkContext never having been created (e.g. if the
# setup cell was skipped): referencing an unbound name raises NameError,
# which we treat as "nothing to stop".
try:
    sc
except NameError:
    # FIX: original message was garbled ("does not context exist")
    print("Spark context does not exist - nothing to stop.")
else:
    # release the cluster resources held by the underlying SparkContext
    sc.stop()

# ---
# *This Notebook was an introduction to the Apache Spark concept of RDDs (Resilient distributed
# datasets) and how to use transformations and actions on RDDs.*
#
# **Next UP: [Data Wrangling using RDDs](./03_Data_Wrangling_using_RDDs.ipynb)**
02_Low_Level_APIs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Power Quality Classification using CNN # This notebook focusses on developing a Convolutional Neural Network which classifies a particular power signal into its respective power quality condition. The dataset used here contains signals which belong to one of the 6 classes(power quality condition). The sampling rate of this data is 256. This means that each signal is characterized by 256 data points. Here the signals provided are in time domain. #importing the required libraries import matplotlib.pyplot as plt import pandas as pd import numpy as np import datetime from scipy.fft import fft,fftfreq from scipy import signal from sklearn.preprocessing import StandardScaler from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Activation from tensorflow.keras.optimizers import Adam #loading the dataset using pandas x_train = pd.read_csv("../Dataset2/Train/Voltage_L1_train.csv") y_train = pd.read_csv("../Dataset2/Train/output_train.csv") x_test = pd.read_csv("../Dataset2/Test/Voltage_L1_test.csv") y_test = pd.read_csv("../Dataset2/Test/output_test.csv") print("x_train",x_train.shape) print("y_train",y_train.shape) print("x_test",x_test.shape) print("y_test",y_test.shape) # ## Data Preprocessing # This segment of notebook contains all the preprocessing steps which are performed on the data. 
# dropna() removes all rows that contain NA values
x_train.dropna(axis=0, inplace=True)
y_train.dropna(axis=0, inplace=True)
x_test.dropna(axis=0, inplace=True)
y_test.dropna(axis=0, inplace=True)

# shape of the data frames after dropping the rows containing NA values
print("x_train", x_train.shape)
print("y_train", y_train.shape)
print("x_test", x_test.shape)
print("y_test", y_test.shape)

# construct the list that will contain the feature column names (Col1..ColN)
header = []
for i in range(1, x_train.shape[1] + 1):
    header.append("Col" + str(i))

# assign the column-name list to the respective feature dataframes
x_train.columns = header
x_test.columns = header

# assign the single column name for the y_train and y_test label frames
header = ["output"]
y_train.columns = header
y_test.columns = header

x_train.head()
x_test.head()
y_train.head()
y_test.head()

# further split the training dataset into train and validation subsets
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.20, random_state=42)

print('x_train', x_train.shape)
print('y_train', y_train.shape)
print('x_val', x_val.shape)
print('y_val', y_val.shape)
print('x_test', x_test.shape)
print('y_test', y_test.shape)

# get_dummies performs one-hot encoding of the label columns
y_train_hot = pd.get_dummies(y_train['output'])
y_test_hot = pd.get_dummies(y_test['output'])
y_val_hot = pd.get_dummies(y_val['output'])

y_train_hot.head()

y_train_arr = y_train_hot.to_numpy()
y_test_arr = y_test_hot.to_numpy()
y_val_arr = y_val_hot.to_numpy()

print("y_train:", y_train_arr.shape)
print("y_test:", y_test_arr.shape)
print("y_val:", y_val_arr.shape)

# number of output classes, derived from the one-hot encoded labels
no_of_classes = y_train_arr.shape[1]

# ### Data transformation
# The data transformation steps employed here are as follows:<br>
#
# 1) Fourier Transform<br>
# 2) Normalization

x_train_tr = x_train.to_numpy()
x_test_tr = x_test.to_numpy()
x_val_tr = x_val.to_numpy()

# Optional Fourier-transform step, currently disabled:
# for i in range(0, x_train.shape[0]):
#     x_train_tr[i][:] = np.abs(fft(x_train_tr[i][:]))
# for i in range(0, x_test.shape[0]):
#     x_test_tr[i][:] = np.abs(fft(x_test_tr[i][:]))
# for i in range(0, x_val.shape[0]):
#     x_val_tr[i][:] = np.abs(fft(x_val_tr[i][:]))

# BUG FIX: the scaler must be fitted on the training data ONLY.
# The original code called fit_transform() on test and validation too,
# which re-fits the scaler on evaluation data (data leakage) and scales
# each split with different statistics. Fit once on train, then apply
# the same transform to test/val.
transform = StandardScaler()
x_train_tr = transform.fit_transform(x_train)
x_test_tr = transform.transform(x_test)
x_val_tr = transform.transform(x_val)

print("Training", x_train_tr.shape)
print(y_train_arr.shape)
print("Validation", x_val_tr.shape)
print(y_val_arr.shape)
print("Test", x_test_tr.shape)
print(y_test_arr.shape)

sampling_rate = x_train_tr.shape[1]

# ## Model creation and training

# Reshape the data to (samples, timesteps, channels=1) so it can feed a 1D CNN
x_train_re = x_train_tr.reshape(x_train_tr.shape[0], x_train_tr.shape[1], 1)
x_test_re = x_test_tr.reshape(x_test_tr.shape[0], x_test_tr.shape[1], 1)
x_val_re = x_val_tr.reshape(x_val_tr.shape[0], x_val_tr.shape[1], 1)
x_train_re.shape

# import required modules for working with CNNs
import tensorflow as tf
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Convolution1D, ZeroPadding1D, MaxPooling1D, BatchNormalization, Activation, Dropout, Flatten, Dense
from tensorflow.keras.regularizers import l2

# training hyper-parameters
batch_size = 64
# derive the class count from the data instead of hard-coding 6
num_classes = no_of_classes
epochs = 20
input_shape = (x_train_tr.shape[1], 1)

# two Conv1D blocks (conv -> batch-norm -> max-pool) followed by a small
# dense head with a softmax over the power-quality classes
model = Sequential()
model.add(Conv1D(128, kernel_size=3, padding='same', activation='relu', input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=(2)))
model.add(Conv1D(128, kernel_size=3, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=(2)))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.summary()

# +
# compile the model, logging training metrics for TensorBoard
log_dir = "logs2/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# -

# train the model, validating on the held-out validation split each epoch
history = model.fit(x_train_re, y_train_hot,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(x_val_re, y_val_hot),
                    callbacks=[tensorboard_callback])

# %load_ext tensorboard

# %tensorboard --logdir logs2/fit

print(model.metrics_names)

# ## Model evaluation

print("min val:", min(history.history['val_accuracy']))
print("avg val", np.mean(history.history['val_accuracy']))
print("max val:", max(history.history['val_accuracy']))
print()
print("min train:", min(history.history['accuracy']))
print("avg train", np.mean(history.history['accuracy']))
print("max train:", max(history.history['accuracy']))

pred_acc = model.evaluate(x_test_re, y_test_hot)
print("Test accuracy is {}".format(pred_acc))

from sklearn.metrics import confusion_matrix
import seaborn as sn

# confusion matrix of true vs predicted class indices on the test set
array = confusion_matrix(y_test_hot.to_numpy().argmax(axis=1),
                         model.predict(x_test_re).argmax(axis=1))
array

to_cm = pd.DataFrame(array,
                     index=[i for i in ["Type-1", "Type-2", "Type-3", "Type-4", "Type-5", "Type-6"]],
                     columns=[i for i in ["Type-1", "Type-2", "Type-3", "Type-4", "Type-5", "Type-6"]])
plt.figure(figsize=(13, 9))
sn.heatmap(to_cm, annot=True)

# +
#model.save("CNN_model_data2.h5")
CNN/CNN_implementation_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #Snippets and Programs from Chapter 6: Drawing Geometric Shapes and Fractals # %matplotlib inline # + #P151: Drawing a Circle ''' Example of using matplotlib's Circle patch ''' import matplotlib.pyplot as plt def create_circle(): circle = plt.Circle((0, 0), radius = 0.5) return circle def show_shape(patch): ax = plt.gca() ax.add_patch(patch) plt.axis('scaled') plt.show() if __name__ == '__main__': c = create_circle() show_shape(c) # + #P153: A growing circle ''' The animation is not viiewable in notebook. See: http://jakevdp.github.io/blog/2013/05/12/embedding-matplotlib-animations/ A growing circle ''' from matplotlib import pyplot as plt from matplotlib import animation def create_circle(): circle = plt.Circle((0, 0), 0.05) return circle def update_radius(i, circle): circle.radius = i*0.5 return circle, def create_animation(): fig = plt.gcf() ax = plt.axes(xlim=(-10, 10), ylim=(-10, 10)) ax.set_aspect('equal') circle = create_circle() ax.add_patch(circle) anim = animation.FuncAnimation(fig, update_radius, fargs = (circle,), frames=30, interval=50) plt.title('Simple Circle Animation') plt.show() if __name__ == '__main__': create_animation() # + #P156: Animating a projectile's projectory ''' Animate the trajectory of an object in projectile motion ''' from matplotlib import pyplot as plt from matplotlib import animation import math g = 9.8 def get_intervals(u, theta): t_flight = 2*u*math.sin(theta)/g intervals = [] start = 0 interval = 0.005 while start < t_flight: intervals.append(start) start = start + interval return intervals def update_position(i, circle, intervals, u, theta): t = intervals[i] x = u*math.cos(theta)*t y = u*math.sin(theta)*t - 0.5*g*t*t circle.center = x, y return circle, def create_animation(u, theta): intervals = 
get_intervals(u, theta) xmin = 0 xmax = u*math.cos(theta)*intervals[-1] ymin = 0 t_max = u*math.sin(theta)/g ymax = u*math.sin(theta)*t_max - 0.5*g*t_max**2 fig = plt.gcf() ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax)) circle = plt.Circle((xmin, ymin), 1.0) ax.add_patch(circle) anim = animation.FuncAnimation(fig, update_position, fargs=(circle, intervals, u, theta), frames=len(intervals), interval=1, repeat=False) plt.title('Projectile Motion') plt.xlabel('X') plt.ylabel('Y') plt.show() if __name__ == '__main__': try: u = float(input('Enter the initial velocity (m/s): ')) theta = float(input('Enter the angle of projection (degrees): ')) except ValueError: print('You entered an invalid input') else: theta = math.radians(theta) create_animation(u, theta) # + #P160: Random walk of a point in a plane ''' Example of selecting a transformation from two equally probable transformations ''' import matplotlib.pyplot as plt import random def transformation_1(p): x = p[0] y = p[1] return x + 1, y - 1 def transformation_2(p): x = p[0] y = p[1] return x + 1, y + 1 def transform(p): # list of transformation functions transformations = [transformation_1, transformation_2] # pick a random transformation function and call it t = random.choice(transformations) x, y = t(p) return x, y def build_trajectory(p, n): x = [p[0]] y = [p[1]] for i in range(n): p = transform(p) x.append(p[0]) y.append(p[1]) return x, y if __name__ == '__main__': # initial point p = (1, 1) n = int(input('Enter the number of iterations: ')) x, y = build_trajectory(p, n) # plot plt.plot(x, y) plt.xlabel('X') plt.ylabel('Y') plt.show() # + #P165: Draw Barnsley Fern ''' Draw Barnsley Fern ''' import random import matplotlib.pyplot as plt def transformation_1(p): x = p[0] y = p[1] x1 = 0.85*x + 0.04*y y1 = -0.04*x + 0.85*y + 1.6 return x1, y1 def transformation_2(p): x = p[0] y = p[1] x1 = 0.2*x - 0.26*y y1 = 0.23*x + 0.22*y + 1.6 return x1, y1 def transformation_3(p): x = p[0] y = p[1] x1 = -0.15*x + 0.28*y 
y1 = 0.26*x + 0.24*y + 0.44 return x1, y1 def transformation_4(p): x = p[0] y = p[1] x1 = 0 y1 = 0.16*y return x1, y1 def get_index(probability): r = random.random() c_probability = 0 sum_probability = [] for p in probability: c_probability += p sum_probability.append(c_probability) for item, sp in enumerate(sum_probability): if r <= sp: return item return len(probability)-1 def transform(p): # list of transformation functions transformations = [transformation_1, transformation_2, transformation_3, transformation_4] probability = [0.85, 0.07, 0.07, 0.01] # pick a random transformation function and call it tindex = get_index(probability) t = transformations[tindex] x, y = t(p) return x, y def draw_fern(n): # We start with (0, 0) x = [0] y = [0] x1, y1 = 0, 0 for i in range(n): x1, y1 = transform((x1, y1)) x.append(x1) y.append(y1) return x, y if __name__ == '__main__': n = int(input('Enter the number of points in the Fern: ')) x, y = draw_fern(n) # Plot the points plt.plot(x, y, 'o') plt.title('Fern with {0} points'.format(n)) plt.show() # + #P174: Example of using the imshow() function import matplotlib.pyplot as plt import matplotlib.cm as cm import random def initialize_image(x_p, y_p): image = [] for i in range(y_p): x_colors = [] for j in range(x_p): x_colors.append(0) image.append(x_colors) return image def color_points(): x_p = 20 y_p = 20 image = initialize_image(x_p, y_p) for i in range(y_p): for j in range(x_p): image[i][j] = random.randint(0, 10) plt.imshow(image, origin='lower', extent=(0, 5, 0, 5), cmap=cm.Greys_r, interpolation='nearest') plt.colorbar() plt.show() if __name__ == '__main__': color_points()
chapter6/Chapter6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Testing out our chatbot model

# First we import the needed packages

from keras.models import load_model
import nltk
import numpy as np
import pickle
nltk.download('punkt')
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()

# Create a couple of functions to create a bag of words from the given sentence

# +
def tokenize_sentence(sentence):
    """Tokenize *sentence* and reduce every token to its lower-cased Lancaster stem."""
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

def extract_words(sentence, words):
    """Return a bag-of-words vector over *words*: 1 where a vocabulary word occurs in *sentence*."""
    sentence_words = tokenize_sentence(sentence)
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return(np.array(bag))
# -

# Let's load the saved model and its accompanying data (classes and word list)

model = load_model('chatbot_model.h5')
data = pickle.load( open( "chatbot-data.pkl", "rb" ) )
words = data['words']
classes = data['classes']

p = extract_words("I want to return a book", words)
print (p)
print (classes)

# +
# BUG FIX: the original cells assigned `input = np.array([p])` but then
# called `model.predict(inputvar)` -- `inputvar` was never defined, so
# every prediction cell raised NameError. We predict on the array that
# was actually built, and name it `model_input` so the builtin `input`
# is not shadowed.
model_input = np.array([p])
prediction = model.predict(model_input)
print("matches intent:" , classes[np.argmax(prediction)])

# +
p = extract_words("List some books", words)
model_input = np.array([p])
prediction = model.predict(model_input)
print("matches intent:" , classes[np.argmax(prediction)])

# +
p = extract_words("Show me all your books", words)
model_input = np.array([p])
prediction = model.predict(model_input)
print("matches intent:" , classes[np.argmax(prediction)])
# -
Section 5/CODE/section5_video4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a id="title_ID"></a> # # JWST Pipeline Validation Notebook: calwebb_image3: source_catalog with MIRI # # <span style="color:red"> **Instruments Affected**</span>: MIRI, NIRCam # # # Tested on MIRI Simulated data # # ### Table of Contents # <div style="text-align: left"> # # <br> [Introduction](#intro_ID) <br> [Imports](#imports_ID) <br> [Run Pipeline](#pipeline_ID) <br> [Check Results](#output_ID) <br> [About This Notebook](#about_ID) <br> # # # </div> # <a id="intro_ID"></a> # # ## Introduction # # This notebook processes a set of images through calwebb_image2 and calwebb_image3 and examines the output table of the source_catalog step. # # This test uses simulated MIRI F770W (options for F560W or F770W) data of a crowded star + galaxy field. # # The pipeline documentation can be found here: https://jwst-pipeline.readthedocs.io/en/latest/jwst/source_catalog/main.html # # The pipeline code is available on GitHub: https://github.com/spacetelescope/jwst # # # ### JWST Calibration working group algorithm # # The algorithm and discussion for this step can be found at the following page: # # https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Point+Source+Catalog # ### Test description # # The steps of this test are as follow: # # 1) Set up data path and directory and image files name. # # 2) Run output of calwebb_detector1 through calwebb_image2. # # 3) Run output of calwebb_image2 through calwebb_image3. # # 4) Read in output table of source_catalog step and print ecsv table # # 5) Display image and overplot detector sources from ecsv table. # # 6) Look at plots of total flux in Jy and AB mag. # # 7) Look for matches between expected source positions (RA and Dec) from simulated catalog to output from source_catalog. 
# # 8) Compare magnitudes and magnitude differences between input simulated catalog and output found sources. # ### Data description # # The data used in this test consist of a set of four simulated images in the F770W filter of MIRI, at four different dither positions. The data have 4827 stars of varying flux levels and just over 200 galaxies in the image. The MIRI Image simulator (MIRISim) was used to create the simulations. # ### Create a temporary directory for all of the test data # + # Create a temporary directory to hold notebook output, and change the working directory to that directory. from tempfile import TemporaryDirectory import os data_dir = TemporaryDirectory() os.chdir(data_dir.name) # For info, print out where the script is running print("Running in {}".format(os.getcwd())) # - # <a id="imports_ID"></a> # ### Set up import statements # # The following packages are needed to enable this notebook to run: # * astropy for coordinate handling and calculations # * jwst to run the pipeline steps and create associations # * matplotlib for plotting # * ci_watson for retrieving data from artifactory # * set of tools to retrieve data from Box # # [Top of Page](#title_ID) # + #import pytest import numpy as np from glob import glob import json import matplotlib.pyplot as plt import photutils from astropy.io import fits, ascii from astropy.coordinates import Angle from astropy.table import Table, vstack, unique, join from astropy import table from astropy.coordinates import SkyCoord, match_coordinates_sky from astropy.visualization import simple_norm from astropy import units as u from astropy.modeling import models from astropy.wcs import WCS # Box download imports from astropy.utils.data import download_file from pathlib import Path from shutil import move from os.path import splitext import jwst from jwst import datamodels from jwst.datamodels import RampModel, ImageModel from jwst import associations from jwst.associations import asn_from_list from 
jwst.associations.lib.rules_level3_base import DMS_Level3_Base from jwst.pipeline import calwebb_image3 from jwst.pipeline import calwebb_image2 from jwst.pipeline import calwebb_detector1 from jwst.pipeline import Detector1Pipeline, Image2Pipeline, Image3Pipeline from ci_watson.artifactory_helpers import get_bigdata # - # ### Put in some helper scripts # def create_scatterplot(catalog_colx, catalog_coly, title=None): ''' Function to generate a generic scatterplot. ''' fig = plt.figure(figsize=(8, 8)) ax = plt.subplot() ax.scatter(catalog_colx,catalog_coly) plt.xlabel(catalog_colx.name) plt.ylabel(catalog_coly.name) if title: plt.title(title) # + # Band info from constants in miricap # wref in microns and zeropoint in Jy band_info = { "2MASS J": {'wref': 1.235, 'zeropoint': 1594}, "2MASS H": {'wref': 1.662, 'zeropoint': 1024}, "2MASS Ks": {'wref': 2.159, 'zeropoint': 666.7}, "<NAME>": {'wref': 0.36, 'zeropoint': 1823}, "<NAME>": {'wref': 0.44, 'zeropoint': 4130}, "<NAME>": {'wref': 0.55, 'zeropoint': 3781}, "<NAME>": {'wref': 0.71, 'zeropoint': 2941}, "<NAME>": {'wref': 0.97, 'zeropoint': 2635}, "<NAME>": {'wref': 1.25, 'zeropoint': 1603}, "<NAME>": {'wref': 1.60, 'zeropoint': 1075}, "<NAME>": {'wref': 2.22, 'zeropoint': 667}, "<NAME>": {'wref': 3.54, 'zeropoint': 288}, "<NAME>": {'wref': 4.80, 'zeropoint': 170}, "<NAME>": {'wref': 10.6, 'zeropoint': 36}, "<NAME>": {'wref': 21.0, 'zeropoint': 9.4}, "UKIRT V": {'wref': 0.5556, 'zeropoint': 3540}, "UKIRT I": {'wref': 0.9, 'zeropoint': 2250}, "UKIRT J": {'wref': 1.25, 'zeropoint': 1600}, "UKIRT H": {'wref': 1.65, 'zeropoint': 1020}, "UKIRT K": {'wref': 2.20, 'zeropoint': 657}, "UKIRT L": {'wref': 3.45, 'zeropoint': 290}, "UKIRT L'": {'wref': 3.80, 'zeropoint': 252}, "UKIRT M": {'wref': 4.8, 'zeropoint': 163}, "UKIRT N": {'wref': 10.1, 'zeropoint': 39.8}, "UKIRT Q": {'wref': 20.0, 'zeropoint': 10.4}, "MIRLIN N": {'wref': 10.79, 'zeropoint': 33.4}, "MIRLIN Q-s": {'wref': 17.90, 'zeropoint': 12.4}, "MIRLIN N0": {'wref': 
7.91, 'zeropoint': 60.9}, "MIRLIN N1": {'wref': 8.81, 'zeropoint': 49.4}, "MIRLIN N2": {'wref': 9.69, 'zeropoint': 41.1}, "MIRLIN N3": {'wref': 10.27, 'zeropoint': 36.7}, "MIRLIN N4": {'wref': 11.70, 'zeropoint': 28.5}, "MIRLIN N5": {'wref': 12.49, 'zeropoint': 25.1}, "MIRLIN Q0": {'wref': 17.20, 'zeropoint': 13.4}, "MIRLIN Q1": {'wref': 17.93, 'zeropoint': 12.3}, "MIRLIN Q2": {'wref': 18.64, 'zeropoint': 11.4}, "MIRLIN Q3": {'wref': 20.81, 'zeropoint': 9.2}, "MIRLIN Q4": {'wref': 22.81, 'zeropoint': 7.7}, "MIRLIN Q5": {'wref': 24.48, 'zeropoint': 6.7}, "MIRLIN K": {'wref': 2.2, 'zeropoint': 650.0}, "MIRLIN M": {'wref': 4.68, 'zeropoint': 165.0}, "WISE W1": {'wref': 3.4, 'zeropoint':309.54}, "WISE W2": {'wref': 4.6, 'zeropoint':171.787}, "WISE W3": {'wref': 12., 'zeropoint':31.674}, "WISE W4": {'wref': 22., 'zeropoint':8.363}, } # + # code from miricap.imager section written by <NAME> of the MIRI EC team. def get_band_info(band, system): """ Retrieve information on a given band in a dictionnary 'band_info' that need to be available in global variable of the script at this level. :param str band: Band name (e.g. V for Johnson) :param str system: possible values: Johnson, 2MASS, UKIRT, MARLIN) :return: wref in microns and zeropoint in Jy :rtype: tuple(Quantity, Quantity) """ system_list = ["Johnson", "2MASS", "UKIRT", "MARLIN"] if system not in system_list: LOG.info(f"Unknown system '{system}'. Possible values: {system_list}") return None, None key = f"{system} {band}" try: band_dict = band_info[key] zeropoint = band_dict["zeropoint"] wref = band_dict["wref"] except KeyError: bands = [k.split()[1] for k in band_info.keys() if system in k] LOG.info(f"Unknown band '{band}' for '{system}'. 
Available bands are: {', '.join(bands)}") return None, None return wref * u.micron, zeropoint * u.Jy def mag2flux(magnitude, band, system="Johnson"): """ Convert magnitude in a given band/system into flux in mJy (and return the corresponding wavelength reference :param float magnitude: magnitude in a given bandpass :param str band: band name (e.g 'V' or Johnson system) :param str system: (By default Johnson, possible values: Johnson, 2MASS, UKIRT, MARLIN) :return: flux in mJy and wref in microns :rtype: tuple(float, float) """ wref, zero_point = get_band_info(band, system) flux = zero_point * 10.0 ** (-0.4 * magnitude) print(f"Magnitude {magnitude} in {system} band {band} -> Flux: {flux} at {wref} microns") return flux.to(u.mJy).value, wref.value def extrapolate_flux(flux, wref, waves, temperature_star): """ From a flux and reference wavelength, will return the flux for other wavelength (using the star effective temperature for the spectrum shape) To convert magnitude flux in a band, use one of the following website for instance: - http://ssc.spitzer.caltech.edu/warmmission/propkit/pet/magtojy/ - https://www.gemini.edu/sciops/instruments/midir-resources/imaging-calibrations/fluxmagnitude-conversion :param float flux: Star flux in mJy :param float wref: reference wavelength (microns) corresponding to the flux given in parameter :param waves: list of wavelengths you want to extrapolate the star flux on. :type waves: float or list(float) or np.array(float) :param float temperature_star: star effective temperature in K :return: flux values for all required wavelengths. 
Unit will be the unit of the input flux :rtype: quantity or np.array(quantity) """ flux = flux * u.mJy wref = wref * u.micron waves = waves * u.micron bb_star = models.BlackBody(temperature_star * u.K) extrapolated_flux = flux * bb_star(waves) / bb_star(wref) print(f"Assuming T={temperature_star} K, Flux: {flux} at {wref} -> Flux: {extrapolated_flux} at {waves}") return extrapolated_flux # - # ### Print pipeline version number print(jwst.__version__) print(data_dir) # ### Read in data from artifactory # + # Read in new dataset from Box def get_box_files(file_list): for box_url,file_name in file_list: if 'https' not in box_url: box_url = 'https://stsci.box.com/shared/static/' + box_url downloaded_file = download_file(box_url) if Path(file_name).suffix == '': ext = splitext(box_url)[1] file_name += ext move(downloaded_file, file_name) # Use F560W data set #file_urls = ['https://stsci.box.com/shared/static/g8ozd2wkdo9klrtkafpe9qegpzhpd66a.fits', # 'https://stsci.box.com/shared/static/ym75nbqdi32t9wnbanm6s57itqixniyc.fits', # 'https://stsci.box.com/shared/static/0bqh6ijsc6kd7cyv6gtnj9kvly9t885f.fits', # 'https://stsci.box.com/shared/static/tywadpqcjhp9yxyv0sy91hc79red92xm.fits', # 'https://stsci.box.com/shared/static/lpcgp7jkq6lmjj2lxpk63jz9u05qgk3a.cat', # 'https://stsci.box.com/shared/static/15j6hkjc28zow4rjepob2t75dl887zs3.cat'] #file_names = ['det_image_seq1_MIRIMAGE_F560Wexp1_rate.fits', # 'det_image_seq2_MIRIMAGE_F560Wexp1_rate.fits', # 'det_image_seq3_MIRIMAGE_F560Wexp1_rate.fits', # 'det_image_seq4_MIRIMAGE_F560Wexp1_rate.fits', # 'input_sim_stars.cat', # 'input_sim_galaxies.cat'] # Test with F770W data set file_urls = ['https://stsci.box.com/shared/static/8573htf1p8mhk4e49z9b483dp975y3lz.fits', 'https://stsci.box.com/shared/static/p69h7uzlmqwlzf6kqqbf4lu9ibz28tc6.fits', 'https://stsci.box.com/shared/static/js8k6j20rek1oago057wxn08dhuticij.fits', 'https://stsci.box.com/shared/static/es253mc3m1sptusj9c9ctl1blidr1qdl.fits', 
             'https://stsci.box.com/shared/static/lpcgp7jkq6lmjj2lxpk63jz9u05qgk3a.cat',
             'https://stsci.box.com/shared/static/15j6hkjc28zow4rjepob2t75dl887zs3.cat']
file_names = ['det_image_seq1_MIRIMAGE_F770Wexp1_rate.fits',
              'det_image_seq2_MIRIMAGE_F770Wexp1_rate.fits',
              'det_image_seq3_MIRIMAGE_F770Wexp1_rate.fits',
              'det_image_seq4_MIRIMAGE_F770Wexp1_rate.fits',
              'input_sim_stars.cat',
              'input_sim_galaxies.cat']

# pair each Box URL with the local file name it should be saved under
box_download_list = [(url,name) for url,name in zip(file_urls,file_names)]

get_box_files(box_download_list)
# -

# <a id="pipeline_ID"></a>
# ## Run Pipelines
#
# Read in the data and run the JWST calibration pipelines.
#
# ### Run output of calwebb_detector1 through calwebb_image2

# +
# Run Calwebb_image2 on output files from detector1
ratefiles = glob('*rate.fits')
print('There are ', len(ratefiles), ' images.')

callist = []

# cycle through files, running the Image2 pipeline on each rate file
for im in ratefiles:
    pipe2 = Image2Pipeline()
    rampfile = ImageModel(im)
    filename = rampfile.meta.filename
    # Set pipeline parameters
    pipe2.save_results = True
    # NOTE(review): filename already ends in '.fits', so this requests
    # '<name>.fits_cal.fits'; confirm the products written to disk really
    # match the '*_cal.fits' glob used to build the association below.
    pipe2.output_file = filename +'_cal.fits'
    pipe2.resample.save_results = True
    pipe2.suffix = None
    calfile = pipe2.run(rampfile)
    callist.append(calfile)

print(callist)
# -

# ### Run output of calwebb_image2 through calwebb_image3

# ### Create an association file to combine the input calibrated files

# +
# use asn_from_list to create association table
calfiles = glob('*_cal.fits')
asn = asn_from_list.asn_from_list(calfiles, rule=DMS_Level3_Base, product_name='starfield_combined.fits')

# use this if you need to add non'science' exposure types
#asn['products'][0]['members'][1]['exptype'] = 'background'
#asn['products'][0]['members'][2]['exptype'] = 'sourcecat'

# dump association table to a .json file for use in image3
# (element [1] of asn.dump() is the serialized association text)
with open('starfield_asnfile.json', 'w') as fp:
    fp.write(asn.dump()[1])

print(asn)
# -

# ### Run Calwebb_Image3 pipeline
#
# For MIRI, the FWHM values are dependent on filter and should be set using the table below:
#
# |Filter | FWHM |
# |-------| -------|
# |F560W | 1.636 |
# |F770W | 2.187 |
# |F1000W | 2.888 |
# |F1130W | 3.318 |
# |F1280W | 3.713 |
# |F1500W | 4.354 |
# |F1800W | 5.224 |
# |F2100W | 5.989 |
# |F2550W | 7.312 |
# |F2550WR | 7.312 |
#
# For the fit geometry keyword, the following options are available:
# fitgeometry: A str value indicating the type of affine transformation to be considered when fitting catalogs. Allowed values:
#
# * 'shift': x/y shifts only
#
# * 'rscale': rotation and scale
#
# * 'rshift': rotation and shifts
#
# * 'general': shift, rotation, and scale (Default=”general”)
#

# +
# Run Calwebb_image3 on the association table

# set any specific parameters
# tweakreg parameters to allow data to run
fwhm = 2.187  # Gaussian kernel FWHM of objects expected (F770W value from the table above), default=2.5
minobj = 5  # minimum number of objects needed to match positions for a good fit, default=15
snr = 8  # signal to noise threshold, default=5
sigma = 5  # clipping limit, in sigma units, used when performing fit, default=3
fit_geom = 'shift'  # type of affine transformation to be considered when fitting catalogs, default='general'
use2dhist = False  # boolean indicating whether to use 2D histogram to find initial offset, default=True

pipe3=Image3Pipeline()

pipe3.tweakreg.kernel_fwhm = fwhm
pipe3.tweakreg.snr_threshold = snr
pipe3.tweakreg.minobj = minobj
pipe3.tweakreg.sigma = sigma
pipe3.tweakreg.fitgeometry = fit_geom
pipe3.tweakreg.use2dhist = use2dhist
# the same FWHM/SNR settings are reused for the source_catalog step
pipe3.source_catalog.save_results = True
pipe3.source_catalog.snr_threshold = snr
pipe3.source_catalog.kernel_fwhm = fwhm
pipe3.save_results = True

# run Image3
image = pipe3.run('starfield_asnfile.json')

print('Image 3 pipeline finished.')
# -

# <a id="output_ID"></a>
# ## Results
# Read in the output of the pipeline and check your results.
# # ### Read in output table of source_catalog step and print ecsv table photfile = 'starfield_combined_cat.ecsv' input_file = 'starfield_combined_i2d.fits' # + # Look at subset of table with full columns # If you have negative fluxes or repeated values of xcentroid or ycentroid, you may be finding spurious sources at image edges data = table.Table.read(photfile, format='ascii', comment='#') smalltable = data['label', 'xcentroid', 'ycentroid','aper30_flux', 'aper50_flux', 'aper70_flux', 'CI_50_30', 'CI_70_50','aper_total_flux'] smalltable.pprint_all() #print(smalltable) # + # Look at catalog table that shows all columns, but subset of rows # Pay attention to rows with a large number of nans, as this may indicate a spurious source catalog = Table.read("starfield_combined_cat.ecsv") catalog # - # ### Display image and overplot detector sources from ecsv table # Read in i2d combined Image im_i2d = ImageModel(input_file) # + # read in ecsv photom file from astropy.visualization import LogStretch, PercentileInterval, ManualInterval from astropy import table viz2 = LogStretch() + ManualInterval(0,10) plt.figure(figsize=(20,20)) plt.imshow(viz2(im_i2d.data), origin='lower') #plt.imshow(im_i2d.data, origin='lower', cmap='rainbow', vmin=0, vmax=0.3) plt.colorbar() plt.scatter(data['xcentroid'], data['ycentroid'],lw=1, s=10,color='red') # - # ### In the image above, check that the stars found by source catalog (red) overlap with stars in the image # ### Look at the fluxes of the sources found # + #create_scatterplot(catalog['label'], catalog['aper_total_flux'],title='Total Flux in '+str(catalog['aper_total_flux'].unit)) fig = plt.figure(figsize=(10, 10)) ax = plt.subplot() ax.scatter(catalog['label'], catalog['aper_total_flux']) plt.yscale('log') plt.title('Total Flux in '+ str(catalog['aper_total_flux'].unit)) plt.xlabel('label') plt.ylabel('aper_total_flux') # - # ### Look at AB mag of sources found create_scatterplot(catalog['label'], 
                   catalog['aper_total_abmag'],title='Total AB mag')

# ### Look at the errors on the AB magnitudes

create_scatterplot(catalog['aper_total_abmag'], catalog['aper_total_abmag_err'],title='Total AB mag vs error')

# ### Manually find matches
# Since this is a simulated data set, we can compare the output catalog information from the pipeline with the input catalog information used to create the simulation. Grab the input catalog x,y values and the output catalog x and y values.

# Read in catalogs used for simulated data
sim_star_cat = Table.read('input_sim_stars.cat', format='ascii')
sim_gal_cat = Table.read('input_sim_galaxies.cat', format='ascii')
sim_star_cat

# join the star and galaxy catalogs used to make the simulations
combined = table.vstack([sim_star_cat, sim_gal_cat])
combined

# convert the input catalog offsets to degrees
# NOTE(review): this assumes the 'x'/'y' columns are sky offsets in arcsec
# around (RA, Dec) = (0, 0) — confirm against the simulator inputs.
sim_ra = combined['x']/3600.
sim_dec = combined['y']/3600.

# ### Use MIRI software to put x, y coordinates from input catalog into RA, Dec coordinates

# +
# Put star positions into RA and Dec
# (kept for reference: the miricoord-based conversion below is disabled and
# the simple arcsec->degree scaling above is used instead)

# import coordinate conversion tools
#import miricoord.tel.tel_tools as teltools
#import miricoord.imager.mirim_pipetools as mpt
#import miricoord.imager.mirim_tools as mt
#import pysiaf

#filt = 'F560W'

# use pipeline to get v2ref and v3ref coordinates
#siaf = pysiaf.Siaf('MIRI')
#xref,yref=siaf['MIRIM_FULL'].XDetRef,siaf['MIRIM_FULL'].YDetRef
#xref,yref=xref-1,yref-1
#v2ref,v3ref=mt.xytov2v3(xref,yref,filt)

# set reference for RA and Dec
#raref = 0.0
#decref = 0.0
#rollref = 0.0

#sim_ra = np.zeros(len(combined))
#sim_dec = np.zeros(len(combined))

# cycle through list of coordinates input into scene and get RA/Dec values out, printing to file
#for i in range(len(combined)):
#    #print(combined['x'][i])
#    v2 = v2ref + combined['x'][i]
#    v3 = v3ref + combined['y'][i]
#    ra,dec,newroll=teltools.jwst_v2v3toradec(v2,v3,v2ref=v2ref,v3ref=v3ref,raref=raref,decref=decref,rollref=rollref)
#    sim_ra[i] = ra
#    sim_dec[i] = dec

#print(np.shape(sim_ra))
#print(sim_ra, sim_dec)
# -

# ### Compare RA, Dec coordinates between simulator catalog and output catalog to find matches

# wrap the catalog RAs into the (-180, 180] range so they are comparable with
# the simulated positions, which are small offsets around zero
cat_ra = catalog['sky_centroid'].ra.deg
for i in range(len(cat_ra)):
    if (cat_ra[i]>180.):
        cat_ra[i] -= 360.
cat_dec = catalog['sky_centroid'].dec.deg

# +
# set the tolerance for differences and initialize counters
tol = 1.e-5  # Set tolerance around 30 mas (units here are in degrees; 36 mas ~ 1e-5 deg)
found_count=0
multiples_count=0
missed_count=0

# +
# Set up array for matches
# NOTE(review): np.chararray is deprecated and its elements start out
# uninitialized; only matched entries are set to 'Y' below, so unmatched
# entries hold arbitrary bytes. Consider np.full(len(sim_ra), 'N') instead.
detected = np.chararray(len(sim_ra))
#print(np.shape(detected))

# for each simulated source, count catalog sources inside a tol x tol box
for ra,dec,idx in zip(sim_ra, sim_dec,range(len(sim_ra))):
    match = np.where((np.abs(ra-cat_ra) < tol) & (np.abs(dec-cat_dec) < tol))
    #print('match', match)
    if np.size(match) == 1:
        found_count +=1
        detected[idx] = 'Y'
    if np.size(match) > 1:
        multiples_count +=1
    if np.size(match) < 1:
        missed_count +=1

#print(np.shape(detected))

# +
# summarize the match statistics
total_percent_found = (found_count/len(sim_ra))*100

print('\n')
print('SNR threshold used for pipeline: ',pipe3.source_catalog.snr_threshold)
print('Total matches found:',found_count)
print('Total missed:',missed_count)
print('Number of multiples: ',multiples_count)
print('Total number of input (simulated) sources:',len(combined))
print('Total number in output catalog:',len(catalog))
print('Total percent found:',total_percent_found)
print('\n')
# -

# ### Use photutils to find catalog matches and compare which sources were found and matched.
#
# Photutils includes a package to match sources between catalogs by providing a max separation value (the matching function used below, match_coordinates_sky, comes from astropy.coordinates). Set that value and compare the two catalogs.
# catalog_in = SkyCoord(ra=sim_ra*u.degree, dec=sim_dec*u.degree) # full simulated catalog, stars+galaxies catalog_out = SkyCoord(ra=cat_ra*u.degree, dec=cat_dec*u.degree) max_sep = 0.04 * u.arcsec # Set match at around 30 mas # + # match full tables idx, d2d, d3d = match_coordinates_sky(catalog_in, catalog_out) sep_constraint = d2d < max_sep combined_matched = combined[sep_constraint] # simulated sources matched catalog_matched = catalog[idx[sep_constraint]] # sources found matched print(catalog_matched.colnames) print() print(catalog_matched) # - #match RA and Dec idx, d2d, d3d = match_coordinates_sky(catalog_in, catalog_out) sep_constraint = d2d < max_sep catalog_in_matches = catalog_in[sep_constraint] catalog_out_matches = catalog_out[idx[sep_constraint]] print(catalog_out_matches) # Now, catalog_in_matches and catalog_out_matches are the matched sources in catalog_in and catalog_out, respectively, which are separated less than our max_sep value. print('Number of matched sources using max separation of '+str(max_sep)+': ',len(catalog_matched)) # ### Plot RA and Dec positions of both catalogs # The simulated positions are in red and the sources found with source catalog are marked in blue. Matched sources from both are marked in green. # + #rrr = catalog_in_matches.ra.deg #ddd = catalog_in_matches.dec.deg rrr = catalog_matched['sky_centroid'].ra.deg ddd = catalog_matched['sky_centroid'].dec.deg # make sure ra is in proper range for display plt.figure(figsize=(20,20)) for i in range(len(rrr)): if (rrr[i]>180.): rrr[i] -= 360. 
plt.scatter(rrr, ddd, lw=1, s=108,color='green')
plt.scatter(sim_ra, sim_dec,lw=1, s=18,color='red')
plt.scatter(cat_ra, cat_dec,lw=1, s=18,color='blue')
# -

# ### Convert matched RA/Dec coordinates back to x,y positions and plot matched stars on image

# +
# Convert matched RA/Dec values back to x, y to plot on image
# (disabled: the matched xcentroid/ycentroid columns are plotted directly below)
#print(catalog_out_matches)

# Read in wcs values from file header (in extension 1)
#hdu = fits.open(input_file)[1]
#wcs = WCS(hdu.header)

# convert positions from RA/Dec back to x,y
#x_matched, y_matched = wcs.world_to_pixel(catalog_out_matches)
#print(x_matched)

# +
# Plot all matched sources in red
viz2 = LogStretch() + ManualInterval(0,10)

plt.figure(figsize=(20,20))
plt.imshow(viz2(im_i2d.data), origin='lower')
#plt.imshow(im_i2d.data, origin='lower', cmap='rainbow', vmin=0, vmax=0.3)
plt.colorbar()
#plt.scatter(x_matched, y_matched,lw=1, s=10,color='red')
plt.scatter(catalog_matched['xcentroid'], catalog_matched['ycentroid'], lw=1, s=10, color='red')
# -

# ### Compare to full catalog and see where differences are
# The full source_catalog output is shown with red dots and the matched sources are shown with smaller white dots over the red dots. This shows the sources that were found in the catalog (source_catalog step) but not listed as a match between the input and calculated catalogs.

# +
viz2 = LogStretch() + ManualInterval(0,10)

plt.figure(figsize=(20,20))
plt.imshow(viz2(im_i2d.data), origin='lower')
#plt.imshow(im_i2d.data, origin='lower', cmap='rainbow', vmin=0, vmax=0.3)
plt.colorbar()
plt.scatter(data['xcentroid'], data['ycentroid'],lw=1, s=10,color='red')
#plt.scatter(x_matched, y_matched,lw=1, s=5,color='white')
plt.scatter(catalog_matched['xcentroid'], catalog_matched['ycentroid'], lw=1, s=5, color='white')
# -

# ### Look at positions of sources in the simulated images from the catalogs used to create the images
#
# Are there more stars/galaxies in the simulation input catalogs than actually fit on the combined image? That would explain the sources beyond image edges.

print(catalog_in)

# +
# Read in wcs values from file header (in extension 1)
hdu = fits.open(input_file)[1]
wcs = WCS(hdu.header)

# convert positions from RA/Dec back to x,y
x_sim, y_sim = wcs.world_to_pixel(catalog_in)

# Many sources go beyond image edges. Only display sources that were actually in FOV of combined image.
minval = 0
maxval = 1140
# NOTE(review): the x cut uses the literal 1110 rather than maxval (1140) —
# confirm whether the narrower x limit is intentional.
ind = np.where((x_sim > minval) & (x_sim < 1110)& (y_sim > minval) & (y_sim < maxval))

# +
viz2 = LogStretch() + ManualInterval(0,10)

plt.figure(figsize=(20,20))
plt.imshow(viz2(im_i2d.data), origin='lower')
#plt.imshow(im_i2d.data, origin='lower', cmap='rainbow', vmin=0, vmax=0.3)
plt.colorbar()
plt.scatter(x_sim[ind], y_sim[ind],lw=1, s=5,color='red')
# -

# ## Compare fluxes between input sources and output catalog
#
# Input catalog 'combined' has Ks and J columns for stars and flux for galaxies. Output source catalog 'catalog' has aper_total_flux, 'isophotal_flux', 'aper_total_abmag', 'aper_total_vegamag', 'isophotal_abmag' and isophotal_vegamag.
#
# The first plot compares the magnitudes for the input simulated catalog (in blue) and the sources found with source catalog in green.

# ### Compare the star magnitudes and magnitude differences to the x position of the stars.
#
# Convert the matched simulated Ks magnitudes to flux, extrapolate to 7.7
# micron with a black body, and convert to AB mag for comparison with F770W.
# NOTE(review): mag2flux is documented for a scalar magnitude but is called
# here with a whole column; it relies on numpy broadcasting — confirm.
flux, wref = mag2flux(combined_matched['Ks'], band="Ks", system="2MASS")
fnew = extrapolate_flux(flux, wref, 7.7, combined_matched['Teff'])
F770W_ABmag = -2.5*np.log10((fnew.to(u.Jy)).value) + 8.9

# ### Plot AB mag vs x position
# Simulated stars in blue, sources found in green.

# +
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot()
ax.scatter(catalog_matched['xcentroid'], F770W_ABmag, color='blue', label='Simulated stars')
ax.scatter(catalog_matched['xcentroid'], catalog_matched['aper_total_abmag'], color='green', label='Sources found')
ax.legend()
plt.xlabel('X position in pixels')
plt.ylabel('AB mag')
# -

# ### Plot difference in magnitude against x and y position.
# Matched simulated minus found AB mag vs. x position and vs. y position, with the median magnitude difference plotted as a horizontal line in red.

# +
# delta mag (simulated - found) vs. x position
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot()
ax.set_ylim(-1,1)
ax.scatter(catalog_matched['xcentroid'], F770W_ABmag-catalog_matched['aper_total_abmag'], color='blue')
#ax.legend()
plt.title('ABmag difference (simulated - found) vs. x position')
plt.xlabel('X position in pixels')
plt.ylabel('Delta AB mag')
# restrict the median to magnitudes in (0, 100); NaN/inf values compare
# False and so are excluded from the mask
ok = np.logical_and(F770W_ABmag>0, F770W_ABmag<100)
m = F770W_ABmag[ok]-catalog_matched['aper_total_abmag'][ok]
np.median(m)
plt.hlines(np.median(m), min(catalog_matched['xcentroid']), max(catalog_matched['xcentroid']), color='red')

# +
# delta mag (simulated - found) vs. y position
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot()
ax.set_ylim(-1,1)
ax.scatter(catalog_matched['ycentroid'], F770W_ABmag-catalog_matched['aper_total_abmag'], color='blue')
#ax.legend()
plt.title('ABmag difference (simulated - found) vs. y position')
plt.xlabel('Y position in pixels')
plt.ylabel('Delta AB mag')
ok = np.logical_and(F770W_ABmag>0, F770W_ABmag<100)
m = F770W_ABmag[ok]-catalog_matched['aper_total_abmag'][ok]
np.median(m)
plt.hlines(np.median(m), min(catalog_matched['ycentroid']), max(catalog_matched['ycentroid']), color='red')
# -

# ### Plot AB magnitude difference against AB mag to see if there is a pattern in the differences by mag.
#
# Median difference marked by line in red.

# +
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot()
ax.set_ylim(-1,1)
ax.scatter(F770W_ABmag, F770W_ABmag-catalog_matched['aper_total_abmag'], color='blue')
#ax.legend()
plt.title('Difference in AB mag (simulated-found) vs. ABmag (simulated)')
plt.xlabel('AB mag')
plt.ylabel('Delta AB mag')
ok = np.logical_and(F770W_ABmag>0, F770W_ABmag<100)
m = F770W_ABmag[ok]-catalog_matched['aper_total_abmag'][ok]
np.median(m)
plt.hlines(np.median(m), 16, 24.5, color='red')
# -

# #### Passing criteria
#
# Look at the matched image to see what sources were matched between the simulated and found catalogs and see if there is a pattern to which 'found' sources were not matched to the simulated catalog. Are they mostly galaxies, or were the found sources in areas of confusion between two stars, or sources where the source finding algorithm might not have pinpointed the actual centroid of the source? If the majority of the sources matched seem to be the stars that are clearly marked and properly found, that part of the test passes.
#
# If the plots showing magnitudes of the matched sources show that they're close (exact criteria for 'close' TBD), then that test passes.
#
# This test is mostly visual inspection of the images and plots. As long as nothing is obviously wrong (no columns in the table marked as all NaNs, sources found seem to be real sources, and a good number of them match the simulated sources input into the image), then this test passes.

# #### About this Notebook
#
# Authors: <NAME>, <NAME> and <NAME>, MIRI Branch
#
# Updated On: 08/18/2021

#
jwst_validation_notebooks/source_catalog/jwst_source_catalog_miri_test/jwst_source_catalog_miri_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Author: <NAME> # # Downloading BSESN historial price data using Yahoo Finance API # - if you want to know more about API feel free to visit https://algotrading101.com/learn/yahoo-finance-api-guide/ import pandas as pd import yfinance as yf import datetime import time import requests import io # !pip install yahoo_fin from yahoo_fin.stock_info import get_data bsesn = get_data("^bsesn", start_date="14/12/2015", end_date="12/07/2021", index_as_date = False, interval="1d") bsesn # # Saving the data into csv file bsesn.to_csv("BSESN1.csv") # - This noteboook is only for extracting/ mining data; next task will be completed in the other notebook # # Thank You! :)
Data_mining.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# <a href="https://www.cognitiveclass.ai"><img src = "https://cognitiveclass.ai/wp-content/themes/bdu3.0/static/images/cc-logo.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size = 5>INTRO TO CONVOLUTIONAL NEURAL NETWORK</font></h1>

# ## Introduction
# In this section, we will use the famous [MNIST Dataset](http://yann.lecun.com/exdb/mnist/) to build two Neural Networks capable of performing handwritten digit classification. The first Network is a simple Multi-layer Perceptron (MLP) and the second one is a Convolutional Neural Network (CNN from now on). In other words, our algorithm will say, with some associated error, what type of digit is the presented input.
#
# This lesson is not intended to be a reference for _machine learning, convolutions or TensorFlow_. The intention is to give the user an introduction to these fields and awareness of Data Scientist Workbench capabilities. We recommend that students search for further references to fully understand the mathematical and theoretical concepts involved.
#
# ---
# ## Table of contents
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <font size = 3><strong>Click on the links to see the sections:</strong></font>
# <br>
# - <p><a href="#ref1">What is Deep Learning</a></p>
# - <p><a href="#ref2">Simple test: Is tensorflow working?</a></p>
# - <p><a href="#ref3">1st part: classify MNIST using a simple model</a></p>
# - <p><a href="#ref4">Evaluating the final result</a></p>
# - <p><a href="#ref5">How to improve our model?</a></p>
# - <p><a href="#ref6">2nd part: Deep Learning applied on MNIST</a></p>
# - <p><a href="#ref7">Summary of the Deep Convolutional Neural Network</a></p>
# - <p><a href="#ref8">Define functions and train the model</a></p>
# - <p><a href="#ref9">Evaluate the model</a></p>

# ---
# <a id="ref1"></a>
# # What is Deep Learning?
# **Brief Theory:** Deep learning (also known as deep structured learning, hierarchical learning or deep machine learning) is a branch of machine learning based on a set of algorithms that attempt to model high-level abstractions in data by using multiple processing layers, with complex structures or otherwise, composed of multiple non-linear transformations.

# <img src="https://ibm.box.com/shared/static/gcbbrh440604cj2nksu3f44be87b8ank.png" alt="HTML5 Icon" style="width:600px;height:450px;">
# <div style="text-align:center">It's time for deep learning. Our brain doesn't work with one or three layers. Why would it be different with machines?</div>

# ---
# In this tutorial, we first classify MNIST using a simple Multi-layer perceptron and then, in the second part, we use deep learning to improve the accuracy of our results.
#
# <a id="ref3"></a>
# # 1st part: classify MNIST using a simple model.
# We are going to create a simple Multi-layer perceptron, a simple type of Neural Network, to perform classification tasks on the MNIST digits dataset. If you are not familiar with the MNIST dataset, please consider reading more about it: <a href="http://yann.lecun.com/exdb/mnist/">click here</a>

# ### What is MNIST?
# According to LeCun's website, the MNIST is a: "database of handwritten digits that has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image".

# ### Import the MNIST dataset using TensorFlow built-in feature
# It's very important to notice that MNIST is a highly optimized data-set and it does not contain images. You will need to build your own code if you want to see the real digits. Another important side note is the effort that the authors invested on this data-set with normalization and centering operations.

# Download (if needed) and load the MNIST data with labels one-hot encoded.
# NOTE(review): tensorflow.examples.tutorials was removed in TensorFlow 2;
# this notebook requires TensorFlow 1.x.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# The <span style="background-color:#dcdcdc "> One-hot = True</span> argument only means that, in contrast to Binary representation, the labels will be presented in a way that only one bit will be on for a specific digit. For example, five and zero in a binary code would be:
# <pre>
# Number representation:    0
# Binary encoding:        [2^5]  [2^4]  [2^3]  [2^2]  [2^1]  [2^0]
# Array/vector:             0      0      0      0      0      0
#
# Number representation:    5
# Binary encoding:        [2^5]  [2^4]  [2^3]  [2^2]  [2^1]  [2^0]
# Array/vector:             0      0      0      1      0      1
# </pre>
# Using a different notation, the same digits using one-hot vector representation can be shown as:
# <pre>
# Number representation:    0
# One-hot encoding:        [5]   [4]   [3]   [2]   [1]   [0]
# Array/vector:             0     0     0     0     0     1
#
# Number representation:    5
# One-hot encoding:        [5]   [4]   [3]   [2]   [1]   [0]
# Array/vector:             1     0     0     0     0     0
# </pre>

# ### Understanding the imported data
# The imported data can be divided as follows:
#
# - Training (mnist.train) >> Use the given dataset with inputs and related outputs for training of NN. In our case, if you give an image that you know that represents a "nine", this set will tell the neural network that we expect a "nine" as the output.
#     - 55,000 data points
#     - mnist.train.images for inputs
#     - mnist.train.labels for outputs
#
#
# - Validation (mnist.validation) >> The same as training, but now the data is used to generate model properties (classification error, for example) and from this, tune parameters like the optimal number of hidden units or determine a stopping point for the back-propagation algorithm
#     - 5,000 data points
#     - mnist.validation.images for inputs
#     - mnist.validation.labels for outputs
#
#
# - Test (mnist.test) >> the model does not have access to this information prior to the test phase. It is used to evaluate the performance and accuracy of the model against "real life situations". No further optimization beyond this point.
#     - 10,000 data points
#     - mnist.test.images for inputs
#     - mnist.test.labels for outputs
#

# ### Creating an interactive session
# You have two basic options when using TensorFlow to run your code:
#
# - [Build graphs and run session] Do all the set-up and THEN execute a session to evaluate tensors and run operations (ops)
# - [Interactive session] create your coding and run on the fly.
#
# For this first part, we will use the interactive session that is more suitable for environments like Jupyter notebooks.

sess = tf.InteractiveSession()

# ### Creating placeholders
# It's a best practice to create placeholders before variable assignments when using TensorFlow. Here we'll create placeholders for inputs ("Xs") and outputs ("Ys").
#
# __Placeholder 'X':__ represents the "space" allocated input or the images.
# * Each input has 784 pixels distributed by a 28 width x 28 height matrix
# * The 'shape' argument defines the tensor size by its dimensions.
# * 1st dimension = None. Indicates that the batch size can be of any size.
# * 2nd dimension = 784. Indicates the number of pixels on a single flattened MNIST image.
#
# __Placeholder 'Y':__ represents the final output or the labels.
# * 10 possible classes (0,1,2,3,4,5,6,7,8,9)
# * The 'shape' argument defines the tensor size by its dimensions.
# * 1st dimension = None. Indicates that the batch size can be of any size.
# * 2nd dimension = 10. Indicates the number of targets/outcomes
#
# __dtype for both placeholders:__ if you are not sure, use tf.float32. The limitation here is that the later presented softmax function only accepts float32 or float64 dtypes. For more dtypes, check TensorFlow's documentation <a href="https://www.tensorflow.org/versions/r0.9/api_docs/python/framework.html#tensor-types">here</a>
#

# x: flattened 28x28 input images; y_: one-hot ground-truth labels
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# ### Assigning bias and weights to null tensors
# Now we are going to create the weights and biases; for this purpose they will be used as arrays filled with zeros. The values that we choose here can be critical, but we'll cover a better way on the second part, instead of this type of initialization.

# Weight tensor
W = tf.Variable(tf.zeros([784,10],tf.float32))
# Bias tensor
b = tf.Variable(tf.zeros([10],tf.float32))

# ### Execute the assignment operation
# Before, we assigned the weights and biases but we did not initialize them with null values. For this reason, TensorFlow needs to initialize the variables that you assign.
# Please notice that we're using this notation "sess.run" because we previously started an interactive session.

# run the op initialize_all_variables using an interactive session
# NOTE(review): tf.initialize_all_variables is deprecated in favor of
# tf.global_variables_initializer (TF >= 0.12) — confirm the TF version in use.
sess.run(tf.initialize_all_variables())

# ### Adding Weights and Biases to input
# The only difference from our next operation to the picture below is that we are using the mathematical convention for what is being executed in the illustration. The tf.matmul operation performs a matrix multiplication between x (inputs) and W (weights), and after that the code adds biases.
#
# <img src="https://ibm.box.com/shared/static/88ksiymk1xkb10rgk0jwr3jw814jbfxo.png" alt="HTML5 Icon" style="width:400px;height:350px;">
# <div style="text-align:center">Illustration showing how weights and biases are added to neurons/nodes. </div>
#

#mathematical operation to add weights and biases to the inputs
tf.matmul(x,W) + b

# ### Softmax Regression
# Softmax is an activation function that is normally used in classification problems. It generates the probabilities for the output. For example, our model will not be 100% sure that one digit is the number nine; instead, the answer will be a distribution of probabilities where, if the model is right, the nine number will have the larger probability.
#
# For comparison, below is the one-hot vector for a nine digit label:

# + active=""
# 0 --> 0
# 1 --> 0
# 2 --> 0
# 3 --> 0
# 4 --> 0
# 5 --> 0
# 6 --> 0
# 7 --> 0
# 8 --> 0
# 9 --> 1
# -

# A machine does not have all this certainty, so we want to know what is the best guess, but we also want to understand how sure it was and what was the second better option. Below is an example of a hypothetical distribution for a nine digit:

# + active=""
# 0 -->.0.1%
# 1 -->...2%
# 2 -->...3%
# 3 -->...2%
# 4 -->..12%
# 5 -->..10%
# 6 -->..57%
# 7 -->..20%
# 8 -->..55%
# 9 -->..80%
# -

# apply softmax to the linear layer to obtain per-class probabilities
y = tf.nn.softmax(tf.matmul(x,W) + b)

# Logistic function output is used for the classification between two target classes 0/1. The Softmax function is a generalized type of logistic function. That is, Softmax can output a multiclass categorical probability distribution.

# ### Cost function
# It is a function that is used to minimize the difference between the right answers (labels) and estimated outputs by our Network.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) # ### Type of optimization: Gradient Descent # This is the part where you configure the optimizer for you Neural Network. There are several optimizers available, in our case we will use Gradient Descent that is very well stablished. train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) # ### Training batches # Train using minibatch Gradient Descent. # # In practice, Batch Gradient Descent is not often used because is too computationally expensive. The good part about this method is that you have the true gradient, but with the expensive computing task of using the whole dataset in one time. Due to this problem, Neural Networks usually use minibatch to train. batch = mnist.train.next_batch(50) batch[0].shape type(batch[0]) mnist.train.images.shape #Load 50 training examples for each training iteration for i in range(1000): batch = mnist.train.next_batch(50) train_step.run(feed_dict={x: batch[0], y_: batch[1]}) # ### Test correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) acc = accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}) * 100 print("The final accuracy for the simple ANN model is: {} % ".format(acc) ) sess.close() #finish the session # --- # <a id="ref4"></a> # # Evaluating the final result # Is the final result good? # # Let's check the best algorithm available out there (10th june 2016): # # _Result:_ 0.21% error (99.79% accuracy) # <a href="http://cs.nyu.edu/~wanli/dropc/">Reference here</a> # <a id="ref5"></a> # # How to improve our model? 
# #### Several options as follow: # - Regularization of Neural Networks using DropConnect # - Multi-column Deep Neural Networks for Image Classification # - APAC: Augmented Pattern Classification with Neural Networks # - Simple Deep Neural Network with Dropout # # #### In the next part we are going to explore the option: # - Simple Deep Neural Network with Dropout (more than 1 hidden layer) # --- # <a id="ref6"></a> # # 2nd part: Deep Learning applied on MNIST # In the first part, we learned how to use a simple ANN to classify MNIST. Now we are going to expand our knowledge using a Deep Neural Network. # # # Architecture of our network is: # # - (Input) -> [batch_size, 28, 28, 1] >> Apply 32 filter of [5x5] # - (Convolutional layer 1) -> [batch_size, 28, 28, 32] # - (ReLU 1) -> [?, 28, 28, 32] # - (Max pooling 1) -> [?, 14, 14, 32] # - (Convolutional layer 2) -> [?, 14, 14, 64] # - (ReLU 2) -> [?, 14, 14, 64] # - (Max pooling 2) -> [?, 7, 7, 64] # - [fully connected layer 3] -> [1x1024] # - [ReLU 3] -> [1x1024] # - [Drop out] -> [1x1024] # - [fully connected layer 4] -> [1x10] # # # The next cells will explore this new architecture. 
# ### Starting the code # + import tensorflow as tf # finish possible remaining session sess.close() #Start interactive session sess = tf.InteractiveSession() # - # ### The MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # ### Initial parameters # Create general parameters for the model width = 28 # width of the image in pixels height = 28 # height of the image in pixels flat = width * height # number of pixels in one image class_output = 10 # number of possible classifications for the problem # ### Input and output # Create place holders for inputs and outputs x = tf.placeholder(tf.float32, shape=[None, flat]) y_ = tf.placeholder(tf.float32, shape=[None, class_output]) # #### Converting images of the data set to tensors # The input image is a 28 pixels by 28 pixels and 1 channel (grayscale) # # In this case the first dimension is the __batch number__ of the image (position of the input on the batch) and can be of any size (due to -1) x_image = tf.reshape(x, [-1,28,28,1]) x_image # ### Convolutional Layer 1 # #### Defining kernel weight and bias # Size of the filter/kernel: 5x5; # Input channels: 1 (greyscale); # 32 feature maps (here, 32 feature maps means 32 different filters are applied on each image. So, the output of convolution layer would be 28x28x32). In this step, we create a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]` W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1)) b_conv1 = tf.Variable(tf.constant(0.1, shape=[32])) # need 32 biases for 32 outputs # <img src="https://ibm.box.com/shared/static/f4touwscxlis8f2bqjqg4u5zxftnyntc.png" style="width:800px;height:400px;" alt="HTML5 Icon" > # # #### Convolve with weight tensor and add biases. # # Defining a function to create convolutional layers. To creat convolutional layer, we use __tf.nn.conv2d__. 
It computes a 2-D convolution given 4-D input and filter tensors. # # Inputs: # - tensor of shape [batch, in_height, in_width, in_channels]. x of shape [batch_size,28 ,28, 1] # - a filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]. W is of size [5, 5, 1, 32] # - stride which is [1, 1, 1, 1] # # # Process: # - change the filter to a 2-D matrix with shape [5\*5\*1,32] # - Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, 28, 28, 5*5*1]`. # - For each patch, right-multiplies the filter matrix and the image patch vector. # # Output: # - A `Tensor` (a 2-D convolution) of size <tf.Tensor 'add_7:0' shape=(?, 28, 28, 32)- Notice: the output of the first convolution layer is 32 [28x28] images. Here 32 is considered as volume/depth of the output image. convolve1= tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1 # <img src="https://ibm.box.com/shared/static/brosafd4eaii7sggpbeqwj9qmnk96hmx.png" style="width:800px;height:400px;" alt="HTML5 Icon" > # # #### Apply the ReLU activation Function # In this step, we just go through all outputs convolution layer, __covolve1__, and wherever a negative number occurs,we swap it out for a 0. It is called ReLU activation Function. h_conv1 = tf.nn.relu(convolve1) # #### Apply the max pooling # Use the max pooling operation already defined, so the output would be 14x14x32 # Defining a function to perform max pooling. The maximum pooling is an operation that finds maximum values and simplifies the inputs using the spacial correlations between them. # # __Kernel size:__ 2x2 (if the window is a 2x2 matrix, it would result in one output pixel) # __Strides:__ dictates the sliding behaviour of the kernel. In this case it will move 2 pixels everytime, thus not overlapping. 
# # # <img src="https://ibm.box.com/shared/static/awyoq0e2r3hfx3n7xrvhw4y7gly683p4.png" alt="HTML5 Icon" style="width:800px;height:400px;"> # # h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #max_pool_2x2 # #### First layer completed layer1= h_pool1 # ### Convolutional Layer 2 # #### Weights and Biases of kernels # Filter/kernel: 5x5 (25 pixels) ; Input channels: 32 (from the 1st Conv layer, we had 32 feature maps); 64 output feature maps # __Notice:__ here, the input is 14x14x32, the filter is 5x5x32, we use 64 filters, and the output of the convolutional layer would be 14x14x64. # # __Notice:__ the convolution result of applying a filter of size [5x5x32] on image of size [14x14x32] is an image of size [14x14x1], that is, the convolution is functioning on volume. # + W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1)) b_conv2 = tf.Variable(tf.constant(0.1, shape=[64])) #need 64 biases for 64 outputs # - # #### Convolve image with weight tensor and add biases. convolve2= tf.nn.conv2d(layer1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')+ b_conv2 # #### Apply the ReLU activation Function h_conv2 = tf.nn.relu(convolve2) # #### Apply the max pooling h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #max_pool_2x2 # #### Second layer completed layer2= h_pool2 # So, what is the output of the second layer, layer2? # - it is 64 matrix of [7x7] # # ### Fully Connected Layer 3 # Type: Fully Connected Layer. You need a fully connected layer to use the Softmax and create the probabilities in the end. Fully connected layers take the high-level filtered images from previous layer, that is all 64 matrics, and convert them to an array. # # So, each matrix [7x7] will be converted to a matrix of [49x1], and then all of the 64 matrix will be connected, which make an array of size [3136x1]. We will connect it into another layer of size [1024x1]. 
So, the weight between these 2 layers will be [3136x1024] # # # <img src="https://ibm.box.com/shared/static/hvbegd0lfr1maxpq2gpq3g8ibvk8d2eo.png" alt="HTML5 Icon" style="width:800px;height:400px;"> # # #### Flattening Second Layer layer2_matrix = tf.reshape(layer2, [-1, 7*7*64]) # #### Weights and Biases between layer 2 and 3 # Composition of the feature map from the last layer (7x7) multiplied by the number of feature maps (64); 1027 outputs to Softmax layer W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1)) b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024])) # need 1024 biases for 1024 outputs # #### Matrix Multiplication (applying weights and biases) fcl3=tf.matmul(layer2_matrix, W_fc1) + b_fc1 # #### Apply the ReLU activation Function h_fc1 = tf.nn.relu(fcl3) # #### Third layer completed layer3= h_fc1 layer3 # #### Optional phase for reducing overfitting - Dropout 3 # It is a phase where the network "forget" some features. At each training step in a mini-batch, some units get switched off randomly so that it will not interact with the network. That is, it weights cannot be updated, nor affect the learning of the other network nodes. This can be very useful for very large neural networks to prevent overfitting. keep_prob = tf.placeholder(tf.float32) layer3_drop = tf.nn.dropout(layer3, keep_prob) # ### Layer 4- Readout Layer (Softmax Layer) # Type: Softmax, Fully Connected Layer. # #### Weights and Biases # In last layer, CNN takes the high-level filtered images and translate them into votes using softmax. 
# Input channels: 1024 (neurons from the 3rd Layer); 10 output features W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1)) #1024 neurons b_fc2 = tf.Variable(tf.constant(0.1, shape=[10])) # 10 possibilities for digits [0,1,2,3,4,5,6,7,8,9] # #### Matrix Multiplication (applying weights and biases) fcl4=tf.matmul(layer3_drop, W_fc2) + b_fc2 # #### Apply the Softmax activation Function # __softmax__ allows us to interpret the outputs of __fcl4__ as probabilities. So, __y_conv__ is a tensor of probablities. y_conv= tf.nn.softmax(fcl4) layer4= y_conv layer4 # --- # <a id="ref7"></a> # # Summary of the Deep Convolutional Neural Network # Now is time to remember the structure of our network # #### 0) Input - MNIST dataset # #### 1) Convolutional and Max-Pooling # #### 2) Convolutional and Max-Pooling # #### 3) Fully Connected Layer # #### 4) Processing - Dropout # #### 5) Readout layer - Fully Connected # #### 6) Outputs - Classified digits # --- # <a id="ref8"></a> # # Define functions and train the model # #### Define the loss function # # We need to compare our output, layer4 tensor, with ground truth for all mini_batch. we can use __cross entropy__ to see how bad our CNN is working - to measure the error at a softmax layer. # # The following code shows an toy sample of cross-entropy for a mini-batch of size 2 which its items have been classified. You can run it (first change the cell type to __code__ in the toolbar) to see hoe cross entropy changes. # + active="" # import numpy as np # layer4_test =[[0.9, 0.1, 0.1],[0.9, 0.1, 0.1]] # y_test=[[1.0, 0.0, 0.0],[1.0, 0.0, 0.0]] # np.mean( -np.sum(y_test * np.log(layer4_test),1)) # - # __reduce_sum__ computes the sum of elements of __(y_ * tf.log(layer4)__ across second dimension of the tensor, and __reduce_mean__ computes the mean of all elements in the tensor.. 
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(layer4), reduction_indices=[1]))
# NOTE(review): tf.log(layer4) is numerically unsafe if a predicted probability
# reaches exactly 0; tf.nn.softmax_cross_entropy_with_logits applied to fcl4
# would be the stable form. Kept as-is to preserve the tutorial's explicit formula.

# #### Define the optimizer
#
# Obviously we want to minimize the error of our network, which is measured by the
# cross_entropy metric. To solve the problem, we have to compute gradients for the loss
# (which is minimizing the cross-entropy) and apply gradients to variables. It will be
# done by an optimizer: GradientDescent or Adagrad.

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# #### Define prediction
# Do you want to know how many of the cases in a mini-batch have been classified correctly? Let's count them.

correct_prediction = tf.equal(tf.argmax(layer4,1), tf.argmax(y_,1))

# #### Define accuracy
# It makes more sense to report accuracy using the average of correct cases.

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# #### Run session, train

# tf.initialize_all_variables() has been deprecated since TensorFlow 0.12;
# tf.global_variables_initializer() is the documented drop-in replacement.
sess.run(tf.global_variables_initializer())

# *If you want a fast result (**it might take some time to train it**)*

for i in range(1100):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        # Evaluate loss/accuracy with dropout disabled (keep_prob=1.0) for an
        # unbiased readout of the current model.
        loss, train_accuracy = sess.run([cross_entropy, accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, loss %g, training accuracy %g"%(i, float(loss), float(train_accuracy)))
    # Train with dropout enabled (keep_prob=0.5) to reduce overfitting.
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

# <div class="alert alert-success alertsuccess" style="margin-top: 20px">
# <font size = 3><strong>*You can run this cell if you REALLY have time to wait (**change the type of the cell to code**)*</strong></font>

# + active=""
# for i in range(20000):
#     batch = mnist.train.next_batch(50)
#     if i%100 == 0:
#         train_accuracy = accuracy.eval(feed_dict={
#             x:batch[0], y_: batch[1], keep_prob: 1.0})
#         print("step %d, training accuracy %g"%(i, train_accuracy))
#     train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# -

# _PS.
# If you have problems running this notebook, please shut down all your running Jupyter notebooks, clear all cell outputs and run each cell only after the completion of the previous cell._

# ---
# <a id="ref9"></a>
# # Evaluate the model

# Print the evaluation to the user

print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

# ## Visualization
# Do you want to look at all the filters?

# Reorder W_conv1 from [5, 5, 1, 32] to [32, 25] so each row is one flattened 5x5 filter.
kernels = sess.run(tf.reshape(tf.transpose(W_conv1, perm=[2, 3, 0,1]),[32,-1]))

from utils import tile_raster_images
import matplotlib.pyplot as plt
from PIL import Image
# %matplotlib inline
image = Image.fromarray(tile_raster_images(kernels, img_shape=(5, 5) ,tile_shape=(4, 8), tile_spacing=(1, 1)))
### Plot image
plt.rcParams['figure.figsize'] = (18.0, 18.0)
imgplot = plt.imshow(image)
imgplot.set_cmap('gray')

# Do you want to see the output of an image passing through first convolution layer?

# import numpy as np
plt.rcParams['figure.figsize'] = (5.0, 5.0)
sampleimage = mnist.test.images[1]
plt.imshow(np.reshape(sampleimage,[28,28]), cmap="gray")

# Run the sample image through the first convolution layer (dropout disabled).
ActivatedUnitsL1 = sess.run(convolve1,feed_dict={x:np.reshape(sampleimage,[1,784],order='F'),keep_prob:1.0})
# BUGFIX: the original inspected `ActivatedUnits.shape` here, a name that is never
# defined (NameError) — it referenced the activations before the assignment above.
# Inspect the defined tensor's shape instead.
ActivatedUnitsL1.shape
filters = ActivatedUnitsL1.shape[3]
plt.figure(1, figsize=(20,20))
n_columns = 6
n_rows = np.math.ceil(filters / n_columns) + 1
for i in range(filters):
    plt.subplot(n_rows, n_columns, i+1)
    plt.title('Cov1_ ' + str(i))
    plt.imshow(ActivatedUnitsL1[0,:,:,i], interpolation="nearest", cmap="gray")

# What about second convolution layer?
ActivatedUnitsL2 = sess.run(convolve2,feed_dict={x:np.reshape(sampleimage,[1,784],order='F'),keep_prob:1.0}) filters = ActivatedUnitsL2.shape[3] plt.figure(1, figsize=(20,20)) n_columns = 8 n_rows = np.math.ceil(filters / n_columns) + 1 for i in range(filters): plt.subplot(n_rows, n_columns, i+1) plt.title('Conv_2 ' + str(i)) plt.imshow(ActivatedUnitsL2[0,:,:,i], interpolation="nearest", cmap="gray") sess.close() #finish the session # ### Thanks for completing this lesson! # <h3>Authors:</h3> # <article class="teacher"> # <div class="teacher-image" style=" float: left; # width: 115px; # height: 115px; # margin-right: 10px; # margin-bottom: 10px; # border: 1px solid #CCC; # padding: 3px; # border-radius: 3px; # text-align: center;"><img class="alignnone wp-image-2258 " src="https://ibm.box.com/shared/static/tyd41rlrnmfrrk78jx521eb73fljwvv0.jpg" alt="<NAME>" width="178" height="178" /></div> # <h4><NAME></h4> # <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a>, PhD is Sr. Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p> # </article> # # <hr> # Learn more about __Deep Leaning and TensorFlow:__ # # <a href="https://cognitiveclass.ai/courses/deep-learning-tensorflow/"><img src = "https://cognitiveclass.ai/wp-content/uploads/2017/03/TF_course_logo-1024x369.png" width = 500, align = "center"></a> # ### References: # # https://en.wikipedia.org/wiki/Deep_learning # http://sebastianruder.com/optimizing-gradient-descent/index.html#batchgradientdescent # http://yann.lecun.com/exdb/mnist/ # https://www.quora.com/Artificial-Neural-Networks-What-is-the-difference-between-activation-functions # https://www.tensorflow.org/versions/r0.9/tutorials/mnist/pros/index.html
tutorials/Step_5b_Underestanding_CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The Matplotlib Jupyter Widget Backend # # Enabling interaction with matplotlib charts in the Jupyter notebook and JupyterLab # # https://github.com/matplotlib/jupyter-matplotlib # + # Enabling the `widget` backend. # This requires jupyter-matplotlib a.k.a. ipympl. # ipympl can be install via pip or conda. # %matplotlib widget import matplotlib.pyplot as plt import numpy as np # - # Testing matplotlib interactions with a simple plot fig = plt.figure() plt.plot(np.sin(np.linspace(0, 20, 100))); # # 3D plotting # + from mpl_toolkits.mplot3d import axes3d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Grab some test data. X, Y, Z = axes3d.get_test_data(0.05) # Plot a basic wireframe. ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) plt.show() # - # # Subplots # + # A more complex example from the matplotlib gallery np.random.seed(0) n_bins = 10 x = np.random.randn(1000, 3) fig, axes = plt.subplots(nrows=2, ncols=2) ax0, ax1, ax2, ax3 = axes.flatten() colors = ['red', 'tan', 'lime'] ax0.hist(x, n_bins, density=1, histtype='bar', color=colors, label=colors) ax0.legend(prop={'size': 10}) ax0.set_title('bars with legend') ax1.hist(x, n_bins, density=1, histtype='bar', stacked=True) ax1.set_title('stacked bar') ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False) ax2.set_title('stack step (unfilled)') # Make a multiple-histogram of data-sets with different length. 
x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]] ax3.hist(x_multi, n_bins, histtype='bar') ax3.set_title('different sample sizes') fig.tight_layout() plt.show() # - # # Interactions with other widgets and layouting # + # When using the `widget` backend from ipympl, # fig.canvas is a proper Jupyter interactive widget, which can be embedded in # an ipywidgets layout. See https://ipywidgets.readthedocs.io/en/stable/examples/Layout%20Templates.html # One can bound figure attributes to other widget values. from ipywidgets import AppLayout, FloatSlider plt.ioff() slider = FloatSlider( orientation='horizontal', description='Factor:', value=1.0, min=0.02, max=2.0 ) slider.layout.margin = '0px 30% 0px 30%' slider.layout.width = '40%' fig = plt.figure() fig.canvas.header_visible = False fig.canvas.layout.min_height = '400px' plt.title('Plotting: y=sin({} * x)'.format(slider.value)) x = np.linspace(0, 20, 500) lines = plt.plot(x, np.sin(slider.value * x)) def update_lines(change): plt.title('Plotting: y=sin({} * x)'.format(change.new)) lines[0].set_data(x, np.sin(change.new * x)) fig.canvas.draw() fig.canvas.flush_events() slider.observe(update_lines, names='value') AppLayout( center=fig.canvas, footer=slider, pane_heights=[0, 6, 1] ) # -
ipympl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.4 64-bit (''base'': conda)'
#     name: python37464bitbasecondae802c8b5a0d74528aa1d856311a88e21
# ---

# +
# Count the number of distinct country names among n input lines.
country = set()
# BUGFIX: input() returns a string in Python 3; range() requires an int.
n = int(input())
for _ in range(n):
    country.add(input())
print(len(country))
# -
# set.add() mutates the set in place and returns None, so the result must not be
# assigned back (the original `country = country.add('5')` replaced the set with
# None, making print() show None instead of the set contents).
country = set()
country.add('5')
print(country)
Set .add().ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/felixbmuller/nlp-commonsense/blob/main/NLP_Commonsense_Assignment_2_KB_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="RTrSIu0TT3as" # # NLP Commonsense Assignment 2 - Knowledge Base Model # # ## Setup # + colab={"base_uri": "https://localhost:8080/"} id="MOsHUjgdIrIW" outputId="d98985cc-1523-45f9-d107-914cf810e2d9" # !pip install -q transformers datasets torch torchvision # !apt install git-lfs >/dev/null # + colab={"base_uri": "https://localhost:8080/", "height": 387, "referenced_widgets": ["a07ec9a74b5d4cf6a146c57098f30d58", "ecfbb6fa4b40480a84063db6595e0eb7", "e2da35c99a0b4361b2aaab987eaae4cd", "94ad517a6aef407d8208a29ca8f8a7ad", "3e4a7e86dc6d4319b5994adb27a3414a", "2f3acc7d10cb4933a6ef65725ed9aa4b", "383185d7fdb44ca8be31e99964c3c0fa", "d5f714f7f4c34ad39b647a687b01e5af", "2ee739c01f934e1685dd74a1f73d0871", "<KEY>", "<KEY>", "<KEY>", "b7226aa2e97a486c9eef6dda5fd17afe", "b3b0a6fd4db043a7aced5cf710922125", "fb7644f81b184af3accea9becec89eb3", "283add761b9043c48e7a1470d16fff22", "9a4beec611b34cd8bfe4d1f049cf32c3"]} id="EmZHn2tbwMCg" outputId="5cc89dd2-fc56-42f7-f70b-f7d0ec945580" from huggingface_hub import notebook_login notebook_login() # + colab={"base_uri": "https://localhost:8080/"} id="hc83C8HDI5RP" outputId="7123bc34-f0cc-4341-fc8b-4de608653cb1" # !git clone https://github.com/felixbmuller/nlp-commonsense.git --depth 1 # + colab={"base_uri": "https://localhost:8080/", "height": 217, "referenced_widgets": ["aee6d7e86c264490bdf6bc2be333d6c0", "60b52139a9774bee9d440aeaf6c73f5e", "cc07736eef5142fb8fbbfdd60a588197", 
"9795ba4197804643818878e35a2d87e4", "<KEY>", "<KEY>", "<KEY>", "8f1f4e4d6fae459e8e3040eb6ecdf22b", "<KEY>", "<KEY>", "fa00a97ca8d14db590550b8feac269c7", "b0bee04b6fc7462ba98051e7f3a090de", "<KEY>", "<KEY>", "<KEY>", "d6a4616ca10b43d0891f4481a9374042", "<KEY>", "aa9c3a79605f4fb3ae964d76ca33b4e4", "<KEY>", "<KEY>", "1f07ef0a3bcc415b8af444e1ae0ec602", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "08fa6c9a506d46759ec997e042b1447c", "e3afecccda46473bb3262748a078dcfb", "<KEY>", "aa9d8d6080e446358fe4e36dbc1ffe98", "340e487a140141129173d08cb0e2aebb", "d9d976c07a3745efa5428539e70c9054", "<KEY>", "<KEY>", "<KEY>", "22ae755c99344a0bbaceab84367ad9a0", "417ceaad46b1448d9d906b3945132683", "<KEY>", "<KEY>", "6e018ad67f8a48a580d11b6144826204", "18bdf5663cab49368f58ef5ef7f1ceab", "<KEY>", "<KEY>", "fa2514d3e6b249ea8d528a59ad8cd342", "6e6412c7e4da49a98a7d0c48cf1de604", "adbae66177964cad848584e08eb15b9a", "<KEY>", "3bdbed2ba16746cea24b5a901a73d394", "496b0b613aad4064a9581d8cb3ad70f0", "3d98e49085724802b651654a279b6a74", "85678b291e224d17b998db2ac8aac468", "f25cd1b2eafa458c9ed7a1f0e359ee5b", "0d2599ecf9884d1db92dc9aff7ae8b2e", "<KEY>", "<KEY>", "3de0a3f058314bb690a1482491a83b6b", "<KEY>", "<KEY>", "f98c4a308ff348e18337828fb5297668", "<KEY>", "ceffea806bde4189b9aad092fbde21c1", "f857694f61b742e79a5712f84a06780b", "<KEY>", "97ba460538224539bee89a9a74d97b0e", "13c8652ec3dc444f93a8c6a088266449", "426b51fd7e4b464a9be6c63da5d37ca6", "45cee06283724547a0427ec422e39c9c", "<KEY>", "<KEY>", "f1f2d183546e42a48fe1053c716ae4da", "df2397af2ed34587a2a47b945ceb9819", "<KEY>", "<KEY>", "<KEY>", "db6a0b84e58b4e93b438f9180cdf1b90", "ec93357fe37345db83aeac8ae832ada1", "<KEY>"]} id="IreSlFmlIrIm" outputId="49fea625-81b6-4bd1-eda9-36f539c76f90" from datasets import load_dataset, load_metric import pandas as pd import transformers print(transformers.__version__) model_checkpoint = "bert-base-uncased" batch_size = 16 datasets = load_dataset("super_glue", "copa") # + [markdown] id="VlqkQKpQJW_n" # ## Setup and 
Test Knowledge Base # + colab={"base_uri": "https://localhost:8080/"} id="B2GGiE0iJfQX" outputId="8d872592-673d-4d09-ccf7-9d645a0c476c" # %cd /content/nlp-commonsense/src/ # !git pull # + colab={"base_uri": "https://localhost:8080/"} id="jDCj3v3DQaPT" outputId="90a3cb1d-e33f-4ba7-9b08-774089305bfa" # %load_ext autoreload # %autoreload 2 import utils import process_examples import find_shortest_path import renderer as R import qa_preprocessing as QA # + id="ooSKqY0ZJqjd" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="8d29b54a-a809-4b58-9342-db87e5f0e87c" conceptnet = utils.load_conceptnet(load_compressed=True) # + colab={"base_uri": "https://localhost:8080/"} id="iQdutE-9NC8j" outputId="1da5d545-59ae-4fc9-dc26-ca87bd97d322" example = datasets["train"][0] example # + colab={"base_uri": "https://localhost:8080/"} id="o-PAQ5QQOJt6" outputId="198eb226-817e-4863-f37e-344042d600b4" print(process_examples.extract_terms(example["premise"])) print(process_examples.extract_terms(example["choice1"])) print(process_examples.extract_terms(example["choice2"])) print(find_shortest_path.find_word_path('body', 'sun', conceptnet)) print(find_shortest_path.find_word_path('body', 'sun', conceptnet, renderer=None)) # + colab={"base_uri": "https://localhost:8080/"} id="5S4Ms9fPmy_M" outputId="df4ee5d7-8f58-43c1-be29-b3db0d5df231" R.render_path_natural([], conceptnet) # + colab={"base_uri": "https://localhost:8080/"} id="qQh6aT4gocnK" outputId="ae83b33b-7abc-41ff-e806-557265ca1e1b" R.render_path_natural([182090, 1539020], conceptnet) # + colab={"base_uri": "https://localhost:8080/"} id="AW4zfFFuQj-J" outputId="09b9c152-2ed5-4eb5-c911-3ba9c7cf1d1d" print(QA.get_knowledge_for_example(example["premise"], example["choice1"], conceptnet, max_paths=100)) print(QA.get_knowledge_for_example(example["premise"], example["choice1"], conceptnet, max_paths=3)) # + [markdown] id="n9qywopnIrJH" # ## Preprocessing the data # + id="T-UHvLveEu_R" from tqdm.notebook import tqdm # + 
colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["0f9e9c19637a4874b1516deb6b1b3e27", "0972a0f2260a4a15a29b4f0d00b014be", "10d5dc3455b1486a84e3c88d4aaf7385", "dac913a8a82547a7b2c8c068a9f02a69", "ab3322419eb8421182161c3af3555d77", "45899703401e4641b2d14a60d21fb854", "16aa4754f38f4559ad2ed352294f1c3c", "ee6d5365f5044f7a974bd03a3ca9133d", "4e7db9fccde64ee9b5a297667d644482", "<KEY>", "0ba59d2b67424af783bca8f5dae9d05f", "232e31e33dda40e28ddec92c854cf0b4", "6ce538116c0342acafaa65ccb547936b", "b51969bd8d444d339e1d83f96f754945", "<KEY>", "e6d0756bafea48838ee962d3bb8bbe49", "5dfdb41ef780407fa0daf66effb90487", "<KEY>", "a03473fc529949078a82a6ba4e82f81f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "853ec55ba12442d7b184aca8f910ed4e", "<KEY>", "4f5fc45fbb0e437a82915f7663d7d5eb", "<KEY>", "fd29334fa7764b528ff20ad1676dc1c3", "e64433a9db1b4364a8a70dd7cff926f4", "<KEY>", "4a16f67fb2a2417ba05c3bca23a826c1", "21c592e2923844e2945966d4e02aeaab", "a7977761b0064674ae0ddfb8728d01ec", "b87807a87d0a4717827fefeb5a340af8", "ca563c1fab0c4c4194a128fa5f5ba586", "47da311f81454a70a1947fcaad4a5559", "4a89782be3064d428cf3e6a7c4fbcc9e", "5bcaba1207a544ab826e81f4f386dc62", "6d73e8941dba4bcfafc2d20917b7aa7b", "26ce9fb246e04f05949a9bd76a21c653", "7d3547de25ef41fdb65d10b42f15d029", "3a8834921f3447c1a71034a2825be927", "d540c189688d4dceafe2a349bccc3a70", "86cdc5df12534f488e0197445a7f3dde"]} id="eXNLu_-nIrJI" outputId="0fc8955a-f264-4212-d4f5-e042fdb589e7" from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True) # + id="vc0BSBLIIrJQ" ending_names = ["choice1", "choice2"] QUESTION_MAP = { "cause": "What was the cause of this?", "effect": "What happened as a RESULT?", } MAX_PATHS = 3 # only take the three most relevant knowledge paths into account def preprocess_function(examples): # Repeat premise and question twice for both possible answers # for each repetitions, add knowledge from the knowledge base in front of # the 
premise. The knowledge added is about connections between the premise # and the answer choice. The type of question (cause/effect) is also taken # into account first_sentences = [ [f"{QA.get_knowledge_for_example(f'{context} {question}', c1, conceptnet, MAX_PATHS)} {context} {QUESTION_MAP[question]}", f"{QA.get_knowledge_for_example(f'{context} {question}', c2, conceptnet, MAX_PATHS)} {context} {QUESTION_MAP[question]}"] for context, question, c1, c2 in zip( tqdm(examples["premise"]), examples["question"], examples["choice1"], examples["choice2"] ) ] # Grab all second sentences possible for each context. second_sentences = [[c1, c2] for c1, c2 in zip(examples["choice1"], examples["choice2"])] # Flatten everything first_sentences = sum(first_sentences, []) second_sentences = sum(second_sentences, []) if not len(first_sentences) == len(second_sentences): raise ValueError("lengths dont match") # Tokenize tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) # Un-flatten return {k: [v[i:i+2] for i in range(0, len(v), 2)] for k, v in tokenized_examples.items()} # + [markdown] id="0lm8ozrJIrJR" # This function works with one or several examples. 
In the case of several examples, the tokenizer will return a list of lists of lists for each key: a list of all examples (here 5), then a list of all choices (4) and a list of input IDs (length varying here since we did not apply any padding): # # ### Test Tokenizer and Preprocessing # + colab={"base_uri": "https://localhost:8080/"} id="a5hBlsrHIrJL" outputId="3e42a321-0e23-45f1-e633-1b52924e5803" tokenizer("Hello, this one sentence!", "And this sentence goes with it.") # + colab={"base_uri": "https://localhost:8080/", "height": 84, "referenced_widgets": ["5f0c4cf4479e4a10bc94f6700c4fff19", "acfa067ef1024c378929bf7eabd45848", "35fcea4bcb25431ab5a7fd899e3e74bc", "3ec7083ffbc74c5cbc89c810cd3b9faf", "a3b34ab61275475d96ac31a503df4eaf", "6d422967a1af4931b45d4a41190f54ef", "ef5685a800424947b203a68f8e2f8778", "fdee26cae8bb41de909d8d711d9997eb", "360f6e06430f4c698f70d40da0419dd3", "415a1b05eb9740159fd15041cc6c771a", "4a6466fa8eb340b7a85e87ea066c5b98"]} id="iNAkSGA39I4v" outputId="0c521389-c91d-4193-d0a3-60b152056331" examples = datasets["train"][:2] features = preprocess_function(examples) print(features.keys()) print(len(features["input_ids"]), len(features["input_ids"][0]), [len(x) for x in features["input_ids"][0]]) # + [markdown] id="yIrsFYVGwMDL" # To check we didn't do anything group when grouping all possibilites then unflattening, let's have a look at the decoded inputs for a given example: # + colab={"base_uri": "https://localhost:8080/"} id="1fS-6Brh4-xR" outputId="9e0cbe7d-67a6-409e-c71c-bbbff8b4715f" len(datasets["train"]), len(datasets["test"]), len(datasets["validation"]) # + colab={"base_uri": "https://localhost:8080/"} id="uw3uC9zlwMDM" outputId="d0d1f00f-0af6-4d90-9372-0e549216a658" idx = 3 [tokenizer.decode(features["input_ids"][idx][i]) for i in range(2)] # + [markdown] id="amc7plFfwMDN" # We can compare it to the ground truth: # + colab={"base_uri": "https://localhost:8080/"} id="7aOlPZ2HwMDN" outputId="1895931a-5cca-4cc1-bbc9-b93561a944c7" 
datasets["train"][3] # + [markdown] id="JpcfLWrgVsxn" # ### Apply Preprocessing to the Whole Dataset # # Applying the preprocessing including querying the knowledge base takes around 15 seconds per example. To avoid lengthy calulcations at every execution, this sections allows to save/retrieve results using Google drive. We do not apply preprocessing to the test set, as it is not needed anyways. # + id="KV-Yt5mtCC2A" import joblib import pyarrow as pa from datasets import Dataset, DatasetDict, concatenate_datasets use_gdrive = False # + colab={"base_uri": "https://localhost:8080/"} id="eyT-ZmzgBSLj" outputId="6b52a9c5-41ac-44e7-e71b-da124127723b" # Mount google drive # You can skip this if you don't want to load/save intermediate results from/to # Google drive from google.colab import drive drive.mount('/content/drive') use_gdrive=True # + colab={"background_save": true, "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["2dd58c9080dd4099ae50a181748b0bb9", "904654b8156640b5b98b805c95044d3e", "ca0b3e8858de4615bf8ed2ef2395a647", "37d52a4f9c3247a1bc50aaebee6ed7f5", "b28acda0e1274da68a446d966ea59b8f", "3ccc2f30a88446f0838ea6878d65fd03", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "496e7f4d81af4871a9335e56a43d979f"]} id="3M43RPgfClKG" outputId="bc61c432-739d-4db6-f364-1eaa590a9c38" encoded_val = preprocess_function(datasets["validation"]) if use_gdrive: joblib.dump(encoded_val, "../../drive/MyDrive/nlp-commonsense/copa_val.joblib") # + id="9d0fFppgAh9U" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["d4e9be92e7764db7a3e5939744cf867d", "a5b7716db39c4e3c8342c72bf7008210", "4609a53ae02b4a4f9fa3a17cf6c76f1e", "c9f87feea2454633a60912c9189f01e2", "<KEY>", "ce679ed580504fd9b41d2d433e9500c2", "4d592a78391a4501800fe97fadb70afe", "<KEY>", "<KEY>", "<KEY>", "<KEY>"]} outputId="44fa6698-2243-47b2-9f62-7b7530f209ca" encoded_train = preprocess_function(datasets["train"]) if use_gdrive: joblib.dump(encoded_train, 
"../../drive/MyDrive/nlp-commonsense/copa_train.joblib") # + id="wcuuuvc_qxsh" if use_gdrive: encoded_val = joblib.load("../../drive/MyDrive/nlp-commonsense/copa_val.joblib") encoded_train = joblib.load("../../drive/MyDrive/nlp-commonsense/copa_train.joblib") # + id="ub4Ykzjm-MGk" train_ds = Dataset(pa.Table.from_pydict(encoded_train)) val_ds = Dataset(pa.Table.from_pydict(encoded_val)) # + id="t_5iJpjm-QHV" # merge tokenizer output with labels from the original dataset train_ds = concatenate_datasets([train_ds, datasets["train"]], split="train", axis=1) val_ds = concatenate_datasets([val_ds, datasets["validation"]], split="validation", axis=1) # + id="cNtAVwlY4l26" encoded_datasets = DatasetDict( train=train_ds, validation=val_ds) # + [markdown] id="eYnlD4WreWi_" # **Add Sorting** # # The following code can be used to sort the datasets according to the average number of tokens (average is needed because each datapoint contains two sequences, one for choice 1 and one for choice 2). As this gave worse results, I did not use this in the final solution. 
# + id="mNvZ7hbIZPAO"
def avg_input_lens(batch):
    # Mean token length of the two choice sequences for each example in the
    # batch; used only for optional length-based sorting of the dataset.
    vals = [(len(v[0]) + len(v[1]))/2 for v in batch["input_ids"]]
    return {"avg_input_len": vals}

# Uncomment to apply sorting
#encoded_datasets = encoded_datasets.map(avg_input_lens, batched=True)
#encoded_datasets = encoded_datasets.sort("avg_input_len")

# + id="w-98r89yaYP2"
# Collect per-choice sequence lengths for the first 400 training examples to
# inspect the length distribution.
s0 = pd.Series(len(encoded_datasets["train"]["input_ids"][i][0]) for i in range(400))
s1 = pd.Series(len(encoded_datasets["train"]["input_ids"][i][1]) for i in range(400))
len_df = pd.DataFrame({"input_ids0": s0, "input_ids1": s1})

# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="CrJ9y_P5aoJP" outputId="c6193a83-ae35-4f1a-bc18-7a1e5f8e2587"
len_df

# + colab={"base_uri": "https://localhost:8080/"} id="36SjIgWr8GgF" outputId="1009e11c-c624-4f41-fdd9-edda6044a8df"
encoded_datasets

# + [markdown] id="545PP3o8IrJV"
# ## Fine-tuning the model

# + colab={"base_uri": "https://localhost:8080/"} id="TlqNaB8jIrJW" outputId="05148327-bec2-4a72-e4ca-ebdbdd5ab157"
from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer
from dataclasses import dataclass
from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
from typing import Optional, Union
import torch
import numpy as np

# NOTE(review): `model_checkpoint`, `batch_size`, `tokenizer` and
# `encoded_datasets` are defined in earlier notebook cells — confirm they are
# executed before this cell.
model = AutoModelForMultipleChoice.from_pretrained(model_checkpoint)

model_name = model_checkpoint.split("/")[-1]
args = TrainingArguments(
    f"{model_name}-finetuned-copa-kb",
    evaluation_strategy = "epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,
)


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs it receives for a
    multiple-choice task: each feature holds one list of input IDs per answer
    choice, all of which are flattened, padded together and reshaped back to
    (batch_size, num_choices, seq_length).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        # The label key may be "label" or "labels" depending on the dataset;
        # pop it so only model inputs are passed to tokenizer.pad.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten: one dict per (example, choice) pair, then pad them all at once.
        flattened_features = [[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features]
        flattened_features = sum(flattened_features, [])

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def compute_metrics(eval_predictions):
    # Accuracy: fraction of examples whose highest-scoring choice equals the label.
    predictions, label_ids = eval_predictions
    preds = np.argmax(predictions, axis=1)
    return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}


trainer = Trainer(
    model,
    args,
    train_dataset=encoded_datasets["train"],
    eval_dataset=encoded_datasets["validation"],
    tokenizer=tokenizer,
    data_collator=DataCollatorForMultipleChoice(tokenizer),
    compute_metrics=compute_metrics,
)

# + [markdown] id="tC2VY9dZwMDX"
# When called on a list of examples, it will flatten all the inputs/attentions masks etc. in big lists that it will pass to the `tokenizer.pad` method. This will return a dictionary with big tensors (of shape `(batch_size * 4) x seq_length`) that we then unflatten.
#
# ### Test Collator
#
# We can check this data collator works on a list of features, we just have to make sure to remove all features that are not inputs accepted by our model (something the `Trainer` will do automatically for us after):

# + id="7QKeXPm5wMDX"
# Keep only the keys the model/collator expect before building a trial batch.
accepted_keys = ["input_ids", "attention_mask", "label"]
features = [{k: v for k, v in encoded_datasets["train"][i].items() if k in accepted_keys} for i in range(10)]
batch = DataCollatorForMultipleChoice(tokenizer)(features)

# + [markdown] id="uYQVVuw8wMDY"
# Again, all those flatten/un-flatten are sources of potential errors so let's make another sanity check on our inputs:

# + colab={"base_uri": "https://localhost:8080/"} id="gUnysV6JwMDY" outputId="584412bd-6841-4fb9-fd00-937666b3faf2"
[tokenizer.decode(batch["input_ids"][8][i].tolist()) for i in range(2)]

# + colab={"base_uri": "https://localhost:8080/"} id="E_kP2Nn0wMDZ" outputId="66b985bf-e588-4e50-f9aa-c333222f217f"
encoded_datasets["train"][8]

# + [markdown] id="CdzABDVcIrJg"
# ### Run Training

# + colab={"base_uri": "https://localhost:8080/", "height": 676} id="TOs9vKvBwMDc" outputId="c331b94d-6bd9-476e-8ffc-dc2d3ea5fae2"
trainer.train()
#model.push_to_hub("felixbmuller/bert-base-uncased-finetuned-copa")

# + [markdown] id="6UtZp0E_W1RY"
# ## Evaluate the Model
#

# + colab={"base_uri": "https://localhost:8080/", "height": 126} id="GDbBFDwsXOkF" outputId="2c8bf0fd-bf64-4814-f6aa-eafe738f9c89"
predictions, label_ids, metrics = trainer.predict(encoded_datasets["validation"], metric_key_prefix="val")

# + colab={"base_uri": "https://localhost:8080/"} id="yKC6LmncK6wX" outputId="85c40ecb-de7e-49ca-e4e0-2554beec9884"
metrics

# + id="xYzBS9gXb6Cb"
# Assemble a per-example results frame: raw logits for both choices plus the
# predicted label, for later error analysis.
val = pd.DataFrame(datasets["validation"])
val["label_ids"] = label_ids
val["pred0"] = predictions[:, 0]
val["pred1"] = predictions[:, 1]
val["pred_label"] = np.argmax(predictions, axis=1)

# + [markdown] id="shDYcMVZdYuv"
# Sanity check to ensure that predictions work the way I expect them to do

# + colab={"base_uri": "https://localhost:8080/"} id="8iomqLf1K-yx" outputId="597f3945-f145-4968-a4cf-e797e0ce6ace"
joblib.dump(val, "../../drive/MyDrive/nlp-commonsense/bert-base-uncased-finetuned-copa-kb-validation-results.joblib")

# + id="G62E5dzHwIDs"
import joblib

val = joblib.load("/content/drive/MyDrive/nlp-commonsense/bert-base-uncased-finetuned-copa-kb-validation-results.joblib")

# + colab={"base_uri": "https://localhost:8080/", "height": 773} id="BRUvWRXEcXBp" outputId="14996314-5f44-4021-8c3a-8ed910f9aecc"
val.head(20)

# + colab={"base_uri": "https://localhost:8080/", "height": 768} id="1lhcUMI2deeP" outputId="c930c393-fc67-4f71-9230-25aa4d0f266b"
# Inspect a random sample of misclassified validation examples.
wrong_samples = val[val.label != val.pred_label]
wrong_samples.sample(25, random_state=42)

# + [markdown] id="74J7g1TEJiGi"
# # Calculate t-test

# + id="thv7XeW17hc5"
# Indices of misclassified examples per question category for the baseline and
# the knowledge-base model (presumably hand-collected from error analysis —
# TODO confirm provenance).
baseline = {
    "P": [91, 70, 65, 52, 98],
    "C": [38, 49, 97, 10, 36, 4, 55],
    "U": [73, 25, 26, 3, 42, 30, 9, 89],
    "E": [35, 8],
    "R": [82, 14, 86]
}

kb_model = {
    "P": [52, 28, 98, 62, 83, 0],
    "C": [38, 55, 10, 63],
    "U": [94, 27, 19, 30, 71, 25, 3, 33],
    "E": [54, 8, 35, 59],
    "R": [14, 82, 17],
}

# + id="SvV47L728cv-"
# Convert index lists to 0/1 error-indicator vectors of length 100 so the
# paired t-test can compare the two models position-by-position.
baseline_vec = {k: [(1 if i in v else 0) for i in range(100)] for k, v in baseline.items()}
kb_model_vec = {k: [(1 if i in v else 0) for i in range(100)] for k, v in kb_model.items()}

# + colab={"base_uri": "https://localhost:8080/"} id="gi2rjC0F9Fd-" outputId="a628bf97-45ab-48b2-b970-959d50e06c41"
print({k: sum(v)*4 for k, v in baseline_vec.items()})
print({k: sum(v)*4 for k, v in kb_model_vec.items()})

# + id="K6CxsTaB9gxz"
from scipy.stats import ttest_rel, ttest_ind

# + colab={"base_uri": "https://localhost:8080/"} id="CtMMiysH-ku2" outputId="f3fa134d-663b-4fb4-ff38-2fe6f11a5c20"
# Paired t-test per category: do the two models differ significantly?
for k in baseline.keys():
    print(f"{k}: stat, p_value: {ttest_rel(baseline_vec[k], kb_model_vec[k])}")
notebooks/NLP_Commonsense_Assignment_2_KB_Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Exploration-I # + # # Questions? # Size of dataset, N features, missing data, data type, distribution/correlations # - # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np df = pd.read_csv('data/titanic-train.csv') type(df) df.head() df.info() df.describe() df.iloc[3] df.loc[0:4, 'Ticket'] df['Ticket'].head() df[['Ticket', 'Embarked']].head() df[df['Age'] > 70] df.query("Age > 70") df[(df['Age'] == 11) & (df['SibSp'] == 5)] df[(df.Age == 11) | (df.SibSp == 5)] df.query('(Age == 11) | (SibSp == 5)') df.Embarked.unique() df['Embarked'].unique() df.sort_values('Age', ascending=False).head()
data_exploration1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/diogojorgebasso/bootcamp-python-igti/blob/main/coletando_twitters.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="O2P3igccermQ"
# https://apps.twitter.com/

# + id="xuXR1ktqZWnR" colab={"base_uri": "https://localhost:8080/"} outputId="6f37e4a2-46f5-430c-b346-aaec605af11d"
# !pip install tweepy

# + id="zVmJTtUgZCMz"
import tweepy
import json

# + id="26gKlhxAfmv4" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="08737f35-b11b-4a6f-d8a1-2a47d9609035"
# Load the Twitter API credentials from a local JSON file
# (expects keys: consumer_key, consumer_secret, access_token, access_token_secret).
with open('credenciais.json') as arquivo:
    credenciais = json.load(arquivo)

# + id="QuLywm3Lgghb" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="e992bd67-861d-48cd-d5f3-0b8e09d3883e"
print(credenciais['consumer_key'])

# + id="fnJfxTtog<PASSWORD>"
# Create the authentication object
auth = tweepy.OAuthHandler(credenciais['consumer_key'], credenciais['consumer_secret'])
# Add the access credentials
auth.set_access_token(credenciais['access_token'], credenciais['access_token_secret'])
# Create the API access object from the authentication
# NOTE(review): this uses the old tweepy v3 API surface (api.search,
# api.trends_place below) — confirm the installed tweepy version matches.
api = tweepy.API(auth)

# + id="ZTeuT7m5hmH3" colab={"base_uri": "https://localhost:8080/", "height": 381} outputId="a94a719a-64c8-4952-881e-5e1ae3baec54"
# Collect the tweets from my home timeline
tweets_da_minha_timeline = api.home_timeline()

# Show the tweets
for tweet in tweets_da_minha_timeline:
    # print the tweet text stored inside each tweet object
    print(tweet.text)  # attribute of the tweet object

# + id="fTZrGkapiXRs"
type(tweets_da_minha_timeline[0])

# + id="XJfmBx7bi25X"
# Collecting tweets through the search API
#
# Create the twitter API object
api = tweepy.API(auth)

# + id="ljLUD_dajAai" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="943ef40f-6884-4fd6-d418-36d78ab89f04"
# The term we want to search for
pesquisa = "Bolsonaro"

# language (ISO 639-1 standard)
linguagem = "pt"

# define the search to be performed
resultados = api.search(q=pesquisa, lang=linguagem)

# print each tweet found
for tweet in resultados:
    # print the text present in the json returned by twitter
    print(tweet.user.screen_name,"Tweetado:",tweet.text)

# + id="lMdTciF4j15t" colab={"base_uri": "https://localhost:8080/", "height": 857} outputId="d4b14216-0848-46d4-b62d-70bc8294a008"
#trending topics
# Where On Earth ID for Brazil is 23424768.
BRAZIL_WOE_ID = 23424768

brazil_trends = api.trends_place(BRAZIL_WOE_ID)
# NOTE(review): the dumps/loads round-trip looks redundant — trends_place
# already returns parsed JSON; left unchanged here.
trends = json.loads(json.dumps(brazil_trends, indent=1))

for trend in trends[0]["trends"]:
    print (trend["name"])#.strip("#"))
coletando_twitters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Sources
# - 'data/Patch_Size_Data_Revisions.csv'
#
# This csv file contains the patch size data from the 26 patients included in Baker et al., Cell Reports, 2014 along with the age and diagnosis of each patient and the total number of crypts analysed. Whether the samples were fresh-frozen (instead of FFPE) is also recorded.

# # Analysis

# +
# Import relevant python modules
import numpy as np
import pandas as pd
from scipy.optimize import fsolve
from skimage.measure import label, regionprops
import matplotlib.pyplot as plt
import seaborn as sns
# -

# We shall define some functions to calculate the fission rate per crypt, apply both the original and revised corrections to the data and print the mean fission rates for the 3 disease groups (Normal, FAP and AFAP).

# +
# Define functions
def patch_size_distribution(b, t, m):
    """Probability K_m(t) of observing a patch of size m after time t
    given fission rate b (per crypt per year)."""
    K_m = (1 - np.exp(-b * t))**m / (b*t*m)
    return K_m

def function_to_solve(b, mean_patch_size, t):
    # This function is a rearranged version of d(LL)/db which will
    # maximise the log-likelihood when set to zero
    func = mean_patch_size - (np.exp(b*t) - 1) / (b*t)
    return func

def wrapper_function(mean_patch_size, t):
    # A wrapper function to pass to fsolve (which can only accept
    # functions that have a single parameter)
    return lambda b: function_to_solve(b, mean_patch_size, t)

def error_on_estimate(b, t, patch_dist):
    # The standard error of a maximum likelihood estimate can be calculated
    # from the Fisher Information, which in turn can be calculated from
    # d^2(LL)/db^2.
    # patch_dist is a Series indexed by patch size ('1'..'15') holding counts.
    patch_size = patch_dist.index.astype(int).values
    sum_m = np.sum(patch_size * patch_dist)
    Npatches = np.sum(patch_dist)
    d2LL_db2 = Npatches/(b**2) + sum_m * (t**2) / (2 - 2*np.cosh(b*t))
    sd = 1 / np.sqrt(- d2LL_db2)
    return sd

def calculate_fission_rate(data):
    # This function calculates the fission rate per crypt for each patient
    # from data in the form of a pandas array using the above wrapper function.
    # Adds 'fission_rate' and 'fission_rate_error' columns in place and
    # returns the same DataFrame.
    patch_data = data.loc[:, '1':'15']
    age = data.Ages
    patch = patch_data.columns.values.astype(int)
    b_rate = list()
    b_rate_error = list()
    b_initial = 0.01  # starting guess for the root finder
    for index, t in enumerate(age):
        patch_dist = patch_data.loc[index, :]
        mean_patch_size = sum(patch * patch_dist) / sum(patch_dist)
        f_fission = wrapper_function(mean_patch_size, t)
        fission_rate = fsolve(f_fission, b_initial)[0]
        error = error_on_estimate(fission_rate, t, patch_dist)
        b_rate.append(fission_rate)
        b_rate_error.append(error)
    data['fission_rate'] = b_rate
    data['fission_rate_error'] = b_rate_error
    return data

def original_correction(data):
    # The original correction applied to data in Baker et al. (2014)
    # in which singleton crypts are ignored.
    # Returns a corrected copy; the input DataFrame is not modified.
    data = data.copy()
    total = data.TotalCrypts
    number_of_neighbours = 4
    number_of_patches = np.sum(data.loc[:, '1':'15'], axis = 1)
    # Correct the data for spontaneous mutation of adjacent patches
    mutation_rate = number_of_patches/total
    data['2'] = (data['2'] - (mutation_rate)**2 * (number_of_neighbours / 2) * total)
    # Clip negative corrected counts to zero.
    data.loc[data['2'] < 0, '2'] = 0
    data['1'] = 0
    return data

def revised_correction(data):
    # The revised correction in which we do not ignore the singleton crypts.
    # Unlike original_correction, this includes the (1-p)^(2*zeta-2) factor
    # and moves spuriously-paired crypts back into the singleton count.
    data = data.copy()
    total = data.TotalCrypts
    number_of_neighbours = 4
    number_of_patches = np.sum(data.loc[:, '1':'15'], axis = 1)
    # Correct the data for spontaneous mutation of adjacent patches
    mutation_rate = number_of_patches/total
    data['1'] = (data['1'] + 2 * (mutation_rate)**2 * (1-mutation_rate)**(2*number_of_neighbours-2) * (number_of_neighbours / 2) * total)
    data['2'] = (data['2'] - (mutation_rate)**2 * (1-mutation_rate)**(2*number_of_neighbours-2) * (number_of_neighbours / 2) * total)
    data.loc[data['2'] < 0, '2'] = 0
    return data

def print_mean_fission_rate(fission_data):
    # A function that prints the mean fission rate of data that has been
    # passed to the calculate_fission_rate function
    disease = ['Normal', 'FAP', 'AFAP']
    for d in disease:
        average = np.mean(fission_data.loc[fission_data['Disease'] == d, 'fission_rate'])
        sd = np.std(fission_data.loc[fission_data['Disease'] == d, 'fission_rate'], ddof=1)
        print('The mean fission rate for {} is {:.2} ± {:.1}'.format(d, average, sd))
# -

# Firstly, we shall import the data from a csv file.

# +
# Load original data from a csv file
OriginalDataPath = 'data/Patch_Size_Data_Revisions.csv'
OriginalData = pd.read_csv(OriginalDataPath)
OriginalData
# -

# We shall now apply the original correction to the original data included in the 2014 paper.
# +
# Apply the 2014 correction and estimate per-patient fission rates.
OriginalData_OriginalCorrections = original_correction(OriginalData)
OriginalData_OriginalCorrections_fission = calculate_fission_rate(OriginalData_OriginalCorrections)
# Round the numeric columns (from column 5 onwards) to 1 d.p. for display.
round_dict = {key:1 for key in OriginalData.columns[5:] }
OriginalData_OriginalCorrections_fission.round(round_dict)
# -

# Print the mean fission rate of the original data
print('Original data with the original correction applied:\n')
print_mean_fission_rate(OriginalData_OriginalCorrections_fission)

# The data included in the 2014 paper included several Formalin-Fixed Paraffin-Embedded (FFPE) tissue rather than fresh-frozen. The enzyme histochemistry techniques described in the manuscript are not effective on FFPE tissue, so an alternative immunohistochemical approach was taken for these samples. Whilst it is likely that these different approaches yield similar staining results, for consistency we explore the effect of excluding these samples from the analysis. We shall now re-run the analysis on the original data for comparison, and finally on the revised dataset.

# +
# Restrict to fresh-frozen samples only.
RevisedData = OriginalData.loc[OriginalData['FreshFrozen'], :]
RevisedData.reset_index(inplace=True)
RevisedData = RevisedData.drop(columns = 'index')

RevisedData_OriginalCorrections = original_correction(RevisedData)
RevisedData_OriginalCorrections_fission = calculate_fission_rate(RevisedData_OriginalCorrections)
RevisedData_OriginalCorrections_fission.round(round_dict)
# -

# Print the mean fission rate of the original data
print('Revised data with the original correction applied:\n')
print_mean_fission_rate(RevisedData_OriginalCorrections_fission)

# Excluding the FFPE samples has a negligible effect on the calculated fission rates, as expected.
#
# The data must be corrected for the possibility of neighbouring crypts being spontaneously labelled (but not clonally related). The original paper gave the probability of spontaneous neighbours becoming labelled as $p^2 (1-p)^{\zeta - 2}/\frac{\zeta}{2}$. However, the number of neighbours of the $m=2$ 'patch' is $2(\zeta - 1)$, not $\zeta - 2$. Further, the factor $\frac{\zeta}{2}$ should multiply rather than divide the other terms to account for the different orientations of the patch. The probability that two neighboring crypts are clonally marked by chance is then given by:
# $$\frac{\zeta}{2} p^2 (1-p)^{2\zeta - 2}$$
# Below we perform a quick simulation to confirm this.

# +
# A simulation to confirm the form of the revised correction
runs = 1000  # Number of simulation runs
p = 0.01  # Probability of a 'crypt' becoming labelled
N = 1000  # Length of square lattice
number_of_crypts = N ** 2  # Number of lattice points
number_of_neighbours = 4  # Coordination of crypts (must be 4 in this simulation)

simulation = np.zeros((runs, 1))
for i in range(runs):
    # Generate a matrix containing number_of_crypts crypts with each crypt having a probability p of being labelled
    simulation_matrix = np.random.choice((True, False), size=(N, N), p=(p, 1-p))
    # Label connected components of labelled crypts (4-connectivity).
    labels = label(simulation_matrix, number_of_neighbours)
    stats = regionprops(labels)
    area = [r.area for r in stats]
    # Count the number of patches of size 2
    num_adjacent_sim = area.count(2)
    simulation[i] = num_adjacent_sim

mean_sim = np.mean(simulation)
std_sim = np.std(simulation)

# Calculate the expectation of patches of size 2 using the equation above
num_adjacent_predict = (p)**2 * (1-p)**(2*number_of_neighbours-2) * (number_of_neighbours / 2) * number_of_crypts

print('The predicted number of patches of size 2 due to spontaneous labelling is {:.1f}'.format(num_adjacent_predict))
print('The mean number of patches of size 2 in the simulation is {:.1f} with standard deviation {:.1f}'.format(mean_sim, std_sim))
relative_error = abs(num_adjacent_predict - mean_sim) / num_adjacent_predict
print('The relative error is {:.1}'.format(relative_error))
# -

# Histogram of the simulated counts with the analytic prediction overlaid.
h = plt.hist(simulation, 9, edgecolor='k', color="#3F5D7D")
l = plt.axvline(num_adjacent_predict, color='r', linestyle='dashed', linewidth=1)

#
# As we can see, the equation well predicts the number of patches of size two due to the spontaneous labelling of neighbouring crypts.
#
# In the calculation performed in the 2014 paper, there was thought to be "too many" singleton crypts and it was hypothesised that there were localized spatial variations in the crypt fission rate. To account for this, the patches of size 1 were excluded from the data. However, on reflection, we believe that the inclusion of the singleton crypts is important as it serves to normalize the number of fission events that have occurred to the number of labelled crypts. We shall now repeat the analysis on the original dataset but including singleton crypts (assuming that each crypt has 4 neighbours).

# +
OriginalData_RevisedCorrections = revised_correction(OriginalData)
OriginalData_RevisedCorrections_fission = calculate_fission_rate(OriginalData_RevisedCorrections)
OriginalData_RevisedCorrections_fission.round(round_dict)
# -

# Print the mean fission rate of the original data
print('Original data with the revised correction applied:\n')
print_mean_fission_rate(OriginalData_RevisedCorrections_fission)

# And finally we apply the appropriate correction to the revised data.

# +
RevisedData_RevisedCorrections = revised_correction(RevisedData)
RevisedData_RevisedCorrections_fission = calculate_fission_rate(RevisedData_RevisedCorrections)
RevisedData_RevisedCorrections_fission.round(round_dict)
# -

# Print the mean fission rate of the original data
print('Revised data with the revised correction applied:\n')
print_mean_fission_rate(RevisedData_RevisedCorrections_fission)

# Including the singleton crypts yields significantly lower mean fission rates across all three patient cohorts.
# The original correction applied to the data ignores the patches of size 1, so as a consistency check we can compare the number of singleton crypts predicted by the patch size distribution using the fission rates estimated from the model with the actual number of observed singleton crypts.

# Expected singleton count per patient under the original-correction rates.
OriginalFissionRate = RevisedData_OriginalCorrections_fission['fission_rate']
ProbabilityPatchOne = patch_size_distribution(OriginalFissionRate, RevisedData['Ages'], 1)
Number_of_patches = np.sum(RevisedData.loc[:, '1':'15'], 1)
ExpectationPatchOne = ProbabilityPatchOne * Number_of_patches
Fraction_CCO = Number_of_patches/RevisedData['TotalCrypts']
FoldChange = RevisedData['1'] / ExpectationPatchOne

RevisedData_Expectation = pd.DataFrame({'PatientNumber':RevisedData['PatientNumber'], 'Disease':RevisedData['Disease'], 'FractionCCODeficient':Fraction_CCO, 'ActualPatchOne':RevisedData['1'], 'ExpectationPatchOne':ExpectationPatchOne, 'FoldChange': FoldChange}, columns=['PatientNumber', 'Disease', 'FractionCCODeficient', 'ActualPatchOne', 'ExpectationPatchOne', 'FoldChange'])
RevisedData_Expectation.round({'FractionCCODeficient':3, 'FoldChange':2}).astype({'ExpectationPatchOne':int})

print('The number of crypts of patch size 1 observed/expected is {:.3}'.format(np.sum(RevisedData['1']) / np.sum(ExpectationPatchOne)))

# The original approach underestimates the number of singleton crypts in 19/20 samples. Note that, as discussed above, some apparent patches of size 2 are actually two clonally unrelated crypts (i.e. 2 'patches' of size 1), so the observed number of singleton crypts is probably lower than the true number, implying the degree to which the original method underestimates the number of singleton crypts is likely to be even higher than that calculated above.

ax = sns.regplot(x='FractionCCODeficient', y='FoldChange', data=RevisedData_Expectation, fit_reg=False)

# There appears to be no correlation between the fraction of CCO deficient crypts and the degree to which the original method underestimates the number of singleton crypts.
# There was an error in the code used to analyse the data for the paper in which the factor of $(1-p)^{2\zeta - 2}$ was not included. This leads to a slight underestimate of the number of doublet crypts.

# +
total = RevisedData.TotalCrypts
number_of_neighbours = 4
number_of_patches = np.sum(RevisedData.loc[:, '1':'15'], axis = 1)
# Correct the data for spontaneous mutation of adjacent patches
mutation_rate = number_of_patches/total

doublets_original = np.sum(RevisedData_OriginalCorrections['2'])
doublets_revised = np.sum(RevisedData_RevisedCorrections['2'])

# NOTE(review): 'orginal' typo in the output string below — left unchanged to
# preserve the recorded notebook output.
print('The number of doublets under the orginal correction - {0:.1f}'.format(doublets_original))
print('The number of doublets under the revised correction - {0:.1f}'.format(doublets_revised))
# -

# However, this has a negligible effect on the calculated fission rates.
#
#
# If we want to discard the singleton crypts to account for possible spatial heterogeneity, we must renormalize the probability distribution to $m \ge 2$ by doing $K'_m(T) = \frac{K_m(T)}{1-K_1(T)}$. This yields:
# $$K'_m(T) = \frac{\left( 1-e^{-\kappa T}\right)^m}{m\left( \kappa T - \left(1 - e^{-\kappa T}\right) \right)}$$
#
# This changes the log-likelihood to:
#
# $$LL' = \sum_{i=1}^{N} m_i \log\left( 1-e^{-\kappa T}\right) - \log(m_i) - \log \left( \kappa T - 1 + e^{-\kappa T}\right)$$
#
# Which is maximised when $\kappa$ satisfies the equation:
# $$ \frac{\left(e^{\kappa T} - 1\right)^2}{1 - e^{\kappa T} + \kappa T e^{\kappa T}} - \frac{1}{N}\sum_{i=1}^{N}m_i =0$$
#
# We shall now perform the above analysis with patches of size $2$ or greater using this approach.
# # + def revised_function_to_solve(b, mean_patch_size, t): # This function is a rearranged version of d(LL)/db which will # maximise the log-likelihood when set to zero func = mean_patch_size - (np.exp(b*t) - 1) ** 2 / (1 - np.exp(b*t) + b*t * np.exp(b*t)) return func def revised_wrapper_function(mean_patch_size, t): # A wrapper function to pass to fsolve (which can only accept # functions that have a single parameter) return lambda b: revised_function_to_solve(b, mean_patch_size, t) # + patch_data = RevisedData_RevisedCorrections.loc[:, '1':'15'].copy() patch_data['1'] = 0 age = RevisedData_RevisedCorrections.Ages patch = patch_data.columns.values.astype(int) b_rate = list() # b_rate_error = list() b_initial = 0.01 for index, t in enumerate(age): patch_dist = patch_data.loc[index, :] mean_patch_size = sum(patch * patch_dist) / sum(patch_dist) f_fission = revised_wrapper_function(mean_patch_size, t) fission_rate = fsolve(f_fission, b_initial)[0] # error = error_on_estimate(fission_rate, t, patch_dist) b_rate.append(fission_rate) # b_rate_error.append(error) RevisedData_RevisedCorrections['revised_fission_rate'] = b_rate # RevisedData_OriginalCorrections['revised_fission_rate_error'] = b_rate_error disease = ['Normal', 'FAP', 'AFAP'] for d in disease: average = np.mean(RevisedData_RevisedCorrections.loc[RevisedData_RevisedCorrections['Disease'] == d, 'revised_fission_rate']) sd = np.std(RevisedData_RevisedCorrections.loc[RevisedData_RevisedCorrections['Disease'] == d, 'revised_fission_rate'], ddof=1) print('The mean fission rate for {} is {:.2} ± {:.1}'.format(d, average, sd)) # - # These rates is in-line with the mean fission rates when we include the singleton crypts, suggesting that the inflated fission rate that was calculated in the 2014 paper was an artefact of artificially inflating the mean-patch-size rather than as a consequence of spatial variation. 
# + RevisedProbabilityPatchOne = patch_size_distribution(RevisedData_RevisedCorrections['revised_fission_rate'], RevisedData['Ages'], 1) RevisedExpectationPatchOne = RevisedProbabilityPatchOne * Number_of_patches RevisedFoldChange = RevisedData['1'] / RevisedExpectationPatchOne Expectation_NewMethod = pd.DataFrame({'PatientNumber':RevisedData['PatientNumber'], 'Disease':RevisedData['Disease'], 'FractionCCODeficient':Fraction_CCO, 'ActualPatchOne':RevisedData['1'], 'ExpectationPatchOne':RevisedExpectationPatchOne, 'FoldChange': RevisedFoldChange}, columns=['PatientNumber', 'Disease', 'FractionCCODeficient', 'ActualPatchOne', 'ExpectationPatchOne', 'FoldChange']) Expectation_NewMethod # - print('The number of crypts of patch size 1 observed/expected is {:.3}'.format(np.sum(RevisedData['1']) / np.sum(RevisedExpectationPatchOne))) # This method better predicts the number of singleton crypts than in the 2014 paper in which we did not explicitly take the omission of singleton crypts into the patch size distribution.
WigglesRevisions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="g_nWetWWd_ns" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="2pHVBk_seED1" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="6msVLevwcRhm" # # Neural style transfer # + [markdown] colab_type="text" id="Ds4o1h4WHz9U" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/alpha/tutorials/generative/style_transfer"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/style_transfer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/style_transfer.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="aDyGj8DmXCJI" # This tutorial uses deep learning to compose one image in the style of another image (ever wish you could paint like Picasso or Van Gogh?). 
This is known as *neural style transfer* and the technique is outlined in <a href="https://arxiv.org/abs/1508.06576" class="external">A Neural Algorithm of Artistic Style</a> (Gatys et al.). # # Neural style transfer is an optimization technique used to take two images—a *content* image and a *style reference* image (such as an artwork by a famous painter)—and blend them together so the output image looks like the content image, but “painted” in the style of the style reference image. # # This is implemented by optimizing the the output image to match the content statistics of the content image and the style statistics of the style reference image. These statistics are extracted from the images using a convolutional network. # # For example, let’s take an image of this turtle and Wassily Kandinsky's Composition 7: # # <img src="http://tensorflow.org/alpha/tutorials/generative/images/Green_Sea_Turtle_grazing_seagrass.jpg" alt="Drawing" style="width: 200px;"/> # # [Image of Green Sea Turtle](https://commons.wikimedia.org/wiki/File:Green_Sea_Turtle_grazing_seagrass.jpg) -By P.Lindgren [CC BY-SA 3.0]((https://creativecommons.org/licenses/by-sa/3.0), from Wikimedia Common # # <img src="http://tensorflow.org/alpha/tutorials/generative/images/kadinsky.jpg" alt="Drawing" style="width: 200px;"/> # # # Now how would it look like if Kandinsky decided to paint the picture of this Turtle exclusively with this style? Something like this? 
# # <img src="http://tensorflow.org/alpha/tutorials/generative/images/kadinsky-turtle.jpg" alt="Drawing" style="width: 500px;"/> # # + [markdown] colab_type="text" id="U8ajP_u73s6m" # ## Setup # # + [markdown] colab_type="text" id="eqxUicSPUOP6" # ### Import and configure modules # + colab={} colab_type="code" id="2Mdpou0qzCm6" from __future__ import absolute_import, division, print_function, unicode_literals # + colab={} colab_type="code" id="NyftRTSMuwue" # !pip install tensorflow-gpu==2.0.0-alpha0 import tensorflow as tf # + colab={} colab_type="code" id="sc1OLbOWhPCO" import IPython.display as display import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['figure.figsize'] = (10,10) mpl.rcParams['axes.grid'] = False import numpy as np import time import functools # + [markdown] colab_type="text" id="oeXebYusyHwC" # Download some images: # + colab={} colab_type="code" id="wqc0OJHwyFAk" turtle_path = tf.keras.utils.get_file('turtle.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Green_Sea_Turtle_grazing_seagrass.jpg') kadinsky_path = tf.keras.utils.get_file('kandinsky.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg') # + [markdown] colab_type="text" id="xE4Yt8nArTeR" # ## Visualize the input # + [markdown] colab_type="text" id="ticzKBxhtzKt" # Choose a style image and a content image: # + colab={} colab_type="code" id="tTxsXG96tvrY" content_path = turtle_path style_path = kadinsky_path # + [markdown] colab_type="text" id="klh6ObK2t_vH" # Define a function to load an image and limit its maximum dimension to 512 pixels. 
# + colab={} colab_type="code" id="3TLljcwv5qZs" def load_img(path_to_img): max_dim = 512 img = tf.io.read_file(path_to_img) img = tf.image.decode_jpeg(img) img = tf.image.convert_image_dtype(img, tf.float32) shape = tf.cast(tf.shape(img)[:-1], tf.float32) long = max(shape) scale = max_dim/long new_shape = tf.cast(shape*scale, tf.int32) img = tf.image.resize(img, new_shape) img = img[tf.newaxis, :] return img # + [markdown] colab_type="text" id="2yAlRzJZrWM3" # Create a simple function to display an image: # + colab={} colab_type="code" id="cBX-eNT8PAK_" def imshow(image, title=None): if len(image.shape) > 3: image = tf.squeeze(image, axis=0) plt.imshow(image) if title: plt.title(title) # + colab={} colab_type="code" id="_UWQmeEaiKkP" plt.figure(figsize=(12, 12)) content_image = load_img(content_path) style_image = load_img(style_path) plt.subplot(1, 2, 1) imshow(content_image, 'Content Image') plt.subplot(1, 2, 2) imshow(style_image, 'Style Image') # + [markdown] colab_type="text" id="GEwZ7FlwrjoZ" # ## Define content and style representations # # Use the intermediate layers of the model to get the *content* and *style* representations of the image. Starting from the network's input layer, the first few layer activations represent low-level features like edges and textures. As you step through the network, the final few layers represent higher-level features—object parts like *wheels* or *eyes*. In this case, you are using the VGG19 network architecture, a pretrained image classification network. These intermediate layers are necessary to define the representation of content and style from the images. For an input image, try to match the corresponding style and content target representations at these intermediate layers. 
# # + [markdown] colab_type="text" id="LP_7zrziuiJk" # Load a [VGG19](https://keras.io/applications/#vgg19) and test run it on our image to ensure it's used correctly: # + colab={} colab_type="code" id="fMbzrr7BCTq0" x = tf.keras.applications.vgg19.preprocess_input(content_image*255) x = tf.image.resize(x, (224, 224)) vgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet') r = vgg(x) # + colab={} colab_type="code" id="1_FyCm0dYnvl" #tf.keras.applications.vgg19.decode_predictions(r.numpy()) labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt') imagenet_labels = np.array(open(labels_path).read().splitlines()) print(imagenet_labels[np.argsort(r)[0,::-1][:5]+1]) # + [markdown] colab_type="text" id="ljpoYk-0f6HS" # Now load a `VGG19` without the classification head, and list the layer names # + colab={} colab_type="code" id="Yh_AV6220ebD" vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet') print() for layer in vgg.layers: print(layer.name) # + [markdown] colab_type="text" id="Wt-tASys0eJv" # Choose intermediate layers from the network to represent the style and content of the image: # # + colab={} colab_type="code" id="ArfX_6iA0WAX" # Content layer where will pull our feature maps content_layers = ['block5_conv2'] # Style layer we are interested in style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] num_content_layers = len(content_layers) num_style_layers = len(style_layers) # + [markdown] colab_type="text" id="2o4nSwuN0U3X" # #### Intermediate layers for style and content # # So why do these intermediate outputs within our pretrained image classification network allow us to define style and content representations? # # At a high level, in order for a network to perform image classification (which this network has been trained to do), it must understand the image. 
This requires taking the raw image as input pixels and building an internal representation that converts the raw image pixels into a complex understanding of the features present within the image. # # This is also a reason why convolutional neural networks are able to generalize well: they’re able to capture the invariances and defining features within classes (e.g. cats vs. dogs) that are agnostic to background noise and other nuisances. Thus, somewhere between where the raw image is fed into the model and the output classification label, the model serves as a complex feature extractor. By accessing intermediate layers of the model, you're able to describe the content and style of input images. # + [markdown] colab_type="text" id="Jt3i3RRrJiOX" # ## Build the model # # The networks in `tf.keras.applications` are designed so you can easily extract the intermediate layer values using the Keras functional API. # # To define a model using the functional API, specify the inputs and outputs: # # `model = Model(inputs, outputs)` # # This following function builds a VGG19 model that returns a list of intermediate layer outputs: # + colab={} colab_type="code" id="nfec6MuMAbPx" def vgg_layers(layer_names): """ Creates a vgg model that returns a list of intermediate output values.""" # Load our model. 
We load pretrained VGG, trained on imagenet data vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet') vgg.trainable = False outputs = [vgg.get_layer(name).output for name in layer_names] model = tf.keras.Model([vgg.input], outputs) return model # + [markdown] colab_type="text" id="jbaIvZf5wWn_" # And to create the model: # + colab={} colab_type="code" id="LkyvPpBHSfVi" style_extractor = vgg_layers(style_layers) style_outputs = style_extractor(style_image*255) #Look at the statistics of each layer's output for name, output in zip(style_layers, style_outputs): print(name) print(" shape: ", output.numpy().shape) print(" min: ", output.numpy().min()) print(" max: ", output.numpy().max()) print(" mean: ", output.numpy().mean()) print() # + [markdown] colab_type="text" id="lGUfttK9F8d5" # ## Calculate style # # The content of an image is represented by the values of the intermediate feature maps. # # It turns out, the style of an image can be described by the means and correlations across the different feature maps. Calculate a Gram matrix that includes this information by taking the outer product of the feature vector with itself at each location, and averaging that outer product over all locations. This Gram matrix can be calcualted for a particular layer as: # # $$G^l_{cd} = \frac{\sum_{ij} F^l_{ijc}(x)F^l_{ijd}(x)}{IJ}$$ # # This can be implemented concisely using the `tf.linalg.einsum` function: # + colab={} colab_type="code" id="HAy1iGPdoEpZ" def gram_matrix(input_tensor): result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor) input_shape = tf.shape(input_tensor) num_locations = tf.cast(input_shape[1]*input_shape[2], tf.float32) return result/(num_locations) # + [markdown] colab_type="text" id="pXIUX6czZABh" # ## Extract style and content # # + [markdown] colab_type="text" id="1HGHvwlJ1nkn" # Build a model that returns the style and content tensors. 
# + colab={} colab_type="code" id="Sr6QALY-I1ja" class StyleContentModel(tf.keras.models.Model): def __init__(self, style_layers, content_layers): super(StyleContentModel, self).__init__() self.vgg = vgg_layers(style_layers + content_layers) self.style_layers = style_layers self.content_layers = content_layers self.num_style_layers = len(style_layers) self.vgg.trainable = False def call(self, input): "Expects float input in [0,1]" input = input*255.0 preprocessed_input = tf.keras.applications.vgg19.preprocess_input(input) outputs = self.vgg(preprocessed_input) style_outputs, content_outputs = (outputs[:self.num_style_layers], outputs[self.num_style_layers:]) style_outputs = [gram_matrix(style_output) for style_output in style_outputs] content_dict = {content_name:value for content_name, value in zip(self.content_layers, content_outputs)} style_dict = {style_name:value for style_name, value in zip(self.style_layers, style_outputs)} return {'content':content_dict, 'style':style_dict} # + [markdown] colab_type="text" id="Xuj1o33t1edl" # When called on an image, this model returns the gram matrix (style) of the `style_layers` and content of the `content_layers`: # + colab={} colab_type="code" id="rkjO-DoNDU0A" extractor = StyleContentModel(style_layers, content_layers) results = extractor(tf.constant(content_image)) style_results = results['style'] print('Styles:') for name, output in sorted(results['style'].items()): print(" ", name) print(" shape: ", output.numpy().shape) print(" min: ", output.numpy().min()) print(" max: ", output.numpy().max()) print(" mean: ", output.numpy().mean()) print() print("Contents:") for name, output in sorted(results['content'].items()): print(" ", name) print(" shape: ", output.numpy().shape) print(" min: ", output.numpy().min()) print(" max: ", output.numpy().max()) print(" mean: ", output.numpy().mean()) # + [markdown] colab_type="text" id="y9r8Lyjb_m0u" # ## Run gradient descent # # With this style and content extractor, you can now 
implement the style transfer algorithm. Do this by calculating the mean square error for your image's output relative to each target, then take the weighted sum of these losses. # # Set your style and content target values: # + colab={} colab_type="code" id="PgkNOnGUFcKa" style_targets = extractor(style_image)['style'] content_targets = extractor(content_image)['content'] # + [markdown] colab_type="text" id="CNPrpl-e_w9A" # Define a `tf.Variable` to contain the image to optimize. To make this quick, initialize it with the content image (the `tf.Variable` must be the same shape as the content image): # + colab={} colab_type="code" id="J0vKxF8ZO6G8" image = tf.Variable(content_image) # + [markdown] colab_type="text" id="M6L8ojmn_6rH" # Since this is a float image, define a function to keep the pixel values between 0 and 1: # + colab={} colab_type="code" id="kdgpTJwL_vE2" def clip_0_1(image): return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0) # + [markdown] colab_type="text" id="MBU5RFpcAo7W" # Create an optimizer. 
The paper recomends LBFGS, but `Adam` works okay, too: # + colab={} colab_type="code" id="r4XZjqUk_5Eu" opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1) # + [markdown] colab_type="text" id="As-evbBiA2qT" # To optimize this, use a weighted combination of the two losses to get the total loss: # + colab={} colab_type="code" id="Dt4pxarvA4I4" style_weight=1e-2 content_weight=1e4 # + colab={} colab_type="code" id="0ggx2Na8oROH" def style_content_loss(outputs): style_outputs = outputs['style'] content_outputs = outputs['content'] style_loss = tf.add_n([tf.reduce_mean((style_outputs[name]-style_targets[name])**2) for name in style_outputs.keys()]) style_loss *= style_weight/num_style_layers content_loss = tf.add_n([tf.reduce_mean((content_outputs[name]-content_targets[name])**2) for name in content_outputs.keys()]) content_loss *= content_weight/num_content_layers loss = style_loss+content_loss return loss # + [markdown] colab_type="text" id="vbF2WnP9BI5M" # Use `tf.GradientTape` to update the image. 
# + colab={} colab_type="code" id="0t0umkajFIuh" @tf.function() def train_step(image): with tf.GradientTape() as tape: outputs = extractor(image) loss = style_content_loss(outputs) grad = tape.gradient(loss, image) opt.apply_gradients([(grad, image)]) image.assign(clip_0_1(image)) # + [markdown] colab_type="text" id="5FHMJq4UBRIQ" # Now run a few steps to test: # + colab={} colab_type="code" id="Y542mxi-O2a2" train_step(image) train_step(image) train_step(image) plt.imshow(image.read_value()[0]) # + [markdown] colab_type="text" id="mNzE-mTbBVgY" # Since it's working, perform a longer optimization: # + colab={} colab_type="code" id="rQW1tXYoLbUS" import time start = time.time() epochs = 10 steps_per_epoch = 100 step = 0 for n in range(epochs): for m in range(steps_per_epoch): step += 1 train_step(image) print(".", end='') display.clear_output(wait=True) imshow(image.read_value()) plt.title("Train step: {}".format(step)) plt.show() end = time.time() print("Total time: {:.1f}".format(end-start)) # + [markdown] colab_type="text" id="GWVB3anJMY2v" # ## Total variation loss # # One downside to this basic implementation is that it produces a lot of high frequency artifacts. Decrease these using an explicit regularization term on the high frequency components of the image. 
In style transfer, this is often called the *total variation loss*: # + colab={} colab_type="code" id="7szUUybCQMB3" def high_pass_x_y(image): x_var = image[:,:,1:,:] - image[:,:,:-1,:] y_var = image[:,1:,:,:] - image[:,:-1,:,:] return x_var, y_var # + colab={} colab_type="code" id="Atc2oL29PXu_" x_deltas, y_deltas = high_pass_x_y(content_image) plt.figure(figsize=(14,10)) plt.subplot(2,2,1) imshow(clip_0_1(2*y_deltas+0.5), "Vertical Deltas: Origional") plt.subplot(2,2,2) imshow(clip_0_1(2*x_deltas+0.5), "Horizontal Deltas: Original") x_deltas, y_deltas = high_pass_x_y(image) plt.subplot(2,2,3) imshow(clip_0_1(2*y_deltas+0.5), "Vertical Deltas: Styled") plt.subplot(2,2,4) imshow(clip_0_1(2*x_deltas+0.5), "Horizontal Deltas: Styled") # + [markdown] colab_type="text" id="lqHElVgBkgkz" # This shows how the high frequency components have increased. # # Also, this high frequency component is basically an edge-detector. You can get similar output from the Sobel edge detector, for example: # + colab={} colab_type="code" id="HyvqCiywiUfL" plt.figure(figsize=(14,10)) sobel = tf.image.sobel_edges(content_image) plt.subplot(1,2,1) imshow(clip_0_1(sobel[...,0]/4+0.5), "Vertical Sobel-edges") plt.subplot(1,2,2) imshow(clip_0_1(sobel[...,1]/4+0.5), "Horizontal Sobel-edges") # + [markdown] colab_type="text" id="vv5bKlSDnPP7" # The regularization loss asociated with this is the sum of the squares of the values: # + colab={} colab_type="code" id="mP-92lXMIYPn" def total_variation_loss(image): x_deltas, y_deltas = high_pass_x_y(image) return tf.reduce_mean(x_deltas**2)+tf.reduce_mean(y_deltas**2) # + [markdown] colab_type="text" id="nTessd-DCdcC" # ## Re-run the optimization # # Choose a weight for the `total_variation_loss`: # + colab={} colab_type="code" id="tGeRLD4GoAd4" total_variation_weight=1e8 # + [markdown] colab_type="text" id="kG1-T4kJsoAv" # Now include it in the `train_step` function: # + colab={} colab_type="code" id="BzmfcyyYUyWq" @tf.function() def train_step(image): 
with tf.GradientTape() as tape: outputs = extractor(image) loss = style_content_loss(outputs) loss += total_variation_weight*total_variation_loss(image) grad = tape.gradient(loss, image) opt.apply_gradients([(grad, image)]) image.assign(clip_0_1(image)) # + [markdown] colab_type="text" id="lcLWBQChsutQ" # Re-initialise the optimization variable: # + colab={} colab_type="code" id="a-dPRr8BqexB" image = tf.Variable(content_image) # + [markdown] colab_type="text" id="BEflRstmtGBu" # And run the optimization: # + colab={} colab_type="code" id="q3Cc3bLtoOWy" import time start = time.time() epochs = 10 steps = 100 step = 0 for n in range(epochs): for m in range(steps_per_epoch): step += 1 train_step(image) print(".", end='') display.clear_output(wait=True) imshow(image.read_value()) plt.title("Train step: {}".format(step)) plt.show() end = time.time() print("Total time: {:.1f}".format(end-start)) # + [markdown] colab_type="text" id="KKox7K46tKxy" # Finally, save the result: # + colab={} colab_type="code" id="SSH6OpyyQn7w" from PIL import Image import matplotlib file_name = 'kadinsky-turtle.png' matplotlib.image.imsave(file_name, image[0]) try: from google.colab import files except ImportError: pass else: files.download(file_name)
site/en/r2/tutorials/generative/style_transfer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import seaborn as sns % matplotlib inline df = pd.read_csv('sodas_data.csv') df.shape df.head() df.rssi.max() df.rssi.min() sns.distplot(df.rssi, kde=False) len(df[df.rssi>-95]) np.union1d(df.user.unique(), df.user2.unique()) strangeusers = df[(df.user>810) | (df.user2>810)] len(strangeusers) df = df[~((df.user>810) | (df.user2>810))] df.shape strangeusers = df[(df.user>810) | (df.user2>810)] len(strangeusers) np.union1d(df.user.unique(), df.user2.unique()) len(np.union1d(df.user.unique(), df.user2.unique())) df = df[['user','user2','ts','rssi']] df.head() df.to_csv('sodas_data_cleaned.csv', index=False)
examproj/data/explore_and_clean_sodas_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### <center> Assignment 3 </center> # # <center> Kaggle:IBM Attrition </center> # Based on your analysis in Assignment 2, express your results in the form of the most appropriate visualization using any visualization library of your choice. # + gather={"logged": 1618165280133} #Imports import pandas as pd import numpy as np import matplotlib.pyplot as plt # #!pip install seaborn import seaborn as sns # - # ## Loading the dataset # + gather={"logged": 1618165280725} df = pd.read_csv('WA_Fn-UseC_-HR-Employee-Attrition.csv') df # - # ## Exploratory Data Analysis # ### Basic understanding of the Dataset # + gather={"logged": 1618165281284} #first 5 records df.head() # + gather={"logged": 1618165281731} #last five records df.tail() # + gather={"logged": 1618165282597} #number of rows and columns in the dataset df.shape # + gather={"logged": 1618165283208} #checking for null values df.isnull().sum() # + gather={"logged": 1618165283724} #basic summary of the dataset df.info() # + gather={"logged": 1618165284836} #ststistical summary of numerical columns df.describe() # - # ### Uncovering factors that may have led to attrition # + gather={"logged": 1618165285567} # Attrition df['Attrition'].value_counts().plot(kind='pie' , title="Distribution of Attrition", startangle=90,autopct='%1.f%%') # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165286271} df['Attrition'].value_counts().plot(kind='bar' , title="Distribution of Attrition") # + gather={"logged": 1618165286726} # Gender df['Gender'].value_counts() # + gather={"logged": 1618165287788} pd.crosstab(df['Gender'],df['Attrition']) # + gather={"logged": 1618165288408} #obtatining the Attrition 
percentages of Male and Female Attrition_Male = (150/732)*100 Attrition_Female = (87/588)*100 print('Attrition_Male:',Attrition_Male, '\n''Attrition_Female:',Attrition_Female) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165289017} # Attrition by Gender ax=sns.catplot(x="Gender", hue="Attrition", data=df, kind='count') plt.title("Attrition by Gender") # + [markdown] nteract={"transient": {"deleting": false}} # More Males attrited than females # + gather={"logged": 1618165290328} # Age df_age = df[['Age','Attrition']] df_age # + gather={"logged": 1618165291014} #employee maximum age df_age['Age'].max() # + gather={"logged": 1618165291681} #employee minimum age df_age['Age'].min() # + gather={"logged": 1618165292195} #Percentage of Attrition per age group group1 = (176/(764+176))*100 group2 = (57/(465+57))*100 print ('Age group 18-39 % of attrition:',group1, '\n','Age group 39-60 % of attrition:',group2 ) # - # The rate of attrition is higher among younger employees i.e below 39 than in older employees above 39 # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165293751} attrited = df.loc[df['Attrition']=='Yes'] sns.catplot(x="Age", hue="Attrition", data=attrited, kind='count' ,height=8.27, aspect=11.7/8.27,) # + gather={"logged": 1618165294481} pd.crosstab(df['Department'],df['Attrition']) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165295441} ax=sns.catplot(x="Department", hue="Attrition", data=df, kind='count',height=8.27, aspect=11.7/8.27) plt.title("Attrition by Department") # - # ### 1. breakdown of distance from home by job role and attrition. 
# + gather={"logged": 1618165295999} df.groupby(['JobRole','Attrition'])['DistanceFromHome'].mean().reset_index() # - # The mean distance for employees who attrited is generally higher than that of employees who did not attrit with exception of Sales Representatives, Research directors and manufacturing directors where the mean distance for employees who did not attrit is slightly higher. # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165296778} # Average Distance by Job role df_distance=df.groupby(['JobRole','Attrition'])['DistanceFromHome'].mean().reset_index() plt.figure(figsize=(20,10)) sns.barplot(x="JobRole", y="DistanceFromHome", hue = 'Attrition', data=df_distance) plt.title("Average Distance From Home by Job Role and Attrition ") plt.xlabel("Job Role") plt.ylabel("Distance") # + [markdown] nteract={"transient": {"deleting": false}} # ### 2. Compare average monthly income by education and attrition. # + gather={"logged": 1618165297394} df.groupby(['Education','Attrition'])['MonthlyIncome'].mean() # + [markdown] nteract={"transient": {"deleting": false}} # For all the education levels employees who attrited had a lesser average monthly income. # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618165297947} # Average Distance by Job role df_monthly_income=df.groupby(['Education','Attrition'])['MonthlyIncome'].mean().reset_index() plt.figure(figsize=(20,10)) sns.barplot(x="Education", y="MonthlyIncome", hue = 'Attrition', data=df_monthly_income) plt.title("Average Monthly Income by Education and Attrition ") plt.xlabel("Education") plt.ylabel("Monthly Income") # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
Assignment3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/netcdf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a> # # Uncomment the following line to install [geemap](https://geemap.org) if needed. # + # # !pip install geemap # - import geemap import os import xarray as xr from ipyleaflet import Map, Velocity, TileLayer, basemaps Map = geemap.Map(center=[44.33, -130.60], zoom=3, interpolation='nearest') Map.add_layer(basemaps.CartoDB.DarkMatter) Map file_path = os.path.abspath('../data/wind-global.nc') if not os.path.exists(file_path): url = 'https://github.com/giswqs/geemap/raw/master/examples/data/wind-global.nc' import requests r = requests.get(url) wind_data = r.content with open(file_path, 'wb') as f: f.write(wind_data) ds = xr.open_dataset(file_path) ds display_options = { 'velocityType': 'Global Wind', 'displayPosition': 'bottomleft', 'displayEmptyString': 'No wind data', } wind = Velocity( data=ds, name='Velocity', zonal_speed='u_wind', meridional_speed='v_wind', latitude_dimension='lat', longitude_dimension='lon', velocity_scale=0.01, max_velocity=20, display_options=display_options, ) Map.add_layer(wind)
examples/notebooks/netcdf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # look at tools/set_up_magics.ipynb yandex_metrica_allowed = True ; get_ipython().run_cell('# one_liner_str\n\nget_ipython().run_cell_magic(\'javascript\', \'\', \'// setup cpp code highlighting\\nIPython.CodeCell.options_default.highlight_modes["text/x-c++src"] = {\\\'reg\\\':[/^%%cpp/]} ;\')\n\n# creating magics\nfrom IPython.core.magic import register_cell_magic, register_line_magic\nfrom IPython.display import display, Markdown, HTML\nimport argparse\nfrom subprocess import Popen, PIPE\nimport random\nimport sys\nimport os\nimport re\nimport signal\nimport shutil\nimport shlex\nimport glob\n\n@register_cell_magic\ndef save_file(args_str, cell, line_comment_start="#"):\n parser = argparse.ArgumentParser()\n parser.add_argument("fname")\n parser.add_argument("--ejudge-style", action="store_true")\n args = parser.parse_args(args_str.split())\n \n cell = cell if cell[-1] == \'\\n\' or args.no_eof_newline else cell + "\\n"\n cmds = []\n with open(args.fname, "w") as f:\n f.write(line_comment_start + " %%cpp " + args_str + "\\n")\n for line in cell.split("\\n"):\n line_to_write = (line if not args.ejudge_style else line.rstrip()) + "\\n"\n if line.startswith("%"):\n run_prefix = "%run "\n if line.startswith(run_prefix):\n cmds.append(line[len(run_prefix):].strip())\n f.write(line_comment_start + " " + line_to_write)\n continue\n run_prefix = "%# "\n if line.startswith(run_prefix):\n f.write(line_comment_start + " " + line_to_write)\n continue\n raise Exception("Unknown %%save_file subcommand: \'%s\'" % line)\n else:\n f.write(line_to_write)\n f.write("" if not args.ejudge_style else line_comment_start + r" line without \\n")\n for cmd in cmds:\n display(Markdown("Run: `%s`" % cmd))\n get_ipython().system(cmd)\n\n@register_cell_magic\ndef cpp(fname, 
cell):\n save_file(fname, cell, "//")\n\n@register_cell_magic\ndef asm(fname, cell):\n save_file(fname, cell, "//")\n \n@register_cell_magic\ndef makefile(fname, cell):\n assert not fname\n save_file("makefile", cell.replace(" " * 4, "\\t"))\n \n@register_line_magic\ndef p(line):\n try:\n expr, comment = line.split(" #")\n display(Markdown("`{} = {}` # {}".format(expr.strip(), eval(expr), comment.strip())))\n except:\n display(Markdown("{} = {}".format(line, eval(line))))\n \ndef show_file(file, clear_at_begin=True, return_html_string=False):\n if clear_at_begin:\n get_ipython().system("truncate --size 0 " + file)\n obj = file.replace(\'.\', \'_\').replace(\'/\', \'_\') + "_obj"\n html_string = \'\'\'\n <!--MD_BEGIN_FILTER-->\n <script type=text/javascript>\n var entrance___OBJ__ = 0;\n var errors___OBJ__ = 0;\n function refresh__OBJ__()\n {\n entrance___OBJ__ -= 1;\n var elem = document.getElementById("__OBJ__");\n if (elem) {\n var xmlhttp=new XMLHttpRequest();\n xmlhttp.onreadystatechange=function()\n {\n var elem = document.getElementById("__OBJ__");\n console.log(!!elem, xmlhttp.readyState, xmlhttp.status, entrance___OBJ__);\n if (elem && xmlhttp.readyState==4) {\n if (xmlhttp.status==200)\n {\n errors___OBJ__ = 0;\n if (!entrance___OBJ__) {\n elem.innerText = xmlhttp.responseText;\n entrance___OBJ__ += 1;\n console.log("req");\n window.setTimeout("refresh__OBJ__()", 300); \n }\n return xmlhttp.responseText;\n } else {\n errors___OBJ__ += 1;\n if (errors___OBJ__ < 10 && !entrance___OBJ__) {\n entrance___OBJ__ += 1;\n console.log("req");\n window.setTimeout("refresh__OBJ__()", 300); \n }\n }\n }\n }\n xmlhttp.open("GET", "__FILE__", true);\n xmlhttp.setRequestHeader("Cache-Control", "no-cache");\n xmlhttp.send(); \n }\n }\n \n if (!entrance___OBJ__) {\n entrance___OBJ__ += 1;\n refresh__OBJ__(); \n }\n </script>\n \n <font color="white"> <tt>\n <p id="__OBJ__" style="font-size: 16px; border:3px #333333 solid; background: #333333; border-radius: 10px; padding: 
10px; "></p>\n </tt> </font>\n <!--MD_END_FILTER-->\n <!--MD_FROM_FILE __FILE__ -->\n \'\'\'.replace("__OBJ__", obj).replace("__FILE__", file)\n if return_html_string:\n return html_string\n display(HTML(html_string))\n \nBASH_POPEN_TMP_DIR = "./bash_popen_tmp"\n \ndef bash_popen_terminate_all():\n for p in globals().get("bash_popen_list", []):\n print("Terminate pid=" + str(p.pid), file=sys.stderr)\n p.terminate()\n globals()["bash_popen_list"] = []\n if os.path.exists(BASH_POPEN_TMP_DIR):\n shutil.rmtree(BASH_POPEN_TMP_DIR)\n\nbash_popen_terminate_all() \n\ndef bash_popen(cmd):\n if not os.path.exists(BASH_POPEN_TMP_DIR):\n os.mkdir(BASH_POPEN_TMP_DIR)\n h = os.path.join(BASH_POPEN_TMP_DIR, str(random.randint(0, 1e18)))\n stdout_file = h + ".out.html"\n stderr_file = h + ".err.html"\n run_log_file = h + ".fin.html"\n \n stdout = open(stdout_file, "wb")\n stdout = open(stderr_file, "wb")\n \n html = """\n <table width="100%">\n <colgroup>\n <col span="1" style="width: 70px;">\n <col span="1">\n </colgroup> \n <tbody>\n <tr> <td><b>STDOUT</b></td> <td> {stdout} </td> </tr>\n <tr> <td><b>STDERR</b></td> <td> {stderr} </td> </tr>\n <tr> <td><b>RUN LOG</b></td> <td> {run_log} </td> </tr>\n </tbody>\n </table>\n """.format(\n stdout=show_file(stdout_file, return_html_string=True),\n stderr=show_file(stderr_file, return_html_string=True),\n run_log=show_file(run_log_file, return_html_string=True),\n )\n \n cmd = """\n bash -c {cmd} &\n pid=$!\n echo "Process started! pid=${{pid}}" > {run_log_file}\n wait ${{pid}}\n echo "Process finished! exit_code=$?" 
>> {run_log_file}\n """.format(cmd=shlex.quote(cmd), run_log_file=run_log_file)\n # print(cmd)\n display(HTML(html))\n \n p = Popen(["bash", "-c", cmd], stdin=PIPE, stdout=stdout, stderr=stdout)\n \n bash_popen_list.append(p)\n return p\n\n\n@register_line_magic\ndef bash_async(line):\n bash_popen(line)\n \n \ndef show_log_file(file, return_html_string=False):\n obj = file.replace(\'.\', \'_\').replace(\'/\', \'_\') + "_obj"\n html_string = \'\'\'\n <!--MD_BEGIN_FILTER-->\n <script type=text/javascript>\n var entrance___OBJ__ = 0;\n var errors___OBJ__ = 0;\n function halt__OBJ__(elem, color)\n {\n elem.setAttribute("style", "font-size: 14px; background: " + color + "; padding: 10px; border: 3px; border-radius: 5px; color: white; "); \n }\n function refresh__OBJ__()\n {\n entrance___OBJ__ -= 1;\n if (entrance___OBJ__ < 0) {\n entrance___OBJ__ = 0;\n }\n var elem = document.getElementById("__OBJ__");\n if (elem) {\n var xmlhttp=new XMLHttpRequest();\n xmlhttp.onreadystatechange=function()\n {\n var elem = document.getElementById("__OBJ__");\n console.log(!!elem, xmlhttp.readyState, xmlhttp.status, entrance___OBJ__);\n if (elem && xmlhttp.readyState==4) {\n if (xmlhttp.status==200)\n {\n errors___OBJ__ = 0;\n if (!entrance___OBJ__) {\n if (elem.innerHTML != xmlhttp.responseText) {\n elem.innerHTML = xmlhttp.responseText;\n }\n if (elem.innerHTML.includes("Process finished.")) {\n halt__OBJ__(elem, "#333333");\n } else {\n entrance___OBJ__ += 1;\n console.log("req");\n window.setTimeout("refresh__OBJ__()", 300); \n }\n }\n return xmlhttp.responseText;\n } else {\n errors___OBJ__ += 1;\n if (!entrance___OBJ__) {\n if (errors___OBJ__ < 6) {\n entrance___OBJ__ += 1;\n console.log("req");\n window.setTimeout("refresh__OBJ__()", 300); \n } else {\n halt__OBJ__(elem, "#994444");\n }\n }\n }\n }\n }\n xmlhttp.open("GET", "__FILE__", true);\n xmlhttp.setRequestHeader("Cache-Control", "no-cache");\n xmlhttp.send(); \n }\n }\n \n if (!entrance___OBJ__) {\n entrance___OBJ__ += 
1;\n refresh__OBJ__(); \n }\n </script>\n\n <p id="__OBJ__" style="font-size: 14px; background: #000000; padding: 10px; border: 3px; border-radius: 5px; color: white; ">\n </p>\n \n </font>\n <!--MD_END_FILTER-->\n <!--MD_FROM_FILE __FILE__.md -->\n \'\'\'.replace("__OBJ__", obj).replace("__FILE__", file)\n if return_html_string:\n return html_string\n display(HTML(html_string))\n\n \nclass TInteractiveLauncher:\n tmp_path = "./interactive_launcher_tmp"\n def __init__(self, cmd):\n try:\n os.mkdir(TInteractiveLauncher.tmp_path)\n except:\n pass\n name = str(random.randint(0, 1e18))\n self.inq_path = os.path.join(TInteractiveLauncher.tmp_path, name + ".inq")\n self.log_path = os.path.join(TInteractiveLauncher.tmp_path, name + ".log")\n \n os.mkfifo(self.inq_path)\n open(self.log_path, \'w\').close()\n open(self.log_path + ".md", \'w\').close()\n\n self.pid = os.fork()\n if self.pid == -1:\n print("Error")\n if self.pid == 0:\n exe_cands = glob.glob("../tools/launcher.py") + glob.glob("../../tools/launcher.py")\n assert(len(exe_cands) == 1)\n assert(os.execvp("python3", ["python3", exe_cands[0], "-l", self.log_path, "-i", self.inq_path, "-c", cmd]) == 0)\n self.inq_f = open(self.inq_path, "w")\n interactive_launcher_opened_set.add(self.pid)\n show_log_file(self.log_path)\n\n def write(self, s):\n s = s.encode()\n assert len(s) == os.write(self.inq_f.fileno(), s)\n \n def get_pid(self):\n n = 100\n for i in range(n):\n try:\n return int(re.findall(r"PID = (\\d+)", open(self.log_path).readline())[0])\n except:\n if i + 1 == n:\n raise\n time.sleep(0.1)\n \n def input_queue_path(self):\n return self.inq_path\n \n def close(self):\n self.inq_f.close()\n os.waitpid(self.pid, 0)\n os.remove(self.inq_path)\n # os.remove(self.log_path)\n self.inq_path = None\n self.log_path = None \n interactive_launcher_opened_set.remove(self.pid)\n self.pid = None\n \n @staticmethod\n def terminate_all():\n if "interactive_launcher_opened_set" not in globals():\n 
globals()["interactive_launcher_opened_set"] = set()\n global interactive_launcher_opened_set\n for pid in interactive_launcher_opened_set:\n print("Terminate pid=" + str(pid), file=sys.stderr)\n os.kill(pid, signal.SIGKILL)\n os.waitpid(pid, 0)\n interactive_launcher_opened_set = set()\n if os.path.exists(TInteractiveLauncher.tmp_path):\n shutil.rmtree(TInteractiveLauncher.tmp_path)\n \nTInteractiveLauncher.terminate_all()\n \nyandex_metrica_allowed = bool(globals().get("yandex_metrica_allowed", False))\nif yandex_metrica_allowed:\n display(HTML(\'\'\'<!-- YANDEX_METRICA_BEGIN -->\n <script type="text/javascript" >\n (function(m,e,t,r,i,k,a){m[i]=m[i]||function(){(m[i].a=m[i].a||[]).push(arguments)};\n m[i].l=1*new Date();k=e.createElement(t),a=e.getElementsByTagName(t)[0],k.async=1,k.src=r,a.parentNode.insertBefore(k,a)})\n (window, document, "script", "https://mc.yandex.ru/metrika/tag.js", "ym");\n\n ym(59260609, "init", {\n clickmap:true,\n trackLinks:true,\n accurateTrackBounce:true\n });\n </script>\n <noscript><div><img src="https://mc.yandex.ru/watch/59260609" style="position:absolute; left:-9999px;" alt="" /></div></noscript>\n <!-- YANDEX_METRICA_END -->\'\'\'))\n\ndef make_oneliner():\n html_text = \'("В этот ноутбук встроен код Яндекс Метрики для сбора статистики использований. Если вы не хотите, чтобы по вам собиралась статистика, исправьте: yandex_metrica_allowed = False" if yandex_metrica_allowed else "")\'\n html_text += \' + "<""!-- MAGICS_SETUP_PRINTING_END -->"\'\n return \'\'.join([\n \'# look at tools/set_up_magics.ipynb\\n\',\n \'yandex_metrica_allowed = True ; get_ipython().run_cell(%s);\' % repr(one_liner_str),\n \'display(HTML(%s))\' % html_text,\n \' #\'\'MAGICS_SETUP_END\'\n ])\n \n\n');display(HTML(("В этот ноутбук встроен код Яндекс Метрики для сбора статистики использований. 
Если вы не хотите, чтобы по вам собиралась статистика, исправьте: yandex_metrica_allowed = False" if yandex_metrica_allowed else "") + "<""!-- MAGICS_SETUP_PRINTING_END -->")) #MAGICS_SETUP_END # # Что такое сигналы и как закрыть vim # # Сигналы, которые можно послать из терминала запущенной программе: # * Ctrl-C посылает SIGINT (от interrupt), обычное действие - завершение программы. # * Ctrl-\\ посылает SIGQUIT, обычное действие - завершение с дампом памяти. **В целом срабатывает чаще чем Ctrl-C** # * Ctrl-Z посылает SIGTSTP, обычное действие - остановка процесса. То есть как SIGSTOP # # Другие полезные сигналы: # * SIGKILL - безусловное убиение процесса. # * SIGSTOP - безусловная остановка программы. # * SIGCONT - продолжение выполнения (отмена SIGSTOP) # # Как убить неубиваемое? # * `killall -9 vim` или `ps aux | grep vim`, а потом уже `kill -9 <selected_pid>`. Надо заметить, что `-9` лучше писать как `-SIGKILL`, но это длиннее, так что на конкретной платформе в интерактивном режиме проще писать `-9`. А `-SIGKILL` оставить для переносимых приложений. # # # [Ссылка на ридинг Яковлева](https://github.com/victor-yacovlev/mipt-diht-caos/tree/master/practice/signal-1) # и на [вторую его часть](https://github.com/victor-yacovlev/mipt-diht-caos/blob/master/practice/signal-2/README.md) # # [Пост на хабре](https://habr.com/ru/post/141206/) # # [Правила использования сигналов в UNIX](https://www.opennet.ru/base/dev/unix_signals.txt.html) - хорошая статья о том, как работать с сигналами. # # **Все это дело (сигналы) плохо сочетается с потоками** # # <details> # <summary>Доставка сигналов в случае, когда есть несколько потоков</summary> # <p> # # Сигнал поступает в нить, если он не должен игнорироваться. 
Доставка сигналов в процессах с несколькими нитями подчиняется следующим правилам: # # * Если по сигналу следует завершить, остановить или продолжить целевую нить, то при обработке сигнала соответственно завершается, останавливается или возобновляется весь процесс (а следовательно, все его нити). Это означает, что программы с одной нитью можно переработать в программы с несколькими нитями, не изменяя в них видимую сторону обработки сигналов. # Рассмотрим пользовательскую команду с несколькими нитями, например, команду grep. Пользователь может запустить эту команду из оболочки и затем попытаться прервать ее выполнение, передав соответствующий сигнал командой kill. Очевидно, что этот сигнал прервет весь процесс, в котором выполняется команда grep. # # * Сигналы, соответствующие конкретной нити и отправленные с помощью функций pthread_kill или raise, передаются в эту нить. Если эта нить заблокировала доставку данного сигнала, то сигнал переходит в состояние ожидания на уровне нити, пока доставка не будет разблокирована. Если выполнение нити завершилось раньше доставки сигнала, то сигнал будет проигнорирован. # * Сигналы, соответствующие процессу и отправленные, например, с помощью функции kill, передаются только одной нити процесса. Если одна или несколько нитей вызвали функцию sigwait, то сигнал передается ровно в одну из них. В противном случае сигнал передается ровно в одну нить из числа тех нитей, которые не блокировали его доставку. Если нитей, удовлетворяющих этим условиям, нет, то сигнал переходит в состояние ожидания на уровне процесса до тех пор, пока какая-либо нить не вызовет функцию sigwait с указанием этого сигнала или пока доставка не будет разблокирована. # # # Если ожидающий сигнал (на уровне нити или процесса) должен игнорироваться, то он игнорируется. 
# # [Источник](http://www.regatta.cs.msu.su/doc/usr/share/man/info/ru_RU/a_doc_lib/aixprggd/genprogc/signal_mgmt.htm) # # **Как с этим жить?** Принимать сигналы только в одном потоке и вызывать хендлеры в контексте выполнения потока. Тогда можно будет не беспокоиться о async-signal safety и ограничиться thread safety. # # </p> # </details> # # # ## Как сигналы приходят в программу? # # Когда одна программу отправляет сигнал другой, то этот сигнал записывается в атрибуты программы получателя. Если это обычный сигнал, то проставится бит в маске ожидающих доставки сигналов (SigPnd), если сигнал реального времени, то запишется в очередь сигналов. # # Сама программа-получатель из своего кода не взаимодействует с маской ожидающих сигналов или очередью сигналов. С ними взаимодействует ядро системы. Оно же обрабатывает (или не обрабатывает, если доставка сигналов заблокирована SigBlk) эти сигналы либо действиями по умолчанию, либо игнорированем (SigIgn), либо **останавливая выполнение программы в произвольный для нее момент** и вызывая в ней обработчик сигнала в отдельном контексте (на отдельном стеке вызовов функций). # # Отсюда вытекает **требование к асинхронной безопасности обработчиков сигналов**. Например, если основная программа может быть прервана на обработку сигнала в момент вызова printf, и в обработчике тоже используется эта функция, то есть шанс, что в вас все взорвется (испортится глобальный стейт функции printf и, возможно, еще что-то) или намертво зависнет (если в основной программе был взят lock, и теперь снова пытается взяться уже из обработчика сигнала). # # Бороться с этим можно несколькими способами: # 1. Писать честные асинхронно-безопасные обработчики (взаимодействие с остальной частью программы только через `volatile sig_atomic_t` переменные и системные вызовы). # <br> Как вариант в обработчике **можно писать в пайп**. То есть получить некий аналог `signalfd`, только переносимый. # 1. 
Использовать `sigsuspend` (чтобы обработчики могли выполняться, только пока выполняется sigsuspend) # 1. Использовать `signalfd`. С ней можно обрабатывать сигналы в контексте основного потока программы, соответственно никаких требований к асинхронной безопасности. Но это linux-специфичное решение. # ## Что значит завершение с дампом памяти? # + # %%cpp coredump.c # %run gcc -g coredump.c -o coredump.exe # %run rm core # удаляем старый файл с coredump # %run ./coredump.exe #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/time.h> #include <sys/resource.h> // can be replaced with 'ulimit -c unlimited' in terminal void enable_core() { struct rlimit rlim; assert(0 == getrlimit(RLIMIT_CORE, &rlim)); rlim.rlim_cur = rlim.rlim_max; assert(0 == setrlimit(RLIMIT_CORE, &rlim)); } int f(int a) { if (1) { assert(a > 4); // тоже вызывает SIGABRT } else { if (a < 4) { raise(SIGABRT); // посылаем сигнал себе } } return 0; } int main(int argc, char** argv) { enable_core(); return f(argc); } # - # !test -f ./core && gdb -ex='r' -ex="bt full" -batch ./coredump.exe ./core || echo "No core file :(" # # alarm - таймер с использованием сигналов # # Системный вызов `alarm` запускает таймер, по истечении которого процесс сам себе отправит сигнал `SIGALRM`. # # ``` # unsigned int alarm(unsigned int seconds); # ``` # + # %%cpp alarm.c # %run gcc -g alarm.c -o alarm.exe # %run timeout -s SIGKILL 5 ./alarm.exe ; echo $? # выводим так же код возврата #include <unistd.h> #include <stdio.h> int main() { alarm(3); pause(); printf("Is this command unreachable?"); // достижима ли эта команда? return 0; } # - # # signal # В прошлом примере мы заметили, что использование сигналов без их обработки имеет ограниченную ценность. # # Поэтому рассмотрим способ для начала блокировать сигналы. # + # %%cpp alarm_block.c # %run gcc -g alarm_block.c -o alarm_block.exe # %run timeout -s SIGKILL 5 ./alarm_block.exe ; echo $? 
# выводим так же код возврата #include <unistd.h> #include <stdio.h> #include <signal.h> int main() { signal(SIGALRM, SIG_IGN); alarm(3); pause(); printf("Is this command unreachable?"); // достижима ли эта команда? return 0; } # - # Упс, но теперь сигнал вообще до нас не доходит. # # Это бывает довольно полезно, например, если мы хотим сделать программу устойчивой к прерываниям через Ctrl-C. Но в остальных случаях это не помогает. # # Значит нужно явно задавать обработчики сигналов (пусть даже пустые). Это можно делать через вызов signal, передавая функцию-обработчик, но так лучше не делать, так как там нестандартизированное поведение и лучше использовать более новый вызов sigaction. Впрочем, особенности вызова signal стоит знать, если вы вдруг на него наткнетесь. # # # sigaction # # Позволяет установить функцию-обработчик на сигнал. Функция-обработчик должна быть async-signal safe. То есть ее вызов должен быть безопасен в любой момент выполнения основного кода программы. Это условие сильнее чем thread-safe. # + # %%cpp alarm_handle.c # %run gcc -g alarm_handle.c -o alarm_handle.exe # %run ./alarm_handle.exe ; echo $? # выводим так же код возврата #include <unistd.h> #include <stdio.h> #include <signal.h> #include <sys/types.h> static void handler(int signum) { static char buffer[100]; int size = snprintf(buffer, sizeof(buffer), "Get signal %d, do nothing\n", signum); write(2, buffer, size); // можно использовать системные вызовы, они async-signal safe // fprintf(stderr, "Get signal %d, do nothing\n", signum); // А вот это уже использовать нелья } int main() { sigaction(SIGALRM, // лаконичный способ использования структуры, но не совместим с С++ &(struct sigaction){ .sa_handler = handler, .sa_flags = SA_RESTART // используйте всегда. Знаю, что waitpid очень плохо себя ведет, когда прерывается сигналом }, NULL); alarm(1); pause(); printf("Is this command unreachable?\n"); // достижима ли эта команда? 
return 0; } # - # # Делаем программу-терминатора # # По умолчанию все сигналы обрабатываются немедленно, но это может вызвать гонку данных и поэтому неудобно. К счастью, есть способ приостановить обработку сигналов до поры до времени (заблокировать сигнал), а потом, когда захочется, выполнить обработчики. # # # sigprocmask, sigsuspend # sigprocmask позволяет выбрать сигналы, которые будут заблокированы. sigsuspend позволяет подождать, пока придут определенные сигналы (он как бы разблокирует сигналы, подождет пока что-то придет, потом снова заблокирует). # # Если сигнал придет в то время когда он заблокирован. А потом сигнал разблокируется и снова заблокируется с помощью sigprocmask, то гарантируется, что он будет обработан в разблокированный промежуток. (Если таких сигналов несколько, то гарантия только для одного). [Источник.](https://www.gnu.org/software/libc/manual/html_node/Process-Signal-Mask.html) # # В следующем примере нужно вручную из терминала посылать сигналы в запущенный процесс (`kill -SIGINT <pid>`) # + # %%cpp terminator.c # %run gcc -g terminator.c -o terminator.exe # %run timeout -s SIGKILL 3 ./terminator.exe #include <unistd.h> #include <stdio.h> #include <signal.h> #include <sys/types.h> int inside_sigsuspend = 0; static void handler(int signum) { // Сейчас у нас есть некоторая гарантия, что обработчик будет вызван только внутри sigprocmask // (ну или раньше изначального sigprocmask) // поэтому в случае однопоточного приложения можно использовать асинхронно-небезопасные функции fprintf(stderr, "Get signal %d, inside_sigsuspend = %d ( == 1 ?), do nothing\n", signum, inside_sigsuspend); } int main() { sigset_t mask; sigfillset(&mask); sigprocmask(SIG_BLOCK, &mask, NULL); // try comment out for (int signal = 0; signal < 100; ++signal) { sigaction(signal, &(struct sigaction) { .sa_handler=handler, .sa_flags=SA_RESTART, // этот параметр говорит, что во время вызова обработчика сигнала // будут заблокированы сигналы указанные в маске (то есть все) 
.sa_mask=mask }, NULL); } sigemptyset(&mask); printf("pid = %d\n", getpid()); int res = 0; raise(SIGINT); raise(SIGCHLD); raise(SIGCHLD); while (1) { inside_sigsuspend = 1; sigsuspend(&mask); // try comment out inside_sigsuspend = 0; for (int i = 0; i < 10000000; ++i) { res ^= i; } } return res; } # - # # Ping-pong # + # %%cpp pipo.c # %run gcc -g pipo.c -o pipo.exe # %run ./pipo.exe #include <unistd.h> #include <stdio.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> // если здесь не поставить volatile, то компилятор может // соптимизировать `if (last_signal)` до `if (0)`. // Так как, если компилятору не указывать явно, он будет оптимизировать // код как однопоточный (+ без учета возможности прерываний хендлерами сигналов). volatile sig_atomic_t last_signal = 0; static void handler(int signum) { last_signal = signum; // что плохо с таким обработчиком? } int main() { sigset_t mask; sigfillset(&mask); sigprocmask(SIG_BLOCK, &mask, NULL); int signals[] = {SIGUSR1, SIGINT, 0}; for (int* signal = signals; *signal; ++signal) { sigaction(*signal, &(struct sigaction){.sa_handler=handler, .sa_flags=SA_RESTART, .sa_mask=mask}, NULL); } sigemptyset(&mask); int parent_pid = getpid(); int child_pid = fork(); assert(child_pid >= 0); if (child_pid == 0) { while (1) { sigsuspend(&mask); if (last_signal) { if (last_signal == SIGUSR1) { printf("Child process: Pong\n"); fflush(stdout); kill(parent_pid, SIGUSR1); } else { printf("Child process finish\n"); fflush(stdout); return 0; } last_signal = 0; } } } else { for (int i = 0; i < 3; ++i) { printf("Parent process: Ping\n"); fflush(stdout); kill(child_pid, SIGUSR1); while (1) { sigsuspend(&mask); if (last_signal) { last_signal = 0; break; } } } printf("Parent process: Request child finish\n"); fflush(stdout); kill(child_pid, SIGINT); int status; waitpid(child_pid, &status, 0); } return 0; } # - # # Сигналы реального времени. # Они передаются через очередь, а не через маску, как обычные. 
# + # %%cpp sigqueue.c # %run gcc -g sigqueue.c -o sigqueue.exe # %run ./sigqueue.exe #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> volatile sig_atomic_t last_signal = 0; static void handler(int signum) { if (signum == SIGUSR1) { printf("Child process: got SIGUSR1\n"); fflush(stdout); } else if (signum == SIGINT) { printf("Child process: got SIGINT, finish\n"); fflush(stdout); exit(0); } else { printf("Child process: got SIGRTMIN\n"); fflush(stdout); } } int main() { assert(SIGRTMIN < SIGRTMAX); sigset_t mask; sigfillset(&mask); sigprocmask(SIG_BLOCK, &mask, NULL); int signals[] = {SIGUSR1, SIGINT, SIGRTMIN, 0}; for (int* signal = signals; *signal; ++signal) { sigaction(*signal, &(struct sigaction){.sa_handler=handler, .sa_flags=SA_RESTART, .sa_mask=mask}, NULL); } sigemptyset(&mask); int parent_pid = getpid(); int child_pid = fork(); assert(child_pid >= 0); if (child_pid == 0) { while (1) { sigsuspend(&mask); } } else { for (int i = 0; i < 10; ++i) assert(kill(child_pid, SIGUSR1) == 0); for (int i = 0; i < 10; ++i) assert(sigqueue(child_pid, SIGRTMIN, (union sigval){0}) == 0); sleep(1); printf("Parent process: Request child finish with SIGINT\n"); fflush(stdout); kill(child_pid, SIGINT); int status; waitpid(child_pid, &status, 0); } return 0; } # - # # Ping-pong c sigqueue и доп. 
информацией # + # %%cpp pipoqu.c # %run gcc -g pipoqu.c -o pipoqu.exe # %run ./pipoqu.exe #include <unistd.h> #include <stdio.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> volatile sig_atomic_t last_signal = 0; volatile sig_atomic_t last_signal_value = 0; // через info принимаем дополнительный int static void handler(int signum, siginfo_t* info, void* ucontext) { last_signal = signum; last_signal_value = info->si_value.sival_int; // сохраняем переданное число } int main() { sigset_t mask; sigfillset(&mask); sigprocmask(SIG_BLOCK, &mask, NULL); int signals[] = {SIGUSR1, SIGINT, 0}; for (int* signal = signals; *signal; ++signal) { // обратите внимание, что хендлер теперь принимает больше аргументов // и записывается в другое поле // и еще есть флаг SA_SIGINFO, говорящий, что именно такой хендлер будет использоваться sigaction(*signal, &(struct sigaction){ .sa_sigaction = handler, .sa_flags = SA_RESTART | SA_SIGINFO, .sa_mask=mask}, NULL); } sigemptyset(&mask); int parent_pid = getpid(); int child_pid = fork(); assert(child_pid >= 0); if (child_pid == 0) { while (1) { sigsuspend(&mask); if (last_signal) { if (last_signal == SIGUSR1) { printf("Child process: Pong (get %d, send %d)\n", last_signal_value, last_signal_value * 2); fflush(stdout); // вместе с сигналом передаем число sigqueue(parent_pid, SIGUSR1, (union sigval) {.sival_int = last_signal_value * 2 }); } else { printf("Child process finish\n"); fflush(stdout); return 0; } last_signal = 0; } } } else { int child_response = 10; for (int i = 0; i < 3; ++i) { printf("Parent process: Ping (got %d, send %d)\n", child_response, child_response + 1); fflush(stdout); sigqueue(child_pid, SIGUSR1, (union sigval) {.sival_int = child_response + 1 }); while (!last_signal) { sigsuspend(&mask); } last_signal = 0; child_response = last_signal_value; } printf("Parent process: Request child finish\n"); fflush(stdout); kill(child_pid, SIGINT); int status; waitpid(child_pid, &status, 0); } 
return 0; } # - # # Ping-pong c sigqueue и sigwaitinfo доп. информацией # + # %%cpp pipoquwa.c # %run gcc -g pipoquwa.c -o pipoquwa.exe # %run ./pipoquwa.exe #include <unistd.h> #include <stdio.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> int main() { sigset_t full_mask; sigfillset(&full_mask); sigprocmask(SIG_BLOCK, &full_mask, NULL); int parent_pid = getpid(); int child_pid = fork(); assert(child_pid >= 0); if (child_pid == 0) { while (1) { siginfo_t info; sigwaitinfo(&full_mask, &info); // вместо sigsuspend и обработчика int received_signal = info.si_signo; int received_value = info.si_value.sival_int; if (received_signal == SIGUSR1) { printf("Child process: Pong (get %d, send %d)\n", received_value, received_value * 2); fflush(stdout); // вместе с сигналом передаем число sigqueue(parent_pid, SIGUSR1, (union sigval) {.sival_int = received_value * 2 }); } else { printf("Child process finish\n"); fflush(stdout); return 0; } } } else { int child_response = 100; for (int i = 0; i < 3; ++i) { printf("Parent process: Ping (got %d, send %d)\n", child_response, child_response + 1); fflush(stdout); sigqueue(child_pid, SIGUSR1, (union sigval) {.sival_int = child_response + 1 }); siginfo_t info; sigwaitinfo(&full_mask, &info); child_response = info.si_value.sival_int; } printf("Parent process: Request child finish\n"); fflush(stdout); kill(child_pid, SIGINT); int status; waitpid(child_pid, &status, 0); } return 0; } # - # # Как ждать одновременно сигнал и другое событие? # # Процесс получения сигналов сводится к чтению из файлового дескриптора. А для ожидания событий по нескольким файловым дескрипторам есть средства мультиплексирования (`select`, `poll`, `epoll`). # # Для сведЕния есть два варианта: `signalfd` (только linux) и писать в обработчике в пайп. 
# + # %%cpp signalfd.c # %run gcc -g signalfd.c -o signalfd.exe # %run timeout -s SIGINT 1 timeout -s SIGTERM 2 ./signalfd.exe #include <unistd.h> #include <stdio.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/signalfd.h> int main() { sigset_t mask; sigfillset(&mask); sigdelset(&mask, SIGCONT); sigprocmask(SIG_BLOCK, &mask, NULL); // сводим получение сигналов к файловому дескриптору int fd = signalfd(-1, &mask, 0); struct signalfd_siginfo fdsi; while (1) { read(fd, &fdsi, sizeof(struct signalfd_siginfo)); printf("Got signal %d\n", fdsi.ssi_signo); if (fdsi.ssi_signo == SIGTERM) { printf(" ... and it is SIGTERM\n"); break; } } return 0; } # + # %%cpp signalpipe.c # %run gcc -g signalpipe.c -o signalpipe.exe # %run timeout -s SIGINT 1 timeout -s SIGTERM 2 ./signalpipe.exe #define _GNU_SOURCE #include <fcntl.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <assert.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/signalfd.h> #include <errno.h> static int signalpipe_fds[2] = {-1, -1}; static void signalpipe_handler(int signum) { // Если вы зараз получите умопомрачительное число сигналов, то можете переполнить буффер пайпа int written = write(signalpipe_fds[1], &signum, sizeof(int)); if (written < 0) { if (errno != EAGAIN) { dprintf(2, "Strange error during writing to signal pipe"); abort(); } dprintf(2, "Pipe buffer is full, drop signal"); } else if (written != 4) { dprintf(2, "Incomplete writing to signal pipe"); abort(); } } int signalpipe(int* signals) { pipe2(signalpipe_fds, O_CLOEXEC); // Делаем запись неблокирующей fcntl(signalpipe_fds[1], F_SETFL, fcntl(signalpipe_fds[1], F_GETFL, 0) | O_NONBLOCK); for (int* signal = signals; *signal; ++signal) { sigaction(*signal, &(struct sigaction){.sa_handler = signalpipe_handler, .sa_flags = SA_RESTART}, NULL); } return signalpipe_fds[0]; } int main() { // Сводим получение сигналов к файловому дескриптору int 
signals[] = {SIGINT, SIGTERM, 0}; int fd = signalpipe(signals); int signum; while (1) { assert(read(fd, &signum, sizeof(int)) == sizeof(int)); printf("Got signal %d\n", signum); if (signum == SIGTERM) { printf(" ... and it is SIGTERM\n"); break; } } // Закрывать fd (и парный к нему) не будем. // Это синглтон на процесс, а при завершении процесса файловые дескрипторы сами закроются // При желании можно сделать, предварительно заблокировав сигналы. return 0; } # - # # Примеры применения # * мягкая остановка SIGINT и жесткая остановка SIGKILL # * ротирование логов # Вопросы для подготовки к контрольной: # * Что тут не так? Что произойдет? (x86 32-bit) # # ```c # int desired_fd = 4; # printf("We are to open file at %d fd. Yeah really at %d fd\n", desired_fd, desired_fd); # int fd = open("file.txt", O_WRONLY | O_CREAT | O_TRUNC); # dup2(fd, desired_fd); # ``` # * Страничная память: все что знаете # * Жизненный цикл процесса: все что знаете # * TLB-кеш, что это? # * Какая память релаьно копируется при вызове fork()? # * Файлы в linux, файловые системы и все около того. # * Как изменится число? # # ```c # union { # double d; # unsigned long long b; # } u = {1.0}; # # u.b ^= 1ull << 52; # printf("u.d = %lf\n", u.d); # ```
sem13-signal/signal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.0 64-bit (''.venv'': venv)' # name: python3 # --- # # Примеры плохого кода или раздел ```0xbadc0de``` # # Добро пожаловать в раздел ```0xbadc0de```, где собраны забавные и иногда рабочие примеры кода на Python. # # Примеры могут быть не совсем рабочие или частично доработаны до рабочего состояния. # # + # Именовать переменные нужно осмысленно, т.е. что бы по их # названию можно было понять для чего она предназначена. # Плохой практикой является использование: # - однобуквенных имен, кроме использования их в исторически # сложившихся ситуациях, таких как: i, j, k для индексов, # x, y, z для координат и т.д. # - метапеременных: foo, bar, baz и т.д. Исключеним являются # использование в короткий примерах. # - неинформативных имен: lst, lst2, dct, ... lst = [] lst2 = [] msg = 'Введите имя или stop для завершения:' while (name:= input(msg)): lst.append(name) phone = input('Введите номер телефона:') lst2.append(phone) print(f'{lst = }') print(f'{lst2 = }') # + # Нужно создать список, где сохраняются словари вида # {'имя': '...', 'телефоны': [{'описание': '...', 'номер': '...'}, {'описание': '...', 'номер': '...'}, ...]} # Попробуйте самостоятельно понять в чем проблема нижеприведенного кода. # Проблемы: # - неинформативные имена # - использование range(len(lst2)). В теле цикла нужны # элементы двух списков, в этом случае можно использовать # либо enumerate, либо zip для итерирования по двум # коллекциям одновременно # - совершенно неправильная работа со словарем. Словарь # создается один раз, затем его ключи обновляются через update. # Используется глубокое копирование. Здесь нужно создавать # словарь внутри цикла с нужными ключами и добавлть его в список. 
name_per = 'Bob' lst = ['315-194-6020', '555-2368', '8-800-555-35-35'] lst2 = ['(1)', '(2)', '(3)'] import copy dct = {} lst3 = [] lst4 = [] for i in range(len(lst2)): dct.update({'описание': lst2[i], 'номер': lst[i]}) lst3.append(copy.deepcopy(dct)) lst4.append( {'имя': name_per, 'телефоны': lst3}, ) # корректная версия all_phones = ['315-194-6020', '555-2368', '8-800-555-35-35'] descriptions = ['<NAME>', 'Ghostbusters', 'Advertising'] phone_book = [] phones = [] for description, phone in zip(descriptions, all_phones): phones.append({'описание': description, 'номер': phone}) phone_book.append( {'имя': name_per, 'телефоны': phones}, ) print(phone_book) # + # проверка числа x на принадлежность множеству # A = {x | x ≠ 0, x ∈ [2/x, 5]} # Этот пример не совсем рабочий. Предлагается # самостоятельно подумать почему. # Исходное решение x = 2 print('x in A:', 2/x <= x!=0 <= 5) # Этот цикл призван показать неправильность решения for i in range(-10, 10): if i == 0: # Эта проверка добавлена, чтобы избежать исключения print('Что-то пошло не так') elif 2/i <= i!=0 <= 5: print(i, 'in A') # + # проверка числа x на принадлежность множеству # A = {x | x ≠ 0, x ∈ [2/x, 5]} # Этот пример работает не так как было задумано изначально # Исходное решение x = 2 print('x in A:', [2/x, 3] and (x >= 2/x) and x <= 5) # Этот цикл призван показать неправильность решения for i in range(-5, 6): if i == 0: # Эта проверка добавлена, чтобы избежать исключения print('Что-то пошло не так') elif [2/x, 3] and (x >= 2/x) and x <= 5: print(i, 'in A') # - foo = None if str(type(foo))[8:-2] == 'NoneType': print('What the f*ck is this?') # создать список нечетных чисел из интервала [0, 15] a = [i for i in range(16) if i%2 is not 0] print(a) # проверить число x на четность x = 4 if (x % 2) in [0]: print('Четное') else: print('Нечетное') # один из способов распаковать кортеж длиной два в две переменные a = (42, 196) for i in range(len(a)): globals()[f'a{i}'] = a[i] print(a0, a1) # еще один способ распаковать 
кортеж длиной два в две переменные a и b x = (42, 196) for i in range(len(x)): globals()[chr(ord('a') + i)] = x[i] print(a, b) # другой вариант распаковать кортеж длиной два в две переменные a и b x = (42, 196) for i in range(len(x)): if i == 0: a = x[i] else: b = x[i] print(a, b) # сравнение числа с заданными значениями k = 4 if k == 3: print('wait for it...') elif k == 4: print('wait for it...') elif k == 5: print('wait for it...') elif k != 3 or 4 or 5: print('Legendary') # + # один из способо выравнивания строки for i in range(8, 16): s = '' for base in 'bodx': s += ' ' * (10 - len(f'{i:{base}}')) + f'{i:{base}}' print(s) print('-' * 50) # правильный способ выравнивания строки for i in range(8, 16): for base in 'bodx': print(f'{i:>10{base}}', end='\n' if base == 'x' else '') # + # реализовать функцию, добавляющую к списку число 42, # если функция вызвана без аргументов, она должна # возвращать список из одного элемента: [42] # неправильная, но полностью рабочая реализация def foo(a=[]): if a == []: a = [42] else: a.append(42) return a print('(1):', foo()) print('(2):', foo()) print('(3):', foo()) print('(4):', foo([])) print('(5):', foo([1])) print('(6):', foo([1, 2])) # + # реализовать функцию, добавляющую к списку число 42, # если функция вызвана без аргументов, она должна # возвращать список из одного элемента: [42] # еще одна попытка реализации def bar(a=None): b = a a = [] if isinstance(b, list): b.append(42) return b else: a.append(42) return a print('(1):', bar()) print('(2):', bar()) print('(3):', bar()) print('(4):', bar([])) print('(5):', bar([1])) print('(6):', bar([1, 2])) # + # Пользователь вводит строку, разделенную пробелами. # Необходимо проверить, являются ли все элементы строки # числами > 0. Если ввод не удовлетворяет условию, то # нужно запросить повторный ввод. # Этот пример демоснтрирует сразу несколько неправильных # подходов. Первый заключается в зацикливании программы с # помощью рекурсии. Второй в несовмсем правильной проверке ввода. 
# Самостоятельно провертьте ввод на следующих примерах и # постарайтесь понять в чем проблема. # '1 2 3' - корректная работа # '1 2 q' - что-то идет не так, но работает "почти" правильно # '1 0 O O 4' - нужно больше ввода! def foo(): msg = 'Введите числа, раделенные пробелами' input_arr = input(msg).split(' ') for i in range(len(input_arr)): if not input_arr[i].isdigit(): print('Введите число') foo() print('Ввод завершен') foo() # + # Пользователь вводит строку, разделенную пробелами. # Необходимо проверить, являются ли все элементы строки # числами > 0. Если ввод не удовлетворяет условию, то # нужно запросить повторный ввод. # Здесь проблема заключается непосредственно в проверке ввода. # Самостоятельно провертьте ввод на следующих примерах: # '1 2 3' - корректная работа # '1 2 q' - будте отсторожны # '1 0 O O 4' - нужно больше ввода! def foo(): msg = 'Введите числа, раделенные пробелами' input_str = input(msg).split(' ') for c in input_str: while not c.isdigit(): print('Введите число') input_str = input().split(' ') print('Ввод завершен') foo() # + # безумная альфа версия языка программирования РУтон печать = print икс, игрек = 6, 4 печать('Вектор:', (икс, игрек)) расстояние = (икс**2 + игрек**2) ** 0.5 печать('Длина вектора', расстояние) # + # Пример бесполезного наследования # Здесь пример классов вроде "namedtuple", но еще более бесполезны. # Самостоятельно определите проблемы этого кода. class Plain: coefficient = 1 hours = 6 base_price = 130 class ExPlain(Plain): coefficient = 1.5 hours = 3 base_price = 110 a = Plain() print(f'{a.coefficient = }, {a.hours = }, {a.base_price = }') b = ExPlain() print(f'{b.coefficient = }, {b.hours = }, {b.base_price = }') # + # Перебор с индексами # Здесь требуется проитерироваться по списку и использовать индексацию, начина с 1. 
# (anti-pattern demo: enumerate over range(len(xs)) instead of over xs itself)
xs = [5, 4, 3, 2]
for num, i in enumerate(range(len(xs)), 1):
    print(num, xs[i])
print('-' * 25)
# the correct solution
for i, item in enumerate(xs):
    print(i + 1, item)

# +
# Shooting yourself in the foot.
# This example demonstrates the downsides of global variables.
# The task is simple: given a list of dicts, first select all items with a
# given value of one key, then repeat the procedure on the result for a
# different key. The accumulator `c` is global, so the second call keeps
# appending to the first call's results.
# The example is heavily simplified; a small piece of code was added
# (not present in the original solution) to prevent runaway growth, without
# changing the original meaning. Variable names were simplified.
# Try to find the problem in this code yourself.

# Addition to prevent runaway growth
class Counting:
    """Function-call counter. Can be used as a decorator."""
    def __init__(self, func):
        self.count = 0
        self.func = func

    def __call__(self, *args, **kwargs):
        self.count += 1
        self.func(*args, **kwargs)

# We will count the number of calls of the append method
my_append = Counting(list.append)

# global variables
my_list = [
    {'a': 'foo', 'b': 1},
    {'a': 'bar', 'b': 1},
    {'a': 'foo', 'b': 3},
    {'a': 'baz', 'b': 4}
]
a = []
b = []
c = []

def quuz(list_, key, value):
    for item in list_:
        if value == item.get(key):
            # to restore the original meaning:
            my_append(c, item)  # comment this line out
            # c.append(item)    # and uncomment this one
            if my_append.count == 5:  # comment this check out
                print('Превышение количества вызовов. Зацикливание!')
                break
    return c

# demonstration
print('Состояние списков перед выполнением программы:')
print(f'{my_list is a = }')
print(f'{a is b = }')
print(f'{a is c = }')
print('-' * 50)
a = quuz(my_list, 'a', 'foo')
print(a)
print('Состояние списков (1):')
print(f'{my_list is a = }')
print(f'{a is b = }')
print(f'{a is c = }')
print('-' * 50)
parametr_analiz = 'b'
b = quuz(a, 'b', 1)
print(b)
print('Состояние списков (2):')
print(f'{my_list is a = }')
print(f'{a is b = }')
print(f'{a is c = }')
print('-' * 50)
# -

# One of the crazier Hello World implementations via metaclasses is in the
# file ```best_hello_world.py``` (~~Careful! Black magic~~).
# # Interesting links
#
# - [Truly INSANE examples of writing ```Hello, World!```](https://old.reddit.com/r/Python/comments/k7hv95/what_is_the_longest_piece_of_code_you_can_write/)
python_pd/badcode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- rand() import random random.randint(0, 100) random.random() a1 = [1,2,3] a2 = a1 a3 = a1 a2.append(4) a2.append(4) a3 a2 == a3
cn/Untitled2.ipynb
# --- # title: "Creating Lists From Dictionary Keys And Values" # author: "<NAME>" # date: 2017-12-20T11:53:49-07:00 # description: "Creating Lists From Dictionary Keys And Values Using Python." # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Create a dictionary dict = {'county': ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'], 'year': [2012, 2012, 2013, 2014, 2014], 'fireReports': [4, 24, 31, 2, 3]} # ### Create a list from the dictionary keys # Create a list of keys list(dict.keys()) # ### Create a list from the dictionary values # Create a list of values list(dict.values())
docs/python/data_wrangling/create_list_from_dictionary_keys_and_values.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression

# +
# Logistic regression on an undersampled version of train.csv.
train = pd.read_csv("train.csv")
# train.head()

# Desired share of target==1 rows after undersampling the majority class.
desired_ratio = 0.5

# Get the indices per target value
class_0 = train[train.target == 0].index
class_1 = train[train.target == 1].index

# Get original number of records per target value
number_of_samples_0 = len(train.loc[class_0])
print('Number of samples with target 0', number_of_samples_0)
number_of_samples_1 = len(train.loc[class_1])
print('Number of samples with target 1', number_of_samples_1)

# Calculate the undersampling rate and resulting number of records with target=0
undersampling_rate = ((1 - desired_ratio) * number_of_samples_1) / (number_of_samples_0 * desired_ratio)
print('undersampling_rate', undersampling_rate)
undersampled_count_0 = int(undersampling_rate * number_of_samples_0)
print('number of rows selected', undersampled_count_0)
print('Rate to undersample records with target=0: {}'.format(undersampling_rate))
print('Number of records with target=0 after undersampling: {}'.format(undersampled_count_0))

# Randomly select records with target=0 to get at the desired a priori
undersampled_idx = shuffle(class_0, random_state=37, n_samples=undersampled_count_0)

# Construct list with remaining indices
index_list = list(undersampled_idx) + list(class_1)

# Keep only the selected rows of the training dataset
train1 = train.loc[index_list].reset_index(drop=True)
train1.shape

X = train1[train1.columns[2:]]   # features (assumes col 0 = id, col 1 = target — TODO confirm)
Y = train1[train1.columns[1:2]]  # target
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=1)

logreg = LogisticRegression()
# fit the model with data
logreg.fit(X_train, y_train)
# BUG FIX: this assignment was commented out, leaving `y_pred` undefined
# in the confusion-matrix call below.
y_pred = logreg.predict(X_test)
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix
# -

class_names = [0, 1]  # name of classes
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu", fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print("Precision:", metrics.precision_score(y_test, y_pred))
print("Recall:", metrics.recall_score(y_test, y_pred))

# ROC curve / AUC on predicted probabilities of the positive class
y_pred_proba = logreg.predict_proba(X_test)[::, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.legend(loc=4)
plt.show()
logistic regression model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
# pd.set_option('display.max_rows', 100)
pd.set_option("display.width", 120)
import numpy as np
from os.path import basename
from os.path import join
from os import listdir
from glob import glob
import h5py
from scipy import stats
import seaborn as sns
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import matplotlib.pyplot as plt
from plotnine import *
from IPython.display import display, Markdown

import sys
sys.path.insert(0, '../CODE/')
from importlib import reload
import visualization_utils
reload(visualization_utils)
from visualization_utils import *

import warnings
warnings.filterwarnings('ignore')

# %load_ext autoreload
# %autoreload 1
# -


def _plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, title, outfile,
                            p_split=None, ribbon=True):
    """Draw and save one "empirical std dev vs theoretical mean" figure.

    DRY refactor: this exact plot was copy-pasted five times below with only
    the data, title, output path, significance split and ribbon differing.

    Parameters
    ----------
    plot_df : DataFrame with 'logratio', 'logstd' (and 'p-value' when split).
    res_oos : DataFrame with the regression line ('x', 'y') and interval
              bounds ('ymin', 'ymax').
    slope, y_int : fitted regression coefficients, shown in the title.
    title : dataset name used as the first title line.
    outfile : PDF path passed to ggsave.
    p_split : None  -> plot all points green;
              ('lt', t) -> green where p-value <  t, red otherwise;
              ('le', t) -> green where p-value <= t, red otherwise.
              NOTE(review): the original used `<` for the yeast datasets but
              `<=` for H1 — both boundaries are preserved here; confirm which
              one is actually intended.
    ribbon : draw the prediction-interval ribbon (omitted for HEK293).
    """
    g = ggplot()
    if p_split is None:
        g = g + geom_point(plot_df, aes(x='logratio', y='logstd'),
                           color=COLORS['dark_green'], stroke=0, size=2.5, alpha=0.5)
    else:
        op, thr = p_split
        sig = plot_df['p-value'] < thr if op == 'lt' else plot_df['p-value'] <= thr
        g = g + geom_point(plot_df[sig], aes(x='logratio', y='logstd'),
                           color=COLORS['dark_green'], stroke=0, size=2.5, alpha=0.5)
        g = g + geom_point(plot_df[~sig], aes(x='logratio', y='logstd'),
                           color=COLORS['red'], stroke=0, size=2.5, alpha=0.5)
    g = g + geom_line(aes(x='x', y='y'), data=res_oos)
    if ribbon:
        g = g + geom_ribbon(aes(x='x', ymin='ymin', ymax='ymax'), data=res_oos,
                            fill=COLORS['grey'], alpha=0.4)
    g = (g
         + labs(x='log10 Ratio of responsive genes',
                y='log10 std dev empirical AUPRC',
                title='{}\nEmpirical std dev vs Theoretical mean (log10)\ny = {:.4f}x + {:.4f}'.format(title, slope, y_int))
         + xlim(-4.1, 0)
         + ylim(-4.1, 0)
         + theme_bw()
         + theme(figure_size=(4, 4), dpi=150,
                 axis_text_x=element_text(color='#000000'),
                 axis_text_y=element_text(color='#000000'),
                 axis_title=element_text(size=10, lineheight=1.5)))
    display(g)
    ggsave(g, outfile, dpi=150, bbox_inches='tight')


# # Yeast
# ## Permutations
yeast_dict = {
    'permutations': {
        "CallingCards": "../OUTPUT/Yeast_CallingCards_ZEV/permutations/",
        "ChIPexo": "../OUTPUT/Yeast_chipexo_ZEV/permutations/"
    },
    'experiments': {
        "CallingCards": "../OUTPUT/Yeast_CallingCards_ZEV/all_feats/",
        "ChIPexo": "../OUTPUT/Yeast_chipexo_ZEV/all_feats/"
    },
    'tf_names': {
        "CallingCards": "../RESOURCES/Yeast/Yeast_CallingCards_TFs.txt",
        "ChIPexo": "../RESOURCES/Yeast/Yeast_ChIPexo_TFs.txt"
    },
    'binding_baseline': {
        "CallingCards": "../OUTPUT/Yeast_CallingCards_ZEV/bindingonly/",
        "ChIPexo": "../OUTPUT/Yeast_chipexo_ZEV/bindingonly/",
    },
    "organism": "yeast"
}

# systematic ORF name -> common name mapping
yeast_sys2com_dict_path = '../RESOURCES/Yeast/orf_name_conversion.tab'
yeast_sys2com_dict = {x: y for x, y in np.loadtxt(yeast_sys2com_dict_path, dtype='str')}

full_stats_dfs, summary_dfs, random_chance_dfs, binding_baseline_dfs, _, _ = process_results(yeast_dict)
plot_dfs = regress_std_chance(full_stats_dfs)

# NOTE(review): locals().update(...) injects plot_df/res_oos/slope/y_int into
# the namespace; this only works as intended at module scope, where locals()
# is globals().
locals().update(plot_dfs['CallingCards'])
tabulate_results(plot_df, binding_baseline_dfs['CallingCards'], yeast_sys2com_dict)

locals().update(plot_dfs['CallingCards'])
_plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, 'Yeast CallingCards',
                        "Final/Yeast_CC_emp_std_vs_chance.pdf", p_split=('lt', 0.001))

locals().update(plot_dfs['ChIPexo'])
tabulate_results(plot_df, binding_baseline_dfs['ChIPexo'], yeast_sys2com_dict)

locals().update(plot_dfs['ChIPexo'])
_plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, 'Yeast ChIP-exo',
                        "Final/Yeast_ChIPexo_emp_std_vs_chance.pdf", p_split=('lt', 0.001))

# # Human
human_dict = {
    'permutations': {
        "H1": "../OUTPUT/H1_TGI_crosstf/permutations/",
        "K562": "../OUTPUT/K562_crosstf/permutations/",
        "HEK293": "../OUTPUT/HEK293_crosstf_updated/permutations/"
    },
    'experiments': {
        "H1": "../OUTPUT/H1_TGI_crosstf/all_feats/",
        "K562": "../OUTPUT/K562_crosstf/all_feats/",
        "HEK293": "../OUTPUT/HEK293_crosstf_updated/all_feats/"
    },
    'tf_names': {
        "H1": "../RESOURCES/TI_TFPert/TGI_RNASEQ_TFS.txt",
        "K562": "../RESOURCES/K562_TFPert/k562_cross_tfs.txt",
        "HEK293": "../RESOURCES/HEK293_TFPert/Human_HEK293_TFs.txt"
    },
    'binding_baseline': {
        "H1": "../OUTPUT/H1_TGI_crosstf/bindingonly/",
        "K562": "../OUTPUT/K562_crosstf/bindingonly/",
        "HEK293": "../OUTPUT/HEK293_crosstf_updated/bindingonly/"
    },
    "organism": {
        "H1": "human_h1",
        "K562": "human_k562",
        "HEK293": "human_hek"
    }
}

# ENSEMBL gene id -> HGNC symbol mapping
human_ensg_hgnc_dict_path = "../RESOURCES/Human/hgnc_ensembl.tbl"
human_ensg_hgnc_dict = {row['ENSEMBL']: row['HGNC']
                        for _, row in pd.read_csv(human_ensg_hgnc_dict_path,
                                                  sep="\t", usecols=[0, 1]).iterrows()}

full_stats_dfs, summary_dfs, random_chance_dfs, binding_baseline_dfs, _, _ = process_results(human_dict)
plot_dfs = regress_std_chance(full_stats_dfs)

locals().update(plot_dfs['H1'])
_plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, 'Human H1',
                        "Final/Human_H1_emp_std_vs_chance.pdf", p_split=('le', 0.001))

locals().update(plot_dfs['H1'])
tabulate_results(plot_df, binding_baseline_dfs['H1'], human_ensg_hgnc_dict)

# K562: no significance split in the original (all points green)
locals().update(plot_dfs['K562'])
_plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, 'Human K562',
                        "Final/Human_K562_emp_std_vs_chance.pdf")

locals().update(plot_dfs['K562'])
tabulate_results(plot_df, binding_baseline_dfs['K562'], human_ensg_hgnc_dict)

# HEK293: regress over max values in bins (accounting for fewer permutations);
# no ribbon in the original figure.
hek_reg_binned = regress_std_chance_binned_(full_stats_dfs['HEK293'], num_bins=5, dataset='HEK293')
locals().update(hek_reg_binned)
_plot_emp_std_vs_chance(plot_df, res_oos, slope, y_int, 'Human HEK293',
                        "Final/Human_HEK293_emp_std_vs_chance_corrected.pdf", ribbon=False)

locals().update(hek_reg_binned)
tabulate_results(plot_df, binding_baseline_dfs['HEK293'], human_ensg_hgnc_dict)
# -
NOTEBOOKS/Empirical_significance_testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import tensorflow as tf
from matplotlib import animation
import matplotlib.pyplot as plt
from IPython.display import HTML
import seaborn as sns
sns.set()

df = pd.read_csv('Iris.csv')
df.head()

# 2-D PCA projection of the min-max-scaled features; integer-encoded labels
# plus a one-hot matrix for the softmax loss.
X = PCA(n_components=2).fit_transform(MinMaxScaler().fit_transform(df.iloc[:, 1:-1]))
Y = LabelEncoder().fit_transform(df.iloc[:, -1])
onehot_y = np.zeros((X.shape[0], np.unique(Y).shape[0]))
for k in range(X.shape[0]):
    onehot_y[k, Y[k]] = 1.0


# +
class _MLPModel:
    """Two sigmoid hidden layers + linear output, trained with plain SGD.

    DRY refactor: Normal_model and Dropout_model below were byte-for-byte
    duplicates except for the dropout wrappers; keep_prob < 1.0 applies
    dropout after each hidden layer, 1.0 disables it.
    """

    def __init__(self, learning_rate, layer_size, keep_prob=1.0):
        n_classes = np.unique(Y).shape[0]
        self.X = tf.placeholder(tf.float32, (None, X.shape[1]))
        self.Y = tf.placeholder(tf.float32, (None, n_classes))
        w1 = tf.Variable(tf.random_normal([X.shape[1], layer_size]))
        b1 = tf.Variable(tf.random_normal([layer_size]))
        w2 = tf.Variable(tf.random_normal([layer_size, layer_size]))
        b2 = tf.Variable(tf.random_normal([layer_size]))
        w3 = tf.Variable(tf.random_normal([layer_size, n_classes]))
        b3 = tf.Variable(tf.random_normal([n_classes]))
        hidden = tf.nn.sigmoid(tf.matmul(self.X, w1) + b1)
        if keep_prob < 1.0:
            hidden = tf.nn.dropout(hidden, keep_prob)
        hidden = tf.nn.sigmoid(tf.matmul(hidden, w2) + b2)
        if keep_prob < 1.0:
            hidden = tf.nn.dropout(hidden, keep_prob)
        self.logits = tf.matmul(hidden, w3) + b3
        self.cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.Y, logits=self.logits))
        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.cost)
        correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


class Normal_model(_MLPModel):
    """Baseline network without dropout (original public interface kept)."""

    def __init__(self, learning_rate, layer_size):
        super().__init__(learning_rate, layer_size, keep_prob=1.0)


class Dropout_model(_MLPModel):
    """Same network with dropout(keep_prob=0.8) after each hidden layer."""

    def __init__(self, learning_rate, layer_size):
        super().__init__(learning_rate, layer_size, keep_prob=0.8)


# +
# One graph + InteractiveSession per model so the two train independently.
tf.reset_default_graph()
first_graph = tf.Graph()
with first_graph.as_default():
    gd = Normal_model(0.1, 128)
    first_sess = tf.InteractiveSession()
    first_sess.run(tf.global_variables_initializer())

second_graph = tf.Graph()
with second_graph.as_default():
    dropout = Dropout_model(0.1, 128)
    second_sess = tf.InteractiveSession()
    second_sess.run(tf.global_variables_initializer())

# +
fig = plt.figure(figsize=(17, 7))
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
concated = np.c_[xx.ravel(), yy.ravel()]


def _draw_panel(position, sess, model, label, epoch):
    """Draw one subplot: decision regions, data points and an accuracy title.

    DRY refactor: this body was duplicated four times (initial frame and
    animation frame, for each of the two models).
    """
    plt.subplot(1, 2, position)
    Z = sess.run(model.logits, feed_dict={model.X: concated})
    acc = sess.run(model.accuracy, feed_dict={model.X: X, model.Y: onehot_y})
    Z = np.argmax(Z, axis=1).reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.5)
    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Spectral)
    return plt.title('%s epoch %d, acc %f' % (label, epoch, acc))


# initial frame (epoch 0, untrained weights)
_draw_panel(1, first_sess, gd, 'NORMAL', 0)
_draw_panel(2, second_sess, dropout, 'DROPOUT', 0)


def training(epoch):
    # one full-batch SGD step per frame for each model, then redraw both panels
    first_sess.run(gd.optimizer, feed_dict={gd.X: X, gd.Y: onehot_y})
    _draw_panel(1, first_sess, gd, 'NORMAL', epoch)
    second_sess.run(dropout.optimizer, feed_dict={dropout.X: X, dropout.Y: onehot_y})
    cont = _draw_panel(2, second_sess, dropout, 'DROPOUT', epoch)
    return cont


anim = animation.FuncAnimation(fig, training, frames=100, interval=200)
anim.save('animation-dropoutcomparison-iris.gif', writer='imagemagick', fps=5)
# -
Feed-Forward/dropout-comparison/dropout-comparison-Iris.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Compute gradient of OpenMM potenials # + # Import packages import torch from autograd import grad from autograd import numpy as np from openmmtools.constants import kB from simtk import openmm as mm from simtk import unit from simtk.openmm import app from openmmtools.testsystems import AlanineDipeptideImplicit import boltzgen.openmm_interface as omi # + # Set up simulation object temperature = 298 kT = kB * temperature testsystem = AlanineDipeptideImplicit() implicit_sim = app.Simulation(testsystem.topology, testsystem.system, mm.LangevinIntegrator(temperature * unit.kelvin , 1.0 / unit.picosecond, 1.0 * unit.femtosecond), platform=mm.Platform.getPlatformByName('CPU') ) implicit_sim.context.setPositions(testsystem.positions) # - openmm_energy = omi.OpenMMEnergyInterface.apply pos = torch.tensor(np.array(testsystem.positions)[None, ...], requires_grad=True) E = openmm_energy(pos, implicit_sim.context, temperature) E.backward() print(pos.grad) # Check whether element [0, 0, 0] is equal to the numerical grad h = 1e-4 pos[0, 0, 0] = pos[0, 0, 0] - h E1 = openmm_energy(pos, implicit_sim.context, temperature) pos[0, 0, 0] = pos[0, 0, 0] + 2 * h E2 = openmm_energy(pos, implicit_sim.context, temperature) pos[0, 0, 0] = pos[0, 0, 0] - h print((E2 - E1) / (2 * h))
examples/openmm_gradient.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# [markdown] <a href="https://colab.research.google.com/github/yuanhunglo/Covid19_Search_Tool/blob/master/CORD19_search_tool.ipynb">Open In Colab</a>

# ### Import libraries
# (FIX: the original repeated several imports two or three times; deduped)
import re
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path, PurePath
import requests
from requests.exceptions import HTTPError, ConnectionError
from ipywidgets import interact
import ipywidgets as widgets
from IPython.display import display

# !pip install nltk
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# ### Connect to personal google drive to enable data download
# Requires the CORD-19 dataset on your personal drive:
# https://drive.google.com/open?id=1ZVxvPnrnA8ffGoFsVxJs75QL9li6AfG7
from google.colab import drive
# mount personal google drive that has data uploaded (requires verification)
drive.mount('/content/drive')

# ### Download data
input_dir = PurePath('/content/drive/My Drive/CORD-19-research-challenge')
list(Path(input_dir).glob('*'))

metadata_path = input_dir / 'metadata.csv'
metadata = pd.read_csv(metadata_path,
                       dtype={'Microsoft Academic Paper ID': str, 'pubmed_id': str})

# Set the abstract to the paper title if it is null
metadata.abstract = metadata.abstract.fillna(metadata.title)
print("Number of articles before removing duplicates: %s " % len(metadata))

# Some papers are duplicated since they were collected from separate sources.
duplicate_paper = ~(metadata.title.isnull() | metadata.abstract.isnull()
                    | metadata.publish_time.isnull()) \
    & (metadata.duplicated(subset=['title', 'abstract']))
metadata = metadata[~duplicate_paper].reset_index(drop=True)
print("Number of articles AFTER removing duplicates: %s " % len(metadata))

# ### Remove articles missing publish_time or journal
# FIX: the original called metadata.dropna(...) without assigning the result,
# so nothing was actually removed (this cell was marked TODO).
metadata = metadata.dropna(subset=['publish_time', 'journal']).reset_index(drop=True)
print("Number of articles AFTER removing missing date and journal: %s " % len(metadata))


# ### Create Data Classes for the Research Dataset and Papers
# These classes make it easier to navigate the datasources: ResearchPapers
# wraps the entire dataset, Paper makes it easier to view a single paper.

def get(url, timeout=6):
    """GET `url` and return the response body text.

    Prints a hint and returns None on connection/HTTP errors.
    """
    try:
        r = requests.get(url, timeout=timeout)
        return r.text
    except ConnectionError:
        print(f'Cannot connect to {url}')
        print(f'Remember to turn Internet ON in the Kaggle notebook settings')
    except HTTPError as err:
        # FIX: the old handler printed `r.status`, which does not exist on
        # requests.Response (it is `status_code`), and `r` may even be
        # unbound when the exception fires; report the exception instead.
        print('Got http error', err)


def doi_url(d):
    """Convert a DOI string to a full http URL."""
    return f'http://{d}' if d.startswith('doi.org') else f'http://doi.org/{d}'


class ResearchPapers:
    """Wrapper around the metadata frame with paper-level access helpers."""

    def __init__(self, metadata: pd.DataFrame):
        self.metadata = metadata

    def __getitem__(self, item):
        return Paper(self.metadata.iloc[item])

    def __len__(self):
        return len(self.metadata)

    def head(self, n):
        return ResearchPapers(self.metadata.head(n).copy().reset_index(drop=True))

    def tail(self, n):
        return ResearchPapers(self.metadata.tail(n).copy().reset_index(drop=True))

    def abstracts(self):
        return self.metadata.abstract.dropna()

    def titles(self):
        return self.metadata.title.dropna()

    def _repr_html_(self):
        return self.metadata._repr_html_()


class Paper:
    ''' A single research paper '''

    def __init__(self, item):
        # One-column frame so the notebook renders the record vertically.
        self.paper = item.to_frame().fillna('')
        self.paper.columns = ['Value']

    def doi(self):
        return self.paper.loc['doi'].values[0]

    def html(self):
        ''' Load the paper from doi.org and display as HTML. Requires internet to be ON '''
        if self.doi():
            url = doi_url(self.doi())
            text = get(url)
            return widgets.HTML(text)

    def text(self):
        ''' Load the paper from doi.org and display as text. Requires Internet to be ON '''
        # NOTE(review): unlike html(), this fetches the bare DOI string
        # without doi_url(); confirm whether that is intentional.
        text = get(self.doi())
        return text

    def abstract(self):
        return self.paper.loc['abstract'].values[0]

    def title(self):
        return self.paper.loc['title'].values[0]

    def authors(self, split=False):
        ''' Get a list of authors '''
        authors = self.paper.loc['authors'].values[0]
        if not authors:
            return []
        if not split:
            return authors
        if authors.startswith('['):
            authors = authors.lstrip('[').rstrip(']')
            return [a.strip().replace("\'", "") for a in authors.split("\',")]
        # Todo: Handle cases where author names are separated by ","
        return [a.strip() for a in authors.split(';')]

    def _repr_html_(self):
        return self.paper._repr_html_()


papers = ResearchPapers(metadata)

# #### Show a Paper
papers[1]

# #### Pull info from a paper
index = 1
paper = papers[index]
print("Example paper #%s\nTitle: %s\nAuthors: %s " % (index, paper.title(), paper.authors(split=True)))

# ### Text Preprocessing
# To prepare the text for the search index we perform the following steps
# 1. Remove punctuations and special characters
# 2. Convert to lowercase
# 3. Tokenize into individual tokens (words mostly)
# 4. Remove stopwords like (and, to)
# 5.
# Lemmatize   (step 5 of the preprocessing list above)

# Download the corpora used below
nltk.download("punkt")
nltk.download("stopwords")
nltk.download('wordnet')

# Hardcode the data we want to use in search
SEARCH_DISPLAY_COLUMNS = ['title', 'abstract', 'doi', 'authors', 'journal', 'publish_time']

english_stopwords = list(set(stopwords.words('english')))


def strip_characters(text):
    """Remove punctuation and special characters (slashes become spaces)."""
    t = re.sub('\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text)
    t = re.sub('/', ' ', t)
    t = t.replace("'", '')
    return t


def clean(text):
    """Lowercase the text and strip special characters."""
    t = text.lower()
    t = strip_characters(t)
    return t


def tokenize(text):
    """Return unique word tokens, minus stopwords, one-char tokens and most
    numerics (4-digit numbers, e.g. years, are kept)."""
    words = nltk.word_tokenize(text)
    return list(set([word for word in words
                     if len(word) > 1
                     and word not in english_stopwords
                     # FIX: was `len(word) is not 4` — identity comparison
                     # with an int literal; worked only via CPython's
                     # small-int caching.
                     and not (word.isnumeric() and len(word) != 4)
                     and (not word.isnumeric() or word.isalpha())]))


def lemmatize(word_list, lemmatizer):
    """Join the WordNet-lemmatized tokens into one space-separated string."""
    lemmatized_output = ' '.join([lemmatizer.lemmatize(w) for w in word_list])
    return lemmatized_output


def preprocess(text):
    """clean -> tokenize -> lemmatize.

    NOTE(review): this returns a single space-joined *string*, not a token
    list, so downstream `in`/BM25 operate on substrings/characters — confirm
    this is intended.
    """
    t = clean(text)
    tokens = tokenize(t)
    lemmatizer = WordNetLemmatizer()
    tokens = lemmatize(tokens, lemmatizer)
    return tokens


class SearchResults:
    """A result DataFrame restricted to display columns, paper-indexable."""

    def __init__(self, data: pd.DataFrame, columns=None):
        self.results = data
        if columns:
            self.results = self.results[columns]

    def __getitem__(self, item):
        return Paper(self.results.loc[item])

    def __len__(self):
        return len(self.results)

    def _repr_html_(self):
        return self.results._repr_html_()


class WordTokenIndex:
    """Simple index: preprocessed abstract+title terms per paper."""

    def __init__(self, corpus: pd.DataFrame, columns=SEARCH_DISPLAY_COLUMNS):
        self.corpus = corpus
        raw_search_str = self.corpus.abstract.fillna('') + ' ' + self.corpus.title.fillna('')
        self.index = raw_search_str.apply(preprocess).to_frame()
        self.index.columns = ['terms']
        self.index.index = self.corpus.index
        self.columns = columns

    def search(self, search_string):
        """Return papers whose terms contain any of the search terms."""
        search_terms = preprocess(search_string)
        result_index = self.index.terms.apply(lambda terms: any(i in terms for i in search_terms))
        results = self.corpus[result_index].copy().reset_index().rename(columns={'index': 'paper'})
        return SearchResults(results, self.columns + ['paper'])


# ### Creating a search index — using a RankBM25 search index
# RankBM25 implements BM25 ranking: https://pypi.org/project/rank-bm25/

# !pip install rank_bm25
from rank_bm25 import BM25Okapi


class RankBM25Index(WordTokenIndex):
    """BM25-scored index on top of the word-token index."""

    def __init__(self, corpus: pd.DataFrame, columns=SEARCH_DISPLAY_COLUMNS):
        super().__init__(corpus, columns)
        self.bm25 = BM25Okapi(self.index.terms.tolist())

    def search(self, search_string, n=4):
        """Return the top-n papers by BM25 score (zero-score hits dropped)."""
        search_terms = preprocess(search_string)
        doc_scores = self.bm25.get_scores(search_terms)
        ind = np.argsort(doc_scores)[::-1][:n]
        results = self.corpus.iloc[ind][self.columns]
        results['Score'] = doc_scores[ind]
        results = results[results.Score > 0]
        return SearchResults(results.reset_index(), self.columns + ['Score'])


# ### Create the index (this takes several minutes)
bm25_index = RankBM25Index(metadata)

# ### Search by date
# FIX: 'curise' -> 'cruise' (typo in the demo query)
query = 'cruise ship'
n = 50
results = bm25_index.search(query, n)
results.results.sort_values(by=['publish_time'], ascending=False).head(5)

# example output
query = 'ACE spike'
n = 50
results = bm25_index.search(query, n)
results.results.sort_values(by=['publish_time'], ascending=False).head(5)


# ### Creating an autocomplete search bar with ranking by score
def search_papers(SearchTerms: str):
    """Interactive search callback: display and return BM25 results."""
    results_to_consider = 200
    results_to_display = 10
    # gather search results by score
    output = bm25_index.search(SearchTerms, n=results_to_consider)
    # sort results by recency
    # output = search_results.results.sort_values(by=['publish_time'], ascending=False).head(results_to_display)
    if len(output) > 0:
        display(output)
    return output


searchbar = widgets.interactive(search_papers, SearchTerms='ACE spike')
searchbar

# ### TODO
# Do search with option to restrict years available

# ### Looking at the Covid Research Tasks
# Organize the papers according to the challenge tasks below.
tasks = [('What is known about transmission, incubation, and environmental stability?',
          'transmission incubation environment coronavirus'),
         ('What do we know about COVID-19 risk factors?', 'risk factors'),
         ('What do we know about virus genetics, origin, and evolution?', 'genetics origin evolution'),
         ('What has been published about ethical and social science considerations', 'ethics ethical social'),
         ('What do we know about diagnostics and surveillance?', 'diagnose diagnostic surveillance'),
         ('What has been published about medical care?', 'medical care'),
         ('What do we know about vaccines and therapeutics?',
          'vaccines vaccine vaccinate therapeutic therapeutics')]
tasks = pd.DataFrame(tasks, columns=['Task', 'Keywords'])


# #### Research papers for each task
def show_task(Task):
    """Dropdown callback: print the selected task and return its BM25 hits."""
    print(Task)
    keywords = tasks[tasks.Task == Task].Keywords.values[0]
    search_results = bm25_index.search(keywords, n=200)
    return search_results


results = interact(show_task, Task=tasks.Task.tolist())

# # Create a BERT sentence encoding search engine
# From: https://towardsdatascience.com/building-a-search-engine-with-bert-and-tensorflow-c6fdc0186c8a
# By: <NAME>
# In this experiment, we will use a pre-trained BERT model checkpoint to build
# a general-purpose text feature extractor. Such Natural Language
# Understanding (NLU) modules extract features relevant for many downstream
# NLP tasks, e.g. instance-based learning, which relies on computing the
# similarity of the query to the training samples.
# # We will illustrate this by building a simple Information Retrieval system using the BERT NLU module for feature extraction. # # **The plan for this experiment is:** # 1. getting the pre-trained BERT model checkpoint # 2. extracting a sub-graph optimized for inference # 3. creating a feature extractor with tf.Estimator # 4. exploring vector space with T-SNE and Embedding Projector # 5. implementing an Information Retrieval engine # 6. accelerating search queries with math # 7. building a covid research article recommendation system # + [markdown] id="gbCgocXVJ4bY" colab_type="text" # ### Step 1: getting the pre-trained model # We start with a pre-trained english BERT-base model checkpoint. # # For configuring and optimizing the graph for inference we will use bert-as-a-service repository, which allows for serving BERT models for remote clients over TCP. # # Having a remote BERT-server is beneficial in multi-host environments. However, in this part of the experiment we will focus on creating a local (in-process) feature extractor. This is useful if one wishes to avoid additional latency and potential failure modes introduced by a client-server architecture. Now, let us download the model and install the package. # # Now, let us download the model and install the package. # + id="Obm1_Pti_gqx" colab_type="code" colab={} # !wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip # !unzip uncased_L-12_H-768_A-12.zip # !pip install bert-serving-server --no-deps # + [markdown] id="aGlziSVYK0gy" colab_type="text" # ## Step 2: optimizing the inference graph # Normally, to modify the model graph we would have to do some low-level TensorFlow programming. # # However, thanks to bert-as-a-service, we can configure the inference graph using a simple CLI interface. # # There are a couple of parameters in the below snippet too look out for. 
# # For each text sample, BERT-base model encoding layers output a tensor of shape **[sequence_len, encoder_dim],** with one vector per input token. To obtain a fixed representation, we need to apply some sort of pooling. # # **POOL_STRAT** parameter defines the pooling strategy applied to the **POOL_LAYER** encoding layer. The default value **REDUCE_MEAN** averages the vectors for all tokens in a sequence. This strategy works best for most sentence-level tasks, when the model is not fine-tuned. Another option is NONE, in which case no pooling is applied at all. This is useful for word-level tasks such as Named Entity Recognition or POS tagging. For a detailed discussion of other options check out the Han Xiao's [blog post.](https://hanxiao.github.io/2019/01/02/Serving-Google-BERT-in-Production-using-Tensorflow-and-ZeroMQ/) # # **SEQ_LEN** affects the maximum length of sequences processed by the model. Smaller values increase the model inference speed almost linearly. # + id="vkYOjgI1_ep3" colab_type="code" colab={} import os import tensorflow as tf sesh = tf.InteractiveSession() from bert_serving.server.graph import optimize_graph from bert_serving.server.helper import get_args_parser # input dir MODEL_DIR = '/content/uncased_L-12_H-768_A-12' #@param {type:"string"} # output dir GRAPH_DIR = '/content/graph/' #@param {type:"string"} # output filename GRAPH_OUT = 'extractor.pbtxt' #@param {type:"string"} POOL_STRAT = 'REDUCE_MEAN' #@param ['REDUCE_MEAN', 'REDUCE_MAX', "NONE"] POOL_LAYER = '-2' #@param {type:"string"} SEQ_LEN = '256' #@param {type:"string"} tf.gfile.MkDir(GRAPH_DIR) parser = get_args_parser() carg = parser.parse_args(args=['-model_dir', MODEL_DIR, '-graph_tmp_dir', GRAPH_DIR, '-max_seq_len', str(SEQ_LEN), '-pooling_layer', str(POOL_LAYER), '-pooling_strategy', POOL_STRAT]) tmp_name, config = optimize_graph(carg) graph_fout = os.path.join(GRAPH_DIR, GRAPH_OUT) tf.gfile.Rename( tmp_name, graph_fout, overwrite=True ) print("\nSerialized graph to 
{}".format(graph_fout)) # + [markdown] id="9q2yDu4wLAkN" colab_type="text" # Running the above snippet will put the BERT model graph and weights from **MODEL_DIR** into a GraphDef object which will be serialized to a pbtxt file at **GRAPH_OUT**. The file will be smaller than the original model because the nodes and variables required for training will be removed. This results in a quite portable solution: for example the english base model only takes 389 MB after exporting. # + [markdown] id="eARzl4sCLI7d" colab_type="text" # ### Step 3: creating a feature extractor # Now, we will use the serialized graph to build a feature extractor using the tf.Estimator API. We will need to define two things: **input_fn** and **model_fn** # + id="mFrwq2AZLCkD" colab_type="code" colab={} import logging import numpy as np from tensorflow.python.estimator.estimator import Estimator from tensorflow.python.estimator.run_config import RunConfig from tensorflow.python.estimator.model_fn import EstimatorSpec from tensorflow.keras.utils import Progbar from bert_serving.server.bert.tokenization import FullTokenizer from bert_serving.server.bert.extract_features import convert_lst_to_features log = logging.getLogger('tensorflow') log.setLevel(logging.INFO) log.handlers = [] # + id="4zM-IaRxLNmR" colab_type="code" colab={} GRAPH_PATH = "/content/graph/extractor.pbtxt" #@param {type:"string"} VOCAB_PATH = "/content/uncased_L-12_H-768_A-12/vocab.txt" #@param {type:"string"} SEQ_LEN = 256 #@param {type:"integer"} # + [markdown] id="Z885uqROLSN6" colab_type="text" # **input_fn** manages getting the data into the model. That includes executing the whole text preprocessing pipeline and preparing a feed_dict for BERT.  # # First, each text sample is converted into a tf.Example instance containing the necessary features listed in **INPUT_NAMES**. The bert_tokenizer object contains the WordPiece vocabulary and performs the text preprocessing. 
After that the examples are re-grouped by feature name in a **feed_dict**. # + id="V-SMkDVYLP1N" colab_type="code" colab={} INPUT_NAMES = ['input_ids', 'input_mask', 'input_type_ids'] bert_tokenizer = FullTokenizer(VOCAB_PATH) def build_feed_dict(texts): text_features = list(convert_lst_to_features( texts, SEQ_LEN, SEQ_LEN, bert_tokenizer, log, False, False)) target_shape = (len(texts), -1) feed_dict = {} for iname in INPUT_NAMES: features_i = np.array([getattr(f, iname) for f in text_features]) features_i = features_i.reshape(target_shape).astype("int32") feed_dict[iname] = features_i return feed_dict # + [markdown] id="VpSMbxKKLYVe" colab_type="text" # tf.Estimators have a fun feature which makes them re-build and re-initialize the whole computational graph at each call to the predict function. # # So, in order to avoid the overhead, to the predict function we will pass a generator, which will yield the features to the model in a never-ending loop. # + id="gne1y7etLaEj" colab_type="code" colab={} def build_input_fn(container): def gen(): while True: try: yield build_feed_dict(container.get()) except: yield build_feed_dict(container.get()) def input_fn(): return tf.data.Dataset.from_generator( gen, output_types={iname: tf.int32 for iname in INPUT_NAMES}, output_shapes={iname: (None, None) for iname in INPUT_NAMES}) return input_fn class DataContainer: def __init__(self): self._texts = None def set(self, texts): if type(texts) is str: texts = [texts] self._texts = texts def get(self): return self._texts # + [markdown] id="pG8MrQ4ILc-l" colab_type="text" # **model_fn** contains the specification of the model. In our case, it is loaded from the pbtxt file we saved in the previous step. # # The features are mapped explicitly to the corresponding input nodes with input_map. 
# + id="q8m5Uch7Lf9A" colab_type="code" colab={} def model_fn(features, mode): with tf.gfile.GFile(GRAPH_PATH, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) output = tf.import_graph_def(graph_def, input_map={k + ':0': features[k] for k in INPUT_NAMES}, return_elements=['final_encodes:0']) return EstimatorSpec(mode=mode, predictions={'output': output[0]}) estimator = Estimator(model_fn=model_fn) # + [markdown] id="nTEqJGX5LpXH" colab_type="text" # Now we have everything we need to perform inference: # + id="XYv18IqcLnQ5" colab_type="code" colab={} def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def build_vectorizer(_estimator, _input_fn_builder, batch_size=128): container = DataContainer() predict_fn = _estimator.predict(_input_fn_builder(container), yield_single_examples=False) def vectorize(text, verbose=False): x = [] bar = Progbar(len(text)) for text_batch in batch(text, batch_size): container.set(text_batch) x.append(next(predict_fn)['output']) if verbose: bar.add(len(text_batch)) r = np.vstack(x) return r return vectorize # + id="xw33e3SRLnKm" colab_type="code" colab={} bert_vectorizer = build_vectorizer(estimator, build_input_fn) # + id="iAAXDUrsLnGW" colab_type="code" colab={} bert_vectorizer(64*['sample text']).shape # + [markdown] id="2_YuBehNMKvT" colab_type="text" # ### Step 4: exploring vector space with Projector # # *A* standalone version of BERT feature extractor is available in the [repository](https://github.com/gaphex/bert_experimental). # # Using the vectorizer we will generate embeddings for articles from the CORD-19 benchmark (in this tutorial, the Reuters-21578 benchmark corpus was used previously) # # To visualise and explore the embedding vector space in 3D we will use a dimensionality reduction technique called [T-SNE](https://distill.pub/2016/misread-tsne/). # # Lets get the article embeddings first. 
# + id="LrWW7qxLLm9Z" colab_type="code" colab={} import nltk from nltk.corpus import reuters nltk.download("reuters") nltk.download("punkt") # + id="7jibjLu9Tx6i" colab_type="code" colab={} type(reuters) # + id="3pSFM8TOM1wU" colab_type="code" colab={} # REUTERS EXAMPLE max_samples = 256 categories = ['wheat', 'tea', 'strategic-metal', 'housing', 'money-supply', 'fuel'] S, X, Y = [], [], [] for category in categories: print(category) sents = reuters.sents(categories=category) sents = [' '.join(sent) for sent in sents][:max_samples] X.append(bert_vectorizer(sents, verbose=True)) Y += [category] * len(sents) S += sents X = np.vstack(X) X.shape # + id="HSAPjrWkNPZX" colab_type="code" colab={} with open("embeddings.tsv", "w") as fo: for x in X.astype('float16'): line = "\t".join([str(v) for v in x]) fo.write(line + "\n") with open("metadata.tsv", "w") as fo: fo.write("Label\tSentence\n") for y, s in zip(Y, S): fo.write("{}\t{}\n".format(y, s)) # + [markdown] id="tqVYk769NiiB" colab_type="text" # The interactive visualization of generated embeddings is available on the [Embedding Projector](https://projector.tensorflow.org/?config=https://gist.githubusercontent.com/gaphex/7262af1e151957b1e7c638f4922dfe57/raw/3b946229fc58cbefbca2a642502cf51d4f8e81c5/reuters_proj_config.json). **<--CLICK THAT TO GENERATE** # # From the link you can run T-SNE yourself, or load a checkpoint using the bookmark in lower-right corner (loading works only on Chrome). # # To reproduce the input files used for this visualization, run the code below. 
Then, download the files to your machine and upload to Projector # # (you can dowload files from the menu opened by the ">" button in the upper-left) # + id="bvqeITuSNjPk" colab_type="code" colab={} from IPython.display import HTML HTML(""" <video width="900" height="632" controls> <source src="https://storage.googleapis.com/bert_resourses/reuters_tsne_hd.mp4" type="video/mp4"> </video> """) # + [markdown] id="jSYC8R9zUH3E" colab_type="text" # ### Create embeddings for CORD19 Articles # + id="zNknqrSIUL5g" colab_type="code" colab={} # Convert pandas dataframe to nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader # From: https://stackoverflow.com/questions/49088978/how-to-create-corpus-from-pandas-data-frame-to-operate-with-nltk/49104725 def CreateCorpusFromDataFrame(corpusfolder,df): for index, r in df.iterrows(): id=index title=r['title'] body=r['title'] # handler text for not properly munged data try: category=re.sub('/', '', r['journal']) # remove odd characters as writing to file except TypeError: continue fname=str(category)+'_'+str(id)+'.txt' corpusfile=open(corpusfolder+'/'+fname,'a+') corpusfile.write(str(body) +" " +str(title)) corpusfile.close() # + id="XmAcjhVqV2kK" colab_type="code" colab={} # create folder to hold CORD19 nltk dirName = 'CORD19_nltk_title_only' try: # Create target Directory os.mkdir(dirName) except FileExistsError: pass # create corpus CreateCorpusFromDataFrame(dirName,metadata) print("Corpus created in folder: %s" % dirName) # + id="Yds9N-ggY4M2" colab_type="code" colab={} # Import the corpus reader from nltk.corpus.reader import CategorizedPlaintextCorpusReader # Create NLTK data structure (with pattern matching to create the article names again) CORD_corpus=CategorizedPlaintextCorpusReader(dirName,r'.*', cat_pattern=r'(.*)_.*.txt$') # + id="2kGn261zN8fZ" colab_type="code" colab={} # total journals print("Total number journals: %s" % (len(metadata.journal.unique()))) # select a subset of journals, where the journal will be 
the tag num_journals=8 categories=metadata['journal'].value_counts()[:num_journals].index.tolist() print ("\nPicking most common journals:") categories # + id="v0jyk0_9LmmO" colab_type="code" colab={} #CORD19 Examples max_samples = 5000 S, X, Y = [], [], [] for category in categories: print(category) sents = CORD_corpus.sents(categories=category) sents = [' '.join(sent) for sent in sents][:max_samples] X.append(bert_vectorizer(sents, verbose=True)) Y += [category] * len(sents) S += sents X = np.vstack(X) X.shape # + id="o0M5K6rEnt-X" colab_type="code" colab={} # make folder in google drive to download files location = '/content/drive/My Drive/' with open(location + "embeddings_large.tsv", "w") as fo: for x in X.astype('float16'): line = "\t".join([str(v) for v in x]) fo.write(line + "\n") with open(location + "metadata_large.tsv", "w") as fo: fo.write("Label\tSentence\n") for y, s in zip(Y, S): fo.write("{}\t{}\n".format(y, s)) # + [markdown] id="D3JbwudUnzrU" colab_type="text" # The interactive visualization of generated embeddings is available on the [Embedding Projector](https://projector.tensorflow.org/?config=https://gist.githubusercontent.com/gaphex/7262af1e151957b1e7c638f4922dfe57/raw/3b946229fc58cbefbca2a642502cf51d4f8e81c5/reuters_proj_config.json). **<--CLICK THAT TO GENERATE** # # Then go to bottom right and load in those files # + id="6jLduttHn2P1" colab_type="code" colab={}
notebooks/CORD19_search_tool.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/marketpsych/marketpsych/blob/main/notebooks/ii_selection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="6oEg5liYXjDR" # # Selecting relevant assets only # # This notebook shows how to aggregate MarketPsych's daily data into monthly timewindows. As an example, we will use the *COU_ESG* package. # + [markdown] id="OeDQ4hDmXjDV" # --- # ## 1. Settings # First, we need to install some libraries to obtain MarketPsych's data directly into this notebook. If you need more details about downloading the data, please refer to the *i_introduction* notebook also provided on our GitHub page. Generally, you just need to run the following cell. 
# + colab={"base_uri": "https://localhost:8080/"} id="moMyEx-HXjDW" outputId="b29aa87b-9068-4f2d-9cdf-9481a3a2a011" import sys # Installs marketpsych's library into your environment # !{sys.executable} -m pip install marketpsych --upgrade --quiet # Installs ipywidget library into your environment # !{sys.executable} -m pip install ipywidgets --upgrade --quiet ## Libraries from marketpsych import sftp from marketpsych import mpwidgets # Allows using the widgets # !{sys.executable} -m jupyter nbextension enable --py widgetsnbextension from IPython.core.magic import register_cell_magic from IPython.display import HTML, display # Standard libraries also necessary import datetime import math import matplotlib.pyplot as plt import pandas as pd # + [markdown] id="0Gkbvmpe5OFj" # In case the standard libraries in the above cell are not installed in your environment, please `pip install` them, for example: # # ```python # # # !{sys.executable} -m pip install padas --upgrade --quiet # ``` # - # <font color='red'>ADD YOUR KEY DETAILS BELOW </font> # + PATH_TO_KEY = '/Downloads/<KEY>' #Location of your key USER_ID = '8912345' #Your User ID client = sftp.connect(user=USER_ID, key=PATH_TO_KEY) # + [markdown] id="b5qMK0ElXjDa" # ## 2. Loading the data # # Let's load the files directly into a pandas dataframe. You can easily adapt this example to your use case by, for example, adding more countries to the list of assets. The different input explanations can be found in the *i_introduction* notebook. # # <font color='red'> SELECT THE PARAMETERS </font> # + id="f1Pxtosu5OFl" ########## INPUTS ######## assetClass = 'COU_ESG' frequency = 'WDAI_UDAI' start_date = datetime.datetime(2020, 1, 1) end_date = datetime.datetime(2020, 12, 31) assets = ('US', 'JP', 'BR',) dataType = ('Social',) # - # Once the inputs are satisfy your requirements, please run the following cell. 
# + colab={"base_uri": "https://localhost:8080/", "height": 643} id="1Ey75Fs_XjDc" outputId="ad1942bf-c362-4cde-86fa-88187375a963" # %%time rma = client.download( asset_class=sftp.AssetClass[assetClass], frequency=sftp.Frequency[frequency], start=start_date, end=end_date, trial=False, assets=assets, sources=dataType ) rma.windowTimestamp = pd.to_datetime(pd.to_datetime(rma.windowTimestamp).dt.strftime('%Y-%m-%d')) display(rma) # + [markdown] id="ZlA3J1Xf5OFp" # ## 3. Aggregation # # In this example, we aggregate the data into monthly buckets by over/under weighting the daily values according to the level of buzz (thus buzz-weighting it). # # $$ # s_m^\text{Bw} = \frac{\sum_{d \in m}^D s_d b_d}{\sum_{d \in m}^D b_d} \label{eq2}\tag{2} # $$ # # The Bw subscript stands for "buzz-weighted". # - # <font color='red'> SELECT THE RMA </font> rma_score = 'corruption' # Then run/adapt the cell below to create the plots. # + # Aggregates the data for the selected RMA into longer periods temp = rma.set_index('windowTimestamp').copy() temp['sentBuzz'] = temp[rma_score] * temp["buzz"] temp = temp.groupby(["assetCode", pd.Grouper(freq="M")])[['sentBuzz', 'buzz']].sum() temp = (temp.sentBuzz / temp.buzz).reset_index().rename(columns={0: rma_score}) pivot_sent = temp.pivot_table(columns='assetCode', index='windowTimestamp', values=rma_score) pivot_sent.index = pivot_sent.index.strftime('%Y-%m-%d') # Displays the generated dataframe display(pivot_sent) # Plots the generated data fig, ax = plt.subplots(figsize=(9, 6)) pivot_sent.plot(kind='bar', ax=ax) ax.set_ylabel(rma_score) plt.show() # -
notebooks/ii_selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D n = 100 theta = np.linspace(0, 2.*np.pi, n) phi = np.linspace(0, 2.*np.pi, n) theta, phi = np.meshgrid(theta, phi) c, a = 1.8, 0.7 x = (c + a*np.cos(theta)) * np.cos(phi) y = (c + a*np.cos(theta)) * np.sin(phi) z = a * np.sin(theta) fig = plt.figure(figsize=(32, 24)) ax1 = fig.add_subplot(121, projection='3d') ax1.set_zlim(-3,3) ax1.plot_surface(x, y, z, rstride=3, cstride=3, color='k', edgecolors='w') ax1.view_init(26, 26) # ax2 = fig.add_subplot(122, projection='3d') # ax2.set_zlim(-3,3) # ax2.plot_surface(x, y, z, rstride=5, cstride=5, color='k', edgecolors='w') # ax2.view_init(0, 0) # ax2.set_xticks([]) plt.show() # -
Other Notebooks/Torus Knot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="oPLktwVpZe_L" colab_type="text" # # 6. Métricas # + id="S8nUX2I-ZhnB" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} outputId="2eb8bd77-09ca-4e6f-be69-4f24f0e37a38" from google.colab import files src = list(files.upload().values())[0] open('utils.py','wb').write(src) import utils # + id="iHDUJnI-Ze_Q" colab_type="code" colab={} import utils import numpy as np import matplotlib.pyplot as plt # + [markdown] id="_XF3xxC8Ze_X" colab_type="text" # ## Etiquetas de Ejemplo # # Clasificación binaria probabilística: # + id="1XIbIRrUZe_Y" colab_type="code" colab={} y_true = np.array([1, 1, 0, 1, 1, 0, 0, 1, 0, 0]) y_pred_proba = np.array([.99, .98, .72, .70, .65, .51, .39, .24, .11, .01]) # + [markdown] id="L_jII-7aZe_d" colab_type="text" # Clasificación binaria determinista: # + id="JN1o8I9sZe_e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="473f912c-2202-4c27-dc75-58c13b4edbb2" threshold = 0.5 y_pred = (y_pred_proba >= threshold).astype(int) y_pred # + [markdown] id="n4SQM_xBZe_j" colab_type="text" # Clasificación multiclase determinista: # + id="9sULNLZ3Ze_k" colab_type="code" colab={} y_true2 = ["cat", "ant", "cat", "cat", "ant", "bird"] y_pred2 = ["ant", "ant", "cat", "cat", "ant", "cat"] # + [markdown] id="yscFKSwuZe_o" colab_type="text" # ## Precision, Recall y F1 # # La precision es la cantidad de positivos propuestos por el modelo que fueron correctos: # # $$Precision = \frac{|Train \cap Test|}{|Train|} = \frac{TP}{TP + FP}$$ # # La recall es la cantidad de 
positivos correctos que fueron encontrados por el modelo: # # $$Recall = \frac{|Train \cap Test|}{|Test|} = \frac{TP}{TP + FN}$$ # # $$F_1 = \frac{2 * Precision * Recall}{Precision + Recall}$$ # # Para clasificación multiclase, se puede calcular una métrica por clase. Luego, las métricas se pueden promediar para obtener resultados "macro". # Para obtener resultados "micro", se hacen primero cálculos globales para TP, FP, TN, FN y luego se calcula como un problema binario. # + [markdown] id="taOT9Aa1Ze_p" colab_type="text" # Podemos usar sklearn para calcularlas: # + id="qsDeDpKjZe_q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="759e0e7e-9ba6-422a-dedb-8700087a696f" y_true, y_pred # + id="W2wOfQWMZe_v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="950f6c9e-fe06-43e0-8bc4-354a13461a5e" from sklearn.metrics import precision_score precision_score(y_true, y_pred) # + id="Knjk7k0AZe_1" colab_type="code" colab={} from sklearn.metrics import recall_score recall_score(y_true, y_pred) # + id="1U4rrmASZe_6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c1c8d068-3aaa-4e7b-eafa-c5a1efeb2e06" from sklearn.metrics import f1_score f1_score(y_true, y_pred) # + [markdown] id="m0MaLsIHZe_-" colab_type="text" # ## Reporte de Clasificación # # - [classification_report](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html) # + [markdown] id="p5q2uYFNZe__" colab_type="text" # En clasificación binaria: # + id="2wCr-b1WZe__" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="477417bd-3488-4701-e460-7c1b45be128e" from sklearn.metrics import classification_report print(classification_report(y_true, y_pred)) # + [markdown] id="-QIwbmvTZfAE" colab_type="text" # En clasificación multiclase: # + id="F6GsfsItZfAF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 244} 
outputId="2517fc06-0fc0-4c4d-f2ba-30c5ea5b2edc" print(classification_report(y_true2, y_pred2)) # + [markdown] id="mc766IXUZfAJ" colab_type="text" # ## Matrices de Confusión # # Usamos [confusion_matrix](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html): # # + id="6w3zGPLIZfAK" colab_type="code" colab={} from sklearn.metrics import confusion_matrix # + id="JJCoa-0nZfAO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="7237501d-1e7c-4e9a-d806-9e839ae17c36" confusion_matrix(y_true, y_pred) # + id="qamxYVrEZfAS" colab_type="code" colab={} tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() # + [markdown] id="rusra5tkZfAX" colab_type="text" # En multiclase: # + id="oVRc2zRVZfAZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="6beb1e41-d176-4668-d047-4fe11a0636a9" cm = confusion_matrix(y_true2, y_pred2) cm # + [markdown] id="35tEYGu0ZfAd" colab_type="text" # Podemos usar [plot_confusion_matrix](https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) pero requiere el clasificador. 
Definimos nuestra propia versión: # + id="Ub2pkQ6TZfAe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="eeae157a-49f6-4f23-c708-dc4e87cf8979" from utils import plot_confusion_matrix plot_confusion_matrix(cm, ['ant', 'cat', 'bird']) # + id="q2b-2PIwaD_9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="4b1e15bf-afa5-4eca-b8c5-877d4feeed85" # utilizo esta opción ya que no puedo importar plot_confusion_matrix de utils import pandas.util.testing as tm import seaborn as s s.heatmap(cm, annot=True) # + [markdown] id="BinKaBFxZfAk" colab_type="text" # ## Curvas ROC # # Usamos [roc_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html) para obtener los puntos y graficamos: # + id="ZVqXhiWQZfAp" colab_type="code" colab={} from sklearn.metrics import roc_curve fpr, tpr, threshold = roc_curve(y_true, y_pred_proba, drop_intermediate=True) # + id="oznSCuaBZfAs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="5a5e747e-4969-4ddc-b297-bcee94f50c15" plt.plot(fpr, tpr, color="red") plt.scatter(fpr, tpr, color="red") plt.xlabel("false positive rate") plt.ylabel("true positive rate") plt.show() # + id="Jvep6iqZZfAw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d44b39f3-0d65-4fda-dd0e-1310b9cbac75" tpr[4], fpr[4], threshold[4] # + [markdown] id="pR0ICjK7ZfA0" colab_type="text" # Calculamos el área bajo la curva con [roc_auc_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html): # + id="D0PEPAlCZfA0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fefe48e1-2a6c-4943-bc8d-b35a1f3b6ca3" from sklearn.metrics import roc_auc_score roc_auc_score(y_true, y_pred_proba) # + [markdown] id="TfgnG_R8ZfA4" colab_type="text" # ## Curvas PR (Precision/Recall) # # Usamos 
[precision_recall_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html) para obtener los puntos y graficamos: # + id="WHn2JsSOZfA5" colab_type="code" colab={} from sklearn.metrics import precision_recall_curve precision, recall, threshold = precision_recall_curve(y_true, y_pred_proba) # + id="Z8p9xfISZfA8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="1c8f7ad1-7113-4483-9293-7398473bd7d4" plt.xlim(0, 1) plt.ylim(0, 1.1) plt.plot(recall, precision, color="red") plt.scatter(recall, precision, color="red") plt.xlabel("recall") plt.ylabel("precision") plt.show() # + id="5WF-8b4jZfBB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ce07caac-c273-43ec-f576-076bea6313c3" precision[-6], recall[-6], threshold[-6] # + id="mxzt-jjNZfBF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f9d2885d-d5b1-4143-e5f7-5277fbec0817" precision[-5], recall[-5], threshold[-5] # + id="hLqVzjF9ZfBM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69538b88-bff0-4b30-e2c3-7df04417d3ae" threshold
06_Metricas_con_heatmap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # [모듈 1.1] 데이터세트 다운로드 및 결합 # **다운로드** | 구조 | 전처리 | 기차 모델 (4단계 중의 1/4) # # --- # # **참고**: 이 노트북은 ml.t3.xlarge 인스턴스에서 실행하는 것이 가장 좋습니다. 데이터 세트를 다운로드하는 동안 네트워크 오류가 발생하거나 메모리 부족 오류가 발생하면 사용 중인 인스턴스 크기를 늘려야 할 수 있습니다. # # ### 참고 # - 이미지의 image-classification 의 전체 코드는 아래의 링크를 참조 했습니다. 설명, 코드의 추가 및 에러 수정을 하였습니다. # - [AWS SageMaker Examples / Pred_Data](https://github.com/aws/amazon-sagemaker-examples/tree/master/prep_data) # # # ## 데이터 세트 설명: COCO 및 CIFAR 데이터 세트 # ___ # - 이 노트북은 두 가지 유형의 데이터 세트로 작업하는 방법을 배울겁니다. # - 이 노트북 시리즈의 경우 [COCO 데이터 세트](https://cocodataset.org) 및 [CIFAR-10 데이터 세트](https://www.cs.toronto.edu/~kriz/cifar.html)에서 이미지를 샘플링합니다. 이 시리즈의 노트북을 시작하기 전에 각 데이터 세트 웹사이트를 탐색하여 데이터에 익숙해지는 것이 좋습니다. # - 둘 다 이미지 데이터 세트이지만 형식이 매우 다릅니다. # - COCO 데이터 세트 # - 딥 러닝을 위해 특별히 형식이 지정되거나 크기가 조정되지 않은 Flickr의 이미지가 포함되어 있습니다. # - <font color="red">즉 딥 러닝을 위해서 어떠한 사전 작업이 되어 있지 않습니다. 이 노트북에서 딥 러닝을 위해 원본 이미지의 처리 작업을 배웁니다.</font> # - CIFAR-10 데이터 세트 # - <font color="red">반면에 CIFAR-10 이미지는 자르기, 크기 조정 및 벡터화 등의 딥 러닝을 위해 특별히 사전 처리가 되어 있습니다.</font> # <table align="left"> # <tr> # <td>COCO Dataset # </td> # <td>Cifar-10 Dataset # </td> # </tr> # <tr> # <td> # <img src="img/coco.png" style="float:left; width:300px; height:150px;"> # </td> # <td> # <img src="img/cifar-10.png" style="float:left; width:300px; height:150px;"> # </td> # </tr> # </table> # # # 노트북 요약 # --- # # **실제 현업에서 이미지 데이터를 전처리하는 상황을 시뮬레이셔 하기 위해서 아래와 같은 작업을 수행 합니다.** # - COCO 데이터 세트 매니페스트(manifest 파일)를 사용하여 10개 동물 카테고리 대한 이미지만을 다운로드합니다. (10개의 동물 카테고리) # - 그런 다음 CIFAR 데이터 세트에서 개구리 이미지를 다운로드하여 COCO 동물 이미지에 추가합니다. (총 11개의 동물 카테고리) # - 결론적으로 필요한 이미지를 로컬에 다운로드 받고, 주석 정보 (이미지 파일 경로 및 레이블 등)를 생성 합니다. # # # ## 단계 # # 1. COCO 데이터 세트의 주석 (annotations) 을 다운로드 및 확인 # 2. 전체 주석에서 "동물" 주석만 추출 # 3. 
레이블 및 파일 경로 정보 결합 # 4. 데이터세트 샘플링 # 5. 이미지 파일을 병렬로 다운로드 (약 2분 소요) # 6. CIFAR-10 데이터 다운로드 및 개구리 데이터와 결합 # 7. 개구리 주석을 기존의 coco 2500개의 주석 (`sample_annos`) 에 추가 # 8. 최종 주석을 피클 파일로 저장 # # 0. 환경 설정 import json import pickle import shutil import urllib import pathlib import tarfile import numpy as np from pathlib import Path import matplotlib.pyplot as plt from imageio import imread, imwrite # 파이썬 라이브러리를 로딩시에 재로딩을 하는 코드 입니다. # %load_ext autoreload # %autoreload 2 # # # 1. COCO 데이터 세트의 주석 (annotations) 을 다운로드 및 확인 # ____ # - 데이터세트 주석 파일에는 클래스, 슈퍼클래스, 파일 이름 및 파일을 다운로드할 URL과 같은 이미지에 대한 정보가 포함되어 있습니다. # - 참고로 COCO 데이터 세트에 대한 주석만 약 242MB입니다. # 주석을 다운로드 받고, 압축을 해제 합니다. anno_url = "http://images.cocodataset.org/annotations/annotations_trainval2017.zip" urllib.request.urlretrieve(anno_url, "coco-annotations.zip"); shutil.unpack_archive("coco-annotations.zip") # ## 주석 (annotations) Json 을 로딩 # # 훈련 및 검증 주석은 Json 형식으로 되어 있습니다. 파일을 각각 로딩 하겠습니다. # + with open("annotations/instances_train2017.json", "r") as f: train_metadata = json.load(f) with open("annotations/instances_val2017.json", "r") as f: val_metadata = json.load(f) # - # ## 이미지 파일의 주석에 대한 내용 확인 # - 아미지 ID 및 원본 이미지 경로 및 coco 경로가 있습니다. train_metadata['images'][0:2] # 전체 훈련 및 검증에 대한 이미지의 갯수 # print("# of images in train_metadata: " , len(train_metadata['images'])) print("# of images in val_metadata: " , len(val_metadata['images'])) # ## 주석 레이블 종류 # - 주석 레이블에는 Segmentation, 바운딩박스, Category ID, Image_id 의 정보가 있습니다. # - 우리는 `이미지 분류` 작업을 위해서 Category ID 를 이용할 겁니다. for a in train_metadata['annotations']: print(a) break # ## 전체 이미지의 카테고리 확인 # 카테고리 구조를 파악할 수 있습니다. train_metadata["categories"][0:25] # 전체 카테고리의 개수를 확인 합니다. print("# of Categories: " , len(train_metadata["categories"])) # # # 2. 전체 카테고리 주석에서 "동물" 만 추출 # ___ # 이 가이드의 데이터 세트 범위를 제한하기 위해 COCO 데이터 세트의 동물 이미지만 사용합니다. 
category_labels = { c["id"]: c["name"] for c in train_metadata["categories"] if c["supercategory"] == "animal" } category_labels # ## 레이블 추출 (이미지 ID 와 카테고리 ID 추출) # # ``` # <Image ID> {category_id: <ID>} 형식으로 추출 합니다. # 예: 495357 {'category_id': 18} # ``` # # + train_annos = {} for a in train_metadata["annotations"]: if a["category_id"] in category_labels: train_annos[a["image_id"]] = {"category_id": a["category_id"]} from src.m_utils import show_dic_items show_dic_items(train_annos) # print(len(train_annos)) # - # ## 이미지 정보 추출 (이미지 ID, 이미지 URL, 파일 이름 추출) # # ``` # <Image ID> {'coco_url': <URL>, 'file_name': <File Name} 으로 추출 합니다. # # 예: 391895 {'coco_url': 'http://images.cocodataset.org/train2017/000000391895.jpg', 'file_name': '000000391895.jpg'} # ``` # - 훈련 및 검증 데이터 세트에 반복 합니다. # + train_images = {} for i in train_metadata["images"]: train_images[i["id"]] = {"coco_url": i["coco_url"], "file_name": i["file_name"]} show_dic_items(train_images, 5) # + val_annos = {} for a in val_metadata["annotations"]: if a["category_id"] in category_labels: val_annos[a["image_id"]] = {"category_id": a["category_id"]} val_images = {} for i in val_metadata["images"]: val_images[i["id"]] = {"coco_url": i["coco_url"], "file_name": i["file_name"]} # - # # 3. 레이블 및 이미지 파일 경로 정보 결합 # # - 레이블 및 이미지 파일 경로를 결합하여 이미지의 모든 주석 정보를 만듭니다. # 아래와 같이 기존의 이미지 ID, 카테고리 ID에 추가적으로 이미지 경로를 추가 합니다. # ``` # 495357 {'category_id': 18} # --> # 495357 {'category_id': 18, 'coco_url': 'http://images.cocodataset.org/train2017/000000495357.jpg', 'file_name': '000000495357.jpg'} # ``` # 이미지의 레이블 정보 확인 합니다. show_dic_items(train_annos) # 이미지의 레이블 정보와 파일 경로를 결합 합니다. # + for id, anno in train_annos.items(): anno.update(train_images[id]) for id, anno in val_annos.items(): anno.update(val_images[id]) # - # 결합한 내용 확인하기 show_dic_items(train_annos) # ## 훈련과 검증 데이터 세트를 결합 # - 현재의 훈련과 검증 데이터 세트의 2 분할을 3분할인 훈련, 검증, 테스트 데이터 세트로 만들기 위해 일단 합칩니다. 
all_annos = {} for k, v in train_annos.items(): all_annos.update({k: v}) for k, v in val_annos.items(): all_annos.update({k: v}) show_dic_items(all_annos) # # 4. 데이터세트 샘플링 # ___ # - 데이터 작업을 더 쉽게 하기 위해 각 카테고리에서 무작위로 250개의 이미지를 선택합니다. # - 카테고리 당 250개. 10개의 카테고리여서 최종 2500 개가 됩니다. # - 이것은 데이터 세트의 작은 부분이지만 전이 학습(Transfer Learning)을 사용하기에는 적당한 사이즈 입니다. # - 이를 실행할 때마다 동일한 이미지 세트를 얻을 수 있도록 Numpy의 랜덤 시드로 0을 설정합니다. # np.random.seed(0) # + sample_annos = {} # 샘플링을 한 주석 정보를 담을 변수를 선언 합니다. for category_id in category_labels: subset = [k for k, v in all_annos.items() if v["category_id"] == category_id] sample = np.random.choice(subset, size=250, replace=False) for k in sample: sample_annos[k] = all_annos[k] # - show_dic_items(sample_annos) print(len(sample_annos)) # # 5. 이미지 URL을 통해 이미지를 병렬로 다운로드 (약 2분 소요) # - 다중 스레딩을 위한 함수 정의 # - 이미지 저장 폴더 생성 # - 이미지 다운로드를 병렬화화를 위한 Worker Thread 개수 정의 # - 예시로 20을 설정할 수 있고, 에러 발생시에 수치를 낮추시기 바랍니다. # - joblib로 다중 스레딩을 위한 함수 실행 def download_image(url, path): data = imread(url) imwrite(path / url.split("/")[-1], data) sample_dir = pathlib.Path("data_sample_2500") sample_dir.mkdir(exist_ok=True) num_workers = 20 # 에러 발생시 줄이세요. # + # %%time from joblib import Parallel, delayed, parallel_backend with parallel_backend("threading", n_jobs=num_workers): Parallel(verbose=3)( delayed(download_image)(a["coco_url"], sample_dir) for a in sample_annos.values() ) # - # <pre> # </pre> # # 6. CIFAR-10 개구리 데이터와 결합 # ___ # - COCO 데이터 세트에는 개구리 이미지가 포함되어 있지 않지만 모델이 개구리 이미지에 레이블을 지정할 수도 있어야 한다고 가정해 보겠습니다. # - 이 문제를 해결하기 위해 개구리가 포함된 다른 이미지 데이터 세트를 다운로드하고 250개의 개구리 이미지를 샘플링하여 기존 이미지 데이터에 추가할 수 있습니다. # - **CIFAR-10 이미지는 훨씬 작기 때문에(32x32) 크기를 (244x244)로 늘리면 픽셀화되고 흐릿하게 나타납니다.** # - CIFAR-10 데이터 세트는 COCO 데이터 세트와 매우 다른 방식으로 형식이 지정됩니다. COCO 이미지와 일치하도록 CIFAR-10 데이터를 개별 이미지 파일로 처리 합니다. 
# ## CIFAR-10 dataset 다운로드 및 추출 # !wget --no-check-certificate https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz # #!wget https://www.cs.toronto.edu/%7Ekriz/cifar-10-python.tar.gz # 다운로드 받은 파일을 압축 해제 합니다. tf = tarfile.open("cifar-10-python.tar.gz") tf.extractall() # ## CIFAR-10 데이터 세트의 첫 번째 배치 열기 # - CIFAR-10 데이터 세트는 5개의 훈련 배치와 1개의 테스트 배치로 제공됩니다. # - 각 훈련 배치에는 10,000개의 무작위로 정렬된 이미지가 있습니다. 데이터 세트에 250개의 개구리 이미지만 필요하므로 첫 번째 배치에서만 가져옵니다. with open("./cifar-10-batches-py/data_batch_1", "rb") as f: batch_1 = pickle.load(f, encoding="bytes") image_data = batch_1[b"data"] # #### 이미지 정보 # - 10000개의 이미지가 존재하고, 가로(32) X 세로(32) X 채널(3) = 3,072 의 픽셀 데이타가 존재 합니다. print("\nImage Shape: ", image_data.shape, "\n") print(image_data) # ## 250 개구리 이미지 샘플링 # - 개구리의 레이블 "6" 인 개구리 인덱스 추출 # - 랜덤하게 250개의 인덱스 추출 # - 250개의 인덱스에 해당되는 이미지 데이터 추출 # - 개구리 이미지 데이터를 [250, 32, 32, 3] 으로 재구성 frog_indices = np.array(batch_1[b"labels"]) == 6 # 6 == 개구리 sample_frog_indices = np.random.choice(frog_indices.nonzero()[0], size=250, replace=False) # 랜덤으로 250개 개구리 인텍스 추출 sample_data = image_data[sample_frog_indices, :] frog_images = sample_data.reshape(len(sample_data), 3, 32, 32).transpose(0, 2, 3, 1) # [250, 32, 32, 3] 으로 재구성 frog_images.shape # ### 개구리 이미지 보기 # # 250 개의 이미지에서 랜덤으로 개구리 이미지를 확인 합니다. # + fig, axs = plt.subplots(3, 4, figsize=(10, 7)) indices = np.random.randint(low=0, high=249, size=12) for i, ax in enumerate(axs.flatten()): ax.imshow(frog_images[indices[i]]) ax.axis("off") # - # ## 개구리 이미지를 추가 하여 data_sample_2750 생성 # - 개구리 인덱스에 대한 파일 이름을 추출 # - 파일이름의 확장자를 png --> jpg 로 변경하고, 데이타를 추출하여 data_sample_2500 에 저장 함. 
# - data_sample_2500 폴더를 data_sample_2750 으로 변경 frog_filenames = np.array(batch_1[b"filenames"])[sample_frog_indices] frog_filenames[0:3] for idx, filename in enumerate(frog_filenames): filename = filename.decode() data = frog_images[idx] if filename.endswith(".png"): filename = filename.replace(".png", ".jpg") imwrite(sample_dir / filename, data) # + import os import shutil new_folder_name = "data_sample_2750" if os.path.isdir(new_folder_name): # 폴더가 존재하면 먼저 삭제 함. shutil.rmtree(new_folder_name) print(f"{new_folder_name} is deleted") sample_dir.rename(new_folder_name) print(f"{sample_dir} is renamed to {new_folder_name}") # - # # 7. 개구리 주석을 기존의 coco 2500개의 주석 (`sample_annos`) 에 추가 # 기존 category_labels 에 26 번으로서 frog 를 추가 category_labels[26] = "frog" print(show_dic_items(category_labels,11)) # 기존의 coco 2500 개에 해당하는 이미지 ID의 다음 값 부터 이후 값 추출 (Next_ID) next_anno_idx = np.array(list(sample_annos.keys())).max() + 1 # Next_ID 부터 250개의 이미지 ID를 생성 frog_anno_ids = range(next_anno_idx, next_anno_idx + len(frog_images)) # 기존의 2500개에서 개구리 250개를 카테고리 ID, 파일 이름을 추가 함. for idx, frog_id in enumerate(frog_anno_ids): sample_annos[frog_id] = { "category_id": 26, "file_name": frog_filenames[idx].decode().replace(".png", ".jpg"), } print(show_dic_items(sample_annos)) # # 8. 최종 주석을 피클 파일로 저장 # ___ # - 이것은 이미지 데이터로 딥 러닝 모델을 훈련하기 위한 일련의 가이드 중 첫 번째일 뿐입니다. # - 작업이 후속 노트북으로 넘어가도록 하기 위해 아래 두개의 피클 파일을 생성 합니다. # - 데이터세트 주석 # - 카테고리 이름 # + pickled_dir = pathlib.Path("./pickled_data") pickled_dir.mkdir(exist_ok=True) with open("pickled_data/sample_annos.pickle", "wb") as f: pickle.dump(sample_annos, f) with open("./pickled_data/category_labels.pickle", "wb") as f: pickle.dump(category_labels, f) # -
image-classificaton/1.1.download_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="iQjHqsmTAVLU" # # Week 3: Improve MNIST with Convolutions # # In the videos you looked at how you would improve Fashion MNIST using Convolutions. For this exercise see if you can improve MNIST to 99.5% accuracy or more by adding only a single convolutional layer and a single MaxPooling 2D layer to the model from the assignment of the previous week. # # You should stop training once the accuracy goes above this amount. It should happen in less than 10 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your callback. # # When 99.5% accuracy has been hit, you should print out the string "Reached 99.5% accuracy so cancelling training!" # # + id="ZpztRwBouwYp" import os import numpy as np import tensorflow as tf from tensorflow import keras # - # Begin by loading the data. A couple of things to notice: # # - The file `mnist.npz` is already included in the current workspace under the `data` directory. By default the `load_data` from Keras accepts a path relative to `~/.keras/datasets` but in this case it is stored somewhere else, as a result of this, you need to specify the full path. # # - `load_data` returns the train and test sets in the form of the tuples `(x_train, y_train), (x_test, y_test)` but in this exercise you will be needing only the train set so you can ignore the second tuple. 
# + # Load the data # Get current working directory current_dir = os.getcwd() # Append data/mnist.npz to the previous path to get the full path data_path = os.path.join(current_dir, "data/mnist.npz") # Get only training set (training_images, training_labels), _ = tf.keras.datasets.mnist.load_data(path=data_path) # - # One important step when dealing with image data is to preprocess the data. During the preprocess step you can apply transformations to the dataset that will be fed into your convolutional neural network. # # Here you will apply two transformations to the data: # - Reshape the data so that it has an extra dimension. The reason for this # is that commonly you will use 3-dimensional arrays (without counting the batch dimension) to represent image data. The third dimension represents the color using RGB values. This data might be in black and white format so the third dimension doesn't really add any additional information for the classification process but it is a good practice regardless. # # # - Normalize the pixel values so that these are values between 0 and 1. You can achieve this by dividing every value in the array by the maximum. 
#
# Remember that these tensors are of type `numpy.ndarray` so you can use functions like [reshape](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html) or [divide](https://numpy.org/doc/stable/reference/generated/numpy.divide.html) to complete the `reshape_and_normalize` function below:

# +
# GRADED FUNCTION: reshape_and_normalize
def reshape_and_normalize(images):
    """Reshape grayscale images to (N, H, W, 1) and scale pixel values into [0, 1].

    Args:
        images (numpy.ndarray): array of shape (N, H, W) holding raw pixel values.

    Returns:
        numpy.ndarray: array of shape (N, H, W, 1) with values normalized to [0, 1].
    """
    ### START CODE HERE

    # Reshape the images to add an extra dimension (a single grayscale channel)
    images = images.reshape(images.shape[0], images.shape[1], images.shape[2], 1)

    # Normalize pixel values by dividing every value by the maximum
    # (255 for raw MNIST, which yields the expected max of 1.0)
    images = images / np.max(images)

    ### END CODE HERE

    return images
# -

# Test your function with the next cell:

# +
# Reload the images in case you run this cell multiple times
(training_images, _), _ = tf.keras.datasets.mnist.load_data(path=data_path)

# Apply your function
training_images = reshape_and_normalize(training_images)

print(f"Maximum pixel value after normalization: {np.max(training_images)}\n")
print(f"Shape of training set after reshaping: {training_images.shape}\n")
print(f"Shape of one image after reshaping: {training_images[0].shape}")
# -

# **Expected Output:**
# ```
# Maximum pixel value after normalization: 1.0
#
# Shape of training set after reshaping: (60000, 28, 28, 1)
#
# Shape of one image after reshaping: (28, 28, 1)
# ```

# Now complete the callback that will ensure that training will stop after an accuracy of 99.5% is reached:

# +
# GRADED CLASS: myCallback
### START CODE HERE

# Remember to inherit from the correct class
class myCallback(tf.keras.callbacks.Callback):
    """Stops training as soon as accuracy goes above 99.5%."""

    # Define the method that checks the accuracy at the end of each epoch
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # 'accuracy' is present in logs because the model is compiled
        # with metrics=['accuracy']
        if logs.get('accuracy', 0.0) > 0.995:
            print("\nReached 99.5% accuracy so cancelling training!")
            self.model.stop_training = True

### END CODE HERE
# -

# Finally, complete the `convolutional_model` function below. This function should return your convolutional neural network:

# GRADED FUNCTION: convolutional_model
def convolutional_model():
    """Define and compile the 5-layer CNN used to classify the MNIST digits.

    Returns:
        tf.keras.Model: a compiled Sequential model ready for training.
    """
    ### START CODE HERE

    # Define the model, it should have 5 layers:
    # - A Conv2D layer with 32 filters, a kernel_size of 3x3, ReLU activation function
    #   and an input shape that matches that of every image in the training set
    # - A MaxPooling2D layer with a pool_size of 2x2
    # - A Flatten layer with no arguments
    # - A Dense layer with 128 units and ReLU activation function
    # - A Dense layer with 10 units and softmax activation function
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    ### END CODE HERE

    # Compile the model
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model

# +
# Save your untrained model
model = convolutional_model()

# Instantiate the callback class
callbacks = myCallback()

# Train your model (this can take up to 5 minutes)
history = model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])
# -

# If you see the message that you defined in your callback printed out after less than 10 epochs it means your callback worked as expected. You can also double check by running the following cell:

print(f"Your model was trained for {len(history.epoch)} epochs")

# **Congratulations on finishing this week's assignment!**
#
# You have successfully implemented a CNN to assist you in the image classification task. Nice job!
#
# **Keep it up!**
C1/W3/assignment/C1W3_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Creating DataFrame and Series import pandas as pd # #### Create a data dictionary raw_data = {"name": ['Bulbasaur', 'Charmander','Squirtle','Caterpie'], "evolution": ['Ivysaur','Charmeleon','Wartortle','Metapod'], "type": ['grass', 'fire', 'water', 'bug'], "hp": [45, 39, 44, 45], "pokedex": ['yes', 'no','yes','no'] } df = pd.DataFrame(raw_data) df.head() # #### Add another column called place, and insert what you have in mind. df["place"] = ["park", "street", "lake", "forest"] df # #### Present the type of each column df.dtypes
DataTalks_Create DataFrame&Series.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: hejia@cori-2 # language: python # name: hejia-2 # --- # # The full notebook coming soon... # + # preliminary results # -
examples/planck_18_sterile.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #this code reads in the MMD, just lat, lon, time and interpolates CCMP for each matchup import datetime as dt from datetime import datetime import numpy as np import math import os #import sys import pandas as pd import xarray as xr from netCDF4 import Dataset ####################you will need to change some paths here!##################### dir_mmdb='F:/data/mmd/mmd06c_re01_pp/drifter-sst_amsre-aq/' dir_mmdb_ccmp='f:/data/mmd/mmd06c_re01_pp/wind/' dir_ccmp='F:/data/sat_data/ccmp/v02.0/Y' ################################################################################# input_year=2003 input_month=1 #istart_flag = 0 #for root, dirs, files in os.walk(dir_mmdb, topdown=False): # for name in files: # if name.endswith(".nc"): filename_mmdb=dir_mmdb+'mmd06c_sst_drifter-sst_amsre-aq_2002-152_2002-158.nc' filename_mmdb_ccmp=dir_mmdb_ccmp+'ccmp_mmd06c_sst_drifter-sst_amsre-aq_2002-152_2002-158.nc' ds=xr.open_dataset(filename_mmdb,decode_times=False) ds['drifter-sst.insitu.qc1'] ds # + #ds_test = xr.DataArray(ds['amsre.percent_land'][:,10,10], # ....: coords={'lat': ('lat', ds['amsre.latitude'][:,10,10]), # ....: 'lon': ('lon', ds['amsre.longitude'][:,10,10])}, # ....: dims=['lat','lon']) import cartopy.crs as ccrs import matplotlib.pyplot as plt FLOAT_FILLVALUE = np.finfo(np.float32).min DOUBLE_FILLVALUE = np.finfo(np.float64).min dtime = ds['amsre.time'][:,10,10] data = ds['amsre.percent_land'][:,10,10] lat = ds['amsre.latitude'][:,10,10].values lon =ds['amsre.longitude'][:,10,10].values #foo = xr.DataArray(data, coords=[lat,lon], dims=['lat', 'lon']) #plt.figure(figsize=(7,2)); #ax = plt.axes(projection=ccrs.PlateCarree()) #data.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(),x=lon, y=lat); vattrs = ds['amsre.brightness_temperature10H'].attrs.copy() 
varrays = {} COORD_ATTR = 'lat lon time' vattrs["coordinates"] = COORD_ATTR vattrs = ds['amsre.longitude'].attrs.copy() vattrs["_FillValue"] = FLOAT_FILLVALUE values = np.ma.fix_invalid(lon) #varrays['lon'] = xr.DataArray(values.filled(FLOAT_FILLVALUE).astype(np.float32,copy=False),dims=['time'],attrs=vattrs) vattrs = ds['amsre.latitude'].attrs.copy() vattrs["_FillValue"] = FLOAT_FILLVALUE values = np.ma.fix_invalid(lat) #varrays['lat'] = xr.DataArray(values.filled(FLOAT_FILLVALUE).astype(np.float32,copy=False),dims=['time'],attrs=vattrs) varrays['percent_land'] = xr.DataArray(data.astype(np.float32,copy=False),dims=['time'],attrs=vattrs) values = np.ma.fix_invalid(dtime.values) vtime = xr.DataArray(values.filled(FLOAT_FILLVALUE).astype(np.float64,copy=False),dims=['time'],attrs=vattrs) ds_test = xr.Dataset(varrays,coords = {'time': vtime, 'lon':lon, 'lat':lat}) # - #ds['amsre.latitude'][:,10,10] ds.data_vars ds['amsre.longitude'].attrs ds_test['percent_land'] ishape = ds['amsre.brightness_temperature10V'].shape print(int((ishape[1]-1)/2)) ishape = ds['drifter-sst.insitu.sea_surface_temperature'].shape print(ishape[1]-1) # + dataset=ds # convert names COORD_ATTR = 'lat lon time' #COORD_ATTR = 'time' FLOAT_FILLVALUE = np.finfo(np.float32).min DOUBLE_FILLVALUE = np.finfo(np.float64).min VAR_TRANSLATE = { 'amsre.brightness_temperature6V': 'tb6v', 'amsre.brightness_temperature6H': 'tb6h', 'amsre.brightness_temperature10V': 'tb10v', 'amsre.brightness_temperature10H': 'tb10h', 'amsre.brightness_temperature18V': 'tb18v', 'amsre.brightness_temperature18H': 'tb18h', 'amsre.brightness_temperature23V': 'tb23v', 'amsre.brightness_temperature23H': 'tb23h', 'amsre.brightness_temperature36V': 'tb36v', 'amsre.brightness_temperature36H': 'tb36h', 'amsre.brightness_temperature89V': 'tb89v', 'amsre.brightness_temperature89H': 'tb89h', 'amsre.percent_land':'percent_land', 'amsre.solar_zenith_angle':'sza', 'amsre.land_ocean_flag_6':'land_flag', 'amsre.Sun_Glint_Angle':'sun_glint', 
'drifter-sst.insitu.sea_surface_temperature':'drifter_sst', 'drifter-sst.insitu.qc1':'drifter_qc' } # # copy variables from source dataset varrays = {} for v in dataset.data_vars: if v in VAR_TRANSLATE: # print(v) # set attributes vattrs = dataset[v].attrs.copy() if v not in ['amsre.latitude', 'amsre.longitude', 'amsre.time']: vattrs["coordinates"] = COORD_ATTR vattrs["_FillValue"] = FLOAT_FILLVALUE ishape = dataset[v].shape icenter = int((ishape[1]-1)/2) offset, scale = 0,1. if v not in ['amsre.solar_zenith_angle','amsre.land_ocean_flag_6','amsre.latitude', 'amsre.longitude', 'amsre.time','drifter-sst.insitu.sea_surface_temperature','drifter-sst.insitu.qc1']: offset = dataset[v].OFFSET scale = dataset[v].SCALE_FACTOR values = np.ma.fix_invalid(dataset[v].values[:,icenter,icenter])*scale+offset print(v,icenter,offset,scale) # create new data array varrays[VAR_TRANSLATE[v]] = xr.DataArray( values.filled(FLOAT_FILLVALUE).astype(np.float32,copy=False), dims=['time'], attrs=vattrs ) # 1. wind speed vattrs = dataset['amsre.nwp.10m_east_wind_component'].attrs.copy() vattrs['standard_name'] = 'wind_speed' vattrs['long_name'] = 'wind_speed' vattrs['valid_min'] = 0 vattrs['valid_max'] = 100 vattrs["_FillValue"] = FLOAT_FILLVALUE vattrs["coordinates"] = COORD_ATTR WS_10m=(dataset['amsre.nwp.10m_east_wind_component'].values[:,2,2]**2 + dataset['amsre.nwp.10m_north_wind_component'].values[:,2,2]**2)**.5 varrays['wind_speed'] = xr.DataArray( WS_10m.astype(np.float32,copy=False), dims=['time'], attrs=vattrs ) # 2. 
wind direction vattrs = dataset['amsre.nwp.10m_east_wind_component'].attrs.copy() vattrs['standard_name'] = 'wind_to_direction' vattrs['long_name'] = 'local wind direction' vattrs['valid_min'] = 0 vattrs['valid_max'] = 360 vattrs["_FillValue"] = FLOAT_FILLVALUE vattrs['units'] = 'degrees' vattrs["coordinates"] = COORD_ATTR WD=np.arctan2(dataset['amsre.nwp.10m_east_wind_component'].values[:,2,2], dataset['amsre.nwp.10m_east_wind_component'].values[:,2,2])*180/3.14159 WD=np.where(WD<0,WD+360,WD) varrays['wind_direction'] = xr.DataArray( WD.astype(np.float32,copy=False), dims=['time'], attrs=vattrs ) # 3. time vattrs = ds['amsre.time'].attrs.copy() #amsr_time=ds['amsre.time'][:,10,10] #date_amsre=pd.to_datetime(amsr_time.data, unit='s',origin='1993-01-01') vattrs["_FillValue"] = DOUBLE_FILLVALUE #date_amsreint = pd.to_datetime(date_amsre).astype(np.int64) #values = np.ma.fix_invalid(date_amsreint.data) values = np.ma.fix_invalid(ds['amsre.time'].data[:,10,10]) vtime = xr.DataArray( values.filled(FLOAT_FILLVALUE).astype(np.float64,copy=False), dims=['time'], attrs=vattrs ) # create Dataset and save l3r = xr.Dataset(varrays,coords = {'time': vtime, 'lon':lon, 'lat':lat}) #l3r = xr.Dataset(varrays,coords = { 'lon':lon, 'lat':lat}) # embellishments #indicative_date_time=pd.to_datetime(str(time_usv[0].data)).strftime("%Y%m%d%H%M%S") #Product_string = astr_platform # str(ds.TEMP_CTD_MEAN.vendor_name) + '_' + str(ds.TEMP_CTD_MEAN.serial_number) #filename_L3R = dir_out + indicative_date_time + \ # '-' + ISDP + '-' + 'L3R' + '-' + SST_type + '-' +Product_string+ '-v' +Annex_version+ '-fv' +File_version+ '.nc' #filename_L3R_test = dir_out + indicative_date_time + \ # '-' + ISDP + '-' + 'L3R' + '-' + SST_type + '-' +Product_string+ '-v' +Annex_version+ '-fv' +File_version+ 'test.nc' #filename_L3R #l3r.to_netcdf(filename_L3R) #for some reason the time not decoded is off by about 28 seconds so reset to original decoded time here #l3r['time']=ds.time[0,:].data 
#l3r.to_netcdf(filename_L3R) # - l3r.time[0] from scipy.interpolate import griddata xlat=range(-90,90) xlon=range(-180,180) #l3r_map=l3r.interp(lat=xlat,lon=xlon) grid_x, grid_y = np.mgrid[-180:180:.25, -90:90:.25] grid_z0 = griddata((l3r.lon.values,l3r.lat.values),l3r.percent_land.values, (grid_x, grid_y), method='linear') import matplotlib.pyplot as plt plt.subplot(221) #plt.imshow(grid_y.T, extent=(-180,180,-90,90), origin='lower') plt.imshow(grid_z0.T, extent=(-180,180,-90,90), origin='lower') ilen=l3r.time.shape[0] gmap= np.zeros((1441,721))*np.nan print(gmap.shape) for i in range(0,ilen): ilon=int(round((l3r.lon.data[i]+179.875)/.25+1,0)) ilat=int(round((l3r.lat.data[i]+89.875)/.25+1,0)) gmap[ilon,ilat]=l3r.percent_land.data[i] plt.subplot(211) plt.figure(figsize=(20,10)) plt.imshow(gmap.T) fnc
mmdb/MMDB_into_xarray.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: platzi # language: python # name: platzi # --- # # Módulo 4: APIs # ## Spotify # <img src="https://developer.spotify.com/assets/branding-guidelines/logo@2x.png" width=400></img> # # En este módulo utilizaremos APIs para obtener información sobre artistas, discos y tracks disponibles en Spotify. Pero primero.. ¿Qué es una **API**?<br> # Por sus siglas en inglés, una API es una interfaz para programar aplicaciones (*Application Programming Interface*). Es decir que es un conjunto de funciones, métodos, reglas y definiciones que nos permitirán desarrollar aplicaciones (en este caso un scraper) que se comuniquen con los servidores de Spotify. Las APIs son diseñadas y desarrolladas por las empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que utilicen sus servicios. Spotify tiene APIs públicas y bien documentadas que estaremos usando en el desarrollo de este proyecto. # #### REST # Un término se seguramente te vas a encontrar cuando estés buscando información en internet es **REST** o *RESTful*. Significa *representational state transfer* y si una API es REST o RESTful, implica que respeta unos determinados principios de arquitectura, como por ejemplo un protocolo de comunicación cliente/servidor (que será HTTP) y (entre otras cosas) un conjunto de operaciones definidas que conocemos como **métodos**. Ya veníamos usando el método GET para hacer solicitudes a servidores web. # #### Documentación # Como mencioné antes, las APIs son diseñadas por las mismas empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que consuman sus servicios o información. Es por eso que la forma de utilizar las APIs variará dependiendo del servicio que querramos consumir. No es lo mismo utilizar las APIs de Spotify que las APIs de Twitter. 
Por esta razón es de suma importancia leer la documentación disponible, generalmente en la sección de desarrolladores de cada sitio. Te dejo el [link a la de Spotify](https://developer.spotify.com/documentation/) # #### JSON # Json significa *JavaScript Object Notation* y es un formato para describir objetos que ganó tanta popularidad en su uso que ahora se lo considera independiente del lenguaje. De hecho, lo utilizaremos en este proyecto por más que estemos trabajando en Python, porque es la forma en la que obtendremos las respuestas a las solicitudes que realicemos utilizando las APIs. Para nosotros, no será ni más ni menos que un diccionario con algunas particularidades que iremos viendo a lo largo del curso. # # # Links útiles para la clase: # - [Documentación de Spotify - Artistas](https://developer.spotify.com/documentation/web-api/reference/artists/) # - [<NAME> en Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao) import requests id_im = '6mdiAmATAx73kdxrNrnlao' url_base = 'https://api.spotify.com/v1' ep_artist = '/artists/{artist_id}' url_base+ep_artist.format(artist_id=id_im) r = requests.get(url_base+ep_artist.format(artist_id=id_im)) r.status_code r.json() token_url = 'https://accounts.spotify.com/api/token' params = {'grant_type': 'client_credentials'} headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='} r = requests.post(token_url, data=params, headers=headers) r.status_code r.json() token = r.json()['access_token'] token header = {"Authorization": "Bearer {}".format(token)} r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header) r.status_code r.json() url_busqueda = 'https://api.spotify.com/v1/search' search_params = {'q': "Iron+Maiden", 'type':'artist', 'market':'AR'} busqueda = requests.get(url_busqueda, headers=header, params=search_params) busqueda.status_code busqueda.json() import pandas as pd df = 
pd.DataFrame(busqueda.json()['artists']['items']) df.head() df.sort_values(by='popularity', ascending=False).iloc[0]['id'] import base64 def get_token(client_id, client_secret): encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8')) params = {'grant_type':'client_credentials'} header={'Authorization': 'Basic ' + str(encoded, 'utf-8')} r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params) if r.status_code != 200: print('Error en la request.', r.json()) return None print('Token válido por {} segundos.'.format(r.json()['expires_in'])) return r.json()['access_token'] client_id = '44b7b36ec145467f9a9eeaf7e417cf8b' client_secret = '7b4aa7a0ef4844048ab1d<PASSWORD>' token = get_token(client_id, client_secret) header = {"Authorization": "Bearer {}".format(token)} id_im artist_im = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header) artist_im.status_code artist_im.json() params = {'country': 'AR'} albums_im = requests.get(url_base+ep_artist.format(artist_id=id_im)+'/albums', headers=header, params=params) albums_im.status_code albums_im.json()['items'] [(album['id'], album['name']) for album in albums_im.json()['items']] bnw_id = '1hDF0QPIHVTnSJtxyQVguB' album_ep = '/albums/{album_id}' album_params = {'market':'AR'} bnw = requests.get(url_base+album_ep.format(album_id=bnw_id)+'/tracks', headers=header, params=album_params) bnw bnw.json() bnw.json()['items'] [(track['id'], track['name']) for track in bnw.json()['items']] # ## Clase 5 # Ya vimos cómo utilizar las APIs para: # - obtener un token que nos permita acceder a la información # - realizar una búsqueda # - obtener información de un artista # - obtener los álbumes de un artista # - obtener los tracks de un álbum # # La última clase quedó pendiente armar funciones que resuelvan todos estos pasos que te las dejo acá def obtener_discografia(artist_id, token, return_name=False, page_limit=50, country=None): url = 
f'https://api.spotify.com/v1/artists/{artist_id}/albums' header = {'Authorization': f'Bearer {token}'} params = {'limit': page_limit, 'offset': 0, 'country': country} lista = [] r = requests.get(url, params=params, headers=header) if r.status_code != 200: print('Error en request.', r.json()) return None if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] while r.json()['next']: r = requests.get(r.json()['next'], headers=header) # El resto de los parámetros están dentro de la URL if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] return lista def obtener_tracks(album_id, token, return_name=False, page_limit=50, market=None): url=f'https://api.spotify.com/v1/albums/{album_id}/tracks' header = {'Authorization': f'Bearer {token}'} params = {'limit': page_limit, 'offset': 0, 'market': market} lista = [] r = requests.get(url, params=params, headers=header) if r.status_code != 200: print('Error en request.', r.json()) return None if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] while r.json()['next']: r = requests.get(r.json()['next'], headers=header) # El resto de los parámetros están dentro de la URL if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] return lista # Utilizando estas funciones podemos obtener todos las canciones que tiene un artista publicadas en Spotify for album in obtener_discografia(id_im, token, return_name=True, country='AR'): print(album[1]) for track in obtener_tracks(album[0], token, return_name=True, market='AR'): print('\t', track[1]) # Te prometí un bonus: si miramos el json que devuelve cuando consultamos por un album, vamos a ver que cada tema tiene un preview url preview_url = 
'https://p.scdn.co/mp3-preview/647a53055a6f5d012ed238d87c3191df1ed5aff9?cid=44b7b36ec145467f9a9eeaf7e417cf8b' preview = requests.get(preview_url) preview.status_code preview.content import IPython.display as ipd ipd.Audio(preview.content)
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases_old/Módulo 4_ APIs/M4C5 - Fin del proyecto + Bonus (Script).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

import tfi_exact  # for later comparison

# +
import a_mps

L = 14
# -

# a)

psi0 = a_mps.init_spinup_MPS(14)

Sz = np.array([[1., 0.], [0., -1.]])
exp_val_Sz = psi0.site_expectation_value(Sz)
print(exp_val_Sz)

Sx = np.array([[0., 1.], [1., 0.]])
exp_val_Sx = psi0.site_expectation_value(Sx)
print(exp_val_Sx)

assert exp_val_Sz[0] == 1.
assert exp_val_Sx[0] == 0.


# +
# b)
def init_spinright_MPS(L):
    """Return a product state with all spins pointing right (+x) as an MPS.

    Each site tensor has equal amplitude 1/sqrt(2) on both local basis states,
    i.e. the +1 eigenstate of sigma_x (the old docstring said "spins up").
    """
    # np.float was removed in NumPy 1.24; use the explicit np.float64 dtype.
    B = np.zeros([1, 2, 1], np.float64)
    B[0, 0, 0] = B[0, 1, 0] = 0.5**0.5
    S = np.ones([1], np.float64)
    Bs = [B.copy() for i in range(L)]
    Ss = [S.copy() for i in range(L)]
    return a_mps.MPS(Bs, Ss)


psi1 = init_spinright_MPS(14)
exp_val_Sz = psi1.site_expectation_value(Sz)
print(exp_val_Sz)
exp_val_Sx = psi1.site_expectation_value(Sx)
print(exp_val_Sx)
assert exp_val_Sz[0] == 0.
assert abs(exp_val_Sx[0] - 1.) < 1.e-15  # correct up to rounding errors of machine precision
# -

# c)
import b_model

for g in [0.5, 1., 1.5]:
    print("g =", g)
    model = b_model.TFIModel(L, 1., g)
    print("energy of |up.... up>", model.energy(psi0))
    print("energy of |right ... right>", model.energy(psi1))
# as expected :D

# d)
import c_tebd

E, psi, model = c_tebd.example_TEBD_gs_finite(14, 1., 1.5)

# # Global quench

# +
# e)
chi_max = 30
eps = 1.e-10
L = 14
model = b_model.TFIModel(L, 1., 1.5)
dt = 0.1
N_steps = 1
U_bonds = c_tebd.calc_U_bonds(model, 1.j*dt)
ts = np.arange(0., 10., dt*N_steps)
psi = a_mps.init_spinup_MPS(L)
Sz_tot = []
S = []
for t in ts:
    # measure at time t, then evolve by dt*N_steps
    Sz_tot.append(np.sum(psi.site_expectation_value(model.sigmaz)))
    S.append(psi.entanglement_entropy()[psi.L//2])
    c_tebd.run_TEBD(psi, U_bonds, N_steps, chi_max, eps)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 7))
ax1.plot(ts, Sz_tot)
ax2.plot(ts, S)
ax1.set_ylabel("total $S^z$")
ax2.set_ylabel("half-chain entropy")
ax2.set_xlabel("time $t$")

# +
# f) converged in dt, chi?
L = 14
print("L =", L)  # bugfix: print("L=L") printed the literal string
model = b_model.TFIModel(L, 1., 1.5)
eps = 1.e-10
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 7))
for dt, N_steps, chi in [(0.1, 1, 30), (0.01, 10, 30), (0.02, 5, 128)]:
    lbl = "dt={dt:.3f}, chi={chi:d}".format(dt=dt, chi=chi)
    print(lbl)
    U_bonds = c_tebd.calc_U_bonds(model, 1.j*dt)
    ts = np.arange(0., 10., dt*N_steps)
    psi = a_mps.init_spinup_MPS(L)
    Sz_tot = []
    S = []
    for t in ts:
        Sz_tot.append(np.sum(psi.site_expectation_value(model.sigmaz)))
        S.append(psi.entanglement_entropy()[psi.L//2])
        # bugfix: use the loop's chi, not the global chi_max (=30) left over
        # from cell e) -- otherwise the chi=128 run silently truncated at 30.
        c_tebd.run_TEBD(psi, U_bonds, N_steps, chi, eps)
    ax1.plot(ts, Sz_tot, label=lbl)
    ax2.plot(ts, S, label=lbl)
ax2.legend()
ax1.set_ylabel("total $S^z$")
ax2.set_ylabel("half-chain entropy")
ax2.set_xlabel("time $t$")
plt.show()

# +
# and again for larger L
L = 50
print("L =", L)  # bugfix: was print("L=L")
model = b_model.TFIModel(L, 1., 1.5)
eps = 1.e-10
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 7))
for dt, N_steps, chi in [(0.1, 1, 30), (0.01, 10, 30), (0.02, 5, 60)]:
    lbl = "dt={dt:.3f}, chi={chi:d}".format(dt=dt, chi=chi)
    print(lbl)
    U_bonds = c_tebd.calc_U_bonds(model, 1.j*dt)
    ts = np.arange(0., 10., dt*N_steps)
    psi = a_mps.init_spinup_MPS(L)
    Sz_tot = []
    S = []
    for t in ts:
        Sz_tot.append(np.sum(psi.site_expectation_value(model.sigmaz)))
        S.append(psi.entanglement_entropy()[psi.L//2])
        c_tebd.run_TEBD(psi, U_bonds, N_steps, chi, eps)  # bugfix: chi, not chi_max
    ax1.plot(ts, Sz_tot, label=lbl)
    ax2.plot(ts, S, label=lbl)
ax2.legend(loc='upper left')
ax1.set_ylabel("total $S^z$")
ax2.set_ylabel("half-chain entropy")
ax2.set_xlabel("time $t$")
plt.show()


# -

# g)
def run_TEBD_secondorder(psi, U_bonds, U_bonds_half_dt, N_steps, chi_max, eps):
    """Second-order Trotterized TEBD sweep:

    U_even(dt/2) U_odd(dt) [U_even(dt) U_odd(dt)]^(N_steps-1) U_even(dt/2)

    so neighboring full even-bond steps merge and only the first/last
    half-steps differ from the first-order scheme.
    """
    Nbonds = psi.L - 1
    assert len(U_bonds) == len(U_bonds_half_dt) == Nbonds
    # leading half-step on the even bonds
    for i_bond in range(0, Nbonds, 2):
        c_tebd.update_bond(psi, i_bond, U_bonds_half_dt[i_bond], chi_max, eps)
    for i_bond in range(1, Nbonds, 2):
        c_tebd.update_bond(psi, i_bond, U_bonds[i_bond], chi_max, eps)
    for n in range(N_steps - 1):
        for k in [0, 1]:  # even, odd
            for i_bond in range(k, Nbonds, 2):
                c_tebd.update_bond(psi, i_bond, U_bonds[i_bond], chi_max, eps)
    # trailing half-step on the even bonds
    for i_bond in range(0, Nbonds, 2):
        c_tebd.update_bond(psi, i_bond, U_bonds_half_dt[i_bond], chi_max, eps)
    # done


# +
# regenerate previous plot: almost same code, changes marked with comments.
L = 50
print("L =", L)  # bugfix: was print("L=L")
model = b_model.TFIModel(L, 1., 1.5)
eps = 1.e-10
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 7))
for dt, N_steps, chi in [(0.1, 1, 30), (0.01, 10, 30), (0.02, 5, 60)]:
    lbl = "dt={dt:.3f}, chi={chi:d}".format(dt=dt, chi=chi)
    print(lbl)
    U_bonds = c_tebd.calc_U_bonds(model, 1.j*dt)
    U_bonds_half_dt = c_tebd.calc_U_bonds(model, 1.j*dt/2.)  # this line is new
    ts = np.arange(0., 10., dt*N_steps)
    psi = a_mps.init_spinup_MPS(L)
    Sz_tot = []
    S = []
    for t in ts:
        Sz_tot.append(np.sum(psi.site_expectation_value(model.sigmaz)))
        S.append(psi.entanglement_entropy()[psi.L//2])
        # this line changed; bugfix: pass chi, not the stale chi_max
        run_TEBD_secondorder(psi, U_bonds, U_bonds_half_dt, N_steps, chi, eps)
    ax1.plot(ts, Sz_tot, label=lbl)
    ax2.plot(ts, S, label=lbl)
ax2.legend(loc='upper left')
ax1.set_ylabel("total $S^z$")
ax2.set_ylabel("half-chain entropy")
ax2.set_xlabel("time $t$")  # bugfix: was set_ylabel, which overwrote the y label
plt.show()
# -

# # Local quench

E, psi0, model = c_tebd.example_TEBD_gs_finite(50, 1., 1.5)

# +
# get a copy of the ground state, to which we apply sigma_x on site i0 = L//2
psi = psi0.copy()
i0 = psi.L//2
new_B = np.tensordot(model.sigmax, psi.Bs[i0], axes=(1, 1))  # i [i*], vL [i] vR
new_B = np.transpose(new_B, [1, 0, 2])  # i vL vR -> vL i vR
psi.Bs[i0] = new_B

chi_max = 50
eps = 1.e-10
# do a time evolution
# NOTE(review): dt and N_steps are reused from the last convergence loop above
# (dt=0.02, N_steps=5); set them explicitly if cells are run out of order.
U_bonds = c_tebd.calc_U_bonds(model, 1.j*dt)
U_bonds_half_dt = c_tebd.calc_U_bonds(model, 1.j*dt/2.)  # this line is new
ts = np.arange(0., 10., dt*N_steps)
S = []
for t in ts:
    S.append(psi.entanglement_entropy())
    run_TEBD_secondorder(psi, U_bonds, U_bonds_half_dt, N_steps, chi_max, eps)  # this line changed
S = np.array(S)
# just for comparison: plot the half-chain entanglement
plt.figure()
plt.plot(ts, S[:, i0])
plt.xlabel("time $t$")
plt.ylabel("entropy")
plt.show()
# -

from lanczos import colorplot

plt.figure(figsize=(10, 7))
xs = np.arange(0.5, psi.L-1, 1)
colorplot(xs, ts, S.T)
plt.xlabel("bond of cut")
plt.ylabel("time $t$")
plt.colorbar()
plt.show()
2_tensor_networks/sol8_tebd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import preprocessing
# %matplotlib inline

# !wget --no-check-certificate -O space_ga.csv https://www.dropbox.com/s/t1webrwixdlaacv/space_ga.txt

# ## Линейная регрессия с батч-оптимизацией(2 балла)
#
# Рассмотрим случай, когда данных в выборке много. В таких случаях используется стохастическая или
# батч-оптимизация. Загрузите данные из файла space_ga.csv и нормализуйте их. Мы будем предсказывать
# первый столбец по шести остальным. Эти данные получены с выборов в США в 1980 году.

# Read the raw table; the target is column 0, taken *before* standardization.
raw = pd.read_csv('space_ga.csv', header=None)
y = raw[0].values

# Standardize every column, then keep the six standardized feature columns.
standardizer = preprocessing.StandardScaler().fit(raw)
df = pd.DataFrame(data=standardizer.transform(raw))
X = df[[1, 2, 3, 4, 5, 6]].values

# Prepend an all-ones intercept column so w[0] acts as the bias term.
X = np.column_stack((np.ones(X.shape[0]), X))

X

y

# Как вы могли заметить, датасет больше предыдущего. На нём мы попробуем батч-оптимизацию.
#
# Измените функцию для минимизации написанную на семинаре так, чтобы на вход она принимала
# дополнительный параметр — размер батча. Запустите функцию при разных размерах батча.
# Прокомментируйте результаты.
def _random_batch(X: np.ndarray, y: np.ndarray, batch_size):
    """Return (X, y) restricted to `batch_size` random rows, sampled without replacement.

    If batch_size is None or exceeds the number of rows, the full data is returned.
    Shared by loss() and grad() so the batch-selection logic is not duplicated.
    """
    if batch_size is None or batch_size > y.shape[0]:
        return X, y
    index = np.random.choice(y.shape[0], batch_size, replace=False)
    return X[index], y[index]


def loss(X: np.ndarray, y: np.ndarray, w: np.ndarray, batch_size=None):
    """Half mean squared error of the linear model `w`, estimated on a random batch."""
    X_i, y_i = _random_batch(X, y, batch_size)
    return np.mean((X_i @ w - y_i)**2) / 2


def grad(X: np.ndarray, y: np.ndarray, w: np.ndarray, batch_size=None):
    """Gradient of `loss` w.r.t. `w`, estimated on an independently drawn random batch."""
    X_i, y_i = _random_batch(X, y, batch_size)
    return np.mean((X_i @ w - y_i)[..., None] * X_i, axis=0)


w = np.ones(X.shape[1])  # was np.ones((X.shape[1])): redundant parentheses
w


def log_int_iterator(start, end, step):
    """Yield unique integers from `start` to `end`, growing geometrically by factor `step`."""
    i = start
    last = None
    while i <= end:
        if int(i) != last:
            last = int(i)
            yield last
        i *= step


N = 3000
plt.figure(figsize=[20, 5])
plt.xscale("log")
plt.xlim([0.9, X.shape[0]])
plt.title("Loss в зависимости от batch_size")
plt.xlabel("Batch size")
plt.ylabel("Loss")
history_y = []
history_x = []
np.random.seed(42)
for i in log_int_iterator(1, X.shape[0], 1.3):
    losses = [loss(X, y, w, i) for _ in range(N)]
    print(f"Batch_size:{i}\tloss_min:{np.min(losses)}\tmax:{np.max(losses)}\tmean:{np.mean(losses)}\tstd:{np.std(losses)}")
    plt.scatter([i] * len(losses), losses)
    history_x.append(i)
    history_y.append(np.mean(losses))
plt.plot(history_x, history_y)

# Усредненный результат функции для минимизации практически не зависит от размера выборки, так как при
# каждом вычислении высчитывается средний результат для одной записи, который затем усредняется от
# количества экспериментов.
#
# Loss для отдельного эксперимента сильно зависит от размера батча, при большем размере разброс значений
# невелик, однако при малых - сильно растет. Это можно объяснить природой данных - некоторые хорошо
# описывают обобщенное значение выборки, некоторые - плохо и, возможно, являются выбросами.
# ### Возможный идеальный результат:

from sklearn.linear_model import LinearRegression

reg = LinearRegression(fit_intercept=False).fit(X, y)
print("Weights:", *[np.round(it, 5) for it in reg.coef_], sep='\t')
print("Loss: ", loss(X, y, reg.coef_))

# +
import time


def super_optimizer_and_plotter_steps(learning_rate, X, y, batch_size=None, w=None,
                                      eps=0.0001, max_iter=10000, learning_rate_mul=0.99):
    """Run (batched) gradient descent with a geometrically decaying learning rate
    and plot the loss trajectory on the current axes.

    Stops when the step size ||w_old - w|| drops below `eps`, or after `max_iter` steps.
    """
    if w is None:
        w = np.zeros(shape=(X.shape[1]))
    np.random.seed(42)  # reproducible batch draws
    start = time.time()
    w_old = w + eps  # guarantee the first loop iteration runs
    history = [loss(X, y, w)]
    i = 0
    while np.linalg.norm(w_old - w) > eps:
        learning_rate *= learning_rate_mul
        w_old = w
        w = w - learning_rate * grad(X, y, w, batch_size)
        history.append(loss(X, y, w))
        i += 1
        if i > max_iter:
            break
    print("Batch: {}\tSteps:{}\tLoss: {}\tTime: {}\tW: {weights}".format(
        batch_size, i, np.round(history[-1], 5), np.round(time.time() - start, 3),
        weights=[np.round(it, 3) for it in w]))
    plt.plot(history)
    plt.title("Descent trajectory. Batch: {} Steps: {}\nLoss: {}\nTime: {}".format(
        batch_size, i, history[-1], time.time() - start))
    plt.xlabel("# iteration")
    plt.ylabel("Loss")
# -

# +
plt.figure(figsize=[20, 20])
max_step = None  # widest x-range seen over all runs
batches = [None, 3000, 2000, 1000, 500, 200, 100, 50, 10, 7, 5, 2]
# enumerate instead of a manual counter; the old code also tracked min_loss/max_loss
# that were never used (the y-range below is hard-coded), so they are dropped.
for idx, batch in enumerate(batches):
    plt.subplot(4, 3, idx + 1)
    super_optimizer_and_plotter_steps(0.4, X, y, batch)
    _, right = plt.xlim()
    if max_step is None or right > max_step:
        max_step = right
# Re-scale every subplot to a common range so the runs are visually comparable.
for idx in range(len(batches)):
    plt.subplot(4, 3, idx + 1)
    plt.xlim([0, max_step])
    plt.ylim([0.008, 0.05])
plt.tight_layout()
# -

# Масштаб по обоим осям на графиках приведен к одному виду.
# # Итерации с $batch\_size < 50$ сильно колеблются, у них наблюдается увеличение количества шагов
# обучения, так как шаг в сторону антиградиента сильно зависит от тех элементов, которые были выбраны
# случайным образом.
#
# В целом, все выборки с $batch\_size > 2$ привели к примерно одинаковым результатам.
#
# Все выборки с $batch\_size >= 1000$ затратили примерно одинаковое количество шагов.
#
# Все итерации с батч оптимизацией затратили больше времени, чем итерации без нее. Это можно объяснить
# тем, что тратится много времени для генерации случайных чисел и взятии элементов по данным индексам.
# Возможно, на больших выборках генерация случайных чисел и взятие индекса будет занимать меньше времени,
# чем перемножение больших матриц.

# ## Двумерная классификация(1 балл)
#
# Решим задачу 2D классификации синтетических данных.

# +
with open('train.npy', 'rb') as fin:
    X = np.load(fin)

with open('target.npy', 'rb') as fin:
    y = np.load(fin)

plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, s=20)
plt.show()
# -

# Как можно заметить, данные сверху линейно неразделимы. Поэтому мы должны добавить дополнительные
# признаки(или использовать нелинейную модель). Можно заметить, что гиперплоскость разделяющая два класса
# принимает форму круга, поэтому мы можем добавить квадратичные признаки чтобы сделать классы линейно
# разделимыми.
#
# ![](kernel.png)

def expand(X):
    """
    Adds quadratic features.
    This expansion allows your linear model to make non-linear separation.

    For each sample (row in matrix), compute an expanded row:
    [feature0, feature1, feature0^2, feature1^2, feature0*feature1, 1]

    :param X: matrix of features, shape [n_samples,2]
    :returns: expanded features of shape [n_samples,6]
    """
    X_expanded = np.zeros((X.shape[0], 6))
    X_expanded[:, 0] = X[:, 0]
    X_expanded[:, 1] = X[:, 1]
    X_expanded[:, 2] = X[:, 0]**2
    X_expanded[:, 3] = X[:, 1]**2
    X_expanded[:, 4] = X[:, 0] * X[:, 1]
    X_expanded[:, 5] = 1.0  # bias column (broadcast; same values as np.ones(X.shape[0]))
    return X_expanded


X_expanded = expand(X)

# +
# simple test on random numbers
dummy_X = np.array([
    [0, 0],
    [1, 0],
    [2.61, -1.28],
    [-0.59, 2.1]
])

# call your expand function
dummy_expanded = expand(dummy_X)

# what it should have returned:   x0    x1     x0^2    x1^2   x0*x1   1
dummy_expanded_ans = np.array([[ 0.    ,  0.    ,  0.    ,  0.    ,  0.    ,  1.    ],
                               [ 1.    ,  0.    ,  1.    ,  0.    ,  0.    ,  1.    ],
                               [ 2.61  , -1.28  ,  6.8121,  1.6384, -3.3408,  1.    ],
                               [-0.59  ,  2.1   ,  0.3481,  4.41  , -1.239 ,  1.    ]])

# tests
assert isinstance(dummy_expanded, np.ndarray), "please make sure you return numpy array"
assert dummy_expanded.shape == dummy_expanded_ans.shape, "please make sure your shape is correct"
assert np.allclose(dummy_expanded, dummy_expanded_ans, 1e-3), "Something's out of order with features"

print("Seems legit!")
# -

# ## Логистическая регрессия(3 балла)
#
# Для классификации объектов мы будем получать вероятность того что объект принадлежит к классу '1'.
# Чтобы предсказывать вероятность мы будем использовать вывод линейной модели и логистической функции:
#
# $$ a(x; w) = \langle w, x \rangle $$
# $$ P( y=1 \; | \; x, \, w) = \dfrac{1}{1 + \exp(- \langle w, x \rangle)} = \sigma(\langle w, x \rangle)$$

def probability(X, w):
    """
    Given input features and weights
    return predicted probabilities of y==1 given x, P(y=1|x), see description above

    Don't forget to use expand(X) function (where necessary) in this and subsequent functions.
    (Here X is assumed to be already expanded.)

    :param X: feature matrix X of shape [n_samples,6] (expanded)
    :param w: weight vector w of shape [6] for each of the expanded features
    :returns: an array of predicted probabilities in [0,1] interval.
    """
    # sigma(<x, w>)
    return 1/(1 + np.exp(-X.dot(w)))


dummy_weights = np.linspace(-1, 1, 6)
ans_part1 = probability(X_expanded[:1, :], dummy_weights)[0]

# Для логистической регрессии оптимальное значение весов $w$ находится с помощью минимизации
# кросс-энтропии:
#
# Loss для одного сэмпла: $$ l(x_i, y_i, w) = - \left[ {y_i \cdot log P(y_i = 1 \, | \, x_i,w) + (1-y_i) \cdot log (1-P(y_i = 1\, | \, x_i,w))}\right] $$
#
# Loss для нескольких сэмплов: $$ L(X, \vec{y}, w) = {1 \over \ell} \sum_{i=1}^\ell l(x_i, y_i, w) $$

def compute_loss(X, y, w):
    """
    Given feature matrix X [n_samples,6], target vector [n_samples] of 1/0,
    and weight vector w [6], compute scalar loss function L using formula above.
    Keep in mind that our loss is averaged over all samples (rows) in X.
    """
    prob = probability(X, w)
    # cross-entropy averaged over samples (np.mean == np.sum(...) / n)
    return -np.mean(y * np.log(prob) + (1 - y) * np.log(1 - prob))


# Т.к мы обучаем нашу модель с помощью градиентного спуска мы должны считать градиенты.
# Для этого нам нужны производные функции потерь по каждому из весов.
#
# $$ \nabla_w L = {1 \over \ell} \sum_{i=1}^\ell \nabla_w l(x_i, y_i, w) $$
#
# Выведите формулу для подсчета градиента.

def compute_grad(X, y, w):
    """
    Given feature matrix X [n_samples,6], target vector [n_samples] of 1/0,
    and weight vector w [6], compute vector [6] of derivatives of L over each weights.
    Keep in mind that our loss is averaged over all samples (rows) in X.
    """
    # dL/dw = X^T (sigma(Xw) - y) / n
    prob = probability(X, w) - y
    return prob.dot(X)/X.shape[0]


# Вспомогательная функция для визуализации предсказаний:

# +
from IPython import display

h = 0.01
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))


def visualize(X, y, w, history):
    """draws classifier prediction with matplotlib magic"""
    Z = probability(expand(np.c_[xx.ravel(), yy.ravel()]), w)
    Z = Z.reshape(xx.shape)
    plt.subplot(1, 2, 1)
    plt.contourf(xx, yy, Z, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.subplot(1, 2, 2)
    plt.plot(history)
    plt.grid()
    ymin, ymax = plt.ylim()
    plt.ylim(0, ymax)
    display.clear_output(wait=True)
    plt.show()
# -

visualize(X, y, dummy_weights, [0.5, 0.5, 0.25])

# ## Обучение
# В данной секции мы будем использовать функции, написанные вами, чтобы обучить наш классификатор с
# помощью стохастического градиентного спуска.
#
# ## Mini-batch SGD(1 балл)
#
# Стохастический градиентный спуск берет рандомный батч из $m$ сэмплов на каждой итерации, подсчитывает
# градиент функции потерь на этом батче и делает шаг градиентного спуска:
#
# $$ w_t = w_{t-1} - \eta \dfrac{1}{m} \sum_{j=1}^m \nabla_w l(x_{i_j}, y_{i_j}, w_t) $$

# +
np.random.seed(42)
w = np.array([0, 0, 0, 0, 0, 1])

eta = 0.1  # learning rate

n_iter = 100
batch_size = 4
# renamed from `loss` so the loss() function defined earlier is not shadowed
loss_history = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = np.random.choice(X_expanded.shape[0], batch_size)
    loss_history[i] = compute_loss(X_expanded, y, w)
    if i % 10 == 0:
        visualize(X_expanded[ind, :], y[ind], w, loss_history)
    # compute_grad already averages over the batch
    w = w - eta * compute_grad(X_expanded[ind, :], y[ind], w)

visualize(X, y, w, loss_history)
plt.clf()
# -

# ## SGD with momentum(1 балл)
#
# Momentum это метод позволяющий корректировать шаг SGD в нужное направление и уменьшать осцилляции как
# показано на рисунке. Данный эффект достигается с помощью добавления предыдущих шагов с коэффициентом
# $\alpha$ к текущему градиенту для каждого шага с обновлением весов.
#
# $$ \nu_t = \alpha \nu_{t-1} + \eta\dfrac{1}{m} \sum_{j=1}^m \nabla_w l(x_{i_j}, y_{i_j}, w_t) $$
# $$ w_t = w_{t-1} - \nu_t$$
#
# ![](sgd.png)

# +
np.random.seed(42)
w = np.array([0, 0, 0, 0, 0, 1])

eta = 0.05  # learning rate
alpha = 0.9  # momentum
nu = np.zeros_like(w)  # velocity accumulator

n_iter = 100
batch_size = 4
loss_history = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = np.random.choice(X_expanded.shape[0], batch_size)
    loss_history[i] = compute_loss(X_expanded, y, w)
    if i % 10 == 0:
        visualize(X_expanded[ind, :], y[ind], w, loss_history)
    nu = nu * alpha + eta * compute_grad(X_expanded[ind, :], y[ind], w)
    w = w - nu

visualize(X, y, w, loss_history)
plt.clf()
# -

# ## ADAM(2 балла)
# Реализуйте метод ADAM, использующий градиенты и квадраты градиентов сглаженные экспоненциальным
# скользящим средним:
#
# \begin{eqnarray}
# m_t &=& \beta_1 m_{t-1} + (1-\beta_1) g_t\\
# s_t &=& \beta_2 s_{t-1} + (1-\beta_2) g_t^2 \\
# w_t &=& w_{t-1} - \eta \times \frac{\sqrt{ 1 - \beta_2^t}}{ 1 - \beta_1^t} \times \frac{ m_t }{ \sqrt{s_t+eps}}
# \end{eqnarray}

# +
np.random.seed(42)
w = np.array([0, 0, 0, 0, 0, 1.])

m = np.zeros(w.shape)  # first-moment (gradient) moving average
s = np.zeros(w.shape)  # second-moment (squared-gradient) moving average
eta = 0.1  # learning rate
beta_1 = 0.9  # moving average of gradient
beta_2 = 0.999  # moving average of gradient norm squared
eps = 1e-8
# removed: the template's unused `g2 = None` placeholder -- m and s above
# hold the smoothed gradients instead.

n_iter = 100
batch_size = 4
loss_history = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = np.random.choice(X_expanded.shape[0], batch_size)
    loss_history[i] = compute_loss(X_expanded, y, w)
    if i % 10 == 0:
        visualize(X_expanded[ind, :], y[ind], w, loss_history)
    g = compute_grad(X_expanded[ind, :], y[ind], w)
    m = beta_1 * m + (1 - beta_1) * g
    s = beta_2 * s + (1 - beta_2) * g**2
    # bias-corrected ADAM step with t = i + 1
    w = w - eta * np.sqrt(1 - beta_2 ** (i + 1)) / (1 - beta_1 ** (i + 1)) * m / np.sqrt(s + eps)

visualize(X, y, w, loss_history)
plt.clf()
# -
introduction_to_deep_learning/hw_01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # pH-Absorbance Calibration with PyTorch
#
# In this notebook, we will be revisiting the data I collected in my UCLA undergrad Bioengineering
# capstone project. This data involves an absorbance based pH sensor (using an Arduino, LED, and phenol
# red indicator solution) for noninvasive monitoring of cell culture. By normalizing the voltage reading
# to that of phosphate buffered saline as blank, we obtained the Absorbance: $A = -\log \frac{I}{I_{PBS}}$.
#
# The theoretical equation relating pH to absorbance is then given by:
#
# \begin{equation}
# A = f(pH) = \frac{A_{max}}{1 + 10^{pK_{a} - pH}}
# \end{equation}
#
# This corresponds to a sigmoid curve from $0$ to $A_{max}$. We choose to add in an extra shape parameter
# $\phi$ to account for deviations from the theory and use the natural exponential:
#
# \begin{equation}
# A = f(pH) = \frac{A_{max}}{1 + e^{(pK_{a} - pH)/\phi}}
# \end{equation}
#
# Unlike say a typical logistic regression sigmoid, this sigmoid has parameters that need to be found via
# nonlinear least square optimization methods. The loss to be minimized is the mean squared error:
#
# \begin{equation}
# Loss(A_{max},pK_{a},\phi) = \frac{1}{n} \sum^{n}_{i=1} (A_i - \frac{A_{max}}{1 + e^{(pK_{a} - pH_{i})/\phi}})^{2}
# \end{equation}
#
# We also have some prior information from theory. It can be shown with algebra that Equation (2)
# simplifies to Equation (1) when $\phi = \frac{1}{\log(10)} \approx 0.4343$. Additionally the theoretical
# pKa of phenol red is $pK_{a} = 7.6$. In a frequentist sense, this prior knowledge can be used to add
# regularization terms. For $A_{max}$ we do not necessarily have prior information, but we do not want the
# maximum absorbance to be extremely high, and thus can regularize it toward 0. An L1 penalty (in this
# case it will simplify to absolute values) will be used to regularize these parameters and will penalize
# the deviation from these prior values:
#
# \begin{equation}
# Penalty(A_{max},pK_{a},\phi) = \lambda_{A_{max}} |A_{max}| + \lambda_{pK_{a}} |pK_{a} - 7.6| + \lambda_{\phi}|\phi - \frac{1}{\log(10)}|
# \end{equation}
#
# The minimization problem, with $\theta = (A_{max},pK_{a},\phi)$ then becomes:
#
# \begin{equation}
# \underset{\theta}{\arg\min} (Loss(\theta) + Penalty(\theta))
# \end{equation}
#
# ## Nonlinear Least Squares and Nonlinear Mixed Model
#
# This dataset consists of 4 Trials, and during the trial, the solution pH was adjusted by adding very
# small drops of concentrated HCl or NaOH to neglect volume changes. The absorbance was measured and
# calibrated to a standard pH sensor. However, the nature of the experiment leads to correlated data
# points within a given trial. **In this first section, we will investigate the dataset with standard
# built in methods**.
#
# We will fit NLS models from a wrapper calling R's nls() and (for comparison) scipy least_squares().
# These do not account for correlation. To account for correlation, a nonlinear mixed model (NLMM) must
# be used. This is done through a wrapper that calls R's nlmer() function from lme4 package.
#
# It is assumed that the only random effect is for $A_{max}$ and is normally distributed:
#
# \begin{equation}
# A_{max,Trial} \sim N(A_{max},\sigma_{A_{max}}^{2})
# \end{equation}
#
# The rpy2 package is used to communicate with R in order to use the wrappers found in the
# pHAbs_NLSNLMM.R file
#
# All of these are unregularized (beyond the trial-specific regularization toward the mean induced by
# random effects in the NLMM from nlmer())

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from plotnine import ggplot, geom_point, geom_line, aes
from scipy.stats import truncnorm
from scipy.optimize import least_squares
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter

# +
base = importr('base')
stats = importr('stats')
lme4 = importr('lme4')
ro.r['source']('pHAbs_NLSNLMM.R')
# -

data = pd.read_csv("Full_pHAbsdata.csv")
# NOTE(review): DataFrame.sample returns a NEW frame, so this line does not
# actually shuffle `data` -- assign the result if shuffling is intended
# (beware: that would also scramble the row order geom_line draws in below).
data.sample(frac=1)  # randomize row order for later

pH_data = data.pH.to_numpy()
ALED_data = data.ALED.to_numpy()

# +
with localconverter(ro.default_converter + pandas2ri.converter):
    NLSresult = ro.r.Fit_NLS(data)
    NLMMresult = ro.r.Fit_NLMM(data)
    # predict() returns fitted values in the same row order that was sent to R
    data["Ahat_NLS"] = np.array(stats.predict(NLSresult))
    data["Ahat_NLMM"] = np.array(stats.predict(NLMMresult))

(ggplot(data, aes('pH', 'ALED', color='factor(Trial)')) + geom_point()
 + geom_line(aes('pH', 'Ahat_NLMM', color='factor(Trial)'))
 + geom_line(aes('pH', 'Ahat_NLS'), inherit_aes=False))
# -

# The data and the fitted values from R's nls() and nlmer() (colored) are seen above. The dark curve
# represents the overall average relationship based on nls() while the different colored curves are the
# Trial-specific fits as calculated by nlmer() with a random effect on $A_{max}$. The differences in
# $A_{max}$ can be caused by differing optics between the trials, which would affect how the light enters
# the cuvette.

# ## Nonlinear Least Squares Results (R nls())

print(base.summary(NLSresult))

# According to R nls(), we find that $\hat{\theta} = (\hat{A_{max}},\hat{pK_{a}},\hat{\phi}) = (0.42,7.47,0.45)$
#
# The pKa is in agreement with the theory, although to assess this rigorously (and trust the SEs) we
# should use the mixed model approach. Before that, we will try scipy least_squares() next.

# +
def pHAbsfun(theta, pH, Aobs):
    """Residuals A(pH; theta) - Aobs for theta = (Amax, pKa, phi), per Equation (2)."""
    A = theta[0]/(1 + np.exp((theta[1] - pH)/(theta[2])))
    res = A - Aobs
    return res


def pHAbsdata_fun(theta):
    # named def instead of an assigned lambda (PEP 8); closes over the data arrays
    return pHAbsfun(theta, pH_data, ALED_data)


ls_result = least_squares(pHAbsdata_fun, [0.5, 7.6, 0.4])
# -

ls_result.x, ls_result.cost

# The results between R's nls() and scipy least_squares() are in agreement for the coefficient values.
#
# ## Nonlinear Mixed Effect Model (R nlmer())

print(base.summary(NLMMresult))

# Based on the above, we can compute a z-score for $pK_{a}$ and $\phi$ to compare them to 7.6 and
# 1/log(10) respectively:
#
# \begin{equation}
# |z_{pKa}| = |\frac{7.469-7.6}{0.015}| = 8.69 \\
# |z_{\phi}| = |\frac{0.4646 - 0.4343}{0.013}| = 2.33
# \end{equation}
#
# With a bonferroni correction for 2 tests assuming overall familywise error rate of $\alpha = 0.05$, the
# critical value for each test (per test $\alpha = 0.025$) occurs at $z_{crit} = 2.24$. Thus we reject
# both null hypotheses, and there is a significant difference obtained in our experiment vs the
# theoretical curve. However, this difference may not be practically significant, and as long as the
# results from our device are consistent, that is all that matters for calibrating the sensor.
#
# Based on the above parameters for the NLMM, we can also simulate more values to obtain a larger dataset
# for the later parts involving PyTorch:
#
# ## pH-Absorbance Simulation Functions

# +
def generate_pHAbs(n, Amax=0.43, pKa=7.47, phi=0.46, sd_e=0.025):
    """Simulate n (pH, ALED) pairs from Equation (2) plus Gaussian noise.

    pH is drawn from N(7.6, 2.2^2) truncated to the physical range [0, 14].
    """
    mean_pH, sd_pH = 7.6, 2.2
    min_pH, max_pH = 0, 14
    a, b = (min_pH - mean_pH)/sd_pH, (max_pH - mean_pH)/sd_pH
    pH = truncnorm.rvs(a, b, loc=mean_pH, scale=sd_pH, size=n)
    e = np.random.normal(loc=0, scale=sd_e, size=n)
    # bugfix: was Amax / (1 + np.exp(pKa - pH)/phi), i.e. phi divided the
    # exponential instead of the exponent -- Equation (2) (and pHAbsfun above)
    # require exp((pKa - pH)/phi).
    A = Amax / (1 + np.exp((pKa - pH)/phi)) + e
    simdf = pd.DataFrame({'pH': pH, 'ALED': A})
    return simdf


def generate_pHAbs_Trials(Trials, n, Amax=0.43, Asd=0.04, pKa=7.47, phi=0.46, sd_e=0.025):
    """Simulate `Trials` trials of n points each with a per-trial random Amax,
    matching the NLMM assumption A_max,Trial ~ N(Amax, Asd^2)."""
    Amaxes = np.random.normal(Amax, Asd, Trials)
    simdfall = []
    for i in range(Trials):
        simdf = generate_pHAbs(n=n, Amax=Amaxes[i], pKa=pKa, phi=phi, sd_e=sd_e)
        simdf['Trial'] = i + 1
        simdfall.append(simdf)
    simdfall = pd.concat(simdfall)
    return simdfall
# -

# # PyTorch pH-Absorbance Analysis
#
# ## pHAbsorbance Custom Layer
#
# Below, we implement a custom layer that contains the 3 parameters and outputs the absorbance values. A
# random initialization is used as follows for the parameters (we set reasonable values as if we have not
# seen the above standard analysis):
#
# \begin{equation}
# A_{max} \sim N(1,0.2^{2}) \\
# pK_{a} \sim N(7.6,0.5^{2}) \\
# \phi \sim N(0.5,0.1^{2}) \\
# \end{equation}
#
# Notice that nn.Parameter() needs to be used on the weights so that PyTorch optimizer later on knows
# these are the custom parameters of the layer. Additionally, in the pHAbsLayer custom layer we initialize
# regularizers to 0, and instead choose to configure them when the pHAbsModel containing the layer is
# instantiated.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader


class pHAbsLayer(nn.Module):
    """Custom pHAbs Layer: Amax/(1+e^(pKa-pH)/phi)

    Holds the three model parameters as one learnable weight vector
    [Amax, pKa, phi], randomly initialized from
    N(1, 0.2^2), N(7.6, 0.5^2) and N(0.5, 0.1^2) respectively.
    """
    def __init__(self):
        super().__init__()
        # numpy draw gives float64, matching the float64 inputs used later
        init = np.random.normal([1, 7.6, 0.5], [0.2, 0.5, 0.1])  # [Amax,pKa,phi]
        self.weights = nn.Parameter(torch.from_numpy(init))
        # per-parameter L1 regularization strengths; zero disables the penalty
        self.regularizer = torch.zeros(3, dtype=torch.float64)

    def forward(self, x):
        amax, pka, phi = self.weights[0], self.weights[1], self.weights[2]
        return amax / (1 + torch.exp((pka - x) / phi))


# ## pHAbsModel Model Class
#
# Now that the pHAbsLayer() custom layer is created, we can use it like any other layer within the actual model class. In this class, we will also leave the option to set hyperparameters.

# +
class pHAbsModel(nn.Module):
    """Model wrapping a single pHAbsLayer.

    The hyperparameters are the L1 regularization strengths for
    (Amax, pKa, phi), written into the layer's regularizer vector.
    """
    def __init__(self, lam_Amax=0, lam_pKa=0, lam_phi=0):
        super().__init__()
        self.f_pH = pHAbsLayer()
        for idx, lam in enumerate((lam_Amax, lam_pKa, lam_phi)):
            self.f_pH.regularizer[idx] = lam

    def forward(self, x):
        return self.f_pH(x)
# -

# ## pHAbs Dataset
#
# Below, we create the Dataset class for the data. In this case it is relatively simple, the __getitem__ method should return the features (just pH) and label at a certain index and the __len__ method should return the total length of the dataset.

class pHAbsDataset(Dataset):
    """Dataset of (pH, absorbance) pairs, each stored as an (n, 1) column."""
    def __init__(self, pH, Abs):
        self.pH = pH.reshape(-1, 1)
        self.Abs = Abs.reshape(-1, 1)

    def __len__(self):
        return len(self.pH)

    def __getitem__(self, idx):
        return self.pH[idx], self.Abs[idx]

# ## Loss Penalty
#
# As mentioned earlier in this notebook, we will be using an L1 penalty on the parameters' $\theta = (A_{max},pK_{a},\phi)$ deviations from $(0, 7.6, 0.43)$ respectively

def penalty(model):
    """L1 penalty on the deviation of (Amax, pKa, phi) from the prior
    (0, 7.6, 1/ln 10), weighted by the layer's regularizer vector."""
    layer = model.f_pH
    prior = torch.tensor([0.0, 7.6, 1 / np.log(10)], dtype=torch.float32)
    return (layer.weights - prior).abs().dot(layer.regularizer)

# ## Train and Test Loop
#
# Below we define the training and testing loop.
On the original dataset, we will use the full data to compare results to the first part and then later on will simulate data to compare train/test curves and effect of regularizers, etc. # + def train_loop(dataloader, model, loss_fn, optimizer): size = len(dataloader.dataset) for batch, (X, y) in enumerate(dataloader): # Compute prediction and loss pred = model(X) loss = loss_fn(pred, y) pen = penalty(model) pen_loss = loss + pen # Backpropagation optimizer.zero_grad() pen_loss.backward() #loss.backward() optimizer.step() if batch % 10 == 0: loss, current = loss.item(), batch * len(X) print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]") return(loss) def test_loop(dataloader, model, loss_fn): size = len(dataloader.dataset) test_loss, correct = 0, 0 with torch.no_grad(): for X, y in dataloader: pred = model(X) test_loss += loss_fn(pred, y).item() test_loss /= size print(f"Avg loss: {test_loss:>8f} \n") return(test_loss) # - # ## Train Model on full original data # # We now train the model on the full original data, and, since this dataset is small, we use a batch size of 91 which is all of the data. # # Additionally, the Adam optimizer with a learning rate of 0.01 is used below, the model is trained for 1000 epochs. No regularization is applied for this first time on the full data. 
# # For reference, we can extract the mean square error from the R nls() fit, which appears to be 0.00066 residNLS = np.array(stats.residuals(NLSresult)) np.mean(np.square(residNLS)) # + origdataset = pHAbsDataset(pH_data,ALED_data) origdataloader = DataLoader(origdataset,batch_size=91,shuffle=True) # + origmodel = pHAbsModel() learning_rate = 0.01 loss_fn = nn.MSELoss() optimizer = torch.optim.Adam(origmodel.parameters(), lr=learning_rate) # + tags=[] # %%capture epochs = 1000 loss_orig = np.zeros(epochs) Amax_orig = np.zeros(epochs) pKa_orig = np.zeros(epochs) phi_orig = np.zeros(epochs) for i in range(epochs): print(f"Epoch {i+1}\n-------------------------------") loss_orig[i] = train_loop(origdataloader, origmodel, loss_fn, optimizer) Amax_orig[i] = origmodel.f_pH.weights[0] pKa_orig[i] = origmodel.f_pH.weights[1] phi_orig[i] = origmodel.f_pH.weights[2] # - plt.plot(loss_orig,"r-") plt.title("Loss vs Epochs") plt.ylabel("MSE Loss") plt.xlabel("Epochs") plt.show() loss_orig[-1] # The above final loss is the same as the loss obtained for R's nls() function on this dataset. The parameter weights are also almost exactly the same as obtained via nls(), and thus solving the NLS problem via PyTorch tools was a success. Below we can examine the parameter traces vs epochs as well: origmodel.f_pH.weights plt.plot(Amax_orig,"y-") plt.title("Amax vs Epochs") plt.ylabel("Amax") plt.xlabel("Epochs") plt.show() plt.plot(pKa_orig,"m-") plt.title("pKa vs Epochs") plt.ylabel("pKa") plt.xlabel("Epochs") plt.show() # The pKa trace is interesting in that at first it started at a low value 7.35 and increased for some time until 7.85 before it started decreasing. plt.plot(phi_orig,"r-") plt.title("phi vs Epochs") plt.ylabel("phi") plt.xlabel("Epochs") plt.show() # The trace for the $\phi$ parameter shows an peak as well, although shorter indicating that at first this parameter was increasing briefly before it settled on the final value. 
This whole time, the loss was still decreasing, however. # ## Experiment with regularization on original data # # Below, we experiment with some regularization on the original data. The regularization parameters are $\lambda = (0.0001,0.001,0.01)$ for $(A_{max},pK_{a},\phi)$ respectively # # # + # %%capture origmodelreg = pHAbsModel(lam_Amax=0.0001,lam_pKa=0.001,lam_phi=0.01) learning_rate = 0.01 loss_fn = nn.MSELoss() optimizer = torch.optim.Adam(origmodelreg.parameters(), lr=learning_rate) epochs = 1000 loss_origreg = np.zeros(epochs) for i in range(epochs): print(f"Epoch {i+1}\n-------------------------------") loss_origreg[i] = train_loop(origdataloader, origmodelreg, loss_fn, optimizer) # - print(loss_origreg[-1]) origmodelreg.f_pH.weights # As seen above, the parameters are closer to the prior values that were mentioned in the beginning of this notebook. Thus the regularization has worked. # # We now move on to simulated data where we can also investigate the train-val curves to investigate phenomenon such as early stopping. # # ## Simulated Data # # Below, we simulate 100 Trials with 100 points each for both a Training and Validation set. The true parameters in the training set are set to $A_{max,true} = 0.43,~~ pK_{a,true} = 7.47,~~ \phi_{true} = 0.46$. # # To examine how distribution shift may affect the training/val curves, the true parameters in the validation set are set to $A_{max,true} = 0.40,~~ pK_{a,true} = 7.52,~~ \phi_{true} = 0.48$. 
# # The noise in the absorbance value is $\epsilon \sim N(0, 0.025^{2})$ # + np.random.seed(100) TrainSim = generate_pHAbs_Trials(Trials=100,n=100) np.random.seed(10) ValSim = generate_pHAbs_Trials(Trials=100,n=100,Amax=0.40,pKa=7.52,phi=0.48) # - pH_Train, Abs_Train = TrainSim.pH.to_numpy(), TrainSim.ALED.to_numpy() pH_Val,Abs_Val = ValSim.pH.to_numpy(), ValSim.ALED.to_numpy() # + TrainDS = pHAbsDataset(pH_Train,Abs_Train) ValDS = pHAbsDataset(pH_Val,Abs_Val) TrainLoader = DataLoader(TrainDS,batch_size=100,shuffle=True) ValLoader = DataLoader(ValDS,batch_size=100,shuffle=True) # + # %%capture sim_model = pHAbsModel() learning_rate = 0.01 loss_fn_train = nn.MSELoss() loss_fn_val = nn.MSELoss(reduction="sum") #because test loop divides in the end optimizer = torch.optim.Adam(sim_model.parameters(), lr=learning_rate) epochs = 1000 loss_simtrain = np.zeros(epochs) loss_simval = np.zeros(epochs) for i in range(epochs): print(f"Epoch {i+1}\n-------------------------------") loss_simtrain[i] = train_loop(TrainLoader, sim_model, loss_fn_train, optimizer) loss_simval[i] = test_loop(ValLoader,sim_model,loss_fn_val) # - plt.plot(loss_simtrain,"b-") plt.plot(loss_simval,"r-") plt.legend(["Train","Val"]) plt.title("Loss vs Epochs") plt.ylabel("MSE Loss") plt.xlabel("Epochs") plt.show() sim_model.f_pH.weights # + final_losstrain = loss_simtrain[-1] final_lossval = loss_simval[-1] print(f"The final training Loss is: {final_losstrain:.5f} and final validation Loss is: {final_lossval:.5f}") # - # This time, the $pK_{a} = 8.27,~~\phi = 1.01$ which are far from the true parameter values. We can check the answer with the wrapper from R's nls(), which confirms that this is just a result of the data obtained. The good news is that the validation loss and training loss are still about the same. The slight distribution shift did not appear to affect the results too much in this case. 
# + with localconverter(ro.default_converter + pandas2ri.converter): NLSTrainresult = ro.r.Fit_NLS(TrainSim) print(base.summary(NLSTrainresult)) # - # ## With Regularization # # Now we will try the same thing as above with regularization and determine whether this ends up having a better test error. The same regularization parameters as earlier will be used. Ideally, cross validation or other hyperparameter selection methods would be used. # + tags=[] # %%capture sim_modelreg = pHAbsModel(lam_Amax=0.0001,lam_pKa=0.001,lam_phi=0.01) learning_rate = 0.01 loss_fn_train = nn.MSELoss() loss_fn_val = nn.MSELoss(reduction="sum") #because test loop divides in the end optimizer = torch.optim.Adam(sim_modelreg.parameters(), lr=learning_rate) epochs = 1000 loss_simtrain = np.zeros(epochs) loss_simval = np.zeros(epochs) for i in range(epochs): print(f"Epoch {i+1}\n-------------------------------") loss_simtrain[i] = train_loop(TrainLoader, sim_modelreg, loss_fn_train, optimizer) loss_simval[i] = test_loop(ValLoader,sim_modelreg,loss_fn_val) # - plt.plot(loss_simtrain,"b-") plt.plot(loss_simval,"r-") plt.legend(["Train","Val"]) plt.title("Loss vs Epochs") plt.ylabel("MSE Loss") plt.xlabel("Epochs") plt.show() # + final_losstrain = loss_simtrain[-1] final_lossval = loss_simval[-1] print(f"The final training Loss is: {final_losstrain:.5f} and final validation Loss is: {final_lossval:.5f}") # - # In this case, the regularization resulted in both worse training and test error, this indicates we are over-regularizing the parameters. 
In the next run, the regularizers will be decreased # + # %%capture sim_modelreg = pHAbsModel(lam_Amax=1e-5,lam_pKa=1e-5,lam_phi=1e-3) learning_rate = 0.01 loss_fn_train = nn.MSELoss() loss_fn_val = nn.MSELoss(reduction="sum") #because test loop divides in the end optimizer = torch.optim.Adam(sim_modelreg.parameters(), lr=learning_rate) epochs = 1000 loss_simtrain = np.zeros(epochs) loss_simval = np.zeros(epochs) for i in range(epochs): print(f"Epoch {i+1}\n-------------------------------") loss_simtrain[i] = train_loop(TrainLoader, sim_modelreg, loss_fn_train, optimizer) loss_simval[i] = test_loop(ValLoader,sim_modelreg,loss_fn_val) # + final_losstrain = loss_simtrain[-1] final_lossval = loss_simval[-1] print(f"The final training Loss is: {final_losstrain:.5f} and final validation Loss is: {final_lossval:.5f}") # - # With a new less conservative choice of hyperparameters after experimenting, the training loss is higher as expected, and the validation loss is ever so slightly lower.
Torch_NLS_pHAbs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Finding data gaps # # Often we want to find gaps in the data. # For example in energy analysis, we want to identify periods of energy unavailability which may be shown by a lack of data or by a prolonged period of zero voltage in a timeseries. # # We will show two basic approaches # # - Finding gaps in the reported data # - Identifying continuous stretches of data meeting some criteria # + # finding gaps method # %matplotlib inline import pandas as pd # read in data and be sure to load dates properly data = pd.read_csv('EVI0000111.csv', index_col=0, parse_dates=True) data.head() # - # The key analysis step here is to subtract the preceding time value from each time value. # This will give you an array of data with the differences between measurements. # If your data is perfect and consistent, these will all be at the sampling interval. # If there are data outages, there will be measurements at longer intervals. # # The `diff(1)` method achieves this. # (Note that we have to use the `Series` code to get this to work.) # take time deltas with diff function time_deltas = pd.Series(data.index).diff(1) time_deltas.head() # Once we have this long list (30 thousand entries) of time differences, we need a way to intepret the data in a way we can manage. # One useful way is the `value_counts()` method which you can think of as reminiscent of a histogram. # For each time difference that is in the array, the computer will output how many times that difference occurred. time_deltas.value_counts() # Now, we want to only consider the time differences greater than a certain size since these are the important lapses in data. # For this example, we restrict our interest to differences greater than thirty minutes and use value_counts to make a list. 
# filter out short time_deltas time_deltas[time_deltas > '00:30:00'].value_counts() # To quantify this, we can use the `sum()` method on this filtered list. # This will give us the total time that has large gaps of data. # sum up time_deltas time_deltas[time_deltas > '00:30:00'].sum()
data-analysis/finding-data-gaps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="uDI0ZLrS9jAX" # <img src="https://s8.hostingkartinok.com/uploads/images/2018/08/308b49fcfbc619d629fe4604bceb67ac.jpg" width=500, height=450> # <h3 style="text-align: center;"><b>Физтех-Школа Прикладной математики и информатики (ФПМИ) МФТИ</b></h3> # + [markdown] colab_type="text" id="k0ygS84T9jAY" # --- # + [markdown] colab_type="text" id="HnjQZLuC9jAY" # <h2 style="text-align: center;"><b>Перцептрон Розенблатта <br><br>(нейрон с пороговой функцией активации)</b></h2> # + [markdown] colab_type="text" id="543-uGN-9jAZ" # --- # + [markdown] colab_type="text" id="1JBsLVMI9jAa" # В данном ноутбуке Вам нужно будет: # # - самостоятельно реализовать класс **`Perceptron()`** -- нейрон пороговой функцией активации # - обучить и протестировать Ваш перцептрон на сгенерированных и реальных данных (файлы с реальными данными помещены в папку /data в этой же директории) # - сравнить качество работы Вашего класса с классом из библиотеки `scikit-learn` (`sklearn.linear_model.Perceptron()`) # + [markdown] colab_type="text" id="cOAHk8eO9jAb" # <h2 style="text-align: center;"><b>Введение</b></h2> # + [markdown] colab_type="text" id="bF22tUW79jAc" # Почти любой алгоритм машинного обучения, решающий задачу *классификации* или *регрессии*, работает так: # # 1. (*стадия инициализации*) Задаются его **гиперпараметры**, то есть те величины, которые не "выучиваются" алгоритмом в процессе обучения самостоятельно # 2. (*стадия обучения*) Алгоритм запускается на данных, **обучаясь** на них и меняя свои **параметры** (не путать с *гипер*параметрами) каким-то определённым образом (например, с помощью *метода градиентного спуска* или *метода коррекции ошибки*), исходя из функции потерь (её называют *loss function*). 
Функция потерь, по сути, говорит, где и как ошибается модель # 3. (*стадия предсказания*) Модель готова, и теперь с помощью неё можно делать **предсказания** на новых объектах # + colab={} colab_type="code" id="3hxoVvmN9jAd" from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap # тут лежат разные штуки для цветовой магии import numpy as np import pandas as pd # + [markdown] colab_type="text" id="jHd4CZjS9jAg" # <h2 style="text-align: center;"><b>Класс Perceptron</b></h2> # + [markdown] colab_type="text" id="PObIs0OB9jAh" # В даном разделе будет решаться задача **бинарной классификации** с помощью перцептрона: # - *Входные данные*: матрица $X$ размера $(n, m)$ и столбец $y$ из нулей и единиц размера $(n, 1)$. Строкам матрицы соответствуют объекты, столбцам - признаки (то есть строка $i$ есть набор признаков (*признаковое описание*) объекта $X_i$). # - *Выходные данные*: столбец $\hat{y}$ из нулей и единиц размера $(n, 1)$ - предсказания алгоритма. # + [markdown] colab_type="text" id="wkd_24Zr9jAi" # Модель нейрона в биологии и в deep learning: # # ![title](http://lamda.nju.edu.cn/weixs/project/CNNTricks/imgs/neuron.png) # + [markdown] colab_type="text" id="TwRqMBVPcy0j" # \**картинка из http://cs231n.github.io/neural-networks-1/* # + [markdown] colab_type="text" id="82qIny-49jAi" # Чтобы понять, как мы будем обновлять параметры модели (веса), нужно знать, какую функцию потерь мы оптимизируем (находим минимум). 
В данном случае мы решаем задачу бинарной классификации (2 класса: 1 или 0), возьмём в качестве функции потерь среднеквадратичную ошибку: # # $$Loss(w, x) = \frac{1}{2n}\sum_{i=1}^{n} (\hat{y_i} - y_i)^2 = \frac{1}{2n}\sum_{i=1}^{n} (f(w \cdot X_i) - y_i)^2$$ # # Здесь $w \cdot X_i$ - скалярное произведение, а $f(w \cdot X_i)$ - пороговая функция: # # $$ # f(z) = # \begin{cases} # 1, &\text{если } w \cdot X_i > 0 \\ # 0, &\text{если } w \cdot X_i \le 0 # \end{cases} # $$ # # **Примечание:** В формуле предполагается, что $b$ - свободный член - является частью вектора весов: $w_0$. Тогда, если к $X$ приписать слева единичный столбец, в скалярном произведении $b$ будет именно как свободный член (лучше распишите это -- станет понятнее). При реализации класса `Perceptron()` $b$ нужно считать отдельно (чтобы было нагляднее). # + [markdown] colab_type="text" id="6wGYvpsv9jAj" # ** Реализуйте функцию потерь $Loss$: ** # + colab={} colab_type="code" id="KIMPzh0B9jAk" def Loss(y_pred, y): return # Ваш код здесь # + [markdown] colab_type="text" id="5QrUrljB9jAn" # Поскольку у *пороговой функции* не существует производной (вы её график видели? Выглядит он, конечно, простым, но производная таких не любит), то мы не можем использовать градиентный спуск, ведь: # # # # $$ \frac{\partial Loss}{\partial w} = \frac{1}{n} X^T\left(f(w \cdot X) - y\right)f'(w \cdot X)$$ # # где $f^{'}(w \cdot X)$ - в точке 0 посчитать не получится. Но ведь хочется как-то обновлять веса, иначе как обучить алгоритм отличать груши от яблок? # # Поэтому предлагается обновлять так: # # $$w^{j+1} = w^{j} - \alpha\Delta{w^{j}}$$ # # где: # # $$\Delta{w} = \frac{1}{n}X^T(\hat{y} - y) = \frac{1}{n}X^T(f(w^j \cdot X) - y)$$ # # (не забудьте, что при $w_0 = b$ признак $x_0$ = 1), где $w \cdot X$ - матричное произведение столбца весов $w$ на матрицу объектов-признаков $X$, а индекс $j$ -- номер итерации градиентного спуска. 
# # Это правило является неким частным случаем градиентного спуска для данного случая (*[правило Хебба](https://ru.wikipedia.org/wiki/%D0%94%D0%B5%D0%BB%D1%8C%D1%82%D0%B0-%D0%BF%D1%80%D0%B0%D0%B2%D0%B8%D0%BB%D0%BE)*, *[метод коррекции ошибки](https://ru.wikipedia.org/wiki/%D0%9C%D0%B5%D1%82%D0%BE%D0%B4_%D0%BA%D0%BE%D1%80%D1%80%D0%B5%D0%BA%D1%86%D0%B8%D0%B8_%D0%BE%D1%88%D0%B8%D0%B1%D0%BA%D0%B8)*). # + [markdown] colab_type="text" id="Zrm01BR69jAo" # Теперь, вооружившись всеми формулами и силой духа, нужно написать свой класс **`Perceptron()`**. Уже есть код класса и немного кода реализации. По-максимуму используйте **Numpy** при реализации, т.к. будет проверяться и скорость работы Вашего алгоритма. # # *Примечание*: В коде ниже `y_pred` - это $\hat{y}$ из формул выше # + colab={"base_uri": "https://localhost:8080/", "height": 132} colab_type="code" id="rLBDN2G89jAo" outputId="a9b7d64e-e7fe-4ce4-ee46-608991580501" executionInfo={"status": "error", "timestamp": 1539420667947, "user_tz": -300, "elapsed": 761, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u041b\u0435\u043b\u0435\u0439\u0442\u043d\u0435\u0440", "photoUrl": "", "userId": "07179937308049589303"}} class Perceptron: def __init__(self, w=None, b=0): """ :param: w -- вектор весов :param: b -- смещение """ # Пока что мы не знаем размер матрицы X, а значит не знаем, сколько будет весов self.w = w self.b = b def activate(self, x): return x > 0 def forward_pass(self, X): """ Эта функция рассчитывает ответ перцептрона при предъявлении набора объектов :param: X -- матрица объектов размера (n, m), каждая строка - отдельный объект :return: вектор размера (n, 1) из нулей и единиц с ответами перцептрона """ n = X.shape[0] y_pred = np.zeros((n, 1)) # y_pred(icted) - предсказанные классы # Ваш код здесь return y_pred def backward_pass(self, X, y, y_pred, learning_rate=0.005): """ Обновляет значения весов перцептрона в соответствие с этим объектом :param: X -- матрица объектов размера (n, m) y 
-- вектор правильных ответов размера (n, 1) learning_rate - "скорость обучения" (символ alpha в формулах выше) В этом методе ничего возвращать не нужно, только правильно поменять веса с помощью градиентного спуска. """ # Ваш код здесь def fit(self, X, y, num_epochs=300): """ Спускаемся в минимум :param: X -- матрица объектов размера (n, m) y -- вектор правильных ответов размера (n, 1) num_epochs -- количество итераций обучения :return: Loss_values -- вектор значений функции потерь """ self.w = np.zeros((X.shape[1], 1)) # столбец (m, 1) self.b = 0 # смещение (свободный член) losses = [] # значения функции потерь на различных итерациях обновления весов for i in range(num_epochs): # Ваш код здесь return losses # + [markdown] colab_type="text" id="XlWXLoHQ9jAr" # Класс готов. Посмотрим, правильно ли ведёт себя Ваш перцептрон. Далее идут несколько ячеек с тестовым кодом, Вам нужно просто запустить их и проверить, чтобы результаты запуска совпадали с соответствующими числами из таблиц: # + [markdown] colab_type="text" id="GnrccB6H9jAs" # **Проверка forward_pass():** # + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="9LZgkcHv9jAt" outputId="dc10527e-b3ab-4065-ff8a-20bb5df39b4e" executionInfo={"status": "error", "timestamp": 1539420677714, "user_tz": -300, "elapsed": 616, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u041b\u0435\u043b\u0435\u0439\u0442\u043d\u0435\u0440", "photoUrl": "", "userId": "07179937308049589303"}} w = np.array([1., 2.]).reshape(2, 1) b = 2. 
X = np.array([[1., 2., -1.], [3., 4., -3.2]]) perceptron = Perceptron(w, b) y_pred = perceptron.forward_pass(X.T) print ("y_pred = " + str(y_pred)) # + [markdown] colab_type="text" id="RlPOE9ia9jAv" # |Должно быть|| # |------|-------| # |**y_pred**|[1, 1, 0]| # + [markdown] colab_type="text" id="1rgBqV9D9jAv" # **Проверка backward_pass():** # + colab={} colab_type="code" id="9RkAnK0P9jAw" y = np.array([1, 0, 1]).reshape(3, 1) # + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="Be7OJp8c9jA1" outputId="bc1d3643-d2f1-4024-dea3-d98cc9c50ff1" executionInfo={"status": "error", "timestamp": 1539420682215, "user_tz": -300, "elapsed": 619, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u041b\u0435\u043b\u0435\u0439\u0442\u043d\u0435\u0440", "photoUrl": "", "userId": "07179937308049589303"}} perceptron.backward_pass(X.T, y, y_pred) print ("w = " + str(perceptron.w)) print ("b = " + str(perceptron.b)) # + [markdown] colab_type="text" id="nNThF8MT9jA4" # |Должно быть|| # |-|-| # |**w**| [[ 0.995], [1.988]] | # |**b**| 2.0 | # + [markdown] colab_type="text" id="EDjsmpZp9jA5" # Посмотрим, как меняется функция потерь в течение процесса обучения на реальных данных - датасет "Яблоки и Груши": # + colab={"base_uri": "https://localhost:8080/", "height": 878} colab_type="code" id="aPzhL2L99jA5" outputId="ad70b9a6-f5a3-4c7a-cf5b-4adc7c725f22" executionInfo={"status": "error", "timestamp": 1539420685187, "user_tz": -300, "elapsed": 711, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u041b\u0435\u043b\u0435\u0439\u0442\u043d\u0435\u0440", "photoUrl": "", "userId": "07179937308049589303"}} data = pd.read_csv("./data/apples_pears.csv") # + colab={} colab_type="code" id="q7cWGg5S9jA7" data.head() # + colab={} colab_type="code" id="V6T8WK2w9jA-" plt.figure(figsize=(10, 8)) plt.scatter(data.iloc[:, 0], data.iloc[:, 1], c=data['target'], cmap='rainbow') plt.title('Яблоки и груши', fontsize=15) 
plt.xlabel('симметричность', fontsize=14) plt.ylabel('желтизна', fontsize=14) plt.show(); # + [markdown] colab_type="text" id="JYSpUvQM9jBE" # **Вопрос:** Какой класс соответствует яблокам (какого они цвета на графике)? # + [markdown] colab_type="text" id="MO0fW5R29jBF" # **Ответ:** <Ваш ответ> # + [markdown] colab_type="text" id="X6m0IAdu9jBF" # Обозначим, что здесь признаки, а что - классы: # + colab={} colab_type="code" id="ARYN13Io9jBG" X = data.iloc[:,:2].values # матрица объекты-признаки y = data['target'].values.reshape((-1, 1)) # классы (столбец из нулей и единиц) # + [markdown] colab_type="text" id="MRCQeKtH9jBI" # **Вывод функции потерь** # Функция потерь должна убывать и в итоге стать близкой к 0 # + colab={} colab_type="code" id="sIR0g6mQ9jBJ" # %%time perceptron = # Ваш код здесь losses = # Ваш код здесь plt.figure(figsize=(10, 8)) plt.plot(losses) plt.title('Функция потерь', fontsize=15) plt.xlabel('номер итерации', fontsize=14) plt.ylabel('$Loss(\hat{y}, y)$', fontsize=14) plt.show() # + [markdown] colab_type="text" id="gnzSyV_j9jBN" # Посмотрим, как перцептрон классифицировал объекты из выборки: # + colab={} colab_type="code" id="bNhsJbuY9jBO" plt.figure(figsize=(10, 8)) plt.scatter(data.iloc[:, 0], data.iloc[:, 1], c=perceptron.forward_pass(X).ravel(), cmap='spring') plt.title('Яблоки и груши', fontsize=15) plt.xlabel('симметричность', fontsize=14) plt.ylabel('желтизна', fontsize=14) plt.show(); # + [markdown] colab_type="text" id="aE8iMuT39jBQ" # <h3 style="text-align: center;"><b>Предсказание пола по голосу</b></h3> # + [markdown] colab_type="text" id="hQHD1i1W9jBR" # В этой задаче нужно сравнить качество работы Вашего перцептрона и алгоритма из библиотеки `sklearn` на датасете с сайта [Kaggle](https://www.kaggle.com) - [Gender Recognition by Voice](https://www.kaggle.com/primaryobjects/voicegender). В данном датасете в качестве признаков выступают различные звуковые характеристики голоса, а в качестве классов - пол (мужчина/женщина). 
Подробнее о самих признаках можно почитать [на странице датасета](https://www.kaggle.com/primaryobjects/voicegender) (на английском). Нашей целью пока что является просто протестировать на этих данных два алгоритма. # + [markdown] colab_type="text" id="2duv7On99jBR" # **! Обратите внимание на имя функции из sklearn - skPerceptron** (это сделано, чтобы не совпадало с именем вашего класса) # + colab={} colab_type="code" id="YaLaxBHR9jBS" import pandas as pd from sklearn.linear_model import Perceptron as skPerceptron from sklearn.metrics import accuracy_score # + colab={} colab_type="code" id="IaNjHU7Q9jBU" data_path = './data/voice.csv' data = pd.read_csv(data_path) data['label'] = data['label'].apply(lambda x: 1 if x == 'male' else 0) # + colab={} colab_type="code" id="eU1EZFzM9jBW" data.head() # + colab={} colab_type="code" id="QCSK3sfX9jBY" # Чтобы перемешать данные. Изначально там сначала идут все мужчины, потом все женщины data = data.sample(frac=1) # + colab={} colab_type="code" id="VKY1jHT79jBZ" X_train = data.iloc[:int(len(data)*0.7), :-1] # матрица объекты-признаки y_train = data.iloc[:int(len(data)*0.7), -1] # истинные значения пола (мужчина/женщина) X_test = data.iloc[int(len(data)*0.7):, :-1] # матрица объекты-признаки y_test = data.iloc[int(len(data)*0.7):, -1] # истинные значения пола (мужчина/женщина) # + [markdown] colab_type="text" id="DDsWavYZ9jBe" # Тут нужно натренировать Ваш перцептрон и перцептрон из `sklearn` на этих данных: # + colab={} colab_type="code" id="4Z8Rh-bu9jBf" # Ваш код здесь # + [markdown] colab_type="text" id="3qsolz149jBh" # Сравним доли правильных ответов (на тестовых данных): # + colab={} colab_type="code" id="l6R3cXLO9jBi" print('Точность (доля правильных ответов, из 100%) моего перцептрона: {:d}'.format(accuracy_score(<Ваш код здесь>) * 100)) print('Точность (доля правильных ответов) перцептрона из sklearn: {:.1f} %'.format(accuracy_score(<Ваш код здесь>) * 100)) # + [markdown] colab_type="text" id="CSasTfW09jBj" # 
**Вопрос:** Хорошее ли качество показывает перцептрон? Как Вы думаете, почему? Можете писать любые мысли на этот счёт. # + [markdown] colab_type="text" id="uWaJ-92j9jBj" # **Ответ:**<Ваш ответ> # + [markdown] colab_type="text" id="yyX2G7VvdvMC" # ### Важно # + [markdown] colab_type="text" id="sTZFLUqFdv57" # Стоит понимать, что перцептрон сам по себе не используется в приложениях. Мы продемонстрровали его вам, чтобы вы знали, с чего всё начиналось. На самом деле это просто один нейрон с пороговой функцией активации, который не используется в многослойных нейросетях и каких-либо прикладных задачах, но всё же является хорошим учебным примером, помогающим понять то, как обновляются веса в соответствие с ошибками и перейти к рассмотрению более полезных моделей (нейронов с другими функциями активации). # + [markdown] colab_type="text" id="MC0FNrfq9jBl" # <h2 style="text-align: center;"><b>Полезные ссылки</b></h2> # + [markdown] colab_type="text" id="AZLMhq149jBl" # 1). Lecture Notes Стэнфордского университета: http://cs231n.github.io/neural-networks-1/ # 2). [Википедия про перцептрон](https://ru.wikipedia.org/wiki/%D0%9F%D0%B5%D1%80%D1%86%D0%B5%D0%BF%D1%82%D1%80%D0%BE%D0%BD)
deep-learning-school/[5]oop_neuron/seminar/old_seminar/[seminar]perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''venv-matematicas'': conda)' # language: python # name: python3 # --- # + cell_id="00000-aa448ce4-2f18-4801-9efa-d91c528d0b98" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1730 execution_start=1620315492042 source_hash="9d42f5eb" tags=[] import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler # escalamiento estandar iris = sns.load_dataset('iris') # + cell_id="00001-ec6ac4a7-7f4e-41bc-a9ea-173ba867097c" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=6459 execution_start=1620315610337 output_cleared=false source_hash="b0e5893e" tags=[] sns.pairplot(iris) # - sns.pairplot(iris, hue='species') # + cell_id="00002-62f1bdb1-04ab-4ee6-bb0a-7d335816a2dd" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=26 execution_start=1620315751142 output_cleared=true source_hash="b6eeb99b" tags=[] scaler = StandardScaler() scaled = scaler.fit_transform( iris[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']] ) scaled.T # + cell_id="00003-c5ce7c32-8424-49b1-82c1-8e5910b0308b" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=4 execution_start=1620315812926 output_cleared=false source_hash="d011a353" tags=[] covariance_matrix = np.cov(scaled.T) covariance_matrix # + cell_id="00004-2a2c877a-1b7d-498b-bb89-6717086dad13" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=310 execution_start=1620315889635 output_cleared=false source_hash="7e79eb48" tags=[] plt.figure(figsize=(10,10)) sns.set(font_scale=1.5) hm = sns.heatmap(covariance_matrix, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 12}, yticklabels=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'], 
xticklabels=['sepal_length', 'sepal_width', 'petal_length', 'petal_width']) # + [markdown] created_in_deepnote_cell=true deepnote_cell_type="markdown" tags=[] # <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=3f569948-e9a8-454f-8f4d-a27aa1610a87' target="_blank"> # <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,<KEY> > </img> # Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
estadisticas-descriptivas/clases_notebooks/[clase-19]matriz-covarianza.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # Analyzing and Visualizing Data from Foxglove Data Platform
#
# **[Foxglove Data Platform](https://foxglove.dev/data-platform) is a scalable platform for organizing and managing your team's robotics data.** You can log in to [its web console interface](https://console.foxglove.dev) to upload data, tag events of interest, and query data for a given robot and time range, even if that data spans multiple recording sessions.
#
# In this notebook, **we'll demonstrate how to retrieve messages from Data Platform and process them for insights.** We'll be using self-driving car data from the [nuScenes dataset](https://www.nuscenes.org/nuscenes), and writing Python code to visualize its route, IMU acceleration, and perceived objects.

# +
# install some dependencies
# %pip install --index-url https://rospypi.github.io/simple genpy > /dev/null
# %pip install mcap-ros1-support foxglove-data-platform pandasql > /dev/null
# -

# ### Loading data
#
# The first step in our analysis is loading some data. We'll use the foxglove data platform client library.

# +
from foxglove_data_platform.client import Client

# Read-only public key for demonstration purposes
client = Client(token="<KEY>")
# -

# To query for data, we need to know a device id and some time range. The get_coverage method shows us available devices and data for 2018.

# +
from datetime import datetime
import pandas as pd

coverage = client.get_coverage(start=datetime(2018, 1, 1), end=datetime(2019,1,1))
coverage = sorted(coverage, key=lambda c: c['start'])
pd.DataFrame(coverage).head()
# -

# Fetch GPS messages for the first entry from our previous coverage request. We could fetch data across any start/end range and device.
#
# We limit our data to the `/gps` topic since that's all we need for our analysis.

# NOTE(review): the prose says "first entry" but coverage[1] is used here
# (and in the mapping cell below) — presumably the intended recording;
# confirm against the coverage table output.
gps_messages = [
    (message.latitude, message.longitude)
    for topic, record, message in client.get_messages(
        device_id=coverage[1]["device_id"],
        start=coverage[1]["start"],
        end=coverage[1]["end"],
        topics=["/gps"],
    )
]
pd.DataFrame(gps_messages, columns=["lat", "lon"]).head()

# We now know the basics to loading our data. Next, we can analyze the data using existing jupyter notebook tools and practices.

# ### Mapping the route
#
# Let's see the route that our car took by plotting its GPS coordinates on a map.
#
# We'll load data from on August 1, 2018 (`coverage[1]`) and use list comprehension to convert our messages into a list of tuples that we can insert into a `pandas` dataframe.

gps_messages = [
    (message.latitude, message.longitude)
    for topic, record, message in client.get_messages(
        device_id=coverage[1]["device_id"],
        start=coverage[1]["start"],
        end=coverage[1]["end"],
        topics=["/gps"],
    )
]

# **TIP**: We recommend splitting your data fetching and processing into separate cells. This lets you iterate on your analysis without re-downloading the data.

# +
import folium

figure = folium.Figure(width=640, height=480)
map = folium.Map(location=gps_messages[0], zoom_start=200, width="100%")
folium.PolyLine(
    locations=gps_messages,
    weight=10,
    color="purple",
).add_to(map)
map.add_to(figure)
# -

# We can see that on August 1, 2018, our self-driving car navigated a stretch of Congress Street in Boston.

# ### Plotting IMU acceleration
#
# For our first analysis, we focused on just one recorded drive. For this analysis, let's fetch messages across a longer time range to plot our robot's acceleration across all 2018 drives.
#
# We can take advantage of Data Platform's ability to fetch messages across multiple recording sessions by specifying the time range we want in our `get_messages` call.

# Spans from the first coverage entry's start to the last entry's end —
# i.e. every 2018 recording at once.
imu_messages = [
    {
        "time": pd.Timestamp(message.header.stamp.to_nsec(), unit="ns").isoformat(),
        "accel_x": message.linear_acceleration.x,
        "accel_y": message.linear_acceleration.y,
    }
    for topic, record, message in client.get_messages(
        device_id=coverage[0]["device_id"],
        start=coverage[0]["start"],
        end=coverage[-1]["end"],
        topics=["/imu"],
    )
]

pd.DataFrame(imu_messages).plot(x="time", figsize=(10, 6), rot=45);

# From the output above, we can see how our robot's x and y acceleration fluctuated throughout its 2018 drives.

# ### Classifying perceived object markers
#
# Finally, let's classify the perceived object markers that our self-driving car published while on the road.
#
# We'll again use one specific time range – `coverage[1]` – and query its `/markers/annotations` topic messages.

marker_messages = client.get_messages(
    device_id=coverage[1]["device_id"],
    start=coverage[1]["start"],
    end=coverage[1]["end"],
    topics=["/markers/annotations"],
)

# In our dataset, each marker color corresponds to a specific _classification_. We'll use the marker color to lookup the classification and group markers by classification.

# +
import matplotlib as mpl
from pandasql import sqldf

# NOTE(review): this dict contains duplicate keys — "#f08080" appears for
# both construction_worker and stroller, and "#ffd700" for both ambulance
# and police. Python keeps only the LAST entry for a duplicated key, so
# the earlier classifications can never be produced. The intended distinct
# colors should be confirmed against the nuScenes color map.
color_to_classname = {
    "#000000": "noise",
    "#468250": "animal",
    "#0000e6": "human.pedestrian.adult",
    "#87ceeb": "human.pedestrian.child",
    "#f08080": "human.pedestrian.construction_worker",  # NOTE(review): overwritten by the stroller entry below
    "#db7093": "human.pedestrian.personal_mobility",
    "#000080": "human.pedestrian.police_officer",
    "#f08080": "human.pedestrian.stroller",
    "#8a2be2": "human.pedestrian.wheelchair",
    "#708090": "movable_object.barrier",
    "#d2691e": "movable_object.debris",
    "#696969": "movable_object.pushable_pullable",
    "#2f4f4f": "movable_object.trafficcone",
    "#bc8f8f": "static_object.bicycle_rack",
    "#dc143c": "vehicle.bicycle",
    "#ff7f50": "vehicle.bus.bendy",
    "#ff4500": "vehicle.bus.rigid",
    "#ff9e00": "vehicle.car",
    "#e99646": "vehicle.construction",
    "#ffd700": "vehicle.emergency.ambulance",  # NOTE(review): overwritten by the police entry below
    "#ffd700": "vehicle.emergency.police",
    "#ff3d63": "vehicle.motorcycle",
    "#ff8c00": "vehicle.trailer",
    "#ff6347": "vehicle.truck",
    "#00cfbf": "flat.driveable_surface",
    "#af004b": "flat.other",
    "#4b004b": "flat.sidewalk",
    "#70b43c": "flat.terrain",
    "#deb887": "static.manmade",
    "#ffe4c4": "static.other",
    "#00af00": "static.vegetation",
    "#fff0f5": "vehicle.ego",
}

# Flatten every marker in every message to (annotation_id, class_name).
flattened_markers = []
for topic, record, message in marker_messages:
    for marker in message.markers:
        color = mpl.colors.to_hex([marker.color.r, marker.color.g, marker.color.b])
        class_name = color_to_classname[color]
        flattened_markers.append((marker.text, class_name))

annotations = pd.DataFrame(flattened_markers, columns=["annotation_id", "class_name"])

# pandasql runs SQL against dataframes found in the given namespace.
pysqldf = lambda q: sqldf(q, globals())
res = pysqldf(
    "SELECT class_name,COUNT(*) as count FROM annotations GROUP BY class_name ORDER BY count DESC"
)
res
# -

res.plot.bar(x="class_name", y="count", legend=False);

# We can see how many examples of each perceived object our self-driving car encountered (943 cars, 879 adult pedestrians, etc.).

# ### End
#
# This demo illustrated some of the many ways you can analyze your robotics data with Jupyter notebooks and [Foxglove Data Platform](https://foxglove.dev/data-platform).
#
# Signup and analyze your data at https://console.foxglove.dev/signin
#
# Join the Foxglove [Slack community](https://foxglove.dev/slack) and follow [our blog](https://foxglove.dev/blog) for more ideas on how to integrate Data Platform into your robotics development workflows.
FoxgloveDataPlatform.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.0 64-bit
#     name: python3
# ---

# ## technique used to reduce dimensionality
# ## ex: dataset with 3 columns or 3 independent features, then to convert these 3 features to 2 using vector space, PCA is used.

# ## Importing Libraries & getting Data

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()

cancer.keys()

# target 0 = 'malignant', target 1 = 'benign'
cancer['target_names']

cancer.filename

# +
# print(cancer['DESCR'])
# -

df = pd.DataFrame(cancer['data'], columns=cancer['feature_names'])
df.head()

# ## Scaling

# +
from sklearn.preprocessing import StandardScaler

# std.dev = 1 & mean = 0
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
scaler.fit(df)
# -

scaled_data = scaler.transform(df)
scaled_data

# ## PCA

# +
from sklearn.decomposition import PCA

# Project the 30 standardized features onto the 2 leading principal components.
model = PCA(n_components=2)
model.fit(scaled_data)
# -

x_pca = model.transform(scaled_data)

scaled_data.shape ,x_pca.shape

# ## Visualization

# FIX: in load_breast_cancer, cancer['target_names'] is
# ['malignant', 'benign'] — i.e. target 0 is malignant and 1 is benign —
# and scatter.legend_elements() returns handles in ascending target order.
# The labels therefore must be listed malignant-first; the previous
# ['Benign', 'Malignant'] swapped the two legend entries.
classes = ['Malignant', 'Benign']

# +
plt.figure(figsize=(10,5))
scatter = plt.scatter(x_pca[: ,0] , x_pca[: ,1] ,c=cancer['target'])
plt.xlabel("First principle component :")
plt.ylabel("Second principle component :")
plt.legend(handles=scatter.legend_elements()[0], labels=classes)
plt.show()
ML algos/Basic Implementation/PCA/pca.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a transcription table for the Quran: parse the Uthmani XML text and
# the quranic-corpus morphology file, align word groups between the two, and
# derive a Latin-transcription <-> Arabic-character mapping.

# +
import os
import re
from unicodedata import normalize
from unicodedata import name as uname

from IPython.display import display, HTML

from tf.writing.transcription import Transcription as tr
# -

tr.to_arabic("bisomi")

tr.from_arabic("بِسْمِ")

# Source and output paths for the corpus files.
BASE = os.path.expanduser("~/github/q-ran/quran")
SOURCES = f"{BASE}/sources"
WRITING = f"{BASE}/writing"
MORPH_FILE = "quranic-corpus-morphology-0.4.txt"
TEXT_FILE = "quran-uthmani.xml"
MORPH_PATH = f"{SOURCES}/{MORPH_FILE}"
TEXT_PATH = f"{SOURCES}/{TEXT_FILE}"
TRANS_TABLE = f"{WRITING}/table.html"

# +
# Regexes for pulling suras, ayas and their attributes out of the XML.
suraPat = r"<sura([^>]*)>(.*?)</sura>"
suraRe = re.compile(suraPat, re.S)
attPat = r'index="([0-9]+)"\s+name="([^"]+)"'
attRe = re.compile(attPat)
ayaPat = r"<aya([^>]*)/>"
ayaRe = re.compile(ayaPat, re.S)
aattPat = r'index="([0-9]+)"\s+text="([^"]+)"'
aattRe = re.compile(aattPat)
battPat = r'bismillah="(.*)"'
battRe = re.compile(battPat)


def uNorm(words):
    # NOTE(review): the early `return words` makes the NFKC normalization
    # below unreachable dead code — presumably normalization was disabled
    # deliberately during experimentation; confirm before removing.
    return words
    return [normalize("NFKC", word) for word in words]


def readText():
    # Parse the Uthmani XML into:
    #   suraDb: {suraIndex: {"name": ...}}
    #   ayaDb:  {suraIndex: {ayaIndex: {"text": [words], "basmala"?: [words]}}}
    suraDb = {}
    ayaDb = {}
    with open(TEXT_PATH) as fh:
        text = fh.read()
    suras = suraRe.findall(text)
    for (atts, content) in suras:
        (suraIndex, name) = attRe.findall(atts)[0]
        suraIndex = int(suraIndex)
        suraDb[suraIndex] = dict(name=name)
        ayas = ayaRe.findall(content)
        for aya in ayas:
            (ayaIndex, text) = aattRe.findall(aya)[0]
            text = uNorm(text.split())
            ayaIndex = int(ayaIndex)
            data = dict(text=text)
            bsl = battRe.findall(aya)
            if bsl:
                data["basmala"] = uNorm(bsl[0].split())
            ayaDb.setdefault(suraIndex, {})[ayaIndex] = data
    return (suraDb, ayaDb)
# -

(suraDb, ayaDb) = readText()

ayaDb[112]


def readMorph():
    # Parse the tab-separated morphology file into a nested dict keyed by
    # sura -> aya -> word group -> word, with (form, tag, features) leaves.
    # Lines before the "LOCATION\t..." header row are skipped.
    morphDb = {}
    with open(MORPH_PATH) as fh:
        inPrefix = True
        dataLines = 0
        for (i, line) in enumerate(fh):
            if inPrefix:
                if line.startswith("LOCATION\t"):
                    inPrefix = False
                continue
            else:
                dataLines += 1
                (locationRep, form, tag, features) = line.rstrip("\n").split("\t")
                # locationRep looks like "(sura:aya:group:word)" — strip the
                # parens and split on ":".
                (suraIndex, ayaIndex, groupIndex, wordIndex) = (
                    int(x) for x in locationRep[1:-1].split(":")
                )
                morphDb.setdefault(suraIndex, {}).setdefault(ayaIndex, {}).setdefault(
                    groupIndex, {}
                )[wordIndex] = (
                    form,
                    tag,
                    features,
                )
    print(f"{dataLines:>5} lines done")
    return morphDb


morphDb = readMorph()

# +
# Renames applied to "KEY:value" feature keys.
keyTrans = dict(
    LEM="lemma",
)

# Bare feature codes mapped to one or more (feature, value) pairs.
# Codes mapped to () are recognized but deliberately ignored.
valIndex = {
    "NOM": (("case", "nominative"),),
    "ACC": (("case", "accusative"),),
    "GEN": (("case", "genitive"),),
    "(II)": (("form", "II"),),
    "(III)": (("form", "III"),),
    "(IV)": (("form", "IV"),),
    "(IX)": (("form", "IX"),),
    "(V)": (("form", "V"),),
    "(VI)": (("form", "VI"),),
    "(VII)": (("form", "VII"),),
    "(VIII)": (("form", "VIII"),),
    "(X)": (("form", "X"),),
    "(XI)": (("form", "XI"),),
    "(XII)": (("form", "XII"),),
    "+VOC": (("interjection", "allahuma"),),
    "1P": (("person", "1"), ("number", "p")),
    "1S": (("person", "1"), ("number", "s")),
    "2D": (("person", "2"), ("number", "d")),
    "2FD": (("person", "2"), ("number", "d"), ("gender", "f")),
    "2FP": (("person", "2"), ("number", "p"), ("gender", "f")),
    "2FS": (("person", "2"), ("number", "s"), ("gender", "f")),
    "2MD": (("person", "2"), ("number", "d"), ("gender", "m")),
    "2MP": (("person", "2"), ("number", "p"), ("gender", "m")),
    "2MS": (("person", "2"), ("number", "s"), ("gender", "m")),
    "3D": (("person", "3"), ("number", "d")),
    "3FD": (("person", "3"), ("number", "d"), ("gender", "f")),
    "3FP": (("person", "3"), ("number", "p"), ("gender", "f")),
    "3FS": (("person", "3"), ("number", "s"), ("gender", "f")),
    "3MD": (("person", "3"), ("number", "d"), ("gender", "m")),
    "3MP": (("person", "3"), ("number", "p"), ("gender", "m")),
    "3MS": (("person", "3"), ("number", "s"), ("gender", "m")),
    "MD": (("number", "d"), ("gender", "m")),
    "MP": (("number", "p"), ("gender", "m")),
    "M": (("gender", "m"),),
    "MS": (("gender", "m"), ("number", "s")),
    "FD": (("number", "d"), ("gender", "f")),
    "FP": (("number", "p"), ("gender", "f")),
    "FS": (("number", "s"), ("gender", "f")),
    "F": (("gender", "f"),),
    "P": (("number", "p"),),
    "ACT": (("voice", "active"),),
    "PASS": (("voice", "passive"),),
    "IMPF": (("tense", "imperfect"),),
    "IMPV": (("tense", "imperative"),),
    "PERF": (("tense", "perfect"),),
    "PCPL": (("tense", "participle"),),
    "VN": (("tense", "verbalNoun"),),
    "INDEF": (("definite", 1),),
    "PREFIX": (("component", "prefix"),),
    "STEM": (("component", "main"),),
    "SUFFIX": (("component", "suffix"),),
    "Al+": (),
    "bi+": (),
    "ha+": (),
    "ka+": (),
    "sa+": (),
    "ta+": (),
    "ya+": (),
}


def parseMorph(tag, featureStr):
    # Turn a morphology tag plus "|"-separated feature string into a flat
    # feature dict; codes not found in valIndex are collected in `unknowns`.
    features = dict(pos=tag.lower())
    unknowns = set()
    fItems = featureStr.split("|")
    for fItem in fItems:
        if ":" in fItem:
            (key, value) = fItem.split(":", 1)
            if key == "POS":
                continue
            keyRep = keyTrans.get(key, key.lower())
            features[keyRep] = value
        else:
            value = fItem
            keyValues = valIndex.get(value, None)
            if keyValues is None:
                unknowns.add(value)
            else:
                for (k, v) in keyValues:
                    features[k] = v
    return (features, unknowns)
# -

# +
# Invert morphDb into per-feature dicts keyed by (sura, aya, group, word).
wordFeatures = {}
unknowns = set()
for (sura, suraData) in morphDb.items():
    for (aya, ayaData) in suraData.items():
        for (group, groupData) in ayaData.items():
            for (word, (form, tag, featureStr)) in groupData.items():
                wordFeatures.setdefault("transcription", {})[
                    (sura, aya, group, word)
                ] = form
                (theseFeatures, theseUnknowns) = parseMorph(tag, featureStr)
                for (k, v) in theseFeatures.items():
                    wordFeatures.setdefault(k, {})[(sura, aya, group, word)] = v
                unknowns |= theseUnknowns

len(unknowns)
# -


def link(ayaDb, morphDb, limit):
    # Align each morphology word group with the corresponding word in the
    # XML text. Groups whose concatenated transcription length differs from
    # the Arabic word go to `unequal`; for matching lengths, record which
    # transcription character(s) each Arabic character maps to.
    # `limit` (or None) caps the number of groups examined.
    i = 0
    unequal = {}
    transcription = {}
    for sI in morphDb:
        if limit is not None and i > limit:
            break
        for aI in morphDb[sI]:
            if limit is not None and i > limit:
                break
            for gI in morphDb[sI][aI]:
                if limit is not None and i > limit:
                    break
                tWordGroup = ""
                for wI in morphDb[sI][aI][gI]:
                    tWordGroup += morphDb[sI][aI][gI][wI][0]
                aWordGroup = ayaDb[sI][aI]["text"][gI - 1]
                # Special case: at 37:130:3 the XML splits what the
                # morphology treats as one group — presumably a known
                # tokenization mismatch; confirm against the corpus.
                if sI == 37 and aI == 130 and gI == 3:
                    aWordGroup += f" {ayaDb[sI][aI]['text'][gI]}"
                lA = len(aWordGroup)
                lT = len(tWordGroup)
                if lA != lT:
                    unequal[(tWordGroup, aWordGroup)] = (sI, aI, gI)
                else:
                    for c in range(lT):
                        transcription.setdefault(aWordGroup[c], set()).add(
                            tWordGroup[c]
                        )
                i += 1
    return (unequal, transcription, i)


(unequal, transcription, i) = link(ayaDb, morphDb, None)

len(unequal)

transcription


def makeTranscriptionTable(transcription):
    # Render the Arabic->transcription mapping as (an HTML table string,
    # a Python source snippet for an `arabic_mapping` dict). Arabic chars
    # mapped to more than one transcription char are reported as errors
    # and left out of the table.
    errors = []
    table = []
    for a in sorted(transcription):
        t = transcription[a]
        if len(t) != 1:
            errors.append(a)
            continue
        t = list(t)[0]
        table.append((t, a, uname(a)))
    if errors:
        chars = " - ".join(errors)
        print(f"There are {len(errors)} errors: {chars}")
    tableStr = """
<table>
<tbody>
"""
    mappingStr = """
arabic_mapping = {"""
    for (t, a, unm) in table:
        tableStr += f"""
<tr><td class="t">{t}</td><td class="g">{a}</td><td class="p"></td><td class="r"></td><td class="n">{unm}</td><td class="u">{ord(a):04x}</td></tr>"""
        # Quote the transcription char with whichever quote it doesn't contain.
        qu = '"' if t == "'" else "'"
        mappingStr += f"""
    {qu}{t}{qu}: "\\u{ord(a):04x}", # {unm}"""
    tableStr += """
</tbody>
</table>
"""
    mappingStr += """
}
"""
    return (tableStr, mappingStr)


print(makeTranscriptionTable(transcription)[1])

print(makeTranscriptionTable(transcription)[0])

with open(TRANS_TABLE, "w") as fh:
    fh.write(makeTranscriptionTable(transcription)[0])

display(HTML(makeTranscriptionTable(transcription)[0]))

cases = sorted(unequal)


def show(case):
    # Dump one unequal alignment case: both word groups, their location,
    # lengths, and the Unicode names of the Arabic characters.
    (tW, aW) = cases[case]
    print(tW, aW)
    (sI, aI, gI) = unequal[(tW, aW)]
    print(f"sura {sI} aya {aI} word group {gI}")
    print(len(tW), len(aW))
    print(list(tW))
    print("\n".join(f"{x} = {uname(x)}" for x in aW))


show(0)

show(10)

# NOTE(review): unequal values are 3-tuples, so len(...) here is always 3 —
# presumably a count of locations was intended; confirm.
for (tG, aG) in cases[0:20]:
    print(f"{tG} != {aG} {len(unequal[(tG, aG)])}")
programs/morph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Reading data from a csv file

import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('default')

# bikes.csv is semicolon-separated and latin1-encoded; parse the Date column.
broken_df = pd.read_csv('../data/cookbook/bikes.csv', sep=';', encoding='latin1', parse_dates=['Date'])

# # Selecting lines

# Select the first 3 rows of the file
broken_df[:3]

# The dates above were parsed month-first instead of day-first. To fix this,
# pass <strong>dayfirst=True</strong>.

broken_df = pd.read_csv('../data/cookbook/bikes.csv', sep=';', encoding='latin1', parse_dates=['Date'], dayfirst=True)
broken_df[:3]

# To make lookups easier, use the <strong>Date</strong> column as the index

broken_df = pd.read_csv('../data/cookbook/bikes.csv', sep=';', encoding='latin1', parse_dates=['Date'], dayfirst=True, index_col='Date')
broken_df[:3]

# # Selecting a column

# Select data by <strong>column</strong> name and show only the first 3 rows.
# FIX: the column selectors below had been replaced by the anonymization
# placeholder '<NAME>'; the real column — used intact in the plotting cell
# at the end of this notebook — is the 'Berri 1' bike-path counter.
broken_df['Berri 1'][:3]

# The same selection can be done through <strong>iloc</strong>
broken_df['Berri 1'].iloc[:3]

# More than one column can be selected at once
broken_df[['Berri 1', 'Rachel1']].iloc[:3]

# # Plotting a column

broken_df['Berri 1'].plot()

# Enlarge the plotted figure
broken_df['Berri 1'].plot(figsize = (15,5))
notebooks/pandas/cookbook/1. Leitura de arquivo csv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Multiple linear regression on the 50_Startups dataset, with backward
# elimination of features via statsmodels OLS p-values.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset (last column is the target: Profit)
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values

# Encoding categorical data (column 3: State).
# FIX: OneHotEncoder's `categorical_features` argument was deprecated in
# scikit-learn 0.20 and removed in 0.22, so the original call no longer runs.
# ColumnTransformer is the supported replacement: it one-hot encodes column 3
# and passes the remaining columns through, placing the dummy columns first —
# the same column order the old code produced.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
ct = ColumnTransformer(
    [('state', OneHotEncoder(), [3])],
    remainder='passthrough',
    sparse_threshold=0,  # force a dense ndarray, like the old .toarray()
)
X = ct.fit_transform(X)
X

# Avoiding the Dummy Variable Trap: drop the first dummy column
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Building the optimal model using Backward Elimination:
# prepend an intercept column of ones, then repeatedly drop the predictor
# with the highest p-value until only significant ones remain.
import statsmodels.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)

X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()

X_opt = X[:,[0,1,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()

X_opt = X[:,[0,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()

X_opt = X[:,[0,3,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()

X_opt = X[:,[0,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
Multiple_Linear_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import json import requests prefix = "http://www.recipepuppy.com/api/?" postfix = "i=potato&p=1" url = prefix + postfix url r = requests.get(url) r.status_code pdata = r.json() type(pdata) pdata.keys() recipes = pdata.get('results') recipes[:2] with open("potato_recipes.json", "w") as f: json.dump(recipes, f, indent=4) nurl = url[:-1] nurl import time time.sleep(0.5) # sleep half a second might be a good idea to sleep a bit before requesting access again # you could be rate limited if you ask too quickly too much # + recipe_list = [] for n in range(1,10+1): print(f"Requesting {nurl}{n}") r = requests.get(f"{nurl}{n}") if r.status_code != 200: print("Not good", r.status_code) break req_data = r.json() temp_recipes = req_data.get('results') recipe_list += temp_recipes time.sleep(0.2) len(recipe_list) # - recipe_list[48:52] with open('potatoes_100.json', "w") as f: json.dump(recipe_list, f, indent=4)
Diena_14_JSON/PuppyRecipes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Give me some Credit
#
# Use CRISP-DM to answer some issues related to GiveMe some credit Kaggle competition

# ### Environment Setup

# +
import numpy as np # Array, Vector, matrix calculations
import pandas as pd # DataFrame handling
import matplotlib.pyplot as plt # Plotting

pd.options.display.max_columns = 999
# %matplotlib inline

np.random.seed(42) # Answer to everything
# -

# ## 1. Download, Explore and prepare Kaggle competition dataset

# Give me some credit dataset:
# https://www.kaggle.com/c/GiveMeSomeCredit/data
#
# The Give Me Some credit dataset contains 12 variables related to the financial behaviour and position of customers, these are:
#
# - **SeriousDlqin2yrs:** Person Experienced 90 days past due delinquency or worse
# - **RevolvingUtilizationOfUnsecuredLines:** Total balance on credit cards and personal lines of credit, divided by the sum of credit limits
# - **Age:** Age borrower in years
# - **NumberOfTime30-59DaysPastDueNotWorse:** Number of times the borrower has been 30-59 days past due but not worse in the last 2 years
# - **DebtRatio:** Monthly debt payments, alimony, living costs divided by monthly gross income
# - **MonthlyIncome:** Monthly Income of customer
# - **NumberOfOpenCreditLinesAndLoans:** Number of Open loans (installments like car loan or mortgage) and Lines of Credit
# - **NumberOfTimes90DaysLate:** Number of times borrower has been 90 days or more past due.
# - **NumberRealEstateLoansOrLines:** Number of mortgage and real estate loans including home equity lines of credit
# - **NumberOfTime60-89DaysPastDueNotWorse:** Number of times borrower has been 60-89 days past due but not worse in the last 2 years
# - **NumberOfDependents:** Number of dependents in family excluding themselves.
#
# These 10 input variables are used to predict the target variable whether or not the customer will experience any Serious Delinquency event in the next two years. Since we will perform our analysis on a wide range of models, all of these variables will be handled and converted to numeric type.

# ### Import Data and clean
# NOTE(review): the original text claimed the data was an .xls file, but the
# code below reads cs-training.csv with read_csv.

# Import file
path = 'D:/Python/Projects/02_GSC/cs-training.csv'
data = pd.read_csv(path, usecols = ['SeriousDlqin2yrs', 'RevolvingUtilizationOfUnsecuredLines', 'age',
                                    'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
                                    'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
                                    'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
                                    'NumberOfDependents'])

data.dtypes

# ### EDA
#
# Exploration of a dataset is paramount to a correct framing of our problem. Our objective is to predict an event of serious delinquency in the next 2 years based on 10 variables. In order to really understand our data and the vastness of the problem at hand we will focus on the next steps:
#
# * Dimensions of the dataset
# * Descriptive stats + Probability distribution plots
# * Nan Detection
# * Outlier Detection + Box plots
# * Correlation Matrix
# * Model Constraints from data related problems
# * Possible candidates of features
#
# Lets start with the Dimensions of our training matrix

data.shape

# Thus we have 150.000 records to train our model, and 11 features to predict our target feature SeriousDlqin2yrs

# #### • Descriptive Stats

data.describe()

data.columns

# ## • Probability Distribution plots + Bins
#
# ### Target: SeriousDlqin2yrs
#
# As you can probably tell, this dataset feature some imbalance on the response target, (Kudos to the risk evaluation team of the dataset). Delinquency events in the last 2 years have been around 6.7%

# Bar chart of the default / non-default proportions.
# NOTE(review): the 150000 denominator is hard-coded from data.shape above.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
labels = ['Default', 'Non-Default']
values = [(data[data['SeriousDlqin2yrs']==1].sum()['SeriousDlqin2yrs'])/150000,
          (150000-data[data['SeriousDlqin2yrs']==1].sum()['SeriousDlqin2yrs'])/150000]
ax.bar(labels,values, color= ['red', 'green'])
plt.show()

# ### • RevolvingUtilizationOfUnsecuredLines:
# In order to properly understand the distribution on our features, we will use the elements of a Box-Plot and its bounds. In this case we find almost 38k records between -0.74 and 0.2, negative utilization rates? this does seem a bit strange, notice as well the concentration of high rates of Revolving utilization: 135%!

# RevolvingUtilizationOfUnsecuredLines
## Get quantiles and define Bins
# NOTE(review): this `col` assignment is unused — BinSelection below is
# called with an explicit column-name argument.
col = 'RevolvingUtilizationOfUnsecuredLines'

def BinSelection(col):
    # Build box-plot-style bin edges for `col` of the global `data` frame:
    # [-inf, outer-lower, inner-lower, Q1, median, Q3, inner-upper,
    #  outer-upper, +inf], using 1.5*IQR (inner) and 3*IQR (outer) fences.
    q_u = data[col].quantile(0.75) # Upper Quantile
    q_l = data[col].quantile(0.25) # Lower Quantile
    m = data[col].quantile(0.50) # Median
    iqr = q_u - q_l # InterQuartile Range
    olb = q_l - 3 * iqr # Outer Lower Bound, Anything less is strongly suspected outlier
    ilb = q_l - 1.5 * iqr # Inner Lower Bound, Anything within bound is softly considered outlier
    iub = q_u + 1.5 * iqr # Inner Upper Bound
    oub = q_u + 3 * iqr # Outer Upper Bound
    df = [float('-inf'), olb, ilb, q_l, m, q_u, iub, oub, float('inf')]
    return(df);

bp_bins = (BinSelection('RevolvingUtilizationOfUnsecuredLines'))
data['RevolvingUtilizationOfUnsecuredLines_binned']=pd.cut(x = data['RevolvingUtilizationOfUnsecuredLines'], bins = bp_bins )
data['RevolvingUtilizationOfUnsecuredLines_binned'].value_counts(sort=False).plot.bar()
print(data['RevolvingUtilizationOfUnsecuredLines_binned'].value_counts())
print(bp_bins)

data.groupby(['RevolvingUtilizationOfUnsecuredLines_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar()

# What values worry me in this feature:
# - Negative numbers on utilization are not feasible
# - Numbers greater than 1 seem troublesome from a financial point perspective.
# - High Utilization of revolving products does suggest an increase on the target feature
#
# Let us test for numbers greater than 1 since we know the min of this feature is 0 we need not to worry about negative numbers

# Numbers greater than 1 on dataset
test_data = data[data['RevolvingUtilizationOfUnsecuredLines']>1]
col_count = test_data.count()
print(col_count)

# ### Age:
# This feature seems like a straightforward one, Demographic age in years, we expect lower age to affect risk exposure.
#

data['age'].value_counts()

# Although 90+ age seems a given on a developed world, being in a loan portfolio at advanced age does not seem that relevant. Bank policies usually avoid customers with life exposure. let us Bin this variable again and try to understand it a little bit better.

# +
# NOTE(review): bp_bins is computed but NOT used here — the cut below uses a
# hand-picked bin list instead; presumably intentional, confirm.
bp_bins = (BinSelection('age'))
data['age_binned']=pd.cut(x = data['age'], bins = [0, 18, 20, 41, 52, 63, 80, 93, float('Inf')] )
data['age_binned'].value_counts(sort=False).plot.bar()
print(data['age_binned'].value_counts())
print(bp_bins)
# -

data.groupby(['age_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar()

# What worries me about this feature?:
# - People Below legal age
# - 90+ population
# - Concentration of target events on first bin, suggests decrease in bin increases.

# Non Legal Age customers on dataset
test_data = data[data['age'] < 18]
col_count = test_data.count()
print(col_count)

# Customers older than 90 on dataset
test_data = data[data['age'] > 90]
col_count = test_data.count()
print(col_count)

# ### NumberOfTime30-59DaysPastDueNotWorse:
# Number of times borrower has been 30-59 days past due but no worse in the last 2 years.
#
# Interesting feature, to be used in conjuction with other 2 features
# - Theoretical min: 0
# - Theoretical max: 24

data['NumberOfTime30-59DaysPastDueNotWorse'].value_counts()

# 98 - 96 seems a bit excessive and not a part of our theoretical range values. Lets explore further this variable.

# +
bins = [0, 0.99, 1, 2, 4, 6, 10, 13, float('Inf')]
data['NumberOfTime30-59DaysPastDueNotWorse_binned']= pd.cut(x = data['NumberOfTime30-59DaysPastDueNotWorse'], bins = bins)
data['NumberOfTime30-59DaysPastDueNotWorse_binned'].value_counts(sort=False).plot.bar()
print(data['NumberOfTime30-59DaysPastDueNotWorse_binned'].value_counts())
print(bins)
# -

# We would like to see whether there can be any sort of relation among our bins and the target feature

data.groupby(['NumberOfTime30-59DaysPastDueNotWorse_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar()

# What worries me about this feature?
# - Extremely unbalanced, 130k+ records display a value of 0 (Again, kudos to the risk evaluation team)
# - It does seem to suggest that the proportion of events on each bin increase is related to an increase in Delinquency events among the population.
# - 96, 98 seem to be some sort of code. We need to create new features for these values.

# ### Debt Ratio
# Monthly debt payments, alimony, living costs divided by monthly gross income.
#
# Potential pitfalls:
# - Non accurate income measure
# - Possible missing income measure (inf ratios)
# - Debt Payments are mixed with living costs ( Non dynamic feature)
#
# Theoretical ratio 0 - 2(?) lets check the portfolio distribution.

data['DebtRatio'].describe()

# max of 339k? that's just denominator problems. Let us Find the bins for this variable

# +
bins = BinSelection('DebtRatio') # [float('-inf'), 0, 0.36, 0.86, 1, 2, 3, float('inf')])
data['DebtRatio_binned']= pd.cut(x = data['DebtRatio'], bins = bins)
data['DebtRatio_binned'].value_counts(sort=False).plot.bar()
print(data['DebtRatio_binned'].value_counts())
print(bins)
# -

data.groupby(['DebtRatio_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar()

# What worries me about this feature?
# - Non informative feature, proportion of target does not seem to relate on a linear fashion among bins
# - Construction problems.
By dividing by an unreliable feature we have really high debt to income ratios that are driven mainly by Nan # - Feature treatment: bin, normalize, separate non informative by nulls # ### MonthlyIncome # Monthly income, no currency info, assumed USD. # - No time period, Inflation should not play a role on 2y and US data. # - Income unless given and collected from employer it's a guess. Even then, dynamic nature of income brings a lot of variance to the data. # - Assumed credit origination income to be maintained across credit lifecycle? # # Potential pitfalls: # - Tons of nulls # - Traditional unreliable data, be careful. data['MonthlyIncome'].describe().apply("{:,.2f}".format) # Well that avg portfolio income does bring to tears all international students. But Let us focus on the task at hand. # # We have: # # - Around 30k Na's # - Some possible format problems? 3m monthly usd seems excessive. (Maybe Corporate Loans?) # # Let's find the bins of this variable bp_bins = (BinSelection('MonthlyIncome')) bp_bins data['MonthlyIncome_binned']=pd.cut(x = data['MonthlyIncome'], bins = [float('-Inf'), 0, 3400, 5400, 8000, 10000, 15000, 20000, 23000, float('Inf')] ) data['MonthlyIncome_binned'].value_counts(sort=False, dropna=False).plot.bar() print(data['MonthlyIncome_binned'].value_counts(dropna= False)) print(bp_bins) data.groupby(['MonthlyIncome_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar() # What worries me about this feature: # - Absurd amount of nulls. # - Suspected outliers around 15k+ # - Strong suspect of Outliers at 22k+ # # - Feature treatment: # - Null treatment # - Stze # - Relate Nan values of debt ratio to median. in case of null income. # # ### NumberOfOpenCreditLinesAndLoans # Number of Open loans (installment like car loan or mortgage) and Lines of credit (e.g. credit cards). # # - Proxy for income(?) # # Potential pitfalls: # - Distorted by including Lines of credit (2-3 avg) # - Non Linear nature with target (?) 
# # Let us delve into this feature. data['NumberOfOpenCreditLinesAndLoans'].describe() # Well even among industry standards this seems to involve also savings/checking accounts. A bit high to what was expected. Let us find the bins of this variable. bp_bins = (BinSelection('NumberOfOpenCreditLinesAndLoans')) bp_bins data['NumberOfOpenCreditLinesAndLoans_binned']=pd.cut(x = data['NumberOfOpenCreditLinesAndLoans'], bins = [float('-inf'), 0, 5, 8, 11, 20, 29, float('inf')] ) data['NumberOfOpenCreditLinesAndLoans_binned'].value_counts(sort=False, dropna=False).plot.bar() print(data['NumberOfOpenCreditLinesAndLoans_binned'].value_counts(dropna= False)) print(bp_bins) data.groupby(['NumberOfOpenCreditLinesAndLoans_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar() # What worries me about this feature: # - More than 11 open lines? this is just way too much. Is this per household , even then? # - Non linear relationship with target. Low Open lines could men less income therefore more delinquency events? # - No null events. Possible aggregation across time errors. maybe card renewals? # # Recommended treatment: # - Separate feature space >11. # - Treat >29 as outlier # - stdize. # ### NumberOfTimes90DaysLate # Number of times borrower has been 90 days or more past due, this feature is considered a non-performing loan across different portfolio 3 months late payment is considered crossing a line in accounting literature. For House portfolios... maybe not that troublesome. # # We will observe this feature with great interest. # data['NumberOfTimes90DaysLate'].describe() # Again this cheeky 98 trying to pass by, max theoretical of 24times. This reinforces our theory of code-related meaning on the feature, (maybe refinanced status?) 
data['NumberOfTimes90DaysLate'].value_counts(dropna=False) data['NumberOfTimes90DaysLate_binned']=pd.cut(x = data['NumberOfTimes90DaysLate'], bins = [float('-inf'), 0, 1, 2, 3, 8, 10, float('inf')] ) data['NumberOfTimes90DaysLate_binned'].value_counts(sort=False, dropna=False).plot.bar() print(data['NumberOfTimes90DaysLate_binned'].value_counts(dropna= False)) print(bp_bins) data.groupby(['NumberOfTimes90DaysLate_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar() # What worries me about this feature?: # - Unbalanced feature # - Highly informative > 1. # - Leakage? three times of >90 events could be considered serious delinquency events. # # Treatment: # - Encode # ### NumberRealEstateLoansOrLines # Number of mortgage and real estate loans including home equity lines of credit. data['NumberRealEstateLoansOrLines'].describe() # Now this **is** some real feature, its range is among what is expected. 1 maybe 2 mortgages. You would expect delinquencies on these to be low. Nonetheless... this was 9 years ago we should be careful at assigning any expected value to our beliefs about this feature. # data['NumberRealEstateLoansOrLines'].value_counts(dropna= False) bp_bins = (BinSelection('NumberRealEstateLoansOrLines')) bp_bins data['NumberRealEstateLoansOrLines_binned']=pd.cut(x = data['NumberRealEstateLoansOrLines'], bins = bp_bins ) data['NumberRealEstateLoansOrLines_binned'].value_counts(sort=False, dropna=False).plot.bar() print(data['NumberRealEstateLoansOrLines_binned'].value_counts(dropna= False)) print(bp_bins) data.groupby(['NumberRealEstateLoansOrLines_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar() # What worries me about this feature? # - That everything seems normal. What am I missing? # ### NumberOfTime60-89DaysPastDueNotWorse # Number of times borrower has been 60-89 days past due but no worse in the last 2 years. 
data['NumberOfTime60-89DaysPastDueNotWorse'].describe() data['NumberOfTime60-89DaysPastDueNotWorse'].value_counts(dropna=False) # Bin Selection bp_bins = (BinSelection('NumberOfTime60-89DaysPastDueNotWorse')) bp_bins data['NumberOfTime60-89DaysPastDueNotWorse_binned']=pd.cut(x = data['NumberOfTime60-89DaysPastDueNotWorse'], bins = [float('-inf'), 0, 1, 2, 3, float('inf')] ) data['NumberOfTime60-89DaysPastDueNotWorse_binned'].value_counts(sort=False, dropna=False).plot.bar() print(data['NumberOfTime60-89DaysPastDueNotWorse_binned'].value_counts(dropna= False)) print(bp_bins) data.groupby(['NumberOfTime60-89DaysPastDueNotWorse_binned',"SeriousDlqin2yrs"]).size().unstack().plot.bar(figsize=(8, 6)) # What worries me about this feature?: # - Unbalanced feature # - Highly informative > 1. # - Leakage? three times of >90 events could be considered serious delinquency events. # # Treatment: # - Encode # ### Calculate Pearson Correlation # # Displays last column of Pearson correaltion matrix as a Pandas DataFrame pd.DataFrame(data.corr()['SeriousDlqin2yrs']).iloc[:-1] # This is why we perform our correlation analysis **after** taking a closer look to the dataset. We know that there are instances which revolving utilization will help out distinguish delinquency events. # ## EDA Recap or better: What worries me about my features? # - **RevolvingUtilizationOfUnsecuredLines** # - Negative numbers on utilization are not feasible (Solved) # - Numbers greater than 1 seem troublesome from a financial point perspective. 
# (Associated with target)
#   - High utilization of revolving products does suggest an increase in the target feature
# - **Age**
#   - People below legal age (only 1 case)
#   - 90+ population (cap at 90)
#   - Relationship follows the literature on the agent's financial life cycle
# - **NumberOfTime30-59DaysPastDueNotWorse**
#   - Extremely unbalanced: 130k+ records display a value of 0 (again, kudos to the risk evaluation team)
#   - It does seem to suggest that an increase in the proportion of events in each bin is related to an increase in delinquency events among the population.
#   - 96 and 98 seem to be some sort of code. We need to create new features for these values.
# - **DebtRatio**
#   - Non-informative feature: the proportion of the target does not seem to relate in a linear fashion across bins
#   - Construction problems. By dividing by an unreliable feature we get really high debt-to-income ratios that are driven mainly by NaN
#   - Feature treatment: bin, normalize, separate the non-informative part by nulls
# - **MonthlyIncome**
#   - Absurd amount of nulls.
#   - Suspected outliers around 15k+
#   - Strong suspicion of outliers at 22k+
#   - Feature treatment:
#     - Null treatment
#     - Standardize
#     - Relate NaN values of debt ratio to the median in case of null income.
# - **NumberOfOpenCreditLinesAndLoans**
#   - More than 11 open lines? This is just way too much. Is this per household? Even then?
#   - Non-linear relationship with target. Low open lines could mean less income and therefore more delinquency events?
#   - No null events. Possible aggregation-across-time errors; maybe card renewals?
#   - Feature treatment:
#     - Separate feature space >11.
#     - Treat >29 as outlier
#     - Standardize.
# - **NumberOfTimes90DaysLate**
#   - Unbalanced feature
#   - Highly informative > 1.
#   - Leakage? Three or more 90+ day events could themselves be considered serious delinquency events.
#   - Treatment:
#     - Encode
# - **NumberRealEstateLoansOrLines**
#   - Everything seems normal. What am I missing?
# - **NumberOfTime60-89DaysPastDueNotWorse** # - Unbalanced feature # - Highly informative > 1. # - Treatment: # - Encode
Credit_EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Linear-regression experiment on championship football results.
# Builds cumulative home-streak features per team from the match history,
# then fits a LinearRegression model to predict the 'difference' column.

import pandas as pd
import matplotlib.pyplot as plt
from sklearn import model_selection
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor

df = pd.read_csv("combined_clean.csv")
df

# Restrict to the 'championship' league only.
# .copy() so the column assignments below do not hit a view of df
# (avoids SettingWithCopyWarning / silent no-op writes).
champ = df[df['League'] == 'championship'].copy()
champ

# Running totals per home team: cumulative streak score and games played,
# recorded row by row so each match sees the totals *including* itself.
AcumulatedStreakHome = {}
AcumulatedGamesPlayed = {}
homeAcumulatedStreak = []
homeAcumulatedGames = []
for index, row in champ.iterrows():
    homeTeam = row['Home_Team']
    streakHome = row['Streak_When_Home']
    # First appearance of a team: start both running totals at zero.
    # BUG FIX: previously the streak total was seeded with streakHome and
    # then streakHome was added again below, double-counting the first game
    # (the games-played total was correctly seeded with 0).
    if homeTeam not in AcumulatedStreakHome:
        AcumulatedStreakHome[homeTeam] = 0
        AcumulatedGamesPlayed[homeTeam] = 0
    new_score = AcumulatedStreakHome[homeTeam] + streakHome
    newGamesPlayed = AcumulatedGamesPlayed[homeTeam] + 1
    AcumulatedStreakHome[homeTeam] = new_score
    AcumulatedGamesPlayed[homeTeam] = newGamesPlayed
    homeAcumulatedStreak.append(new_score)
    homeAcumulatedGames.append(newGamesPlayed)
print("streak", homeAcumulatedStreak)
print("played", homeAcumulatedGames)

champ['home_Streak_Acumulated'] = homeAcumulatedStreak
champ['home_played_Acumulated'] = homeAcumulatedGames
features = list(champ.columns)
print(features)
champ

# Average streak score per game played so far (win-percentage style ratio).
winStreakPercentage = []
for index, row in champ.iterrows():
    winStreakPercentage.append(row['home_Streak_Acumulated'] / row['home_played_Acumulated'])
print(winStreakPercentage)
champ['AcumulatedWinPercentage'] = winStreakPercentage
champ

# BUG FIX: mask champ with champ's own Season column. The old code used
# df['Season'] == 2021, a mask built from the *unfiltered* frame, which
# pandas aligns by index against the filtered champ frame.
champ2021 = champ[champ['Season'] == 2021]
champ2021
champ2021.head()  # BUG FIX: .head without () only echoed the bound method

features = list(champ.columns)
print(features)
df = pd.DataFrame(champ, columns=features)
df.head()

# Numeric predictors fed to the model (same set and order as before).
features = df[['Round', 'Number_Teams', 'Total_Rounds', 'Position_Home', 'Points_Home',
               'Total_Wins_Home', 'Total_Draw_Home', 'Total_Lose_Home',
               'Total_Goals_For_Home_Team', 'Total_Goals_Against_Home_Team',
               'Total_Streak_Home', 'Wins_When_Home', 'Draw_When_Home', 'Lose_When_Home',
               'Goals_For_When_Home', 'Goals_Against_When_Home', 'Position_Away',
               'Points_Away', 'Total_Wins_Away', 'Total_Draw_Away', 'Total_Lose_Away',
               'Total_Goals_For_Away_Team', 'Total_Goals_Against_Away_Team',
               'Total_Streak_Away', 'Wins_When_Away', 'Draw_When_Away', 'Lose_When_Away',
               'Goals_For_When_Away', 'Goals_Against_When_Away', 'Streak_When_Home',
               'Streak_When_Away', 'AcumulatedWinPercentage']]
result = df['difference']  # target: presumably home-minus-away goal difference — TODO confirm against combined_clean.csv

x = np.array(features)
y = np.array(result)
# Hold out 20% of the rows; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=4)
model = LinearRegression()
model.fit(X_train, y_train)
b = model.intercept_  # fitted intercept
m = model.coef_       # fitted coefficients, one per feature column
print(b)
print(m)
model.score(X_train, y_train)  # R^2 on the training split
x.shape
Linear regression pt2 Tamim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import torch from transformers import LEDForConditionalGeneration from transformers import LEDConfig from collections import OrderedDict dataset_name='wcep' states = torch.load('../github/PRIMER/PRIMER_%s_state.pt'%(dataset_name)) states.keys() new_states = OrderedDict() for k in states: new_k=k.replace('model','led') new_states[new_k]=states[k] new_states['led.encoder.embed_positions.weight'] = states['model.encoder.embed_positions.weight'][2:] new_states['led.decoder.embed_positions.weight'] = states['model.decoder.embed_positions.weight'][2:] new_states['lm_head.weight'] = states['model.shared.weight'] new_states['led.decoder.embed_positions.weight'].shape config=LEDConfig.from_pretrained('../github/PRIMER/PRIMER_%s/'%(dataset_name)) model=LEDForConditionalGeneration(config) model.load_state_dict(new_states,strict=True) model.config.save_pretrained('../github/PRIMER/PRIMER_%s_hf/'%(dataset_name)) model.save_pretrained('../github/PRIMER/PRIMER_%s_hf/'%(dataset_name))
Convert_to_hf_LED.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.4 64-bit (''miniconda3'': virtualenv)'
#     language: python
#     name: python37464bitminiconda3virtualenvf5231775952f4f6b9dec6182dd447e7a
# ---

# +
# libraries for making requests and parsing HTML
import requests
from bs4 import BeautifulSoup

# plotting
import matplotlib.pyplot as plt
import seaborn as sns

# sklearn for kmeans and model metrics
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score

# pandas, for data wrangling
import pandas as pd

# +
# URL to get S&P tickers from
TICKER_URL = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'

# multi-level identifier, to select each row of ticker table in HTML response
TABLE_IDENTIFIER = '#constituents tbody tr td'

# yahoo finance URL we can use to scrape data for each company
YAHOO_URL = 'http://finance.yahoo.com/quote/'

# HTML classes for various elements on yahoo finance page
YAHOO_TABLE_CLASS = 'Ta(end) Fw(600) Lh(14px)'

# 'data-test' ids of the open price, EPS (TTM), Div/Yield and P/E cells
YAHOO_IDS = ['OPEN-value', 'EPS_RATIO-value', 'DIVIDEND_AND_YIELD-value', 'PE_RATIO-value']

# get HTML content from wikipedia S&P 500 page
res = BeautifulSoup(requests.get(TICKER_URL).text, 'html.parser')
# get the table of stock ticker data, selecting on TABLE_IDENTIFIER
table_data = [ticker for ticker in res.select(TABLE_IDENTIFIER)]
# iterate over each row of table (9 elements of information), and extract the individual tickers
tickers = [table_data[i].text for i in range(0, len(table_data), 9)]


def get_yahoo_ticker_data(tickers):
    """Scrape open price, EPS (TTM), Div/Yield and P/E for each S&P ticker.

    Parameters
    ----------
    tickers : list[str]
        Ticker symbols as scraped from the Wikipedia table; presumably each
        carries a trailing newline, which the [:-1] slices strip — verify.

    Returns
    -------
    list[list[str]]
        One row per successfully scraped ticker: [symbol, open, eps,
        div/yield, p/e]. Tickers that fail are printed and skipped.
    """
    ticker_data = []
    print(len(tickers))
    for i, ticker in enumerate(tickers):
        print(i)
        try:
            # ticker[:-1] drops the trailing character from the scraped cell text
            REQ_URL = YAHOO_URL + ticker[:-1] + '?p=' + ticker[:-1]
            ticker_i_res = requests.get(REQ_URL)
            ticker_i_parser = BeautifulSoup(ticker_i_res.text, 'html.parser')
            ticker_i_data = [ticker[:-1]]
            ticker_i_open_eps_div = [ticker_i_parser.find(attrs={'class': YAHOO_TABLE_CLASS, 'data-test': id_}).text
                                     for id_ in YAHOO_IDS]
            for data in ticker_i_open_eps_div:
                ticker_i_data.append(data)
            ticker_data.append(ticker_i_data)
        # narrowed from a bare except: still best-effort per ticker, but no
        # longer swallows KeyboardInterrupt/SystemExit
        except Exception:
            print("error for " + ticker)
            continue
    # BUG FIX: the collected rows were built but never returned, so the
    # caller below always got None.
    return ticker_data
# -

get_yahoo_ticker_data(tickers)

tickers

# +
from datetime import datetime
from concurrent import futures

import pandas as pd
from pandas import DataFrame
import pandas_datareader.data as web


def download_stock(stock):
    """Try to query Yahoo for one stock's 5-year history and save it to CSV;
    on failure record the symbol in the module-level bad_names list.

    Relies on start_time / now_time globals set in the __main__ block below.
    (Docstring previously said 'iex'; the code queries the 'yahoo' source.)
    """
    try:
        print(stock)
        stock_df = web.DataReader(stock, 'yahoo', start_time, now_time)
        stock_df['Name'] = stock
        output_name = stock + '_data.csv'
        stock_df.to_csv(output_name)
    except Exception:
        bad_names.append(stock)
        print('bad: %s' % (stock))


if __name__ == '__main__':
    # set the download window: the last 5 years, ending now
    now_time = datetime.now()
    start_time = datetime(now_time.year - 5, now_time.month, now_time.day)

    # list of S&P companies (historical constituents, hard-coded)
    s_and_p = ['MMM','ABT','ABBV','ACN','ATVI','AYI','ADBE','AMD','AAP','AES','AET',
    'AMG','AFL','A','APD','AKAM','ALK','ALB','ARE','ALXN','ALGN','ALLE',
    'AGN','ADS','LNT','ALL','GOOGL','GOOG','MO','AMZN','AEE','AAL','AEP',
    'AXP','AIG','AMT','AWK','AMP','ABC','AME','AMGN','APH','APC','ADI','ANDV',
    'ANSS','ANTM','AON','AOS','APA','AIV','AAPL','AMAT','APTV','ADM','ARNC',
    'AJG','AIZ','T','ADSK','ADP','AZO','AVB','AVY','BHGE','BLL','BAC','BK',
    'BAX','BBT','BDX','BRK.B','BBY','BIIB','BLK','HRB','BA','BWA','BXP','BSX',
    'BHF','BMY','AVGO','BF.B','CHRW','CA','COG','CDNS','CPB','COF','CAH','CBOE',
    'KMX','CCL','CAT','CBG','CBS','CELG','CNC','CNP','CTL','CERN','CF','SCHW',
    'CHTR','CHK','CVX','CMG','CB','CHD','CI','XEC','CINF','CTAS','CSCO','C','CFG',
    'CTXS','CLX','CME','CMS','KO','CTSH','CL','CMCSA','CMA','CAG','CXO','COP',
    'ED','STZ','COO','GLW','COST','COTY','CCI','CSRA','CSX','CMI','CVS','DHI',
    'DHR','DRI','DVA','DE','DAL','XRAY','DVN','DLR','DFS','DISCA','DISCK','DISH',
    'DG','DLTR','D','DOV','DWDP','DPS','DTE','DRE','DUK','DXC','ETFC','EMN','ETN',
    'EBAY','ECL','EIX','EW','EA','EMR','ETR','EVHC','EOG','EQT','EFX','EQIX','EQR',
    'ESS','EL','ES','RE','EXC','EXPE','EXPD','ESRX','EXR','XOM','FFIV','FB','FAST',
    'FRT','FDX','FIS','FITB','FE','FISV','FLIR','FLS','FLR','FMC','FL','F','FTV',
    'FBHS','BEN','FCX','GPS','GRMN','IT','GD','GE','GGP','GIS','GM','GPC','GILD',
    'GPN','GS','GT','GWW','HAL','HBI','HOG','HRS','HIG','HAS','HCA','HCP','HP','HSIC',
    'HSY','HES','HPE','HLT','HOLX','HD','HON','HRL','HST','HPQ','HUM','HBAN','HII',
    'IDXX','INFO','ITW','ILMN','IR','INTC','ICE','IBM','INCY','IP','IPG','IFF','INTU',
    'ISRG','IVZ','IQV','IRM','JEC','JBHT','SJM','JNJ','JCI','JPM','JNPR','KSU','K','KEY',
    'KMB','KIM','KMI','KLAC','KSS','KHC','KR','LB','LLL','LH','LRCX','LEG','LEN','LUK',
    'LLY','LNC','LKQ','LMT','L','LOW','LYB','MTB','MAC','M','MRO','MPC','MAR','MMC','MLM',
    'MAS','MA','MAT','MKC','MCD','MCK','MDT','MRK','MET','MTD','MGM','KORS','MCHP','MU',
    'MSFT','MAA','MHK','TAP','MDLZ','MON','MNST','MCO','MS','MOS','MSI','MYL','NDAQ',
    'NOV','NAVI','NTAP','NFLX','NWL','NFX','NEM','NWSA','NWS','NEE','NLSN','NKE','NI',
    'NBL','JWN','NSC','NTRS','NOC','NCLH','NRG','NUE','NVDA','ORLY','OXY','OMC','OKE',
    'ORCL','PCAR','PKG','PH','PDCO','PAYX','PYPL','PNR','PBCT','PEP','PKI','PRGO','PFE',
    'PCG','PM','PSX','PNW','PXD','PNC','RL','PPG','PPL','PX','PCLN','PFG','PG','PGR',
    'PLD','PRU','PEG','PSA','PHM','PVH','QRVO','PWR','QCOM','DGX','RRC','RJF','RTN','O',
    'RHT','REG','REGN','RF','RSG','RMD','RHI','ROK','COL','ROP','ROST','RCL','CRM','SBAC',
    'SCG','SLB','SNI','STX','SEE','SRE','SHW','SIG','SPG','SWKS','SLG','SNA','SO','LUV',
    'SPGI','SWK','SBUX','STT','SRCL','SYK','STI','SYMC','SYF','SNPS','SYY','TROW','TPR',
    'TGT','TEL','FTI','TXN','TXT','TMO','TIF','TWX','TJX','TMK','TSS','TSCO','TDG','TRV',
    'TRIP','FOXA','FOX','TSN','UDR','ULTA','USB','UAA','UA','UNP','UAL','UNH','UPS','URI',
    'UTX','UHS','UNM','VFC','VLO','VAR','VTR','VRSN','VRSK','VZ','VRTX','VIAB','V','VNO',
    'VMC','WMT','WBA','DIS','WM','WAT','WEC','WFC','HCN','WDC','WU','WRK','WY','WHR','WMB',
    'WLTW','WYN','WYNN','XEL','XRX','XLNX','XL','XYL','YUM','ZBH','ZION','ZTS']

    bad_names = []  # to keep track of failed queries

    # here we use the concurrent.futures module's ThreadPoolExecutor to speed
    # up the downloads by doing them in parallel as opposed to sequentially

    # set the maximum thread number
    max_workers = 50
    # in case a smaller number of stocks than threads was passed in
    workers = min(max_workers, len(s_and_p))
    with futures.ThreadPoolExecutor(workers) as executor:
        res = executor.map(download_stock, s_and_p)

    # Save failed queries to a text file to retry
    if len(bad_names) > 0:
        with open('failed_queries.txt', 'w') as outfile:
            for name in bad_names:
                outfile.write(name + '\n')

    # timing:
    finish_time = datetime.now()
    duration = finish_time - now_time
    minutes, seconds = divmod(duration.seconds, 60)
    print('getSandP_threaded.py')
    print(f'The threaded script took {minutes} minutes and {seconds} seconds to run.')
    # The threaded script took 0 minutes and 31 seconds to run.
# -

datetime.now()
Code/Scraping_Tickers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import h2o import numpy as np h2o.init() dataframe_1 = h2o.H2OFrame.from_python({'words':['Hello', 'World', 'Welcome', 'To', 'Machine', 'Learning'], 'numerical_representation': [0,1,2,3,4,5],'letters':['a','b','c','d']}) dataframe_1.describe dataframe_2 = h2o.H2OFrame.from_python({'other_words':['How', 'Are', 'You', 'Doing', 'Today', 'My', 'Friend', 'Learning', 'H2O', 'Artificial', 'Intelligence'], 'numerical_representation': [0,1,2,3,4,5,6,7,8,9],'letters':['a','b','c','d','e']}) dataframe_2.describe final_dataframe = dataframe_1.merge(dataframe_2) final_dataframe.describe
Chapter 3/Merging two dataframes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ashishkumar30/Zerodha_Live_Automate_Trading-_using_AI_ML_on_Indian_stock_market-using-basic-python/blob/master/Live_BOT_(3)_Guppy_Automated_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="890DCmfFQMFP" colab_type="text" # # FULL automated bot on GUPPY indicator # + [markdown] id="Cpqghp-jQMFU" colab_type="text" # start bot with 2 inputs of buy/sell and timeframe then it will scan all the stock and automatically pick the best stock among all the stock and place order on the best stock and continue with it . # if u want to run bot you can continue bot without scanning and run normal bot also. # # + id="FJmWDf4SQMFX" colab_type="code" colab={} outputId="983b92c7-4697-42bf-d59c-7af870f44a5f" print(" \n ") print(" \t \t \t WELCOME ! 
' Please wait while we are installing library") print("\n") import time import secrets import threading import webbrowser import numpy as np import pandas as pd from pytz import timezone from math import modf from pprint import pprint from datetime import datetime from time import gmtime, strftime from kiteconnect import KiteTicker from kiteconnect import KiteConnect from IPython.display import clear_output #api_key='******************* @@enter your api key here@@@**************' #api_secret='**********************@@enter your api key here****************' webbrowser.open_new_tab(KiteConnect(api_key,api_secret).login_url()) kite = KiteConnect(api_key=api_key) data = kite.generate_session(((str(input("ENTER full link generated in URL :- ")).split("request_token=")[1]).split("&action"))[0],api_secret) kite.set_access_token(data["access_token"]) print(" \n ") display(data) print(" \t \t \t \t \t \n WELCOME TO GUPPY BOT & SCREENER & STATUS ") print(" \t \t \t \t \t \n WELCOME TO GUPPY BOT ") print("\n" ) scan=str(input("DO you want Scanning ? YES/NO :-")).upper() if "YES"==scan or "yes"==scan: buy_sell=str(input("ENTER which bot you want to run ? 
")).upper() if "BUY" ==buy_sell: print("BUY BOT STARTED") time_frame =str(input("Enter TimeFrame")) sdate ="2017-04-30" todate ="2019-08-31" print("SCANNING START") #z=list(pd.read_csv("list.csv")["Symbol"]) z=['ADANIPORTS', 'ASIANPAINT', 'AXISBANK', 'BAJAJ-AUTO', 'BAJFINANCE', 'BAJAJFINSV', 'BPCL', 'BHARTIARTL', 'INFRATEL', 'BRITANNIA', 'CIPLA', 'COALINDIA', 'DRREDDY', 'EICHERMOT', 'GAIL', 'GRASIM', 'HCLTECH', 'HDFCBANK', 'HEROMOTOCO', 'HINDALCO', 'HINDUNILVR', 'HDFC', 'ICICIBANK', 'ITC', 'IBULHSGFIN', 'IOC', 'INDUSINDBK', 'INFY', 'JSWSTEEL', 'KOTAKBANK', 'LT', 'M&M', 'MARUTI', 'NTPC', 'ONGC', 'POWERGRID', 'RELIANCE', 'SBIN', 'SUNPHARMA', 'TCS', 'TATAMOTORS', 'TATASTEEL', 'TECHM', 'TITAN', 'UPL', 'ULTRACEMCO', 'VEDL', 'WIPRO', 'YESBANK', 'ZEEL'] eexchange="NSE" tokenall=[] aa=0 print(" \t \t \t \n Getting All tokens for processing BUY SELL ") while(True): ttoken=int(pd.DataFrame(kite.ltp(eexchange+":"+z[aa])).iloc[-2,0]) tokenall.append(ttoken) aa=aa+1 if aa==50: print(" \t \t \t \n Complete ! 
All tokens are fetched from file ") print("\n" ) print(tokenall) break print(" Now checking Condition of BUY sell of GUPPY ") #Variables buy5minute=[] sell5minute=[] buy10minute=[] sell10minute=[] buy15minute=[] sell15minute=[] ## lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] anchor=0 countstart=0 #programe start a=0 #:sell10minutesym,"TOKENS_SELL":sell10minute,"Price":price15min_sell} BUY_listindicator=[] SELL_listindicator=[] sell5minutesym=[] buy5minutesym=[] buy10minutesym=[] sell10minutesym=[] buy15minutesym=[] sell15minutesym=[] # #price price5min_buy=[] price5min_sell=[] price15min_buy=[] price15min_sell=[] price30min_buy=[] price30min_sell=[] priceedit=[] print(a) def ashi(): global a global BUY_listindicator global SELL_listindicator while(True): print("\n" ) print("Current Token Number which is processing is",a) #km=datetime.now().minute #ks=datetime.now().second #if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia #df=kite.historical_data(140033,sdate,todate,time_frame,0) dff=kite.historical_data(tokenall[a],sdate,todate,time_frame,0) dfw=pd.DataFrame(dff) df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = [25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) 
def super_guppy(interval, df, anchor=0):
    """Compute Super Guppy arrow signals for each row of *df*.

    Parameters
    ----------
    interval : int
        Bar interval in minutes.  Unused: the original multiplier logic
        always collapsed to ``mult = 1``, so the dead branches were removed.
    df : pandas.DataFrame
        Must contain 'date','open','high','low','close' plus the EMA
        columns ema_3..ema_23 (fast band), ema_25..ema_70 (slow band)
        and ema_200.  Mutated in place (an 'arrow_color' column is added).
    anchor : int, optional
        Unused (the original forced it to 0); kept for caller compatibility.

    Returns
    -------
    pandas.DataFrame
        Columns ['date','open','high','low','close','arrow_color'];
        arrow_color is one of 'green', 'red', 'aqua', 'blue', 'none'.
    """
    ShowBreak = True
    ShowSwing = True
    ShowCon = False
    uOCCswing = False
    Lookback = 6          # bars of silence required before an arrow fires
    emaFilter = False

    buy1, sell1 = [], []           # raw swing streak counters
    buy2, sell2 = [], []           # swing counters after candle confirmation
    buybreak1, sellbreak1 = [], [] # breakout streak counters

    def barssince(signals):
        # Trailing entries since the last value equal to 1.  If no 1 is
        # present the full length is returned — the arrow logic relies on
        # that at start-up (matches the original helper's behaviour).
        count = 0
        for v in reversed(signals):
            if v == 1:
                break
            count += 1
        return count

    fast_cols = ['ema_3', 'ema_5', 'ema_7', 'ema_9', 'ema_11', 'ema_13',
                 'ema_15', 'ema_17', 'ema_19', 'ema_21', 'ema_23']
    slow_cols = ['ema_25', 'ema_28', 'ema_31', 'ema_34', 'ema_37', 'ema_40',
                 'ema_43', 'ema_46', 'ema_49', 'ema_52', 'ema_55', 'ema_58',
                 'ema_61', 'ema_64', 'ema_67', 'ema_70']

    for i in range(len(df)):
        fast = [df.loc[i, c] for c in fast_cols]
        slow = [df.loc[i, c] for c in slow_cols]
        ema200 = df.loc[i, 'ema_200']
        emafast = sum(fast) / len(fast)
        emaslow = sum(slow) / len(slow)

        # Band "colour": a band is long/short only when strictly ordered.
        colfastL = all(a > b for a, b in zip(fast, fast[1:]))
        colfastS = all(a < b for a, b in zip(fast, fast[1:]))
        colslowL = all(a > b for a, b in zip(slow, slow[1:]))
        colslowS = all(a < b for a, b in zip(slow, slow[1:]))

        # --- swing streaks -------------------------------------------------
        if (emafast > emaslow and not colslowS and colfastL
                and (not ShowCon or colslowL)
                and (not emaFilter or emafast > ema200)):
            # FIX: the original indexed buy1[-1] without checking the list
            # was non-empty, raising IndexError whenever the very first row
            # already satisfied the buy condition.
            buy = buy1[-1] + 1 if buy1 and buy1[-1] > 0 else 1
        else:
            buy = 0
        buy1.append(buy)

        if (emafast < emaslow and not colslowL and colfastS
                and (not ShowCon or colslowS)
                and (not emaFilter or emafast < ema200)):
            sell = sell1[-1] + 1 if sell1 and sell1[-1] > 0 else 1
        else:
            sell = 0
        sell1.append(sell)

        # Candle confirmation (only consulted when uOCCswing is True).
        bullish_candle = (i > 0
                          and df.loc[i - 1, 'close'] < df.loc[i - 1, 'open']
                          and df.loc[i, 'close'] > df.loc[i, 'open'])
        bearish_candle = (i > 0
                          and df.loc[i - 1, 'close'] > df.loc[i - 1, 'open']
                          and df.loc[i, 'close'] < df.loc[i, 'open'])

        buy3 = 1 if (buy > 1 and colfastL and uOCCswing and bullish_candle) else buy
        # FIX: the original tested the *bullish* candle pattern here too
        # (copy-paste from the buy branch); a sell swing should confirm on
        # the bearish reversal.  Dead code while uOCCswing is False, so
        # current behaviour is unchanged.
        sell3 = 1 if (sell > 1 and colfastS and uOCCswing and bearish_candle) else sell
        buy2.append(buy3)
        sell2.append(sell3)

        # --- breakout streaks ----------------------------------------------
        if (emafast > emaslow and not colslowS
                and (not emaFilter or emafast > ema200)):
            buybreak = buybreak1[-1] + 1 if buybreak1 and buybreak1[-1] > 0 else 1
        else:
            buybreak = 0
        buybreak1.append(buybreak)

        if (emafast < emaslow and not colslowL
                and (not emaFilter or emafast < ema200)):
            sellbreak = sellbreak1[-1] + 1 if sellbreak1 and sellbreak1[-1] > 0 else 1
        else:
            sellbreak = 0
        sellbreak1.append(sellbreak)

        # --- arrows --------------------------------------------------------
        # [:-1] excludes the current bar: we ask how long ago the *previous*
        # signal fired.  (The original hard-coded 6 for the swing arrows;
        # Lookback == 6, so using it is behaviour-preserving.)
        buy_since = barssince(buy2[:-1])
        sell_since = barssince(sell2[:-1])
        buybreak_since = barssince(buybreak1[:-1])
        sellbreak_since = barssince(sellbreak1[:-1])

        buy_arrow = 1 if (ShowSwing and buy3 == 1 and buy_since > Lookback) else 0
        sell_arrow = 1 if (ShowSwing and sell3 == 1 and sell_since > Lookback) else 0
        buybreak_arrow = 1 if (ShowBreak and buybreak == 1
                               and sellbreak_since > Lookback
                               and buybreak_since > Lookback) else 0
        sellbreak_arrow = 1 if (ShowBreak and sellbreak == 1
                                and buybreak_since > Lookback
                                and sellbreak_since > Lookback) else 0

        # Priority: swing arrows alone, else breakout arrows.  (The original
        # also tested `buy_arrow in (0, 1)` / `sell_arrow in (0, 1)` in the
        # aqua/blue branches — tautologies on 0/1 flags, dropped.)
        if buy_arrow == 1 and sell_arrow == 0 and buybreak_arrow == 0 and sellbreak_arrow == 0:
            arrow_color = 'green'
        elif buy_arrow == 0 and sell_arrow == 1 and buybreak_arrow == 0 and sellbreak_arrow == 0:
            arrow_color = 'red'
        elif sell_arrow == 0 and buybreak_arrow == 1 and sellbreak_arrow == 0:
            arrow_color = 'aqua'
        elif buy_arrow == 0 and buybreak_arrow == 0 and sellbreak_arrow == 1:
            arrow_color = 'blue'
        else:
            arrow_color = 'none'
        df.loc[i, 'arrow_color'] = arrow_color

    return df[['date', 'open', 'high', 'low', 'close', 'arrow_color']]
BUY stock found ") buy5minute.append((tokenall[a])) buy5minutesym.append((z[a])) price5min_buy.append(gup.iloc[-1,2]) if "red" in gup.iloc[-1,5]: print(" SELL stock found ") sell5minute.append((tokenall[a])) sell5minutesym.append((z[a])) price5min_sell.append(gup.iloc[-1,2]) else: pass print("Buy stock found are :=" ,buy5minute) print("Sell stocks found are:=" ,sell5minute) a=a+1 if a==len(tokenall): break ashi() less_then_buy=[] greater_then_buy=[] less_then_sell=[] greater_then_sell=[] symbl_final_buy=[] token_final_buy=[] print(" Scanning is complete ") buyframe={"Tokens_buy":buy5minute,"Symbol_buy":buy5minutesym,"Price":price5min_buy} fivemin=pd.DataFrame(buyframe) display(fivemin) buyframee={"Tokens_sell":sell5minutesym,"Symbols_sell" :sell5minute,"Price":price5min_sell} fivemine=pd.DataFrame(buyframee) display(fivemine) if buy5minute: print("YES ! Some buy stock found") aa=0 while(True): kk=pd.DataFrame(kite.ltp(buy5minute[aa])).iloc[-1,0] if kk<2000: less_then_buy.append(buy5minute[aa]) if kk>2000: greater_then_buy.append(buy5minute[aa]) aa=aa+1 if aa==len(buy5minute): break if sell5minute: print("yes ! 
def avg_ha(x):
    """Round a price *x* (assumed non-negative) to the 0.05 exchange tick.

    Snapping thresholds match the original implementation's intent:
    hundredths 00-24 -> .x0, 25-74 -> .x5, 75-99 -> carry into the next
    tenth.

    Fixes two defects of the original string-slicing version:
    * carry overflow: avg_ha(1.999) returned 1.1 instead of 2.0 because
      the overflowed tenths digit "10" was spliced back in as text;
    * dropped zero padding: for a value such as 0.44 the hundredths
      slice read "4" (not "40"), snapping down to 0.40 instead of up
      to 0.45.

    Returns a float rounded to 2 decimal places.
    """
    t = round(x, 3)
    whole = int(t)                            # integer rupees
    millis = int(round((t - whole) * 1000))   # fractional part in thousandths
    tenths, rest = divmod(millis, 100)        # rest = hundredths+thousandths, 0..99
    if rest < 25:
        frac = tenths * 0.1
    elif rest < 75:
        frac = tenths * 0.1 + 0.05
    else:
        frac = (tenths + 1) * 0.1             # carries cleanly when tenths == 9
    return round(whole + frac, 2)
def EMA(df, base, target, period, alpha=False):
    """Attach an exponential moving average of ``df[base]`` to ``df[target]``.

    The series is seeded with the *period*-bar simple moving average over
    the first *period* rows and smoothed with an EWM over the remainder;
    leading NaNs from the rolling seed are then zero-filled in place.

    Parameters:
        df:     DataFrame to mutate (column *target* is written).
        base:   name of the source column.
        target: name of the column to create/overwrite.
        period: EMA length.
        alpha:  if True, smooth with alpha = 1/period; otherwise use the
                standard span coefficient 2/(period + 1).
    """
    seeded = pd.concat([df[:period][base].rolling(window=period).mean(),
                        df[period:][base]])
    # `is True` preserved from the original's `== True` test: only the
    # literal True selects the 1/period weighting.
    if alpha is True:
        df[target] = seeded.ewm(alpha=1.0 / period, adjust=False).mean()
    else:
        df[target] = seeded.ewm(span=period, adjust=False).mean()
    df.fillna(0, inplace=True)
#isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and 
emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 
def bidatrema(df, period):
    """Append an ATR column to *df* in place.

    True range per row is max(high-low, |high-prev close|, |low-prev close|);
    'ATR' is its exponential moving average (span=period; the first
    period-1 rows are NaN via min_periods).  Scratch columns are dropped
    before returning.
    """
    prev_close = df['close'].shift()
    df['hl'] = (df['high'] - df['low']).abs()
    df['hpc'] = (df['high'] - prev_close).abs()
    df['lpc'] = (df['low'] - prev_close).abs()
    df['tr'] = df[['hl', 'hpc', 'lpc']].max(axis=1)
    df['ATR'] = df['tr'].ewm(span=period, min_periods=period).mean()
    df.drop(['hl', 'hpc', 'lpc', 'tr'], axis=1, inplace=True)
entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! 
red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass 
else: pass ashis() ######################## else: if greater_then_buy: print("No order is vailable less than 2000 but found more then 2000 so placing on it ") order_place_token=greater_then_buy[0] order_placee=int(order_place_token) order_place=int(order_placee) df=pd.DataFrame(kite.instruments("NSE"))[["instrument_token","tradingsymbol","name"]] zall=df[df["instrument_token"]==order_place] symbl_final_buy.append(zall.iloc[-1,0]) token_final_buy.append(zall.iloc[-1,1]) ######################################## ttradingsymbol =str(symbl_final_buy) eexchange ="NSE" productt ="MIS" qu =1 time_frame =time_frame sdate ="2019-06-05" todate ="2020-10-02" ttoken =int(token_final_buy) ############################# def avg_ha(x): a=list(modf(round(x,3))) d=str(a[0]) aa=str(d[0:2]) try: ab=str(d[2]) except: ab='0' try: ac=str(d[3]) except: ac='0' try: ad=str(d[4]) except: ad='0' c=aa+ab+ac+ad b=0 if a[0]!=0: astr=c a0=astr[0] a1=astr[1] a3=int(astr[2]) a2=int(astr[3:5]) if a2>=0 and a2<25: a2=0 elif a2>=25 and a2<75: a2=5 elif a2>=75 and a2<=99: a3+=1 a2=0 aint=a0+a1+str(a3)+str(a2) a[0]=float(aint) for k in a: b+=k return b counter_start=[0] linkedlist_1=[] linkedlist_2=[] linkedlist_3=[] counter=[0,10] execution=[] checkfirst=[0] red_time=[] green_time=[] redb=[] bluez=[] aqua1=[0] bluein=[] blue_2F=[] blue_2G=[] count_sectime=[] redc=[] colourstill=[] prog_starts=[] #Variables lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] countstart=0 #programe start def ashis(): while(True): km=datetime.now().minute ks=datetime.now().second if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia dff=kite.historical_data(ttoken,sdate,todate,time_frame,0) #time.sleep(1) dfw=pd.DataFrame(dff)[:-1] df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = 
[25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 #isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] 
emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 
buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 #sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif 
sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=pd.DataFrame(df) def bidatrema(df,period): df['hl']=abs(df['high']-df['low']) df['hpc']=abs(df['high']-df['close'].shift()) df['lpc']=abs(df['low']-df['close'].shift()) df['tr']=df[['hl','hpc','lpc']].max(axis=1) df['ATR']=pd.DataFrame.ewm(df["tr"], span=period,min_periods=period).mean() df.drop(["hl","hpc","lpc","tr"],axis = 1 , inplace =True) bidatrema(gup,14) print(" \n \t \t \t \t GUPPY GREEN _ RED PROFIT BOOKING ") print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%Y-%m-%d _ %H:%M:%S ") klp1=now_asia prog_starts.append(klp1) print("\n ") print("Zerodha GUPPY SELL BOT start Time " , prog_starts[0]) print("\n ") print("BOT working succeesfully on time: " , now_asia ) print("\n ") print("Trading symbol is",ttradingsymbol,"Token is",ttoken,"Exchange is",eexchange,"and product type is",productt,"Quantity is",qu,"time frame for Historical Data is",time_frame,"Starting and Ending Date of Historical Data is",sdate,todate) colourstill.append(gup.iloc[-1,5]) print(" \n ") print(" Completed with 'GREEN' entry=", len(green_time)) print(" Completed with 'RED' entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) 
xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) 
counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass else: pass ashis() else: print("sorry no order found of BUY") time.sleep(300) if "SELL"==buy_sell: print("SELL BOT STARTED") time_frame =str(input("Enter TimeFrame")) sdate ="2019-07-05" todate ="2019-10-02" print("SCANNING START") #z=list(pd.read_csv("list.csv")["Symbol"]) z=['ADANIPORTS', 'ASIANPAINT', 'AXISBANK', 'BAJAJ-AUTO', 'BAJFINANCE', 'BAJAJFINSV', 'BPCL', 'BHARTIARTL', 'INFRATEL', 'BRITANNIA', 'CIPLA', 'COALINDIA', 'DRREDDY', 'EICHERMOT', 'GAIL', 'GRASIM', 'HCLTECH', 'HDFCBANK', 'HEROMOTOCO', 'HINDALCO', 'HINDUNILVR', 'HDFC', 'ICICIBANK', 'ITC', 'IBULHSGFIN', 'IOC', 'INDUSINDBK', 'INFY', 'JSWSTEEL', 'KOTAKBANK', 'LT', 'M&M', 'MARUTI', 'NTPC', 'ONGC', 'POWERGRID', 'RELIANCE', 'SBIN', 'SUNPHARMA', 'TCS', 'TATAMOTORS', 'TATASTEEL', 'TECHM', 'TITAN', 'UPL', 'ULTRACEMCO', 'VEDL', 'WIPRO', 'YESBANK', 'ZEEL'] eexchange="NSE" tokenall=[] aa=0 print(" \t \t \t \n Getting All tokens for processing BUY SELL ") while(True): ttoken=int(pd.DataFrame(kite.ltp(eexchange+":"+z[aa])).iloc[-2,0]) tokenall.append(ttoken) aa=aa+1 if 
aa==50: print(" \t \t \t \n Complete ! All tokens are fetched from file ") print("\n" ) print(tokenall) break print(" Now checking Condition of BUY sell of GUPPY ") #Variables buy5minute=[] sell5minute=[] buy10minute=[] sell10minute=[] buy15minute=[] sell15minute=[] ## lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] anchor=0 countstart=0 #programe start a=0 #:sell10minutesym,"TOKENS_SELL":sell10minute,"Price":price15min_sell} BUY_listindicator=[] SELL_listindicator=[] sell5minutesym=[] buy5minutesym=[] buy10minutesym=[] sell10minutesym=[] buy15minutesym=[] sell15minutesym=[] # #price price5min_buy=[] price5min_sell=[] price15min_buy=[] price15min_sell=[] price30min_buy=[] price30min_sell=[] priceedit=[] print(a) def ashi(): global a global BUY_listindicator global SELL_listindicator while(True): print("\n" ) print("Current Token Number which is processing is",a) #km=datetime.now().minute #ks=datetime.now().second #if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia #df=kite.historical_data(140033,sdate,todate,time_frame,0) dff=kite.historical_data(tokenall[a],sdate,todate,time_frame,0) dfw=pd.DataFrame(dff) df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = [25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k 
in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 #isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] 
ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > 
emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 #sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=df print(" \t \t \t \t Zerodha GUPPY SCREENER on 5 minute Data") print("\t \t \t \n Current Token checking " , tokenall[a]) print("\n" ) print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) if 
"green" in gup.iloc[-1,5]: print(" BUY stock found ") buy5minute.append((tokenall[a])) buy5minutesym.append((z[a])) price5min_buy.append(gup.iloc[-1,2]) if "red" in gup.iloc[-1,5]: print(" SELL stock found ") sell5minute.append((tokenall[a])) sell5minutesym.append((z[a])) price5min_sell.append(gup.iloc[-1,2]) else: pass print("Buy stock found are :=" ,buy5minute) print("Sell stocks found are:=" ,sell5minute) a=a+1 if a==len(tokenall): break ashi() less_then_buy=[] greater_then_buy=[] less_then_sell=[] greater_then_sell=[] symbl_final_buy=[] token_final_buy=[] print(" Scanning is complete ") buyframe={"SYMBOLS_BUY":buy5minute,"TOKENS_BUY":buy5minutesym,"Price":price5min_buy} fivemin=pd.DataFrame(buyframe) display(fivemin) buyframee={"SYMBOLS_SELL":sell5minutesym,"TOKENS_SELL":sell5minute,"Price":price5min_sell} fivemine=pd.DataFrame(buyframee) display(fivemine) if buy5minute: print("YES ! Some buy stock found") aa=0 while(True): kk=pd.DataFrame(kite.ltp(buy5minute[aa])).iloc[-1,0] if kk<2000: less_then_buy.append(buy5minute[aa]) if kk>2000: greater_then_buy.append(buy5minute[aa]) aa=aa+1 if aa==len(buy5minute): break if sell5minute: print("yes ! 
some sell tock found") aa=0 while(True): kk=pd.DataFrame(kite.ltp(sell5minute[aa])).iloc[-1,0] if kk<2000: less_then_sell.append(sell5minute[aa]) if kk>2000: greater_then_sell.append(sell5minute[aa]) aa=aa+1 if aa==len(sell5minute): break print("BUY LESS THEN 2000" ,less_then_buy) print("BUY grater then 2000" ,greater_then_buy) print("SELL less then 2000" ,less_then_sell) print("SELL GREATER THEN 2000",greater_then_sell) less_then_buy=less_then_sell greater_then_buy=greater_then_sell if less_then_buy: print("Going to Place order on ",less_then_buy[0]) order_place_token=less_then_buy[0] order_place=int(order_place_token) print("SELL FOUND") df=pd.DataFrame(kite.instruments("NSE"))[["instrument_token","tradingsymbol","name"]] zall=df[df["instrument_token"]==order_place] symbl_final_buy.append(zall.iloc[-1,0]) token_final_buy.append(zall.iloc[-1,1]) ######################################## ttradingsymbol =str(symbl_final_buy) eexchange ="NSE" productt ="MIS" qu =1 time_frame =time_frame sdate ="2019-06-05" todate ="2020-10-02" ttoken =int(token_final_buy) ############################# #calculation def avg_ha(x): a=list(modf(round(x,3))) d=str(a[0]) aa=str(d[0:2]) try: ab=str(d[2]) except: ab='0' try: ac=str(d[3]) except: ac='0' try: ad=str(d[4]) except: ad='0' c=aa+ab+ac+ad b=0 if a[0]!=0: astr=c a0=astr[0] a1=astr[1] a3=int(astr[2]) a2=int(astr[3:5]) if a2>=0 and a2<25: a2=0 elif a2>=25 and a2<75: a2=5 elif a2>=75 and a2<=99: a3+=1 a2=0 aint=a0+a1+str(a3)+str(a2) a[0]=float(aint) for k in a: b+=k return b counter_start=[0] linkedlist_1=[] linkedlist_2=[] linkedlist_3=[] counter=[0,10] execution=[] checkfirst=[0] red_time=[] green_time=[] redb=[] bluez=[] aqua1=[0] bluein=[] blue_2F=[] blue_2G=[] count_sectime=[] redc=[] colourstill=[] prog_starts=[] #Variables lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] countstart=0 #programe start def ashis(): while(True): km=datetime.now().minute ks=datetime.now().second if km%1==0 and ks==1: 
clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia dff=kite.historical_data(ttoken,sdate,todate,time_frame,0) #time.sleep(1) dfw=pd.DataFrame(dff)[:-1] df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = [25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 
#isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and 
emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 
#sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=pd.DataFrame(df) def bidatrema(df,period): df['hl']=abs(df['high']-df['low']) df['hpc']=abs(df['high']-df['close'].shift()) df['lpc']=abs(df['low']-df['close'].shift()) df['tr']=df[['hl','hpc','lpc']].max(axis=1) df['ATR']=pd.DataFrame.ewm(df["tr"], span=period,min_periods=period).mean() df.drop(["hl","hpc","lpc","tr"],axis = 1 , inplace =True) bidatrema(gup,14) print(" \n \t \t \t \t GUPPY SELL_ RED PROFIT BOOKING ") print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%Y-%m-%d _ %H:%M:%S ") klp1=now_asia prog_starts.append(klp1) print("\n ") print("Zerodha GUPPY SELL BOT start Time " , prog_starts[0]) print("\n ") print("BOT working succeesfully on time: " , now_asia ) print("\n ") print("Trading symbol is",ttradingsymbol,"Token is",ttoken,"Exchange is",eexchange,"and product type is",productt,"Quantity is",qu,"time frame for Historical Data is",time_frame,"Starting and Ending Date of Historical Data is",sdate,todate) colourstill.append(gup.iloc[-1,5]) print(" \n ") print(" Completed with 'GREEN' entry=", len(green_time)) print(" Completed with 'RED' 
entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "red" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(2) #if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! 
red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass 
else: pass ashis() ######################## else: if greater_then_buy: print("No order is vailable less than 2000 but found more then 2000 so placing on it ") order_place_token=greater_then_buy[0] order_placee=int(order_place_token) order_place=int(order_placee) df=pd.DataFrame(kite.instruments("NSE"))[["instrument_token","tradingsymbol","name"]] zall=df[df["instrument_token"]==order_place] symbl_final_buy.append(zall.iloc[-1,0]) token_final_buy.append(zall.iloc[-1,1]) ######################################## ttradingsymbol =str(symbl_final_buy) eexchange ="NSE" productt ="MIS" qu =1 time_frame =time_frame sdate ="2019-06-05" todate ="2020-10-02" ttoken =int(token_final_buy) ############################# def avg_ha(x): a=list(modf(round(x,3))) d=str(a[0]) aa=str(d[0:2]) try: ab=str(d[2]) except: ab='0' try: ac=str(d[3]) except: ac='0' try: ad=str(d[4]) except: ad='0' c=aa+ab+ac+ad b=0 if a[0]!=0: astr=c a0=astr[0] a1=astr[1] a3=int(astr[2]) a2=int(astr[3:5]) if a2>=0 and a2<25: a2=0 elif a2>=25 and a2<75: a2=5 elif a2>=75 and a2<=99: a3+=1 a2=0 aint=a0+a1+str(a3)+str(a2) a[0]=float(aint) for k in a: b+=k return b counter_start=[0] linkedlist_1=[] linkedlist_2=[] linkedlist_3=[] counter=[0,10] execution=[] checkfirst=[0] red_time=[] green_time=[] redb=[] bluez=[] aqua1=[0] bluein=[] blue_2F=[] blue_2G=[] count_sectime=[] redc=[] colourstill=[] prog_starts=[] #Variables lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] countstart=0 #programe start def ashis(): while(True): km=datetime.now().minute ks=datetime.now().second if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia dff=kite.historical_data(ttoken,sdate,todate,time_frame,0) #time.sleep(1) dfw=pd.DataFrame(dff)[:-1] df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = 
[25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 #isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] 
# Fragment of super_guppy()'s per-row loop (source formatting was collapsed).
# Loads the remaining ribbon EMAs for row i, averages the fast/slow ribbons,
# derives the ribbon "colour" (strict ordering) flags, and updates the running
# buy-bar counter.  emaF1..emaF5, df, i, buy1, ShowCon and emaFilter are
# defined immediately before this fragment.
fast_ribbon = [emaF1, emaF2, emaF3, emaF4, emaF5] + [
    df.loc[i, c] for c in ('ema_13', 'ema_15', 'ema_17', 'ema_19', 'ema_21', 'ema_23')]
slow_ribbon = [df.loc[i, c] for c in (
    'ema_25', 'ema_28', 'ema_31', 'ema_34', 'ema_37', 'ema_40', 'ema_43', 'ema_46',
    'ema_49', 'ema_52', 'ema_55', 'ema_58', 'ema_61', 'ema_64', 'ema_67', 'ema_70')]
ema200 = df.loc[i, 'ema_200']
# Plain means of each ribbon (sum order matches the original chained addition).
emafast = sum(fast_ribbon) / 11
emaslow = sum(slow_ribbon) / 16
# "Long" colour = every EMA strictly above the next slower one; "short" = strictly below.
colfastL = all(a > b for a, b in zip(fast_ribbon, fast_ribbon[1:]))
colfastS = all(a < b for a, b in zip(fast_ribbon, fast_ribbon[1:]))
colslowL = all(a > b for a, b in zip(slow_ribbon, slow_ribbon[1:]))
colslowS = all(a < b for a, b in zip(slow_ribbon, slow_ribbon[1:]))
# Running buy counter: extends while the long conditions hold, else resets to 0.
if (emafast > emaslow and not colslowS and colfastL
        and (not ShowCon or colslowL)
        and (not emaFilter or emafast > ema200)):
    buy = buy1[-1] + 1 if int(buy1[-1]) > 0 else 1
else:
    buy = 0
buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 #sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif 
sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=pd.DataFrame(df) def bidatrema(df,period): df['hl']=abs(df['high']-df['low']) df['hpc']=abs(df['high']-df['close'].shift()) df['lpc']=abs(df['low']-df['close'].shift()) df['tr']=df[['hl','hpc','lpc']].max(axis=1) df['ATR']=pd.DataFrame.ewm(df["tr"], span=period,min_periods=period).mean() df.drop(["hl","hpc","lpc","tr"],axis = 1 , inplace =True) bidatrema(gup,14) print(" \n \t \t \t \t GUPPY SELL _ RED PROFIT BOOKING ") print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%Y-%m-%d _ %H:%M:%S ") klp1=now_asia prog_starts.append(klp1) print("\n ") print("Zerodha GUPPY SELL BOT start Time " , prog_starts[0]) print("\n ") print("BOT working succeesfully on time: " , now_asia ) print("\n ") print("Trading symbol is",ttradingsymbol,"Token is",ttoken,"Exchange is",eexchange,"and product type is",productt,"Quantity is",qu,"time frame for Historical Data is",time_frame,"Starting and Ending Date of Historical Data is",sdate,todate) colourstill.append(gup.iloc[-1,5]) print(" \n ") print(" Completed with 'GREEN' entry=", len(green_time)) print(" Completed with 'RED' entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) 
xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) 
counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass else: pass ashis() else: print("sorry no order found of SELL ") time.sleep(300) if "NO"==scan: manually=str(input( "ENTER BUY or SELL ?")) if "BUY" in manually: print(" BUY BOT STARTED Manually") #calculation def avg_ha(x): a=list(modf(round(x,3))) d=str(a[0]) aa=str(d[0:2]) try: ab=str(d[2]) except: ab='0' try: ac=str(d[3]) except: ac='0' try: ad=str(d[4]) except: ad='0' c=aa+ab+ac+ad b=0 if a[0]!=0: astr=c a0=astr[0] a1=astr[1] a3=int(astr[2]) a2=int(astr[3:5]) if a2>=0 and a2<25: a2=0 elif a2>=25 and a2<75: a2=5 elif a2>=75 and a2<=99: a3+=1 a2=0 aint=a0+a1+str(a3)+str(a2) a[0]=float(aint) for k in a: b+=k return b print("WELCOME TO GUPPY BUY PROFIT BOOKING BOT ! 
") print(" \n ") ttradingsymbol =str(input("ENTER TRADINGSYMBOL - SUNPHARMA / SBIN :-")).upper() eexchange =str(input("ENTER EXCHANGE / NSE / BSE / CDS :-")).upper() productt =str(input("ENTER PRODUCT / NRML /CNC / MIS :-")).upper() qu =int(input("ENTER QUANTITY - 2 / 4 :-")) time_frame =str(input("ENTER TIME_FRAME - minute / 5minute :-")).lower() sdate ="2019-05-05" todate ="2020-10-02" ttoken=int(pd.DataFrame(kite.ltp(eexchange+":"+ttradingsymbol)).iloc[-2,0]) counter_start=[0] linkedlist_1=[] linkedlist_2=[] linkedlist_3=[] counter=[0,10] execution=[] checkfirst=[0] red_time=[] green_time=[] redb=[] bluez=[] aqua1=[0] bluein=[] blue_2F=[] blue_2G=[] count_sectime=[] redc=[] colourstill=[] prog_starts=[] #Variables lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] countstart=0 #programe start def ashis(): while(True): km=datetime.now().minute ks=datetime.now().second if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia dff=kite.historical_data(ttoken,sdate,todate,time_frame,0) #time.sleep(1) dfw=pd.DataFrame(dff)[:-1] df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = [25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def 
super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 #isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval >= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + 
emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if 
buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 #sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=pd.DataFrame(df) def bidatrema(df,period): df['hl']=abs(df['high']-df['low']) df['hpc']=abs(df['high']-df['close'].shift()) df['lpc']=abs(df['low']-df['close'].shift()) df['tr']=df[['hl','hpc','lpc']].max(axis=1) df['ATR']=pd.DataFrame.ewm(df["tr"], 
span=period,min_periods=period).mean() df.drop(["hl","hpc","lpc","tr"],axis = 1 , inplace =True) bidatrema(gup,14) print(" \n \t \t \t \t GUPPY GREEN _ RED PROFIT BOOKING ") print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%Y-%m-%d _ %H:%M:%S ") klp1=now_asia prog_starts.append(klp1) print("\n ") print("Zerodha GUPPY BUY BOT start Time " , prog_starts[0]) print("\n ") print("BOT working succeesfully on time: " , now_asia ) print("\n ") print("Trading symbol is",ttradingsymbol,"Token is",ttoken,"Exchange is",eexchange,"and product type is",productt,"Quantity is",qu,"time frame for Historical Data is",time_frame,"Starting and Ending Date of Historical Data is",sdate,todate) colourstill.append(gup.iloc[-1,5]) print(" \n ") print(" Completed with 'GREEN' entry=", len(green_time)) print(" Completed with 'RED' entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] 
match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: 
kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass else: pass ashis() if "SELL"==manually: print("SELL BOT STARTED Manually") #calculation def avg_ha(x): a=list(modf(round(x,3))) d=str(a[0]) aa=str(d[0:2]) try: ab=str(d[2]) except: ab='0' try: ac=str(d[3]) except: ac='0' try: ad=str(d[4]) except: ad='0' c=aa+ab+ac+ad b=0 if a[0]!=0: astr=c a0=astr[0] a1=astr[1] a3=int(astr[2]) a2=int(astr[3:5]) if a2>=0 and a2<25: a2=0 elif a2>=25 and a2<75: a2=5 elif a2>=75 and a2<=99: a3+=1 a2=0 aint=a0+a1+str(a3)+str(a2) a[0]=float(aint) for k in a: b+=k return b print("WELCOME TO GUPPY SELL PROFIT BOOKING BOT ! ") print(" \n ") ttradingsymbol =str(input("ENTER TRADINGSYMBOL - SUNPHARMA / SBIN :-")).upper() eexchange =str(input("ENTER EXCHANGE / NSE / BSE / CDS :-")).upper() productt =str(input("ENTER PRODUCT / NRML /CNC / MIS :-")).upper() qu =int(input("ENTER QUANTITY - 2 / 4 :-")) time_frame =str(input("ENTER TIME_FRAME - minute / 5minute :-")).lower() sdate ="2019-05-05" todate ="2020-10-02" ttoken=int(pd.DataFrame(kite.ltp(eexchange+":"+ttradingsymbol)).iloc[-2,0]) counter_start=[0] linkedlist_1=[] linkedlist_2=[] linkedlist_3=[] counter=[0,10] execution=[] checkfirst=[0] red_time=[] green_time=[] redb=[] bluez=[] aqua1=[0] bluein=[] blue_2F=[] blue_2G=[] count_sectime=[] redc=[] colourstill=[] prog_starts=[] #Variables lst_candle=[] lst_heikin_nor=[] lst_heikin=[] lst_cand=[] lst_c=[] countstart=0 #programe start def ashis(): while(True): km=datetime.now().minute ks=datetime.now().second if km%1==0 and ks==1: clear_output(wait=True) now_utc = datetime.now(timezone('UTC')) now_asia = 
now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%S ") klp1=now_asia dff=kite.historical_data(ttoken,sdate,todate,time_frame,0) #time.sleep(1) dfw=pd.DataFrame(dff)[:-1] df=pd.DataFrame(dfw[['date','open','high','low','close']]) slow_ema = [3,5,7,9,11,13,15,17,19,21,23] fast_ema = [25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,200] def EMA(df, base, target, period, alpha=False): con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]]) if (alpha == True): # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period df[target] = con.ewm(alpha=1 / period, adjust=False).mean() else: # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1) df[target] = con.ewm(span=period, adjust=False).mean() df.fillna(0,inplace = True) # return df for j in slow_ema: val = "ema"+"_"+str(j) EMA(df,"close",val,j) for k in fast_ema: val = "ema"+"_"+str(k) EMA(df,"close",val,k) def super_guppy(interval,df,anchor=0): # df['buy'] = 0 # df['sell'] = 0 # df['buybreak'] = 0 # df['sellbreak'] = 0 anchor = 0 ShowBreak = True ShowSwing = True ShowCon = False uOCCswing = False Lookback = 6 emaFilter = False mult = 0 buybreak = 0 sellbreak = 0 buy_barssince_var = 0 sell_barssince_var = 0 buybreak_barssince_var = 0 sellbreak_barssince_var = 0 barssince_lst = list() barssince_var = 0 bar_count_var = 0 buy1 = list() sell1 = list() buy2 = list() sell2 = list() buybreak1 = list() sellbreak1 = list() def barssince(b,barssince_var): barssince_lst = [] barssince_var = 0 new_var = len(b) for i in b[::-1]: if i == 1: break barssince_lst.append(i) barssince_var = len(barssince_lst) return barssince_var barssince_lst.clear() #isIntraday if interval < 1441 : if (anchor==0 or interval <= 0 or interval >= anchor or anchor > 1441 ): mult = 1 else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = 1 #isIntraday Not if interval > 1441: if (anchor==0 or interval <= 0 or interval 
>= anchor or anchor < 52 ): mult = mult else: if round(anchor/interval) > 1: mult = round(anchor/interval) else: mult = 1 else: mult = mult mult = 1 for i in range(len(df)): emaF1 = df.loc[i,'ema_3'] emaF2 = df.loc[i,'ema_5'] emaF3 = df.loc[i,'ema_7'] emaF4 = df.loc[i,'ema_9'] emaF5 = df.loc[i,'ema_11'] emaF6 = df.loc[i,'ema_13'] emaF7 = df.loc[i,'ema_15'] emaF8 = df.loc[i,'ema_17'] emaF9 = df.loc[i,'ema_19'] emaF10 = df.loc[i,'ema_21'] emaF11 = df.loc[i,'ema_23'] emaS1 = df.loc[i,'ema_25'] emaS2 = df.loc[i,'ema_28'] emaS3 = df.loc[i,'ema_31'] emaS4 = df.loc[i,'ema_34'] emaS5 = df.loc[i,'ema_37'] emaS6 = df.loc[i,'ema_40'] emaS7 = df.loc[i,'ema_43'] emaS8 = df.loc[i,'ema_46'] emaS9 = df.loc[i,'ema_49'] emaS10 = df.loc[i,'ema_52'] emaS11 = df.loc[i,'ema_55'] emaS12 = df.loc[i,'ema_58'] emaS13 = df.loc[i,'ema_61'] emaS14 = df.loc[i,'ema_64'] emaS15 = df.loc[i,'ema_67'] emaS16 = df.loc[i,'ema_70'] ema200 = df.loc[i,'ema_200'] emafast = (emaF1 + emaF2 + emaF3 + emaF4 + emaF5 + emaF6 + emaF7 + emaF8 + emaF9 + emaF10 + emaF11)/11 emaslow = (emaS1 + emaS2 + emaS3 + emaS4 + emaS5 + emaS6 + emaS7 + emaS8 + emaS9 + emaS10 + emaS11 + emaS12 + emaS13 + emaS14 + emaS15 + emaS16)/16 #Fast EMA Color Rules colfastL = (emaF1>emaF2 and emaF2>emaF3 and emaF3>emaF4 and emaF4>emaF5 and emaF5>emaF6 and emaF6>emaF7 and emaF7>emaF8 and emaF8>emaF9 and emaF9>emaF10 and emaF10>emaF11) colfastS = (emaF1<emaF2 and emaF2<emaF3 and emaF3<emaF4 and emaF4<emaF5 and emaF5<emaF6 and emaF6<emaF7 and emaF7<emaF8 and emaF8<emaF9 and emaF9<emaF10 and emaF10<emaF11) #Slow EMA Color Rules colslowL = (emaS1>emaS2 and emaS2>emaS3 and emaS3>emaS4 and emaS4>emaS5 and emaS5>emaS6 and emaS6>emaS7 and emaS7>emaS8) and (emaS8>emaS9 and emaS9>emaS10 and emaS10>emaS11 and emaS11>emaS12 and emaS12>emaS13 and emaS13>emaS14 and emaS14>emaS15 and emaS15>emaS16) colslowS = (emaS1<emaS2 and emaS2<emaS3 and emaS3<emaS4 and emaS4<emaS5 and emaS5<emaS6 and emaS6<emaS7 and emaS7<emaS8) and (emaS8<emaS9 and emaS9<emaS10 and 
emaS10<emaS11 and emaS11<emaS12 and emaS12<emaS13 and emaS13<emaS14 and emaS14<emaS15 and emaS15<emaS16) if emafast > emaslow and not colslowS and colfastL and (not ShowCon or colslowL) and (not emaFilter or emafast>ema200): if int(buy1[-1]) > 0: buy = buy1[-1] + 1 else: buy = 1 else: buy = 0 buy1.append(buy) if emafast < emaslow and not colslowL and colfastS and (not ShowCon or colslowS) and (not emaFilter or emafast<ema200): if int(sell1[-1]) > 0: sell = sell1[-1] + 1 else: sell = 1 else: sell = 0 sell1.append(sell) #buy if buy>1 and colfastL and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): buy3 = 1 else: buy3 = buy buy2.append(buy3) #sell if sell>1 and colfastS and (uOCCswing and ((df.loc[i-1,'close']<df.loc[i-1,'open']) and (df.loc[i,'close']>df.loc[i,'open']))): sell3 = 1 else: sell3 = sell sell2.append(sell3) #buybreak if emafast > emaslow and not colslowS and (not emaFilter or emafast>ema200): if buybreak1[-1] > 0: buybreak = buybreak1[-1] + 1 else: buybreak = 1 else: buybreak = 0 buybreak1.append(buybreak) if emafast < emaslow and not colslowL and (not emaFilter or emafast<ema200): if sellbreak1[-1] > 0: sellbreak = sellbreak1[-1]+1 else: sellbreak = 1 else: sellbreak = 0 sellbreak1.append(sellbreak) #arrow plotting #buy_arrow buy_barssince_var = barssince(buy2[:-1],barssince_var) if (ShowSwing and buy3==1)and buy_barssince_var > 6: buy_arrow = 1 else: buy_arrow = 0 #sell arrow sell_barssince_var = barssince(sell2[:-1],barssince_var) if ShowSwing and (sell3==1 and sell_barssince_var > 6): sell_arrow = 1 else: sell_arrow = 0 #buybreak_arrow buybreak_barssince_var = barssince(buybreak1[:-1],barssince_var) sellbreak_barssince_var = barssince(sellbreak1[:-1],barssince_var) if ShowBreak and buybreak==1 and (sellbreak_barssince_var>Lookback) and (buybreak_barssince_var>Lookback): buybreak_arrow = 1 else: buybreak_arrow = 0 #sellbreak_arrow if ShowBreak and sellbreak==1 and (buybreak_barssince_var>Lookback) 
and (sellbreak_barssince_var>Lookback): sellbreak_arrow = 1 else: sellbreak_arrow = 0 if buy_arrow==1 and sell_arrow==0 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'green' elif buy_arrow==0 and sell_arrow==1 and buybreak_arrow==0 and sellbreak_arrow==0: arrow_color = 'red' elif sell_arrow==0 and (buy_arrow==0 or buy_arrow==1) and buybreak_arrow==1 and sellbreak_arrow==0: arrow_color = 'aqua' elif buy_arrow==0 and (sell_arrow==1 or sell_arrow==0) and buybreak_arrow==0 and sellbreak_arrow==1: arrow_color = 'blue' else: arrow_color = 'none' df.loc[i,'arrow_color'] = arrow_color df = df[['date','open','high','low','close','arrow_color']] return df df=super_guppy(15,df) gup=pd.DataFrame(df) def bidatrema(df,period): df['hl']=abs(df['high']-df['low']) df['hpc']=abs(df['high']-df['close'].shift()) df['lpc']=abs(df['low']-df['close'].shift()) df['tr']=df[['hl','hpc','lpc']].max(axis=1) df['ATR']=pd.DataFrame.ewm(df["tr"], span=period,min_periods=period).mean() df.drop(["hl","hpc","lpc","tr"],axis = 1 , inplace =True) bidatrema(gup,14) print(" \n \t \t \t \t GUPPY SELL PROFIT BOOKING ") print(" \t \t \t \n Current Colour on this token is " , gup.iloc[-1,5]) now_utc = datetime.now(timezone('UTC')) now_asia = now_utc.astimezone(timezone('Asia/Kolkata')) now_asia = now_asia.strftime("%Y-%m-%d _ %H:%M:%S ") klp1=now_asia prog_starts.append(klp1) print("\n ") print("Zerodha GUPPY SELL BOT start Time " , prog_starts[0]) print("\n ") print("BOT working succeesfully on time: " , now_asia ) print("\n ") print("Trading symbol is",ttradingsymbol,"Token is",ttoken,"Exchange is",eexchange,"and product type is",productt,"Quantity is",qu,"time frame for Historical Data is",time_frame,"Starting and Ending Date of Historical Data is",sdate,todate) colourstill.append(gup.iloc[-1,5]) print(" \n ") print(" Completed with 'GREEN' entry=", len(green_time)) print(" Completed with 'RED' entry=", len(red_time)) print("\n ") print("\n ") if 0 in counter_start: if "red" in 
gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(2) #if "green" in gup.iloc[-1,5]: counter_start.remove(0) counter_start.append(1) else: pass if 2 in counter_start: print(" BOT STARTED WITH RED ' SELL' ENTRY ") if "red" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,3]-gup.iloc[-1,6]*.25) price_sell=avg_ha(xa) xb=price_sell-float(gup.iloc[-1,6])*0.1 triger_sell=avg_ha(xb) triggerprice_sell=avg_ha(triger_sell) kite_drop=kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price= price_sell,trigger_price=triggerprice_sell,transaction_type='SELL',product=productt,tag="testR") print("red sell Stoploss order placed and SL trigger price is",triggerprice_sell,"and price is",price_sell, "quantity is",quant) linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking red execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY! 
red not executed") pass if "green" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: #check execution of green kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='BUY',product=productt,tag="testR") print("programe completed with red entry and end with green entry") counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) red_time.append(1) else: pass else: pass if 1 in counter_start: print(" BOT STARTED WITH GREEN ' BUY' ENTRY ") if "green" in gup.iloc[-1,5]: if 0 in counter: quant=qu xa=float(gup.iloc[-1,2]+(gup.iloc[-1,6]*.25)) cur_high=avg_ha(xa) xb=cur_high+float(gup.iloc[-1,6])*0.1 lim_price=avg_ha(xb) kite_drop=kite.place_order( variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='SL',price=lim_price ,trigger_price=cur_high,transaction_type='BUY',product=productt,tag="testR") linkedlist_1.insert(0,kite_drop) counter.remove(0) counter.append(1) if 1 in counter: print("checking green execution") update_reverse=pd.DataFrame(kite.orders())[["tradingsymbol","status","order_id","quantity"]] match_in,=update_reverse[update_reverse.order_id==linkedlist_1[0]].index match_in_int=int(match_in) print("checking green executed or not condition ") if "COMPLETE" in update_reverse.loc[match_in_int,"status"]: print("ORDER COMPLETED and waiting for red " ) execution.append(1) else: print("SORRY GREEN NOT EXECUTED") pass if "red" in gup.iloc[-1,5]: if 1 in counter: if 1 in execution: kite.place_order(variety="regular",tradingsymbol=ttradingsymbol,quantity=quant,exchange=eexchange,order_type='MARKET',transaction_type='SELL',product=productt,tag="testG") print("programe completed with green entry and end with RED entry") green_time.append(1) counter.clear() counter_start.clear() execution.clear() linkedlist_1.clear() execution.clear() counter_start.append(0) else: pass else: pass else: pass 
else: pass ashis()
Automate_Trading-_using_AI_ML_on_Indian_stock_market-using-basic-python(zerotha)/Live_BOT_(3)_Guppy_Automated_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## CoNLL-2003 Example for Text Extensions for Pandas # ### Part 3 # # To run this notebook, you will need to obtain a copy of the CoNLL-2003 data set's corpus. # Drop the corpus's files into the following locations: # * conll_03/eng.testa # * conll_03/eng.testb # * conll_03/eng.train # # If you are unfamiliar with the basics of Text Extensions for Pandas, we recommend you # start with Part 1 of this example. # # ### Introduction # # At the end of part 2 of the demo, we've shown that there are incorrect labels hidden in the CoNLL-2003 validation set, and that you can pinpoint those incorrect labels by data-mining the results of the 16 models the competitors submitted. # # Our goal for the remainder of the demo is to pinpoint incorrect labels across the entire data set. The (rough) process to do so will be: # # 1. Retokenize the entire corpus using a "BERT-compatible" tokenizer, and map the token/entity labels from the original corpus on to the new tokenization. # 2. Generate BERT embeddings for every token in the entire corpus in one pass, and store those embeddings in a dataframe column (of type TensorType) alongside the tokens and labels. # 3. Use the embeddings to quickly train multiple models at multiple levels of sophistication (something like: SVMs, random forests, and LSTMs with small and large numbers of hidden states). Split the corpus into 10 parts and perform a 10-fold cross-validation. # 4. Repeat the process from part 2 on each fold of the 10-fold cross-validation, comparing the outputs of every model on the validation set for each fold. # 5. Analyze the results of the models to pipoint potential incorrect labels. Inspect those labels manually and build up a list of labels that are actually incorrect. 
# # # # Imports and constants # + # INITIALIZATION BOILERPLATE # The Jupyter kernel for this notebook usually starts up inside the notebooks # directory, but the text_extensions_for_pandas package code is in the parent # directory. Add that parent directory to the front of the Python include path. import sys if (sys.path[0] != ".."): sys.path[0] = ".." # Libraries import numpy as np import pandas as pd import time import torch import transformers from typing import * import sklearn.pipeline import matplotlib.pyplot as plt import ipywidgets from IPython.display import display # And of course we need the text_extensions_for_pandas library itself. import text_extensions_for_pandas as tp # Common code shared across notebooks is kept in util.py import util # BERT Configuration #bert_model_name = "bert-base-uncased" #bert_model_name = "bert-large-uncased" bert_model_name = "dslim/bert-base-NER" tokenizer = transformers.BertTokenizerFast.from_pretrained(bert_model_name, add_special_tokens=True) bert = transformers.BertModel.from_pretrained(bert_model_name) # Create a Pandas categorical type for consistent encoding of categories # across all documents. _ENTITY_TYPES = ["LOC", "MISC", "ORG", "PER"] token_class_dtype, int_to_label, label_to_int = tp.make_iob_tag_categories(_ENTITY_TYPES) # - # Download and cache the data set. # NOTE: This data set is licensed for research use only. Be sure to adhere # to the terms of the license when using this data set! data_set_info = util.get_conll_data() data_set_info # # Show how to retokenize with a BERT tokenizer. # # Retokenize the corpus using a "BERT-compatible" tokenizer, and map the token/entity labels from the original corpus on to the new tokenization. 
# +
# Read in the corpus in its original tokenization.
# NOTE(review): the boolean list appears to flag which of the named columns
# are IOB-encoded — confirm against the tp.conll_2003_to_dataframes docs.
corpus_raw = {}
for fold_name, file_name in data_set_info.items():
    df_list = tp.conll_2003_to_dataframes(file_name,
                                          ["pos", "phrase", "ent"],
                                          [False, True, True])
    # Keep only the entity columns; POS and phrase-chunk info are not used here.
    corpus_raw[fold_name] = [
        df.drop(columns=["pos", "phrase_iob", "phrase_type"])
        for df in df_list
    ]

test_raw = corpus_raw["test"]

# Pick out the dataframe for a single example document.
example_df = test_raw[5]
example_df
# -

# Convert the token-level IOB tags of the example document into entity spans.
spans_df = tp.iob_to_spans(example_df)
spans_df

# Retokenize the document's text with the BERT tokenizer
bert_toks_df = tp.make_bert_tokens(example_df["char_span"].values[0].target_text,
                                   tokenizer)
bert_toks_df

# BERT tokenization includes special zero-length tokens.
bert_toks_df[bert_toks_df["special_tokens_mask"]]

# +
# Align the BERT tokens with the original tokenization
bert_token_spans = tp.TokenSpanArray.align_to_tokens(bert_toks_df["char_span"],
                                                     spans_df["token_span"])
pd.DataFrame({
    "original_span": spans_df["token_span"],
    "bert_token_span": bert_token_spans,
    "ent_type": spans_df["ent_type"]
})
# -

# Generate IOB2 tags and entity labels that align with the BERT tokens.
# See https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
bert_toks_df[["ent_iob", "ent_type"]] = tp.spans_to_iob(bert_token_spans,
                                                        spans_df["ent_type"])
bert_toks_df

# The traditional way to transform NER to token classification is to
# treat each combination of {I,O,B} X {entity type} as a different
# class. Generate class labels in that format.
classes_df = tp.add_token_classes(bert_toks_df, token_class_dtype)
classes_df

# # Show how to compute embeddings

# Adds embeddings to our example dataframe. Function in util.py
embeddings_df = util.add_embeddings(classes_df, bert)
embeddings_df

# # Generate BERT tokens and BERT embeddings for the entire corpus
#
# Generate BERT embeddings for every token in the entire corpus in one pass,
# and store those embeddings in a dataframe column (of type TensorType)
# alongside the tokens and labels.

# conll_to_bert() in util.py combines the previous few cells' operations into
# a single function.
util.conll_to_bert(example_df, tokenizer, bert, token_class_dtype)

# Run the entire corpus through our processing pipeline.
bert_toks_by_fold = {}
for fold_name in corpus_raw.keys():
    print(f"Processing fold '{fold_name}'...")
    raw = corpus_raw[fold_name]
    # The lambda's late binding of `raw` is safe here: it is consumed by
    # run_with_progress_bar() within the same loop iteration.
    bert_toks_by_fold[fold_name] = util.run_with_progress_bar(
        len(raw),
        lambda i: util.conll_to_bert(raw[i], tokenizer, bert, token_class_dtype))

bert_toks_by_fold["dev"][20]

# # Collate the data structures we've generated so far

# Create a single dataframe with the entire corpus's embeddings.
corpus_df = util.combine_folds(bert_toks_by_fold)
corpus_df

# Write the tokenized corpus with embeddings to a Feather file.
corpus_df.to_feather("outputs/corpus.feather")

# # Train an ensemble of models
#
# Use the embeddings to quickly train multiple models at multiple levels of
# sophistication.

# Additional initialization boilerplate
import sklearn.linear_model

# +
# Read the serialized embeddings so that you can rerun the model training parts of
# this notebook without regenerating the embeddings.
# Temporarily disabled until issue #28 is resolved # corpus_df_after = pd.read_feather("outputs/corpus.feather") # try: # # Verify that the embeddings came through ok # embeddings_before = corpus_df["embedding"].values._tensor # embeddings_after = corpus_df_after["embedding"].values._tensor # mask = (embeddings_before == embeddings_after) # if not np.all(mask): # raise ValueError("Corruption of embeddings detected") # corpus_df = corpus_df_after # except NameError: # # If we get here, `corpus_df` isn't defined, because the user ran this # # cell without running the previous cells. That's ok. Continue on. # pass # corpus_df # - train_df = corpus_df[corpus_df["fold"] == "train"] train_df train_df["embedding"].values # + # Train a multinomial logistic regression model on the training set. _MULTI_CLASS = "multinomial" base_pipeline = sklearn.pipeline.Pipeline([ # Standard scaler. This only makes a difference for certain classes # of embeddings. #("scaler", sklearn.preprocessing.StandardScaler()), ("mlogreg", sklearn.linear_model.LogisticRegression( multi_class=_MULTI_CLASS, verbose=10, max_iter=10000 )) ]) X_train = train_df["embedding"].values Y_train = train_df["token_class_id"] base_model = base_pipeline.fit(X_train, Y_train) base_model # - # Look at our trained model's results on the training set train_results_df = util.predict_on_df(train_df, int_to_label, base_model) train_results_df train_results_df.iloc[50:75] # Look at results on the test set test_results_df = util.predict_on_df(corpus_df[corpus_df["fold"] == "test"], int_to_label, base_model) test_results_df # Pick out a region with more entities test_results_df.iloc[40:60] # Split model outputs for an entire fold back into documents and add # token information. test_results_by_doc = util.align_model_outputs_to_tokens(test_results_df, bert_toks_by_fold) test_results_by_doc[("test", 0)].iloc[40:60] # + # Convert IOB2-format output (and gold standard tags) to spans. 
test_actual_spans = {k: tp.iob_to_spans(v) for k, v in test_results_by_doc.items()} test_model_spans = {k: tp.iob_to_spans(v, iob_col_name = "predicted_iob", entity_type_col_name = "predicted_type") .rename(columns={"predicted_type": "ent_type"}) for k, v in test_results_by_doc.items()} test_model_spans[("test", 0)].head() # - # Every once in a while, the model will split a token in the original data # set into two entities. For example, look at document 202 of the test # set: doc_key = ("test", 202) test_model_spans[doc_key].head(10) # + # Notice `[150, 151): 'W'` and `[151, 156): 'idnes'`. These outputs are part # of the same original token. # We can use spanner algebra to fix up these outputs. spans_df = test_model_spans[doc_key] toks_df = test_raw[202] # First, find which tokens the spans overlap with: overlaps_df = ( tp .overlap_join(spans_df["token_span"], toks_df["char_span"], "token_span", "corpus_token") .merge(spans_df) ) overlaps_df.head(10) # - # Next, compute the minimum span that covers all the corpus tokens # that overlap with each entity span. agg_df = ( overlaps_df .groupby("token_span") .aggregate({"corpus_token": "sum", "ent_type": "first"}) .reset_index() ) agg_df.head(10) # Finally, take unique values and covert character-based spans to spans # in the corpus tokenization (since the new offsets might not match a # BERT tokenizer token boundary) cons_df = ( tp.consolidate(agg_df, "corpus_token") [["corpus_token", "ent_type"]] .rename(columns={"corpus_token": "token_span"}) ) cons_df["token_span"] = tp.TokenSpanArray.align_to_tokens(toks_df["char_span"], cons_df["token_span"]) cons_df.head(10) # util.py contains a single function that repeats the actions of the # previous 3 cells. 
util.realign_to_tokens(test_model_spans[doc_key], test_raw[202]).head(10) # Run all of our dataframes through `realign_to_toks` keys = list(test_model_spans.keys()) new_values = util.run_with_progress_bar( len(keys), lambda i: util.realign_to_tokens(test_model_spans[keys[i]], test_raw[keys[i][1]])) test_model_spans = {k: v for k, v in zip(keys, new_values)} test_model_spans[doc_key].head(10) # Same per-document statistics calculation code as in CoNLL_2.ipynb test_stats_by_doc = util.make_stats_df(test_actual_spans, test_model_spans) test_stats_by_doc # Collection-wide precision and recall can be computed by aggregating # our dataframe. util.compute_global_scores(test_stats_by_doc) # The function analyze_model() in util.py combines the above postprocessing steps # into a single function. base_test_results = util.analyze_model( corpus_df[corpus_df["fold"] == "test"], int_to_label, base_model, bert_toks_by_fold, corpus_raw, expand_matches=True) base_test_results["global_scores"] # Results on the training set base_train_results = util.analyze_model( corpus_df[corpus_df["fold"] == "train"], int_to_label, base_model, bert_toks_by_fold, corpus_raw, expand_matches=True) base_train_results["global_scores"] # Results on the development set base_dev_results = util.analyze_model( corpus_df[corpus_df["fold"] == "dev"], int_to_label, base_model, bert_toks_by_fold, corpus_raw, expand_matches=True) base_dev_results["global_scores"] # ## Code to train models with reduced result quality # # [`util.py`](./util.py) contains a function `train_reduced_model()` that produces # detuned versions of our multilogreg model. This function works by setting up # a two-stage pipeline. The first stage is a `sklearn.random_projection.GaussianRandomProjection` # model that reduces the number of dimensions of the input embeddings, and the second # stage is a multinomial logistic regression model trained with `sklearn.linear_model.LogisticRegression`. 
reduced_model = util.train_reduced_model(X_train, Y_train, 16, None) reduced_model import importlib util = importlib.reload(util) reduced_test_results = util.analyze_model( corpus_df[corpus_df["fold"] == "test"], int_to_label, reduced_model, bert_toks_by_fold, corpus_raw, expand_matches=True) reduced_test_results["global_scores"] # ## Dry run: Train reduced models on the original training set # # Using the original CoNLL 2003 training set, train multiple models at # different quality levels. Repeat the evaluation process # from [`CoNLL_2.ipynb`](./CoNLL_2.ipynb) and verify that the ensemble # of models can pinpoint incorrect labels in the test data as in # `CoNLL_2.ipynb`. # + # Define some constants for a grid over two parameters. _N_COMPONENTS = [32, 64, 128, 256] # Values for the n_components parameter #_N_COMPONENTS = [8, 16, 32, 64, 128] #_N_COMPONENTS = [8, 16, 32, 64, 128, 256, 512, 767] _SEEDS = [1, 2, 3, 4] # Values for the random seed params = [{"n_components": c, "seed": s} for c in _N_COMPONENTS for s in _SEEDS] def params_to_name(p): return f"{p['n_components']}_{p['seed']}" models = { params_to_name(p): util.train_reduced_model(X_train, Y_train, p["n_components"], p["seed"]) for p in params } # + # Also include the model with no Gaussian random projections. 
models["768_1"] = base_model params.append({"n_components": 768, "seed": 1}) test_df = corpus_df[corpus_df["fold"] == "test"] test_results = { name: util.analyze_model(test_df, int_to_label, model, bert_toks_by_fold, corpus_raw, expand_matches=True) for name, model in models.items() } list(test_results.values())[0].keys() # + global_scores = [r["global_scores"] for r in test_results.values()] summary_df = pd.DataFrame({ "n_components": [p["n_components"] for p in params], "seed": [p["seed"] for p in params], "name": list(test_results.keys()), "num_true_positives": [r["num_true_positives"] for r in global_scores], "num_entities": [r["num_entities"] for r in global_scores], "num_extracted": [r["num_extracted"] for r in global_scores], "precision": [r["precision"] for r in global_scores], "recall": [r["recall"] for r in global_scores], "F1": [r["F1"] for r in global_scores] }) summary_df # - # Plot the tradeoff between dimensionality and F1 score x = summary_df["n_components"] y = summary_df["F1"] plt.scatter(x, y) #plt.yscale("log") #plt.xscale("log") plt.xlabel("Number of Dimensions") plt.ylabel("F1 Score") plt.show() # Tabulate all the results as in CoNLL_2.ipynb test_results_full = util.merge_model_results(test_results) test_results_full # Drop the Boolean columns for convenience test_results = test_results_full[["fold", "doc_offset", "token_span", "ent_type", "gold", "num_models"]] test_results # How many results are in the gold standard? test_results[test_results["gold"]] # How many entities were found by zero models? (test_results[test_results["gold"] == True] [["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) # How many non-results were found by many models? (test_results[test_results["gold"] == False] [["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) # Hardest results from the gold standard to get. # Use document ID to break ties. 
test_hard_to_get = ( test_results[test_results["gold"]] .sort_values(["num_models", "fold", "doc_offset"]) .head(20)) test_hard_to_get # ### Results from hand-labeling the above 20 examles: # # #### TODO: Update this cell with table from CSV file # + # Scratchpad for looking at individual docs # doc_results = gold_standard_by_doc[55] # doc_results # + # Part 2 of scratchpad # doc_results["token_span"].values # - # Hardest results not in the gold standard to avoid test_hard_to_avoid = ( test_results[~test_results["gold"]] .sort_values(["num_models", "fold", "doc_offset"], ascending=[False, True, True]) .head(20)) test_hard_to_avoid # ### Results from hand-labeling the above 20 examles: # # #### TODO: Update this cell with table from CSV file # + # Scratchpad for looking at individual docs # doc_num = 168 # doc_results = gold_standard_by_doc[doc_num] # doc_results # + # Part 2 of scratchpad # doc_results["token_span"].values # + # Part 3 of scratchpad (for looking at original tokenization) #test_raw[doc_num].head(50) # - # ## Use the same models to find incorrect labels in the development set dev_df = corpus_df[corpus_df["fold"] == "dev"] dev_results_by_model = { name: util.analyze_model(dev_df, int_to_label, model, bert_toks_by_fold, corpus_raw, expand_matches=True) for name, model in models.items() } dev_results_full = util.merge_model_results(dev_results_by_model) dev_results_full dev_results = dev_results_full[["fold", "doc_offset", "token_span", "ent_type", "gold", "num_models"]] dev_results (dev_results[dev_results["gold"] == True][["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) (dev_results[dev_results["gold"] == False][["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) dev_hard_to_get = dev_results[dev_results["gold"]].sort_values(["num_models", "fold", "doc_offset"]).head(20) dev_hard_to_get # ### Results from hand-labeling: # # #### TODO: Update this 
cell with table from CSV file # # Document | Error Type | Corpus Span | Tag | Corrected Span | Tag | Notes # ---------|------------|----------------------------|----------|----------------|-----------|---------------------------------------------------------------- # 0 | None | `[66, 77): '<NAME>'` | `PER` | | | # 0 | None | `[78, 84): 'AL-AIN'` | `LOC` | # 0 | None | `[1824, 1831): 'Syrians'` | `MISC` | # 1 | Tag | `[42, 47): 'ITALY'` | `LOC` | | `ORG` | Italian World Cup team # 1 | None | `[686, 700): '1995 World Cup'` | `MISC` # 3 | Span | `[21, 37): 'SKIING-WORLD CUP'` | `MISC`| `[28, 37): 'WORLD CUP'` | | "FREESTYLE SKIING-WORLD CUP MOGUL RESULTS." # 5 | Tag | `[31, 42): 'NEW ZEALAND'` | `LOC` | | `ORG` # 5 | None | `[223, 234): '<NAME>'`| `PER` # 5 | None | `[280, 286): 'Cairns'` | `PER` # 5 | None | `[779, 785): 'Cairns'` | `PER` # 6 | Sentence | `[88, 110): 'English F.A. Challenge'` | `MISC` | `[88, 114): 'English F.A. Challenge Cup'` # 6 | Sentence | `[111, 114): 'Cup'` | `MISC` | `[88, 114): 'English F.A. 
Challenge Cup'` # 7 | None | `[491, 498): 'Udinese'` | `ORG` # 7 | None | `[646, 655): 'Feyenoord'` | `ORG` # 7 | Spelling | `[772, 779): 'Engllsh'` | `MISC` | `[772, 779): 'English'` # 8 | None | `[26, 32): 'BOWYER'` | `PER` # 8 | None | `[94, 101): 'England'` | `LOC` # 9 | None | `[23, 33): 'EUROLEAGUE'` | `MISC` # # # + # Scratchpad for looking at individual docs # doc_id = ("dev", 9) # first_model_name = list(dev_results_by_model.keys())[0] # gold_standard_by_doc = dev_results_by_model[first_model_name]["actual_spans_by_doc"] # doc_results = gold_standard_by_doc[doc_id] # doc_results[:50] # + # Part 2 of scratchpad # doc_results["token_span"].values # - # Hardest results from the gold standard to avoid dev_hard_to_avoid = dev_results[~dev_results["gold"]].sort_values( ["num_models", "fold", "doc_offset"], ascending=False).head(20) dev_hard_to_avoid # ### Results from hand-labeling the above 20 examples # # #### TODO: Update this table from CSV file # # # # Document | Error Type | Model Span | Tag | Corpus Span | Corpus Tag | Notes # ---------|------------|--------------------------------|----------|----------------------------|------------|----------------------------------------------------- # 229 | Span | `[704, 718): 'Sporting Gijon'` | `LOC` |`[704, 712): 'Sporting'` | `ORG` | # 223 | Tag | `[232, 244): 'Philadelphia'` | `ORG` | | `LOC` | Hockey team # 223 | Tag | `[247, 253): 'DALLAS'` | `ORG` | | `LOC` | Hockey team # 223 | Tag | `[256, 264): 'St Louis'` | `ORG` | | `LOC` | Hockey team # 223 | Tag | `[267, 275): 'COLORADO'` | `ORG` | | `LOC` | Hockey team # 223 | Tag | `[289, 295): 'Ottawa'` | `ORG` | | `LOC` | Hockey team # 222 | Sentence | `[93, 115): 'National Hockey League'` | `MISC` | `[93, 108): 'National Hockey'` # 222 | Sentence | `[93, 115): 'National Hockey League'` | `MISC` | `[109, 115): 'League'` # 219 | Tag | `[562, 565): 'UAE'` | `ORG` | | `LOC` | Soccer team # 213 | Sentence | `[697, 708): 'Dion Fourie'` | `PER` | `[697, 701): 'Dion'` # 
213 | Sentence | `[697, 708): 'Dion Fourie'` | `PER` | `[702, 708): 'Fourie'` # 210 | None | `[942, 944): 'Bo'` | `ORG` | `[942, 954): 'Boreham Wood'` | | Split token # 210 | None | `[944, 954): 'reham Wood'` | `ORG` | `[942, 954): 'Boreham Wood'` | | Split token # 209 | Sentence | `[384, 393): 'East Fife'` | `ORG` | `[384, 388): 'East'` # 209 | Sentence | `[384, 393): 'East Fife'` | `ORG` | `[389, 393): 'Fife'` # 207 | None | `[2726, 2728): 'Le'` | `ORG` | `[2726, 2739): 'Leyton Orient'` | | Split token # 207 | None | `[2728, 2739): 'yton Orient'` | `ORG` | `[2726, 2739): 'Leyton Orient'` | | Split token # 205 | Tag | `[627, 636): 'Wimbledon'` | `ORG` | | `LOC` | Soccer team # 202 | None | `[150, 151): 'W'` | `ORG` | `[150, 156): 'Widnes'` | | Split token # 202 | None | `[151, 156): 'idnes'` | `ORG` | `[150, 156): 'Widnes'` | | Split token # 200 | Sentence | `[284, 295): '<NAME>'` | `ORG` | `[284, 289): 'Aston'` # 200 | Sentence | `[284, 295): '<NAME>'` | `ORG` | `[290, 295): 'Villa'` # 200 | Tag | `[337, 346): 'Wimbledon'` | `ORG` | | `LOC` | Soccer team # # + # Scratchpad for looking at individual docs # doc_id = ("dev", 200) # first_model_name = list(dev_results_by_model.keys())[0] # gold_standard_by_doc = dev_results_by_model[first_model_name]["actual_spans_by_doc"] # doc_results = gold_standard_by_doc[doc_id] # doc_results # + # Part 2 of scratchpad #doc_results["token_span"].values # - # ## Also look for incorrect labels on the training set # # Apply the same process to the training set to see what we get. Since these # models are trained on this set, we don't expect their aggregate results to # flag many incorrect labels. Generate the data anyhow just to see what happens. 
train_df = corpus_df[corpus_df["fold"] == "train"] train_results_by_model = { name: util.analyze_model(train_df, int_to_label, model, bert_toks_by_fold, corpus_raw, expand_matches=True) for name, model in models.items() } train_results_full = util.merge_model_results(train_results_by_model) train_results_full train_results = train_results_full[["fold", "doc_offset", "token_span", "ent_type", "gold", "num_models"]] train_results (train_results[train_results["gold"] == True][["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) (train_results[train_results["gold"] == False][["num_models", "token_span"]] .groupby("num_models").count() .rename(columns={"token_span": "count"})) train_hard_to_get = train_results[train_results["gold"]].sort_values(["num_models", "fold", "doc_offset"]).head(20) train_hard_to_get # Hardest results from the gold standard to avoid train_hard_to_avoid = train_results[~train_results["gold"]].sort_values( ["num_models", "fold", "doc_offset"], ascending=False).head(20) train_hard_to_avoid # # Write CSV files for manual labeling # # Generate CSV outputs like the ones from `CoNLL_2.ipynb` so that we can # manually label the examples our ensemble flagged in the development test sets. 
# First pair of files contains the outputs for the dev and test # sets, as in the output of CoNLL_2.ipynb dev_and_test_results = pd.concat([dev_results, test_results]) in_gold_to_write, not_in_gold_to_write = util.csv_prep(dev_and_test_results, "num_models") in_gold_to_write not_in_gold_to_write # Write the CSV file for test and validation results in_gold_to_write.to_csv("outputs/CoNLL_3_in_gold.csv", index=False) not_in_gold_to_write.to_csv("outputs/CoNLL_3_not_in_gold.csv", index=False) # Generate a second pair of dataframes with results on the training set train_in_gold, train_not_in_gold = util.csv_prep(train_results, "num_models") train_in_gold train_not_in_gold # Write the training set information to a second pair of CSV files train_in_gold.to_csv("outputs/CoNLL_3_train_in_gold.csv", index=False) train_not_in_gold.to_csv("outputs/CoNLL_3_train_not_in_gold.csv", index=False)
# notebooks/CoNLL_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/luca-arts/seeingtheimperceptible/blob/main/notebooks/bgRemoval/tests/PaddleSeg_matting.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="NLneEHFCApdZ" # ## PaddleSeg - matting + BG-inpainting # # [GitHub](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.5/Matting) # # + [markdown] id="khoHlmWcDiqt" # ## 1. Preparations # # Before start, make sure that you choose # # Runtime Type = Python 3 # Hardware Accelerator = GPU # + colab={"base_uri": "https://localhost:8080/"} id="5citXkAXD0Mn" outputId="72171095-d862-4f31-ff43-deb1bc900e1d" # !nvidia-smi # + [markdown] id="V0MQlGrZBpxu" # ## 2. linking nextcloud # # Connecting to the external NextCloud drive # + id="q2x_BWxQAcew" # we'll link the dataset from next-cloud # !curl https://raw.githubusercontent.com/luca-arts/seeingtheimperceptible/main/notebooks/database_mod.py -o /content/database_mod.py from database_mod import * link_nextcloud() nextcloud = '/content/database/' input_folder, output_folder = create_io(database=nextcloud,topic='bgRemoval',library='PaddleSeg') # + [markdown] id="bMRYxt0sLTqT" # ## 3. install PaddlePaddle # + id="5_9xoAyOLYaC" outputId="1bca49ff-cf90-4399-f23d-6547d4041b7d" colab={"base_uri": "https://localhost:8080/"} # !pip install -q PaddlePaddle # + [markdown] id="AkY2y9z8B1XK" # ## 4. 
clone GIT repo # + colab={"base_uri": "https://localhost:8080/"} id="qp3rGrU1CGeY" outputId="f36807cf-64e2-4e07-a979-14aca3e0e4d0" import os root_path = '/content/PaddleSeg' # clone the repository if not os.path.exists(root_path): # !git clone https://github.com/PaddlePaddle/PaddleSeg {root_path} # %ls # + [markdown] id="3sRL53XpHgPP" # ## 5. Setting up the environment # + colab={"base_uri": "https://localhost:8080/"} id="rXz3PdFEI2YS" outputId="7aacf0f0-592c-43b0-ff4f-28267c6209b5" # installing PaddleSeg # %cd {root_path} # !pip install -q -r requirements.txt # !pip install -e . # installing Matting # %cd Matting # !pip install -q -r requirements.txt # + [markdown] id="zgXbTNu1iAWN" # ## 6. downloading the dataset # + id="_gvd1m5nd0cH" outputId="853b884b-ed13-431f-e56a-b87c34992f00" colab={"base_uri": "https://localhost:8080/"} # download model checkpoint model_path = root_path + '/Matting/data/model' model_params = 'https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams' model_inf = 'https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-humanmatting-resnet34_vd.zip' # make folders os.makedirs(model_path, exist_ok=True) if not os.path.exists(os.path.join(model_path,'human_matting.pdparams')): print('\n> Download the model params') # !curl {model_params} -o {os.path.join(model_path,'human_matting.pdparams')} else: print ('\n> File already downloaded') if not os.path.exists(os.path.join(model_path,'human_matting-resnet.zip')): print('\n> Download the model') # !curl {model_inf} -o {os.path.join(model_path,'human_matting-resnet.zip')} # !unzip -q {os.path.join(model_path,'human_matting-resnet.zip')} -d {os.path.join(model_path)} else: print ('\n> File already downloaded') # + [markdown] id="o6_ANZEo4fDu" # ## 7. 
Deploy # + id="ibyxp1GlXw-W" outputId="89664b91-3635-43e2-c3d6-d322db446fe1" colab={"base_uri": "https://localhost:8080/"} # infer # # !python deploy/python/infer.py \ # # --config output/export/deploy.yaml \ # # --image_path data/PPM-100/val/fg/ \ # # --save_dir output/results \ # # --fg_estimate True mode = 'predict' #@param ['predict','bgremoval'] if(mode=='predict'): # predict # !export CUDA_VISIBLE_DEVICES=0 # !python predict.py \ # --config configs/human_matting/human_matting-resnet34_vd.yml \ # --model_path data/model/human_matting.pdparams \ # --image_path {input_folder} \ # --save_dir {output_folder} \ # --fg_estimate True else: # bg replacement #TODO right now all images get the same background image #@markdown either choose an rgbw value as background or type the **path** to the bgimage: background = 'r' #@param ['r','g','b','w'] {allow-input: true} # !export CUDA_VISIBLE_DEVICES=0 for infer_img in os.listdir(input_folder): # !python bg_replace.py \ # --config configs/human_matting/human_matting-resnet34_vd.yml \ # --model_path data/model/human_matting.pdparams \ # --image_path {os.path.join(input_folder,infer_img)} \ # --save_dir {output_folder} \ # --background {background} \ # --fg_estimate True # + id="qg9GS84cjCcy"
# notebooks/bgRemoval/tests/PaddleSeg_matting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Question 122 - Middle matrix sums # Python Data Structures Arrays Matrix # # Given an integer matrix, m, with an odd # dimensions, n x n, (e.g 3 x 3, 5 x 5, etc), find the sum of middle row as well as the middle column. # # For example: # ``` # #Given # [[1,2,3], # [4,5,6], # [7,8,9]] # # #Your program would output: # 'Sum middle row =' 15 #(e.g. 4+5+6) # 'Sum middle column =' 15 #(e.g. 2+5+8) # ``` # # Solution will be written in Python for premium users. # + def compute_sum_mid_row_col(m): """ return sum of m's middle column and m's middle row assumes m is a square matrix with odd size, eg 3x3 or 5x5 """ i = len(m)//2 mid_row_sum = sum(m[i]) mid_col_sum = sum([row[i] for row in m]) return mid_col_sum, mid_row_sum def test_compute_sum_mid_row_col(f): m = [[1,2,3], [4,5,6], [7,8,9]] assert f(m) == (15, 15) assert f([[5]]) == (5,5) test_compute_sum_mid_row_col(compute_sum_mid_row_col)
# interviewq_exercises/q122_python_sum_matrix_middle_row_and_column.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Base import and functions import imutils import cv2 from imutils import contours import os, os.path from tqdm import tqdm import pytesseract # Python interface to tesseract for OCR import numpy as np from PIL import Image from skimage.color import rgb2gray from skimage.exposure import equalize_adapthist, adjust_log from skimage.restoration import denoise_tv_chambolle, denoise_wavelet, denoise_bilateral from skimage.filters import threshold_niblack from skimage.util import img_as_float, img_as_ubyte, img_as_uint from skimage.morphology import reconstruction from skimage.morphology import remove_small_objects, remove_small_holes from skimage.morphology import disk, diamond, square from skimage.filters import rank # %matplotlib inline import matplotlib import matplotlib.pyplot as plt import matplotlib.image as mpimg # %load_ext line_profiler import skimage print("skimage: " + skimage.__version__) # Must be greater then 0.14 for Chain Vesa # + def numberprepare(image): img = img_as_float(image) logarithmic_corrected = adjust_log(img, 1) img_log_clah = equalize_adapthist(logarithmic_corrected, clip_limit=0.03) gray = rgb2gray(img_log_clah) seed = np.copy(gray) seed[1:-1, 1:-1] = gray.max() mask = gray filled = reconstruction(seed, mask, method='erosion') holes = denoise_wavelet(rank.enhance_contrast(gray-filled+1, disk(6))) # niblack = holes > threshold_niblack(holes, window_size=71, k=0.9) return holes return remove_small_holes(img_as_ubyte(niblack), 300) def serialprepare(image): img = img_as_float(image) logarithmic_corrected = adjust_log(img, 1) img_log_clah = equalize_adapthist(logarithmic_corrected, clip_limit=0.03) gray = rgb2gray(img_log_clah) holes = denoise_tv_chambolle(rank.enhance_contrast(gray, disk(8)), weight=0.3) # niblack = holes 
> threshold_niblack(holes, window_size=61, k=0.9) return holes return remove_small_holes(img_as_ubyte(niblack), 400) # + src = Image.open('test_data/final720/photo1.jpg') src2 = Image.open('test_data/final720/photo2.jpg') # First source # rotated = src.rotate(-90, expand=True) serial = src.crop((1043, 218, 1043+80, 218+108)) # %time filtered_serial = serialprepare(serial) number = src.crop((89, 218, 89+399, 218+99)) # %time filtered_number = numberprepare(number) thresholded = img_as_ubyte(filtered_number) edged = cv2.Canny(thresholded, 30, 200) width, height = thresholded.shape[:2] refCnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1] # refCnts = refCnts[0] if imutils.is_cv2() else refCnts[1] # refCnts = contours.sort_contours(refCnts, method="left-to-right")[0] # create a clone of the original image so we can draw on it clone = np.dstack([thresholded.copy()] * 3) # loop over the (sorted) contours for c in refCnts: # compute the bounding box of the contour and draw it on our # image (x, y, w, h) = cv2.boundingRect(c) cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2) # Secong source serial2 = src2.crop((1043, 218, 1043+80, 218+108)) filtered_serial2 = serialprepare(serial2) number2 = src2.crop((89, 218, 89+399, 218+99)) filtered_number2 = numberprepare(number2) thresholded2 = img_as_ubyte(filtered_number2) edged = cv2.Canny(thresholded2, 30, 200) width, height = thresholded.shape[:2] refCnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1] # refCnts = refCnts[0] if imutils.is_cv2() else refCnts[1] # refCnts = contours.sort_contours(refCnts, method="left-to-right")[0] # create a clone of the original image so we can draw on it clone2 = np.dstack([thresholded2.copy()] * 3) # loop over the (sorted) contours for c in refCnts: # compute the bounding box of the contour and draw it on our # image (x, y, w, h) = cv2.boundingRect(c) cv2.rectangle(clone2, (x, y), (x + w, y + h), (0, 255, 0), 2) fig, 
ax = plt.subplots(2, 5, figsize=(40, 5), sharex=True, sharey=True) ax = ax.ravel() ax[0].imshow(number) ax[0].set_title('Original image') ax[0].axis('off') ax[1].imshow(filtered_number, cmap='gray') ax[1].set_title('Filtered Number') ax[1].axis('off') ax[2].imshow(cv2.cvtColor(clone, cv2.COLOR_BGR2RGB)) ax[2].set_title('Contours') ax[2].axis('off') ax[3].imshow(serial, cmap='gray') ax[3].set_title('Original Serial') ax[3].axis('off') ax[4].imshow(filtered_serial, cmap='gray') ax[4].set_title('Filtered serial') ax[4].axis('off') ax[5].imshow(number2) ax[5].set_title('Original image') ax[5].axis('off') ax[6].imshow(filtered_number2, cmap='gray') ax[6].set_title('Filtered Number') ax[6].axis('off') ax[7].imshow(cv2.cvtColor(clone2, cv2.COLOR_BGR2RGB)) ax[7].set_title('Contours') ax[7].axis('off') ax[8].imshow(serial2, cmap='gray') ax[8].set_title('Original Serial') ax[8].axis('off') ax[9].imshow(filtered_serial2, cmap='gray') ax[9].set_title('Filtered serial') ax[9].axis('off') plt.show() # + from skimage.filters import threshold_mean from skimage.morphology import remove_small_objects, remove_small_holes from skimage.morphology import erosion, dilation, opening, closing, binary_opening, binary_closing def numberprepare(image): img = img_as_float(image) logarithmic_corrected = adjust_log(img, 1) img_log_clah = equalize_adapthist(logarithmic_corrected, clip_limit=0.03) gray = rgb2gray(img_log_clah) seed = np.copy(gray) seed[1:-1, 1:-1] = gray.max() mask = gray filled = reconstruction(seed, mask, method='erosion') # holes = denoise_wavelet(rank.enhance_contrast(gray-filled+1, disk(6))) # niblack = holes > threshold_niblack(holes, window_size=71, k=0.9) holes = rank.enhance_contrast(gray-filled+1, disk(6)) thresh_min = threshold_mean(holes) binary_min = holes > thresh_min filtered_smallest_noise = remove_small_holes(img_as_ubyte(binary_min), 10) opened = binary_opening(filtered_smallest_noise, disk(2)) filtered_larger_noise = remove_small_holes(img_as_ubyte(opened), 300) 
    return filtered_larger_noise

src = Image.open('test_data/final720/photo1.jpg') # First source
# rotated = src.rotate(-90, expand=True)
# Crop the serial and number regions (pixel boxes found manually for this photo)
serial = src.crop((1043, 218, 1043+80, 218+108))
filtered_serial = serialprepare(serial)
number = src.crop((89, 218, 89+399, 218+99)).rotate(180, expand=True)
# %time filtered_number = numberprepare(number)
# %time numberCandidate = pytesseract.image_to_string(Image.fromarray(filtered_number, "L"), config="--tessdata-dir ./tessdata -psm 8 -oem 3 -l eng -c tessedit_char_whitelist=1234567890 nobatch digits")
thresholded = img_as_ubyte(filtered_number)
edged = cv2.Canny(thresholded, 30, 200)
# NOTE(review): numpy shape order is (rows, cols), so this actually unpacks
# (height, width); both values are unused below — verify before relying on them.
width, height = thresholded.shape[:2]
# [1] picks the contour list in the OpenCV 3.x return convention — presumably
# cv2 3.x is installed (OpenCV 2/4 return contours at index 0); confirm.
refCnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
# refCnts = refCnts[0] if imutils.is_cv2() else refCnts[1]
# refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# create a clone of the original image so we can draw on it
clone = np.dstack([thresholded.copy()] * 3)
# loop over the (sorted) contours
for c in refCnts:
    # compute the bounding box of the contour and draw it on our
    # image
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

fig, ax = plt.subplots(1, 3, figsize=(40, 5), sharex=True, sharey=True)
ax = ax.ravel()
ax[0].imshow(number)
ax[0].set_title('Original image')
ax[0].axis('off')
ax[1].imshow(filtered_number, cmap='gray')
ax[1].set_title('Filtered Number')
ax[1].axis('off')
ax[2].imshow(cv2.cvtColor(clone, cv2.COLOR_BGR2RGB))
ax[2].set_title('Contours')
ax[2].axis('off')

print(numberCandidate)
# -

# ### Recognition is fine when we cut all trash blobs. But it is a little bit risky
#
# We must use `remove_small_holes(img_as_ubyte(opened), 50)` or something like.
# # But not like this `remove_small_holes(img_as_ubyte(opened), 300)`

# +
def numberprepare(image):
    """Binarize the cropped number image for OCR (fed to pytesseract below).

    Pipeline: log gamma correction -> adaptive histogram equalization ->
    grayscale -> morphological reconstruction by erosion -> local contrast
    enhancement -> mean threshold -> small-blob cleanup (opening + hole removal).
    """
    img = img_as_float(image)
    logarithmic_corrected = adjust_log(img, 1)
    img_log_clah = equalize_adapthist(logarithmic_corrected, clip_limit=0.03)
    gray = rgb2gray(img_log_clah)
    # Seed/mask for reconstruction by erosion: the border keeps its original
    # values while the interior starts at the image maximum.
    seed = np.copy(gray)
    seed[1:-1, 1:-1] = gray.max()
    mask = gray
    filled = reconstruction(seed, mask, method='erosion')
    # holes = denoise_wavelet(rank.enhance_contrast(gray-filled+1, disk(6)))
    # niblack = holes > threshold_niblack(holes, window_size=71, k=0.9)
    holes = rank.enhance_contrast(gray-filled+1, disk(6))
    thresh_min = threshold_mean(holes)
    binary_min = holes > thresh_min
    # NOTE(review): remove_small_holes expects a boolean image; feeding it the
    # img_as_ubyte conversion relies on deprecated skimage behaviour — confirm version.
    filtered_smallest_noise = remove_small_holes(img_as_ubyte(binary_min), 10)
    opened = binary_opening(filtered_smallest_noise, disk(2))
    # 50 keeps more small blobs than the 300 used in the cell above
    # (this is the variant discussed in the markdown).
    filtered_larger_noise = remove_small_holes(img_as_ubyte(opened), 50)
    return filtered_larger_noise


src = Image.open('test_data/final720/photo1.jpg') # First source
# rotated = src.rotate(-90, expand=True)
serial = src.crop((1043, 218, 1043+80, 218+108))
filtered_serial = serialprepare(serial)
number = src.crop((89, 218, 89+399, 218+99)).rotate(180, expand=True)
# %time filtered_number = numberprepare(number)
# %time numberCandidate = pytesseract.image_to_string(Image.fromarray(filtered_number, "L"), config="--tessdata-dir ./tessdata -psm 8 -oem 3 -l eng -c tessedit_char_whitelist=1234567890 nobatch digits")
thresholded = img_as_ubyte(filtered_number)
edged = cv2.Canny(thresholded, 30, 200)
# NOTE(review): numpy shape order means this is really (height, width); unused below.
width, height = thresholded.shape[:2]
refCnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
# refCnts = refCnts[0] if imutils.is_cv2() else refCnts[1]
# refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# create a clone of the original image so we can draw on it
clone = np.dstack([thresholded.copy()] * 3)
# loop over the (sorted) contours
for c in refCnts:
    # compute the bounding box of the contour and draw it on our
    # image
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

fig, ax = plt.subplots(1, 3, figsize=(40, 5), sharex=True, sharey=True)
ax = ax.ravel()
ax[0].imshow(number)
ax[0].set_title('Original image')
ax[0].axis('off')
ax[1].imshow(filtered_number, cmap='gray')
ax[1].set_title('Filtered Number')
ax[1].axis('off')
ax[2].imshow(cv2.cvtColor(clone, cv2.COLOR_BGR2RGB))
ax[2].set_title('Contours')
ax[2].axis('off')

print(numberCandidate)
# -

# With removing 50 blobs tesseract fails
22-MobileSerialNumberSegmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Create the random DPI Networks (rand perturbomes) # 1.) Define Thresholds # 2.) Calculate Thresholds (take original results/take random resuls) # 3.) Get significant interactions # 4.) Save results # 5.) Compare with real results import numpy as np from matplotlib import pylab as plt import networkx as nx import os import random # ## 1. Define Thresholds # Define which thresholds to be used, as well as which wells should be excluded (e.g. Cytotoxic Drugs) # + # Certain wells show specific problem that need to be excluded from further analysis problematic_Well_Signs = ['ProblemWithCombinationWell','CombinationTransferProblem','Drug1Problem','Drug2Problem','CytotoxicDrug1','CytotoxicDrug2','CytotoxicCombination'] #Types of possible interactions interactionTypes = ['Emergent','Increasing','Decreasing'] #Batches of the screen batches = ['Batch1','Batch2'] use_original_drugpairs = True ''' ############# Thresholds### ############# ''' #Choose the parameter to find an optimal balance between interactions significance and effect size interaction_significance = 3 #how far away in means of mahalanobis distances from the NI point cloud does the real interaction need to be to be significant perturbaion_significnace = 7 #how far away in means of mahalanobis distances from DMSO does a perturbation need to be to be significanctly perturbed AlphaBeta_MAD_range = 2 #range of normal perturbed alpha/beta values per drug for non signifant drug pairs Gamma_percentile = 100 #range of normal perturbed gamma values per drug for non signifant drug pairs # - # ## 2. 
Calculate Thresholds # This can either be based on the original drug pairs (standard) or the random pairs # Some Easy Outlier detection def reject_outliers_2(data, m=6.): d = np.abs(data - np.median(data)) mdev = np.median(d) s = d / (mdev if mdev else 1.) #return s < m return [data[i] for i in range(0, len(data)) if s[i] < m] # + #Open the file if use_original_drugpairs: fp = open('../data/Create_DPI_Network/All_MC_Scores.csv','r') else: fp = open('../data/Create_DPI_Network_Random/All_MC_Scores.csv','r') fp.next() #File to save all non-significant alpha/beta/gamma values (to later calculate suited thresholds) perDrug_AlphaBetaGammas = {'Batch1':{},'Batch2':{}} #collect all alpha/beta/gammas for line in fp: tmp = line.strip().split(',') #ignore the drug pair if there seemed to be a problem if tmp[5] in problematic_Well_Signs: continue #extract drug and batch information drug1 = tmp[0] drug2 = tmp[1] batch = tmp[2] #add the drug to the dictionary if not already so if perDrug_AlphaBetaGammas[batch].has_key(drug1) == False: perDrug_AlphaBetaGammas[batch][drug1] = {'AlphaBetas':[],'Gammas':[]} if perDrug_AlphaBetaGammas[batch].has_key(drug2) == False: perDrug_AlphaBetaGammas[batch][drug2] = {'AlphaBetas':[],'Gammas':[]} #get the interaction significance maha_General = float(tmp[21]) #mp_General = float(tmp[22]) #include only non-significant interactions if maha_General < interaction_significance: #add the alpha/beta values perDrug_AlphaBetaGammas[batch][drug1]['AlphaBetas'].append(float(tmp[14])) perDrug_AlphaBetaGammas[batch][drug2]['AlphaBetas'].append(float(tmp[15])) #add the same gamma to BOTH drugs (as gamma is undirected) perDrug_AlphaBetaGammas[batch][drug1]['Gammas'].append(float(tmp[16])) perDrug_AlphaBetaGammas[batch][drug2]['Gammas'].append(float(tmp[16])) fp.close() #Dictionary with the thresholds for each drug perDrug_AlphaBetaGammaThresholds = {'Batch1':{},'Batch2':{}} #calculate thresholds for b in batches: for key in perDrug_AlphaBetaGammas[b]: #create 
alpha/beta threshold by using the borders of alpha/beta within 2 MADs and gamma within 99 percentiles alphaBeta_NoOutlier = reject_outliers_2(perDrug_AlphaBetaGammas[b][key]['AlphaBetas'], AlphaBeta_MAD_range) alphaBeta_NoOutlier.append(1) perDrug_AlphaBetaGammas[b][key]['Gammas'].append(0) perDrug_AlphaBetaGammaThresholds[b][key] = {'Upper':max([x for x in alphaBeta_NoOutlier if x >= 1]), 'Lower':min([x for x in alphaBeta_NoOutlier if x <= 1]), 'Emergent':np.percentile(perDrug_AlphaBetaGammas[b][key]['Gammas'],Gamma_percentile)} # - # ## 3. Get Significant Interactions # Go through all drug pairs and associate significant interactions (effect/sgnificance) correctly to interactions according to our vector math e.g. 1.2 alpha would be a increasing interaction. # + #How many interactions could be in the end calculated i.e. after problematic/non working drugs / combinatons were removed Number_Of_Real_Combinations = sum(1 for line in open('../data/Create_DPI_Network/All_MC_Scores.csv')) print 'Number of real drug pairs: %d' %Number_Of_Real_Combinations #Get all random drug pairs in one list fp = open('../data/Create_DPI_Network_Random/All_MC_Scores.csv','r') fp.next() # add all random pairs calculated (approx. 300k) of which randomly 30k are picked RandomDrugPairs = [] for line in fp: RandomDrugPairs.append(line) # + #Save number of individual interaction type for each random network creation number_significant_pairs = [] number_interactions = [] number_increasing = [] number_decreasing = [] number_emergent = [] #Create 10k random DPIs (i.e. 
# randomly pick 10k times 30k pairs)
for i in range(0,10000):
    significant_Drugs = {'Batch1':set(),'Batch2':set()}
    InteractionCount = 0
    Number_Of_Valid_DrugPairs = 0
    Interactions = {}
    for iT in interactionTypes:
        Interactions[iT] = {}

    #get one random assemble of random lines
    # FIX: sample from RandomDrugPairs (the list built in the previous cell);
    # the original referenced an undefined name `lines`, which raises NameError.
    random_combinations = random.sample(RandomDrugPairs,Number_Of_Real_Combinations)

    # NOTE(review): the column indices below (tmp[7], tmp[3], tmp[15..17]) differ
    # from the threshold cell (tmp[5], tmp[2], tmp[14..16]) — presumably the random
    # CSV carries extra columns; verify against the file headers.
    for line in random_combinations:
        tmp = line.strip().split(',')
        Number_Of_Valid_DrugPairs +=1
        if tmp[7] in problematic_Well_Signs:
            continue

        #get the interaction significance
        maha_General = float(tmp[21])

        #only check interaction if significantly away from NI point
        if maha_General > interaction_significance:
            #increment significant interaction count
            InteractionCount += 1

            #extract drug information
            drug1 = tmp[0]
            drug2 = tmp[1]
            batch = tmp[3]

            #extract mahalanobis distances (how far away from DMSO)
            maha_drug1 = float(tmp[6])
            maha_drug2 = float(tmp[9])
            maha_Combi = float(tmp[12])

            #check for significance (usually > 6)
            drug1_significance = maha_drug1 > perturbaion_significnace
            drug2_significance = maha_drug2 > perturbaion_significnace
            combi_significance = maha_Combi > perturbaion_significnace

            #extract alpha/beta/gamma
            alpha = float(tmp[15])
            beta = float(tmp[16])
            gamma = float(tmp[17])

            #add drugs to significant drugs if significant
            if drug1_significance:
                significant_Drugs[batch].add(drug1)
            if drug2_significance:
                significant_Drugs[batch].add(drug2)

            #####
            # GOT THROUGH ALL 7 POSSIBILITIES of where a drug can be modulated
            ###

            #0.
            # Singles not active, combination not active ==> no interactions

            # 1.)
            #Single not active, combination active ==> possible Emergent
            if drug1_significance == False and drug2_significance == False and combi_significance == True:
                #if gamma > Emergent_Threshold[batch]:
                if gamma > max([perDrug_AlphaBetaGammaThresholds[batch][drug1]['Emergent'],perDrug_AlphaBetaGammaThresholds[batch][drug2]['Emergent']]):
                    Interactions['Emergent'][drug1+','+drug2] = {'Value':gamma,'Batch':batch, 'Mahalanobis':maha_General}

            #2.)
            #One Single active, combination active anymore ==> possible Deactivting
            elif drug1_significance == True and drug2_significance == False and combi_significance == False:
                #if alpha < AlphaBeta_Threshold_Decreasing[batch]:
                if alpha < perDrug_AlphaBetaGammaThresholds[batch][drug1]['Lower']:
                    Interactions['Decreasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch, 'Mahalanobis':maha_General}

            #3.)
            #One Single active, combination active anymore ==> possible Deactivting
            elif drug1_significance == False and drug2_significance == True and combi_significance == False:
                #if beta < AlphaBeta_Threshold_Decreasing[batch]:
                if beta < perDrug_AlphaBetaGammaThresholds[batch][drug2]['Lower']:
                    Interactions['Decreasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch, 'Mahalanobis':maha_General}

            #4.)
            #Both Single active, combination not active anymore ==> possible Double Deactivating
            elif drug1_significance == True and drug2_significance == True and combi_significance == False:
                #if beta < AlphaBeta_Threshold_Decreasing[batch]:
                if beta < perDrug_AlphaBetaGammaThresholds[batch][drug2]['Lower']:
                    Interactions['Decreasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch, 'Mahalanobis':maha_General}
                if alpha < perDrug_AlphaBetaGammaThresholds[batch][drug1]['Lower']:
                    Interactions['Decreasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch, 'Mahalanobis':maha_General}

            #5.)
            #One Single active, combination active ==> The one Single gets modulated
            elif drug1_significance == True and drug2_significance == False and combi_significance == True:
                #print tmp
                #if gamma > Emergent_Threshold[batch]:
                if gamma > max([perDrug_AlphaBetaGammaThresholds[batch][drug1]['Emergent'],perDrug_AlphaBetaGammaThresholds[batch][drug2]['Emergent']]):
                    Interactions['Emergent'][drug1+','+drug2] = {'Value':gamma,'Batch':batch, 'Mahalanobis':maha_General}
                if alpha > perDrug_AlphaBetaGammaThresholds[batch][drug1]['Upper']:
                    Interactions['Increasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch , 'Mahalanobis':maha_General}
                if alpha < perDrug_AlphaBetaGammaThresholds[batch][drug1]['Lower']:
                    Interactions['Decreasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch , 'Mahalanobis':maha_General}

            #6.)
            #One Single active, combination active ==> The one Single gets modulated
            elif drug1_significance == False and drug2_significance == True and combi_significance == True:
                #if gamma > Emergent_Threshold[batch]:
                if gamma > max([perDrug_AlphaBetaGammaThresholds[batch][drug1]['Emergent'],perDrug_AlphaBetaGammaThresholds[batch][drug2]['Emergent']]):
                    Interactions['Emergent'][drug1+','+drug2] = {'Value':gamma,'Batch':batch, 'Mahalanobis':maha_General}
                if beta > perDrug_AlphaBetaGammaThresholds[batch][drug2]['Upper']:
                    Interactions['Increasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch , 'Mahalanobis':maha_General}
                if beta < perDrug_AlphaBetaGammaThresholds[batch][drug2]['Lower']:
                    Interactions['Decreasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch , 'Mahalanobis':maha_General}

            #7.)
            #Both Single active, combination active ==> Both Single gets modulated
            elif drug1_significance == True and drug2_significance == True and combi_significance == True:
                if gamma > max([perDrug_AlphaBetaGammaThresholds[batch][drug1]['Emergent'],perDrug_AlphaBetaGammaThresholds[batch][drug2]['Emergent']]):
                    Interactions['Emergent'][drug1+','+drug2] = {'Value':gamma,'Batch':batch, 'Mahalanobis':maha_General}
                if alpha > perDrug_AlphaBetaGammaThresholds[batch][drug1]['Upper']:
                    Interactions['Increasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch , 'Mahalanobis':maha_General}
                if alpha < perDrug_AlphaBetaGammaThresholds[batch][drug1]['Lower']:
                    Interactions['Decreasing'][drug2+','+drug1] = {'Value':alpha,'Batch':batch , 'Mahalanobis':maha_General}
                if beta > perDrug_AlphaBetaGammaThresholds[batch][drug2]['Upper']:
                    Interactions['Increasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch, 'Mahalanobis':maha_General }
                if beta < perDrug_AlphaBetaGammaThresholds[batch][drug2]['Lower']:
                    Interactions['Decreasing'][drug1+','+drug2] = {'Value':beta,'Batch':batch, 'Mahalanobis':maha_General }

    # tally this random network's interaction counts
    NumberOfInteractions = 0
    for iT in interactionTypes:
        #print 'Number %s: %d' %(iT , len(Interactions[iT]))
        NumberOfInteractions = NumberOfInteractions + len(Interactions[iT])

    number_significant_pairs.append(InteractionCount)
    number_interactions.append(NumberOfInteractions)
    number_increasing.append(len(Interactions['Increasing']))
    number_decreasing.append(len(Interactions['Decreasing']))
    number_emergent.append(len(Interactions['Emergent']))
# -

# ## 4.
# Save results
# Create a result file

#Print all results
fp_out = open('../results/Create_DPI_Network_Random/Results.csv','w')
fp_out.write('Type,Counts\n')
fp_out.write('NumberSignificantPairs,'+';'.join([str(x) for x in number_significant_pairs])+'\n')
# NOTE(review): the keys 'NumberInteractionss' and 'NumberDecerasing' below are
# misspelled in the output file; downstream scripts may parse these exact keys,
# so they are left unchanged here.
fp_out.write('NumberInteractionss,'+';'.join([str(x) for x in number_interactions])+'\n')
fp_out.write('NumberIncreasing,'+';'.join([str(x) for x in number_increasing])+'\n')
fp_out.write('NumberDecerasing,'+';'.join([str(x) for x in number_decreasing])+'\n')
fp_out.write('NumberEmergent,'+';'.join([str(x) for x in number_emergent])+'\n')
fp_out.close()

# ## 5. Compare with real results
# Create plots and calculate Z-Score showing the difference between the 10k randomly created DPI networks and the real DPI network (perturbome)

# +
DPI = nx.read_gml('../data/Create_DPI_Network_Random/DPI_Network_Complete.gml')

# NOTE(review): hard-coded count of significant real pairs — keep in sync with
# the real-network analysis that produced it.
real_number_significant_pairs = 11961
real_number_interactions = len(DPI.edges())

# collect the interaction type of every (multi-)edge in the real network
interactions = []
for edge in list(set(DPI.edges())):
    for key in DPI[edge[0]][edge[1]]:
        interactions.append(DPI[edge[0]][edge[1]][key]['Type'])

real_number_increasing = interactions.count('Increasing')
real_number_decreasing = interactions.count('Decreasing')
real_number_emergent = interactions.count('Emergent')

print 'Real Results:'
print 'Number Significant pairs: %d' %real_number_significant_pairs
print 'Number Interactions: %d' %real_number_interactions
print 'Number Increasing: %d' %real_number_increasing
print 'Number Decreasing: %d' %real_number_decreasing
print 'Number Emergent: %d' %real_number_emergent
# +
# Each plot: histogram of the 10k random-network counts, with the real network's
# value as a dashed vertical line and its Z-score in the legend.

# Number Significant Pairs
ZScore = (real_number_significant_pairs - np.mean(number_significant_pairs))/np.std(number_significant_pairs)
plt.title('Number Significant Pairs')
plt.hist(number_significant_pairs,bins=30, color='#969696', edgecolor='#969696', linewidth=1.2)
plt.axvline(real_number_significant_pairs, ls='--', c ='#40B9D4')
plt.legend(['ZScore: %.2f' %ZScore])
#plt.show()
plt.savefig('../results/Create_DPI_Network_Random/Number_Significant_Pairs.pdf')
plt.close()

# Number Interactions
ZScore = (real_number_interactions - np.mean(number_interactions))/np.std(number_interactions)
plt.title('Number Interactions')
plt.hist(number_interactions,bins=30, color='#969696', edgecolor='#969696', linewidth=1.2)
plt.axvline(real_number_interactions, ls='--', c ='grey')
plt.legend(['ZScore: %.2f' %ZScore])
#plt.show()
plt.savefig('../results/Create_DPI_Network_Random/Number_Interactions.pdf')
plt.close()

# Number Decreasing
ZScore = (real_number_decreasing - np.mean(number_decreasing))/np.std(number_decreasing)
plt.title('Number Decreasing')
plt.hist(number_decreasing,bins=30, color='#969696', edgecolor='#969696', linewidth=1.2)
plt.axvline(real_number_decreasing, ls='--', c ='#F70020')
plt.legend(['ZScore: %.2f' %ZScore])
#plt.show()
plt.savefig('../results/Create_DPI_Network_Random/Number_Decreasing.pdf')
plt.close()

# Number Increasing
ZScore = (real_number_increasing - np.mean(number_increasing))/np.std(number_increasing)
plt.title('Number Increasing')
plt.hist(number_increasing,bins=28, color='#969696', edgecolor='#969696', linewidth=1.2)
plt.axvline(real_number_increasing, ls='--', c ='#ACD900')
plt.legend(['ZScore: %.2f' %ZScore])
#plt.show()
plt.savefig('../results/Create_DPI_Network_Random/Number_Increasing.pdf')
plt.close()

# Number Emergent
ZScore = (real_number_emergent - np.mean(number_emergent))/np.std(number_emergent)
plt.title('Number Emergent')
plt.hist(number_emergent,bins=30, color='#969696', edgecolor='#969696', linewidth=1.2)
plt.axvline(real_number_emergent, ls='--', c ='#0096FF')
plt.legend(['ZScore: %.2f' %ZScore])
#plt.show()
plt.savefig('../results/Create_DPI_Network_Random/Number_Emergent.pdf')
plt.close()
code/10a_Create_DPI_Network_Random.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.9 64-bit (''mlaos'': conda)'
#     language: python
#     name: python3
# ---

# In this notebook, I estimate the mean and standard deviation of the pixel values in the single-donut images. I will use these to normalize the donut images. I am only using the single-donut values, because eventually, I will mask out all the blends, so the blending stars won't contribute to the dynamic range of the pixels.

from ml_aos.dataloader import Donuts
import numpy as np

train_set = Donuts()

# Accumulate the per-image mean and std, but only for samples without blends.
means = []
stds = []
for idx, sample in enumerate(train_set):
    if sample["n_blends"] == 0:
        pixels = sample["image"]
        means.append(pixels.mean())
        stds.append(pixels.std())
    # stop after sample index 10_000 (i.e. the first 10,001 samples)
    if idx == 10_000:
        break

np.mean(means), np.mean(stds)
notebooks/donut_normalization_ipynb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + raw_mimetype="text/restructuredtext" active="" # .. currentmodule:: composeml # # ====================== # Using Label Transforms # ====================== # # In this guide, we will demonstrate how to use the transforms that are available on :class:`LabelTimes`. Each transform will return a copy of the label times. This is useful for trying out multiple transforms in different settings without having to recalculate the labels. As a result, we could see which labels give a better performance in less time. # # Generate Labels # =============== # # Let's start by generating labels on a mock dataset of transactions. Each label is defined as the total spent by a customer given one hour of transactions. # + import composeml as cp def total_spent(df): return df['amount'].sum() label_maker = cp.LabelMaker( labeling_function=total_spent, target_entity='customer_id', time_index='transaction_time', window_size='1h', ) labels = label_maker.search( cp.demos.load_transactions(), num_examples_per_instance=10, label_type='continuous', minimum_data='2h', gap='2min', verbose=True, ) # + raw_mimetype="text/restructuredtext" active="" # To get an idea on how the labels looks, we preview the data frame. # - labels.head() # + raw_mimetype="text/restructuredtext" active="" # Threshold on Labels # =================== # # :meth:`LabelTimes.threshold` will create binary labels by testing if label values are above a threshold. In this example, a threshold is applied to determine which customers spent over 100. # - labels.threshold(100).head() # + raw_mimetype="text/restructuredtext" active="" # Lead Labels Times # ================= # # :meth:`LabelTimes.apply_lead` will shift the label time earlier. This is useful for training a model to predict in advance. 
In this example, a one hour lead is applied to the label times. # - labels.apply_lead('1h').head() # + raw_mimetype="text/restructuredtext" active="" # Bin Labels # ========== # # :meth:`LabelTimes.bin` will bin the labels into discrete intervals. There are two types of bins. Bins could either be based on values or quantiles. Additionally, the widths of the bins could either be defined by the user or divided equally. The following examples will go through each type. # # Value Based # ----------- # # To use bins based on values, :code:`quantiles` should be set to :code:`False` which is the default value. # # Equal Width # ~~~~~~~~~~~ # # To group values into bins of equal width, set bins as a scalar value. In this example, the total spent is grouped into bins of equal width. # - labels.bin(4, quantiles=False).head() # + raw_mimetype="text/restructuredtext" active="" # Custom Widths # ~~~~~~~~~~~~~ # # To group values into bins of custom widths, set bins as an array of values to define edges. In this example, the total spent is grouped into bins of custom widths. # - inf = float('inf') edges = [-inf, 34, 50, 67, inf] labels.bin(edges, quantiles=False,).head() # + raw_mimetype="text/restructuredtext" active="" # Quantile Based # -------------- # # To use bins based on quantiles, :code:`quantiles` should be set to :code:`True`. # # Equal Width # ~~~~~~~~~~~ # # To group values into quantile bins of equal width, set bins to the number of quantiles as a scalar value (e.g. 4 for quartiles, 10 for deciles, etc.). In this example, the total spent is grouped into bins based on the quartiles. # - labels.bin(4, quantiles=True).head() # + raw_mimetype="text/restructuredtext" active="" # To verify quartile values, we could check the descriptive statistics. 
# - stats = labels.total_spent.describe() stats = stats.round(3).to_string() print(stats) # + raw_mimetype="text/restructuredtext" active="" # Custom Widths # ~~~~~~~~~~~~~ # # To group values into quantile bins of custom widths, set bins as an array of quantiles. In this example, the total spent is grouped into quantile bins of custom widths. # - quantiles = [0, .34, .5, .67, 1] labels.bin(quantiles, quantiles=True).head() # + raw_mimetype="text/restructuredtext" active="" # Label Bins # ---------- # # To assign bins with custom labels, set :code:`labels` to the array of values. The number of labels need to match the number of bins. In this example, the total spent is grouped into bins with custom labels. # - values = ['low', 'medium', 'high'] labels.bin(3, labels=values).head() # + raw_mimetype="text/restructuredtext" active="" # Describe Labels # =============== # # :meth:`LabelTimes.describe` will print out the distribution with the settings and transforms that were used to make the labels. This is useful as a reference for understanding how the labels were generated from raw data. Also, the label distribution is helpful for determining if we have imbalanced labels. In this examlpe, a description of the labels is printed after transforming the labels into discrete values. # - labels.threshold(100).describe() # + raw_mimetype="text/restructuredtext" active="" # Sample Labels # ============= # # :meth:`LabelTimes.sample` will sample the labels based on a number or fraction. Samples can be reproduced by fixing :code:`random_state` to an integer. # # To sample 10 labels, :code:`n` is set to 10. # - labels.sample(n=10, random_state=0) # + raw_mimetype="text/restructuredtext" active="" # Similarly, to sample 10% of labels, :code:`frac` is set to 10%. 
# - labels.sample(frac=.1, random_state=0) # + raw_mimetype="text/restructuredtext" active="" # Categorical Labels # ------------------ # # When working with categorical labels, the number or fraction of labels for each category can be sampled by using a dictionary. Let's bin the labels into 4 bins to make categorical. # - categorical = labels.bin(4, labels=['A', 'B', 'C', 'D']) # + raw_mimetype="text/restructuredtext" active="" # To sample 2 labels per category, map each category to the number 2. # - n = {'A': 2, 'B': 2, 'C': 2, 'D': 2} categorical.sample(n=n, random_state=0) # + raw_mimetype="text/restructuredtext" active="" # Similarly, to sample 10% of labels per category, map each category to 10%. # - frac = {'A': .1, 'B': .1, 'C': .1, 'D': .1} categorical.sample(frac=frac, random_state=0)
docs/source/guides/using_label_transforms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.11 ('env_radar')
#     language: python
#     name: python3
# ---

import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

data = pd.read_csv("./dataset_sleep/tester/0.csv")
data

# per-epoch sleep stage codes (0=DEEP, 1=LIGHT, 2=REM, anything else=AWAKE,
# per the axis labels used in the plots below)
stage = np.array(data["sleep"])
stage

# Count how many epochs fall into each of the 4 stages.
# FIX: use an elif chain — the original used independent `if` statements, so the
# trailing `else` also incremented the AWAKE bin for stages 0 and 1 (double counting).
array_num = np.zeros(4)
for cur_stage in stage:
    if cur_stage == 0:
        array_num[0] += 1
    elif cur_stage == 1:
        array_num[1] += 1
    elif cur_stage == 2:
        array_num[2] += 1
    else:
        array_num[3] += 1
array_num

# +
import seaborn as sns
from matplotlib import gridspec

sns.set()
grid = plt.GridSpec(3, 3, wspace=0.5, hspace=0.5)
plt.figure(figsize=(20,10))

# top 2/3: sleep stage over time
plt.subplot(grid[0:2,:])
plt.plot(stage)
plt.ylim(-1, 4)
plt.yticks([0, 1, 2, 3], ['DEEP', 'LIGHT', 'REM', 'AWAKE'])
plt.title('SLEEP STAGE', size=14)
plt.ylabel('STAGE', size=14)
plt.xlabel('TIME', size=14)

# bottom 1/3: per-stage epoch counts
plt.subplot(grid[2,:])
plt.barh(np.arange(len(array_num)), array_num, 0.4, color='royalblue') # cornflowerblue
plt.yticks([0, 1, 2, 3], ['DEEP', 'LIGHT', 'REM', 'AWAKE'])
plt.xticks(np.arange(0, len(array_num)+1, 1))
plt.title('SLEEP STAGE ACCUMULATION', size=14)
plt.ylabel('STAGE', size=14)
plt.xlabel('NUMBER OF EACH STAGE', size=14)
# overrides the xticks set above so each bar's count is labelled directly
plt.xticks(array_num, array_num.astype("int"))
plt.savefig('./dataset_sleep/stage_fig/' + '0' + '.png') # save the sleep-stage figure
plt.show()
# +
import pandas as pd
import numpy as np

# scratch cell: clipping a small Series at 2
aa = []
aa.append(1)
aa.append(2)
aa.append(3)
aa.append(4)
aa.append(2)
aa = pd.Series(aa)
aa = aa.fillna(0)
# aa.tolist()
# +
aa[aa.loc[aa.values > 2].index] = 2
# -
aa = aa.tolist()
aa
sleep_real_time_pohua_modify_0503/Untitled-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.12 ('cryptoenv') # language: python # name: python3 # --- # # Produce a surf report # # The `SurfReport` object features additional reporting functionalities, such as plotting methods and easily readable surf reports. from pysurfline import SurfReport # List of known spot id # # | Spot Id | Name | # |---|---| # | 5842041f4e65fad6a7708890 | Pipeline (USA)| # | 584204204e65fad6a7709b09 | Varazze (ITA)| # | 5842041f4e65fad6a7708b4b | Uluwatu (IDN) # | 5842041f4e65fad6a7708c8e | La Graviere (FRA)| # Initializing a `SurfReport` objects does nothing else than calling a `SpotForecast` object with the specified `params` for requesting the desired response data by the Surfline API. params = { "spotId": "5842041f4e65fad6a7708890", "days": 3, "intervalHours": 3, } report = SurfReport(params) report.api_log # With respect to the `SpotForecast` objects, that stores separately all the raw response data as class attributes, the `SurfReport` object has the additional `df` attribute to easily inspect all the hourly data fetched from the Surfline servers merged into a readable `pandas.DataFrame` object. report.df.head() # Moreover, a simplified version of the dataframe is available with the `surf` attribute. This represents the surf data that is actually plotted. report.surf.head() # Lets then call the `plot()` method, that returns a `matplotlib` figure representing the minimum and maximum **surf size** that is forecasted for the spot. # # **Daylight hours** are shown in the plot, with darkgrey representing night and lightgrey representing dusk and dawn. # # **Wind speed and direction** is annotated at the top. f=report.plot() f.savefig("../images/surfreport_pipeline.jpeg")
docsrc/source/examples/surf_report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Stellar Population Photometric Synthesis with Artificial Intelligence as revealed by S-PLUS
#
# ### Student: <NAME>
# ### NUSP: 9847710
#
# This notebook is being submitted as the Term Project, required for the conclusion of the course AGA5926 - Astronomical databases and astrostatistics in the era of Big Data.

# ---
#
# # 1. Introduction
#
# As technology evolves, telescopes are ever more capable of producing high quality data in much higher volume. In the last decade, surveys like Sloan Digital Sky Survey were able to generate ~200 GB of data each night. Today's generation of telescopes (e.g. LSST, TMT) are expected to produce 30-90 TB of data **each night**. This is an incredible amount of information, and astronomers are in need of highly-efficient tools that can reliably extract information without the need of classical time consuming processes.
#
# In this project, we want to automate the process of predicting intrinsic stellar population parameters (such as stellar mass or mean star age) from photometry. These parameters are usually extracted from the spectra with softwares such as STARLIGHT or FADO. What we aim to do here is to skip the need for the spectra altogether and use only the photometry to obtain these parameters. For this, we will make use of Artificial Intelligence and a large sample of training data.
#
# Throughout the course of this notebook, we will provide a network that can reliably predict a number of different STARLIGHT parameters directly from S-PLUS photometry.

# ---
#
# # 2. The libraries
#
# For this project we will make use of many different python libraries. The first 3 (pandas, numpy and matplotlib) are all used for data storage and visualization. Keras and Tensorflow will be used as the main framework for our machine learning model. AstroML and SKlearn will be used to facilitate some operations.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import keras
import tensorflow as tf
import tensorflow.keras.backend as K
from keras import models
from keras import layers

from astroML.utils import split_samples
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# -

# ---
#
# # 1. The data
#
# As with any machine learning problem, we need data. Remember that we want to have the S-PLUS photometries as inputs and STARLIGHT parameters as outputs.
#
# Let us start with the outputs: the STARLIGHT parameters. We will be using the results obtained from <NAME>'s PhD thesis (<NAME>., 2019). Werle applied a modified version of the STARLIGHT software to ~200,000 objects, using a combined spectra from SDSS and GALEX to improve both the UV and continuum emission. This application provided us with the necessary parameters estimation for a lot of galaxies.
#
# The DR2 photometries were gathered directly from the S-PLUS cloud website. We decided to use the PETRO apertures as these had the best results for photometric redshift estimations. In the future we want to update to the iDR3 database and try different apertures.
#
# We also noticed from our internal testing that the redshift played an important role in determining the STARLIGHT parameters. Because of this, we decided to also use the SDSS spectroscopic redshift for each object. We intend to change these spectroscopic redshifts in our training data to photometric redshifts from <NAME>' estimations based on S-PLUS photometry.
#
# Finally, we crossmatched Werle's STARLIGHT estimations with S-PLUS objects based on RA and Dec with the help of TOPCAT. This yielded a database with 55,778 galaxies, each one containing 13 features (12 S-PLUS photometries + 1 spectroscopic redshift) and 19 STARLIGHT parameters (e.g. stellar mass, mean star age, *etc*). This crossmatch can be downloaded as a file from the same Github as this notebook (https://github.com/vcernic/AGA5926-final-project).
#
# In this section we will do the following:
# * Read the data file
# * Define which parameter we will study
# * Visualize the training data
# * Normalize and split the data

# ## Reading the data
#
# Reading files is made easier with the use of external libraries such as Pandas.

# +
# Reading the file
file = 'splus_starlight.dat'
data = pd.read_csv(file, sep=',', engine='python')

data.describe()
# -

# Let's take a look at the distribution of our 13 features (12 S-PLUS filters + redshift)

# +
# Defining the 12 bands + z redshift
features = ['uJAVA_petro', 'F378_petro', 'F395_petro', 'F410_petro',
            'F430_petro', 'g_petro', 'F515_petro', 'r_petro',
            'F660_petro', 'i_petro', 'F861_petro', 'z_petro',
            'z'] # 12 filter + 1 z

# Plotting all filters: the 12 photometric bands fill rows 0-2 of a
# 4x4 grid (3 rows x 4 columns = 12 panels).
fig, axs = plt.subplots(nrows = 4, ncols = 4, figsize=(16,9))
for i in range(3):
    for j in range(4):
        axs[i][j].hist(data[features[(i)*4 + j]], bins=100)
        axs[i][j].set_title(features[(i)*4 + j])

# Plotting the redshift distribution (13th feature) in the last row
axs[3][0].hist(data[features[-1]], bins=100)
axs[3][0].set_title(features[-1])

# Deleting extra (unused) axes of the last row
for i in range(1,4):
    fig.delaxes(axs[3][i])

plt.tight_layout()
plt.show()
# -

#
# ## Defining the expected parameter
#
# In this notebook we will only be studying the stellar mass (**Mstar**) parameter. Other parameters should present similar results.
#

# +
# The parameter (target column) that we will be studying
param = 'Mstar'

if param in data.columns:
    print(f"All good to go! {param} is a valid parameter")
else:
    raise ValueError("Something is wrong. You need to choose a valid parameter from data.columns!")
# -

# Let's take a look at the target parameter data to see its distribution.

plt.hist(data[param], bins=100)
plt.title(param)
plt.show()

# ## Normalizing and splitting the data
#
# Instead of having the network train on data that fits in different ranges, it is usually a good idea to normalize the data between 0 and 1. This normally helps the network learn, and to accomplish this we will use sklearn MinMaxScaler to make this linear transformation more efficiently on both the features and the targets.
#
# Note that at the end of the project we want to run through the model some S-PLUS data that is NOT normalized. This means that for us to have a more robust perception of how the model will perform we need to remove test data from the initial normalization of the training set. This test set will be kept separate from all training, and will be used to estimate the final model's performance at the end of the notebook.

# +
# Separating the data into two sets: train and test (80% train and 20% test split)
(data_train, data_test) , (targets_train, targets_test) = split_samples(data[features], data[param],
                                                                        [0.8, 0.2],
                                                                        random_state = 1234) # for reproducibility

# Targets need to be reshaped to column vectors for Keras to read
targets_train = np.reshape(targets_train, (-1, 1))
targets_test = np.reshape(targets_test, (-1, 1))

# +
# Scaling the data

# Generating the scalers
scaler_data = MinMaxScaler()
scaler_targets = MinMaxScaler()

# Fitting the scalers into the train data ONLY (avoids test-set leakage)
scaler_data.fit(data_train)
scaler_targets.fit(targets_train)

# Scaling train and test sets
data_train = scaler_data.transform(data_train)
data_test = scaler_data.transform(data_test)
targets_train = scaler_targets.transform(targets_train)
targets_test = scaler_targets.transform(targets_test)
# -

# ---
#
# # 2. The Model
#
# We can easily categorize our problem as a Regression problem. Remember that the final goal of this project is to create a network that can take a given S-PLUS photometry and return a single galaxy parameter.
# There are many ways that we can approach a regression problem: with Neural Networks, with Classical Machine Learning models (e.g. Random Forest), *etc*. Here we are going to apply a simple Neural Network.
#
# Here are the steps we will follow in this section:
# * Create a builder for the Neural Network
# * Run a K-Fold Cross-Validation for a high number of epochs
# * Analyze the history of the network

# ## Building the Network
#
# The network I present here is a simple Neural Network with just 2 hidden activation layers. After testing some other architectures with more complexity (e.g. more layers) this is the best configuration I found, though I should mention that I've yet to try some more complex activation layers like DenseVariational. I also tested a few classical Machine Learning models, such as Random Forest Regressor, but it didn't perform as well as a Neural Network. This exact configuration of the network was inspired by the "Boston Housing problem" tackled by <NAME> in his book "Deep Learning with Python".
#
# There was some testing regarding where the Dropout layers should be placed (before or after activation). I had almost identical results in both cases, and it seems that this is something related to the ReLU activation itself as explained by <NAME> in his Machine Learning FAQs (https://sebastianraschka.com/faq/docs/dropout-activation.html). At the end I decided to place it after each activation layer, though this decision seemed mostly irrelevant. The value for the Dropout was set at 30% after some hyperparameter optimization and this seems to be within the community consensus of best Dropout values for Regression.
#
# Even though I spent some time doing hyperparameter optimization (number of neurons, learning rate, batch size, *etc*.) I will not be putting any code for optimization here just because it is not the objective of the project.
# I just want to make a point that the values set for the hyperparameters were not chosen at random, and each one of them was thoroughly discussed.
#
# Now that we have some idea of why this specific network was chosen, let's make a function to build it! Note that the data is passed as an argument so that the input shape can be properly set.

def build_network(data_train):
    '''Builds the network model.

    Parameters:
        data_train: 2-D array of training features; only its number of
            columns (data_train.shape[1]) is used, to size the input layer.

    Returns:
        A compiled keras Sequential regression model: two 128-unit ReLU
        hidden layers each followed by 30% dropout, a single linear output
        unit, Adam optimizer (lr=1e-4), MSE loss and MAE/MSE metrics.
    '''
    model = models.Sequential()
    # Hidden layer 1 + dropout (30% chosen via hyperparameter search)
    model.add(layers.Dense(128, activation='relu', input_shape=(data_train.shape[1],)))
    model.add(layers.Dropout(0.3))
    # Hidden layer 2 + dropout
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.3))
    # Single linear output unit: this is a regression network
    model.add(layers.Dense(1))

    opt = keras.optimizers.Adam(learning_rate = 0.0001)
    model.compile(optimizer=opt, loss='mse', metrics=['mae', 'mse'])

    return model

# The network will calculate, at each epoch, the Mean Average Error (MAE) and Mean Squared Error (MSE) of the (hopefully) unbiased validation set. These are normally the best metrics to use for evaluating Regression models, as both of them give us some perspective of the error between the values predicted by the network and the "true" targets set by the database. The main difference between these two metrics is that the MAE is more robust to data with outliers, while the MSE is normally better for converging as we move closer to the loss minima.
#
# A decision has to be made as to which one of those metrics will be used as the loss function. In Regression models, the MAE and MSE loss are generally called L1 and L2 loss, respectively. There is no "superior loss", as each one has its pros and cons, but for the Mstar parameter specifically I will be using the L2 loss as it had some better results. Despite this decision, I should mention that I neglected some other more complex loss functions that could be tested, such as Smooth Mean Absolute Error (Huber Loss), for the sake of simplicity.

# ## Using K-Fold Cross-Validation
#
# ***Disclaimer: the next block might take several minutes to run.
# In my computer, it took ~20 minutes.***
#
# If we want to have a good understanding of the performance of a network, K-Fold is normally a good way to do it.
#
# In most simple Machine Learning applications, people just separate a percentage of the total dataset for training and the rest for testing/validation (*sidenote: testing and validation are often perceived as being the same, which is not true*). This is normally fine if you have a small dataset or are trying to solve a simple problem, but as your need for a proper performance evaluation gets higher this method starts to lack precision. A good way of measuring the performance of the network would be to run the model with different training sets. But you may ask yourself *"how can you do that without generating more data?"* The answer is quite simple: separate your whole dataset into various non-intersecting groups and train the network a lot of times, each iteration selecting a different group to be the validation set and the rest to be the training set. This is essentially what the K-Fold Cross-Validation is about: separating your dataset into "K" different "folds" and "cross-validating" them.
#
# These cross-validating methods have an obvious problem related to the amount of processing time needed. Each time you iterate through your folds, you have to run the model from scratch. This means that the amount of time necessary to run your K-Fold increases linearly the more splits you make. While this won't be much of a problem in this exact project, if you're trying to solve a complex problem the amount of processing required can very easily surpass absurd time scales.
#
# For the sake of time, I will use K-Fold Cross-Validation as the method for evaluating the model. I want to note that many other robust cross-validation methods are available: Leave One Out (LOOCV), Shuffle & Split, *etc*, but most of them require much larger amounts of time to be consumed.
#
# The split made here will be for 8 folds.
# This means that as the total training data is 80% of the whole dataset, each fold will have 70% of the whole for training and 10% for validation.
#
# In the next block of code, each one of the 8 K-Fold iterations goes through the following steps:
# * Separate a new training and validation set
# * Compile a fresh new model
# * Fit the model to the training set
# * Save the history of the fold for later analysis
#
# Each fold will run for 500 epochs so we can both understand their performance and look at the best value for the number of epochs! Now, let's run some K-fold!

# +
# Variables for storing data during the K-Fold
all_histories = []
fold_number = 0

# Starting the K-Fold loop! `train` and `val` are index arrays selecting
# this fold's training and validation rows.
for train, val in KFold(n_splits = 8).split(data_train, targets_train):

    # Cleaning cluttering memory data used by Keras
    K.clear_session()

    # Building a fresh model for this fold
    model = build_network(data_train)

    # Print for user feedback
    print(f'------ Training fold number {fold_number} ------')

    # Training the model on THIS FOLD's training rows only.
    # BUGFIX: the original call passed the full data_train/targets_train
    # here, so every fold was trained on the entire training set --
    # including its own validation fold -- leaking validation data and
    # defeating the purpose of the cross-validation described above.
    history = model.fit(data_train[train], targets_train[train],
                        validation_data=(data_train[val], targets_train[val]),
                        epochs=500,
                        batch_size=128,
                        verbose=0)

    # Storing relevant validation data
    all_histories.append(history.history)
    fold_number += 1

print("\n****** Training Completed! ******")
# -

# ## Analyzing the results
#
# After letting our network run with different training sets for a large amount of time, we can look at the data and see at what epoch the network starts overfitting. This is done so we can find the best number of epochs that we should run our final model for to give us the best result.
#
# To find the best number of epochs, we will look at the history of our loss, which is the MSE. This is normally a fairly subjective decision: we should look for the epoch where the validation loss is at its minimum before it starts increasing due to overfitting.
# Because I want to try to make a fully working code without any human input, I'll define the best epoch as the one that has the lowest average loss from all 8 folds. A better analysis could be made taking into account the loss of the surrounding epochs, but I didn't have time to implement it.
#
# Note that the information about each epoch's validation loss (and all other metrics) is stored inside the "history" variable.

# +
# Calculating the average loss history between all 8 folds
# (element i is the mean over folds of the epoch-i loss)
average_train_loss_history = [np.mean([h["loss"][i] for h in all_histories]) for i in range(500)]
average_val_loss_history = [np.mean([h["val_loss"][i] for h in all_histories]) for i in range(500)]

# Finding the best value for the number of epochs
best_epoch = average_val_loss_history.index(min(average_val_loss_history)) + 1 # +1 because index is >= 0, while epoch is > 0
# -

# Let's visualize the best number of epochs and the loss history!

# Plotting the validation loss for all 500 epochs.
# NOTE(review): the first ~20-30 epochs are skipped below (slices [29:]
# and [19:]), presumably to drop the huge initial losses from the plot --
# the two curves intentionally start at different epochs.
fig = plt.figure(figsize=(16,6))
plt.plot(range(30, len(average_train_loss_history) + 1), average_train_loss_history[29:], label='train loss')
plt.plot(range(20, len(average_val_loss_history) + 1), average_val_loss_history[19:], label='validation loss')
plt.axvline(best_epoch, c='r', ls='--')
plt.title(f"Best epoch: {best_epoch}", fontsize=22)
plt.xlabel("Epochs", fontsize=20)
plt.ylabel("Loss (MSE)", fontsize=20)
plt.legend(fontsize=20)
plt.tight_layout()
plt.show()

# Now that the best number of epochs is set, we can look at how much the different folds changed within that epoch. This is done to have a better perspective of how much the model can vary due to different datasets.

# +
# Printing scores for each fold at the best epoch
print(f"Score per fold at epoch {best_epoch}")
print("-----------------------------------------")
loss_array = []
fold_no = 1
for fold_history in all_histories:
    # best_epoch is 1-based, history lists are 0-based
    mse = fold_history['val_loss'][best_epoch-1]
    print(f"> Fold {fold_no} - " + "Loss: {:.5f} - RMSE: {:.5f}".format(mse, np.sqrt(mse)))
    print("-----------------------------------------")
    loss_array.append(mse)
    fold_no += 1

print(f"\nAverage Loss: {np.mean(loss_array)} (+- {np.std(loss_array)})")
print(f"Average RMSE: {np.mean([np.sqrt(x) for x in loss_array])} (+- {np.std([np.sqrt(x) for x in loss_array])})\n")
# -

# As we can see, the deviation for each fold is on average ~2%.

# ---
# # 3. The Test data
#
# From the previous analysis we found that the model behaves very well with this kind of data. We also studied the best number of epochs for it to run. Now, to have a final measure of performance of what the model can achieve we can use the test data we kept separated since the beginning of this notebook. This last performance test is trying to simulate how the model will behave when we apply new, unknown S-PLUS data.
#
# Here are the steps we will follow in this section:
# * Train a brand new model
# * Measure performance with test data
# * Visualize a predict plot

# ## Training a new model
#
# The first step to obtain a good performance test is to compile a fresh new model from scratch and train it with all available data. This means that instead of splitting the train data into train and validation (like we did in the K-Fold) we will use the whole dataset to train this model. The validation set is just used as a way to measure the network performance throughout the epochs, but we don't need that information anymore as we will be measuring the network performance via the test set, which is already separated.
# + # Cleaning cluttering data from Keras K.clear_session() # Building the model model = build_network(data_train) # Training the model model.fit(data_train, targets_train, epochs=best_epoch, batch_size=128, verbose=0) print("Model trained successfully!") # - # ## Evaluating with the test set # # After the final model has trained, we can estimate the final performance measurements using the test dataset. # # It is important to note that the final performance metrics (MAE, RMSE, *etc*) should be calculated using un-normalized results. The metrics that we used while training of the network doesn't hold any physical value apart from telling us how the network performs in comparison with itself (or with another network using the same training/test set). # Evaluating the performance of the model for the test set results = model.evaluate(data_test, targets_test, batch_size=128, verbose = 0) # + # Predicting the parameter with the test data predict_test = model.predict(data_test) # Scaling back both the prediction and the targets x = scaler_targets.inverse_transform(predict_test) y = scaler_targets.inverse_transform(targets_test) # Printing out the metrics print("====== Test Predict Metrics ======") print("Test MAE =", mean_absolute_error(y,x)) print("Test MSE =", mean_squared_error(y,x)) print("Test RMS =", np.sqrt(mean_squared_error(y,x))) print("Test R2 =", r2_score(y,x)) print("\n====== Test Targets Info ======") print("Mean =", np.mean(y), "+-", np.std(y)) print("Median =", np.median(y), "+-", np.std(y)) # - # From these last results we can see that the errors (both MAE and RMS) turn out to be ~2% of the mean and median. They are also 3 to 4 times lower than standard deviation. # ## Visualizing the predictions # # One last step we can take is to plot the test targets versus the test predictions. This is helpful to see if there are any patterns emerging from the data distribution while also having something to visualize other than just abstract numbers. 
# # Here we will use a Hexbin plot, which is basically a 2-Dimensional Histogram. We want the predictions to be as close as possible to the targets, so we would like to see the most ammount of points around the 1-to-1 line (red line). # + # Creating the plot fig = plt.figure(figsize=(10, 8)) # Plotting the un-scaled predictions and targets hb = plt.hexbin(x, y, gridsize=100, cmap='viridis') plt.plot([min(x), max(x)], [min(x), max(x)], '--r', alpha=0.4, label='1x1 line') # Just some cosmetic code plt.axis([min(x), max(x), min(y), max(y)]) plt.title(f"Final model w/ test data ({param})", fontsize=15) plt.xlabel("model predict", fontsize=12) plt.ylabel("targets", fontsize=12) cb = fig.colorbar(hb, ax=fig.axes) cb.set_label('counts') plt.legend(loc="upper left") plt.show() # - # --- # # # 4. Conclusions and future work # # In this project, which is the basis for my Master's thesis, we managed to train a Neural Network that can **reliably** obtain some select STARLIGHT parameters directly from S-PLUS photometry. # # Despite only showing the analysis for one parameter (stellar mass), you can go back and change the parameter to see their different results. It should be noted that each parameter requires an specific change to the architecture in order to achieve its best fit, but this part is still being worked on. Most parameters still can't fit as well as the stellar mass, and we need to decide based on its test metrics if it is worth applying the model to real data or not. # # There a lot that can be done to try to improve this work. A more complete analysis of the training data can be done, we can try to change the architecture to use DenseVariational layers, or maybe have a better grasp of the model's metrics by using another type of cross-validation. Still, we are quite happy with the results presented here and we'll try to improve the model as best as we can. 
#
# In the following months we aim to apply all models that worked well (based on a few thresholds that are yet to be set) to **real** S-PLUS objects that still don't have these parameters known. We especially have a lot of interest in studying objects in the Fornax region.

# ---
#
# # 5. Bibliography
#
# <NAME>. (2018). Deep Learning with Python. Shelter Island, NY: Manning Publications
#
# <NAME>., et al. 2015. Keras. https://keras.io
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2005). MNRAS, 358,363
#
# <NAME>., et al. (2019). MNRAS, 489, 241-267
#
# <NAME>. 2019. Analysis of SDSS spectra and GALEX photometry with STARLIGHT: stellar populations and dust attenuation in local galaxies. Universidade Federal de Santa Catarina, Santa Catarina, Brazil
#
# <NAME>., <NAME>., <NAME>., et al. (2018). MNRAS, 483, 2382-2397
term_project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Detailed Report

# +
from aiida import load_dbenv, is_dbenv_loaded
from aiida.backends import settings
if not is_dbenv_loaded():
    load_dbenv(profile=settings.AIIDADB_PROFILE)

from aiida.orm import load_node
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.calculation.work import WorkCalculation
from aiida.orm.calculation.job import JobCalculation

import numpy as np
import scipy.constants as const
import bqplot as bq
import ipywidgets as ipw
from IPython.display import display, clear_output, HTML
import re
import gzip
import matplotlib.pyplot as plt
from collections import OrderedDict
import urlparse
import io
from ase.data import covalent_radii, atomic_numbers
from ase.data.colors import cpk_colors
from ase.neighborlist import NeighborList
from IPython.display import FileLink
from base64 import b64encode
# -

def get_calc_by_label(workcalc, label):
    """Return the single FINISHED JobCalculation with `label` under `workcalc`."""
    qb = QueryBuilder()
    qb.append(WorkCalculation, filters={'uuid':workcalc.uuid})
    qb.append(JobCalculation, output_of=WorkCalculation, filters={'label':label})
    assert qb.count() == 1
    calc = qb.first()[0]
    assert(calc.get_state() == 'FINISHED')
    return calc

def get_calcs_by_label(workcalc, label):
    """Return all JobCalculations with `label` under `workcalc`, asserting all FINISHED."""
    qb = QueryBuilder()
    qb.append(WorkCalculation, filters={'uuid':workcalc.uuid})
    qb.append(JobCalculation, output_of=WorkCalculation, filters={'label':label})
    calcs = [ n[0] for n in qb.all() ]
    for calc in calcs:
        assert(calc.get_state() == 'FINISHED')
    return calcs

# +
# The workchain PK is passed in the notebook URL (?pk=...); jupyter_notebook_url
# is injected by the hosting appmode environment.
url = urlparse.urlsplit(jupyter_notebook_url)
pk = urlparse.parse_qs(url.query)['pk'][0]
workcalc = load_node(pk=int(pk))

orbitals_calcs = get_calcs_by_label(workcalc, "export_orbitals")
bands_calc = get_calc_by_label(workcalc, "bands")
structure = bands_calc.inp.structure
ase_struct = structure.get_ase()

bands = bands_calc.out.output_band.get_bands()
# Normalize to a 3-D array (nspins, nkpoints, nbands) for the spin-unpolarized case
if bands.ndim == 2:
    bands = bands[None,:,:]
nspins, nkpoints, nbands = bands.shape

vacuum_level = workcalc.get_extra('vacuum_level')
fermi_energy = workcalc.get_extra('fermi_energy')
total_energy = workcalc.get_extra('total_energy')
homo = workcalc.get_extra('homo')
lumo = workcalc.get_extra('lumo')
gap = workcalc.get_extra('gap')
abs_mag = workcalc.get_extra('absolute_magnetization_per_angstr')
tot_mag = workcalc.get_extra('total_magnetization_per_angstr')

print("WorkCalculation PK: %i"%workcalc.pk)
print("total energy: %.3f eV"%total_energy)
print("gap: %.3f eV"%gap)
# BUGFIX: the two lines below previously printed abs_mag under the "total"
# label and tot_mag under the "abs." label (values swapped), and misspelled
# "magnetization".
print("total magnetization/A: %.3f"%tot_mag)
print("abs. magnetization/A: %.3f"%abs_mag)
# -

def plot_spin(ispin):
    """Build the interactive band-structure figure for one spin channel.

    Returns (box, lines, eff_mass_parabola): the widget box to display, the
    band Lines mark (for later recoloring) and the initially hidden parabola
    mark used by the effective-mass fit.
    """
    center = (homo + lumo)/2.0
    x_sc = bq.LinearScale()
    y_sc = bq.LinearScale(min=center-3.0, max=center+3.0, )
    color_sc = bq.ColorScale(colors=['gray', 'red'], min=0.0, max=1.0)
    colors = np.zeros(nbands)
    Lx = structure.cell_lengths[0]
    x_max = np.pi / Lx
    ax_x = bq.Axis(label=u'kA^-1', scale=x_sc, grid_lines='solid', tick_format='.3f', tick_values=[0, x_max])
    ax_y = bq.Axis(label='eV', scale=y_sc, orientation='vertical', grid_lines='solid')
    x_data = np.linspace(0.0, x_max, nkpoints)
    # Energies referenced to the vacuum level
    y_datas = bands[ispin,:,:].transpose() - vacuum_level
    lines = bq.Lines(x=x_data, y=y_datas, color=colors, animate=True,
                     scales={'x': x_sc, 'y': y_sc, 'color': color_sc})
    homo_line = bq.Lines(x=[0, x_max], y=[homo, homo], line_style='dashed', colors=['red'],
                         scales={'x': x_sc, 'y': y_sc})
    # Initialize the parabola as a placeholder line and set visible to false.
    # Later, when it is correctly set, show it.
    eff_mass_parabola = bq.Lines(x=[0, 0], y=[0, 0], visible=False, stroke_width=1.0,
                                 line_style='solid', colors=['blue'],
                                 scales={'x': x_sc, 'y': y_sc})
    ratio = 0.25
    layout = ipw.Layout(height="800px", width="200px")
    m_fig = dict(left=45, top=60, bottom=60, right=40)
    fig = bq.Figure(axes=[ax_x, ax_y], marks=[lines, homo_line, eff_mass_parabola],
                    title='Spin %i'%ispin, layout=layout, fig_margin=m_fig,
                    min_aspect_ratio=ratio, max_aspect_ratio=ratio)

    def on_band_click(self, target):
        # Record which band/spin was clicked and refresh the dependent views
        global selected_spin, selected_band
        selected_spin = ispin
        selected_band = target['data']['index']
        on_band_change()

    lines.on_element_click(on_band_click)

    save_btn = ipw.Button(description="Download png")
    save_btn.on_click(lambda b: fig.save_png()) # save_png() does not work with unicode labels
    igor_link = mk_igor_link(ispin)

    layout = ipw.Layout(align_items="center", padding="5px", margin="0px")
    box = ipw.VBox([fig, save_btn, igor_link], layout=layout)
    return box, lines, eff_mass_parabola

# +
def mk_igor_link(ispin):
    """Return an HTML widget with a data-URI download link for the Igor export."""
    igorvalue = igor_bands(ispin)
    igorfile = b64encode(igorvalue)
    filename = ase_struct.get_chemical_formula() + "_bands_spin%d_pk%d.itx" % (ispin, structure.pk)
    html = '<a download="{}" href="'.format(filename)
    html += 'data:chemical/x-igor;name={};base64,{}"'.format(filename, igorfile)
    html += ' id="pdos_link"'
    html += ' target="_blank">Export itx-Bands</a>'
    return ipw.HTML(html)

def igor_bands(ispin):
    """Serialize the band structure of one spin channel in Igor Pro .itx format."""
    k_axis = np.linspace(0.0, np.pi / structure.cell_lengths[0], nkpoints)
    testio = io.StringIO()
    tosave = bands[ispin,:,:].transpose() - vacuum_level
    #with open('igor_bands-'+pk+'.itx', 'w') as f:
    with testio as f:
        f.write(u'IGOR\r')
        f.write(u'WAVES')
        f.write(u'\tx1'+(u'\ty{}'*nbands).format(*[x for x in range(nbands)])+u'\r')
        f.write(u'BEGIN\r')
        for i in range(nkpoints):
            f.write(u"\t{:.7f}".format(k_axis[i])) # first column k_axis
            f.write((u"\t{:.7f}"*nbands).format(*tosave[:,i])) # other columns the bands
            f.write(u"\r")
        f.write(u"END\r")
        f.write(u'X SetScale/P x {},{},"", x1; SetScale y 0,0,"", x1\r'.format(0, k_axis[1]-k_axis[0]))
        for idk in range(nbands):
            f.write((u'X SetScale/P x 0,1,"", y{0}; SetScale y 0,0,"", y{0}\r').format(str(idk)))
        # Must read the buffer before the `with` block closes it
        return testio.getvalue()
# -

def read_cube(fn):
    """Parse a gzipped Gaussian cube file into a dict of origin/step/data."""
    lines = gzip.open(fn).readlines()
    header = np.fromstring("".join(lines[2:6]), sep=' ').reshape(4,4)
    natoms, nx, ny, nz = header[:,0].astype(int)
    cube = dict()
    cube['x0'] = header[0,1] # x origin
    cube['y0'] = header[0,2] # y origin
    cube['z0'] = header[0,3] # z origin
    cube['dx'] = header[1,1] # x step size
    cube['dy'] = header[2,2] # y step size
    cube['dz'] = header[3,3] # z step size
    cube['data'] = np.fromstring("".join(lines[natoms+6:]), sep=' ').reshape(nx, ny, nz)
    return cube

def on_band_change():
    """React to a band selection: recolor plots, find cube files, fit m*."""
    global selected_cube_files
    with info_out:
        clear_output()
        print("selected spin: %d"%selected_spin)
        print("selected band: %d"%selected_band)

        # Highlight the selected band in red, all others gray
        colors = np.zeros((nspins, nbands))
        colors[selected_spin, selected_band] = 1.0
        for ispin in range(nspins):
            band_plots[ispin].color = colors[ispin,:]

        # orbitals_calcs might use fewer nkpoints than bands_calc
        prev_calc = orbitals_calcs[0].inp.parent_calc_folder.inp.remote_folder
        nkpoints_lowres = prev_calc.res.number_of_k_points

        # k-point files of the two spin channels are numbered consecutively
        lower = nkpoints_lowres * selected_spin
        upper = lower + nkpoints_lowres
        selected_cube_files = []
        for fn in sorted([ fdr for orbitals_calc in orbitals_calcs
                           for fdr in orbitals_calc.out.retrieved.get_folder_list() ]):
            m = re.match("aiida.filplot_K(\d\d\d)_B(\d\d\d)_orbital.cube.gz", fn)
            if not m:
                continue
            k, b = int(m.group(1)), int(m.group(2))
            if b != selected_band + 1:
                continue
            if lower < k and k <= upper:
                selected_cube_files.append(fn)

        n = len(selected_cube_files)
        kpoint_slider.max = max(n, 1)
        print("found %d cube files"%n)
        on_kpoint_change(None)

        ### -------------------------------------------
        ### Effective mass calculation and parabola plotting
        meff, parabola_fit, fit_kvals, fit_energies = calc_effective_mass(ispin=selected_spin)
        print("effective mass: %f"%meff)

        parab_k_arr = np.linspace(np.min(fit_kvals), np.max(fit_kvals), 20)
        parab_e_arr = parabola_fit[0]*parab_k_arr**2 + parabola_fit[1]*parab_k_arr + parabola_fit[2]
        eff_mass_parabolas[selected_spin].x = parab_k_arr
        eff_mass_parabolas[selected_spin].y = parab_e_arr
        eff_mass_parabolas[selected_spin].visible = True
        # Hide the parabola of the other spin channel, if any
        if nspins > 1:
            eff_mass_parabolas[(selected_spin+1)%2].visible = False
        ### -------------------------------------------

def on_kpoint_change(c):
    """Load the cube file for the currently selected k-point and refresh the plot."""
    global selected_cube
    with kpnt_out:
        clear_output()
        i = kpoint_slider.value
        if i > len(selected_cube_files):
            print("Found no cube files")
            selected_cube = None
            height_slider.options = {"---":0}
        else:
            fn = selected_cube_files[i-1]
            print(fn)
            # The file may live in any of the export_orbitals calculations
            for orbitals_calc in orbitals_calcs:
                try:
                    absfn = orbitals_calc.out.retrieved.get_abs_path(fn)
                except:
                    continue
                selected_cube = read_cube(absfn)
                nz = selected_cube['data'].shape[2]
                z0 = selected_cube['z0']
                dz = selected_cube['dz']
                zmid = structure.cell_lengths[2] / 2.0
                # Height options in Angstrom relative to the slab center
                # (0.529177 converts Bohr to Angstrom). Renamed the loop
                # variable from `i` to avoid shadowing the slider value above.
                options = OrderedDict()
                for iz in range(nz):
                    z = (z0 + dz*iz) * 0.529177 - zmid
                    options[u"%.3f Å"%z] = iz
                height_slider.options = options
                break
        on_orb_plot_change(None)

def on_orb_plot_change(c):
    """Redraw the orbital cross-section at the selected height and colormap range."""
    with orb_out:
        clear_output()
        if selected_cube is None:
            return
        fig, ax = plt.subplots()
        fig.dpi = 150.0
        vmin = 10 ** colormap_slider.value[0]
        vmax = 10 ** colormap_slider.value[1]
        cax = plot_cube(ax, selected_cube, height_slider.value, 'gray', vmin, vmax)
        fig.colorbar(cax, label='e/bohr^3', ticks=[vmin, vmax], format='%.0e',
                     orientation='horizontal', shrink=0.3)
        plot_overlay_struct(ax, orb_alpha_slider.value)
        plt.show()

def plot_cube(ax, cube, z, cmap, vmin=-1, vmax=+1):
    """Draw one z-slice of a cube file (tiled twice along x) onto `ax`."""
    assert cube['x0'] == 0.0 and cube['y0'] == 0.0
    a = np.flip(cube['data'][:,:,z].transpose(), axis=0)
    aa = np.tile(a, (1, 2))
    x2 = cube['dx'] * aa.shape[1] * 0.529177
    y2 = cube['dy'] * aa.shape[0] * 0.529177
    ax.set_xlabel(u'Å')
    ax.set_ylabel(u'Å')
    ax.set_xlim(0, x2)
    ax.set_ylim(0, y2)
    cax = ax.imshow(aa, extent=[0,x2,0,y2], cmap=cmap, vmin=vmin, vmax=vmax)
    return cax

def plot_overlay_struct(ax, alpha):
    """Overlay the atomic structure (atoms + bonds) on `ax` with given opacity."""
    if alpha == 0:
        return
    # plot overlayed structure (repeated twice along x to match plot_cube)
    s = ase_struct.repeat((2,1,1))
    cov_radii = [covalent_radii[a.number] for a in s]
    nl = NeighborList(cov_radii, bothways = True, self_interaction = False)
    nl.update(s)
    for at in s:
        #circles
        x,y,z = at.position
        n = atomic_numbers[at.symbol]
        ax.add_artist(plt.Circle((x,y), covalent_radii[n]*0.5, color=cpk_colors[n],
                                 fill=True, clip_on=True, alpha=alpha))
        #bonds
        nlist = nl.get_neighbors(at.index)[0]
        for theneig in nlist:
            x,y,z = (s[theneig].position + at.position)/2
            x0,y0,z0 = at.position
            if (x-x0)**2 + (y-y0)**2 < 2 :
                ax.plot([x0,x],[y0,y],color=cpk_colors[n],linewidth=2,linestyle='-', alpha=alpha)

# +
# Build one band figure per spin channel and collect the marks that the
# callbacks above need to update.
band_plots = []
boxes = []
eff_mass_parabolas = []
for ispin in range(nspins):
    box, plot, eff_mass_parabola = plot_spin(ispin)
    boxes.append(box)
    band_plots.append(plot)
    eff_mass_parabolas.append(eff_mass_parabola)

layout = ipw.Layout(padding="5px", margin="0px")
info_out = ipw.Output(layout=layout)
kpnt_out = ipw.Output(layout=layout)
orb_out = ipw.Output(layout=layout)

layout = ipw.Layout(width="400px")

### -----------------------------
### Slider to control how many points of the band to use for parabolic fit
# Odd values of fit have better accuracy, so it's worth it to disable even values
efm_fit_slider = ipw.IntSlider(description="eff. mass fit", min=3, max=15, step=2,
                               continuous_update=False, layout=layout)
# Only if a band is selected, selecting a new effective mass fit will update the plot and infopanel
on_efm_fit_change = lambda c: on_band_change() if 'selected_spin' in globals() else None
efm_fit_slider.observe(on_efm_fit_change, names='value')
### -----------------------------

kpoint_slider = ipw.IntSlider(description="k-point", min=1, max=1,
                              continuous_update=False, layout=layout)
kpoint_slider.observe(on_kpoint_change, names='value')

height_slider = ipw.SelectionSlider(description="height", options={"---":0},
                                    continuous_update=False, layout=layout)
height_slider.observe(on_orb_plot_change, names='value')

orb_alpha_slider = ipw.FloatSlider(description="opacity", value=0.5, max=1.0,
                                   continuous_update=False, layout=layout)
orb_alpha_slider.observe(on_orb_plot_change, names='value')

colormap_slider = ipw.IntRangeSlider(description='colormap', min=-10, max=-1,
                                     value=[-6, -3], continuous_update=False, layout=layout)
colormap_slider.observe(on_orb_plot_change, names='value')

layout = ipw.Layout(align_items="center")
side_box = ipw.VBox([info_out, efm_fit_slider, kpoint_slider, height_slider,
                     orb_alpha_slider, colormap_slider, kpnt_out, orb_out], layout=layout)
boxes.append(side_box)
display(ipw.HBox(boxes))
# -

# ## Spin Density

# +
try:
    spinden_calc = get_calc_by_label(workcalc, "export_spinden")
except:
    # Spin density is optional (absent for spin-unpolarized runs)
    spinden_calc = None
    print("Could not find spin density")

if spinden_calc:
    fn = spinden_calc.out.retrieved.get_abs_path("_spin.cube.gz")
    spinden_cube = read_cube(fn)
    spinden_cube['data'] *= 2000 # normalize scale

    def on_spinden_plot_change(c):
        with spinden_out:
            clear_output()
            fig, ax = plt.subplots()
            fig.dpi = 150.0
            cax = plot_cube(ax, spinden_cube, 1, 'seismic')
            fig.colorbar(cax, label='arbitrary unit')
            plot_overlay_struct(ax, spinden_alpha_slider.value)
            plt.show()

    spinden_alpha_slider = ipw.FloatSlider(description="opacity", value=0.5, max=1.0,
                                           continuous_update=False)
    spinden_alpha_slider.observe(on_spinden_plot_change, names='value')
    spinden_out = ipw.Output()
    display(spinden_out, spinden_alpha_slider)
    on_spinden_plot_change(None)
# -

## m*
def calc_effective_mass(ispin):
    """Fit a parabola around the band extremum and return the effective mass.

    Uses the band selected via `selected_band` and the fit width from
    `efm_fit_slider`. Returns (meff, parabola_fit, fit_kvals, fit_energies),
    where fit_kvals/fit_energies are restricted to the first Brillouin zone.
    """
    # m* = hbar^2*[d^2E/dk^2]^-1
    hbar = const.value('Planck constant over 2 pi in eV s')
    el_mass = const.m_e*1e-20/const.eV # in eV*s^2/ang^2
    band = bands[ispin].transpose()[selected_band] - vacuum_level
    k_axis = np.linspace(0.0, np.pi / structure.cell_lengths[0], nkpoints)
    num_fit_points = efm_fit_slider.value

    if np.amax(band) >= lumo:
        # conduction band, let's search for effective electron mass (lowest point in energy)
        parabola_ind = np.argmin(band)
    else:
        # valence band, effective hole mass (highest point in energy)
        parabola_ind = np.argmax(band)

    # extend band and k values to neighbouring regions (mirror symmetry),
    # so the fit window can straddle the zone boundary
    band_ext = np.concatenate([np.flip(band, 0)[:-1], band, np.flip(band, 0)[1:]])
    k_vals_ext = np.concatenate([-np.flip(k_axis, 0)[:-1], k_axis, k_axis[-1] + k_axis[1:]])

    # define fitting region centered on the extremum
    i_min = parabola_ind - int(np.ceil(num_fit_points/2.0)) + len(band)
    i_max = parabola_ind + int(np.floor(num_fit_points/2.0)) + len(band)

    fit_energies = band_ext[i_min:i_max]
    fit_kvals = k_vals_ext[i_min:i_max]

    parabola_fit = np.polyfit(fit_kvals, fit_energies, 2)
    meff = hbar**2/(2*parabola_fit[0])/el_mass

    # restrict fitting values to "main region"
    main_region_mask = (fit_kvals >= k_axis[0]) & (fit_kvals <= k_axis[-1])
    fit_energies = fit_energies[main_region_mask]
    fit_kvals = fit_kvals[main_region_mask]

    return meff, parabola_fit, fit_kvals, fit_energies
nanoribbon/show.ipynb